// SPDX-License-Identifier: GPL-2.0
/*
* Channel path related status regions for vfio_ccw
*
* Copyright IBM Corp. 2020
*
* Author(s): Farhan Ali <alifm@linux.ibm.com>
* Eric Farman <farman@linux.ibm.com>
*/
#include <linux/slab.h>
#include <linux/vfio.h>
#include "vfio_ccw_private.h"
static ssize_t vfio_ccw_schib_region_read(struct vfio_ccw_private *private,
char __user *buf, size_t count,
loff_t *ppos)
{
unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
struct ccw_schib_region *region;
int ret;
if (pos + count > sizeof(*region))
return -EINVAL;
mutex_lock(&private->io_mutex);
region = private->region[i].data;
if (cio_update_schib(private->sch)) {
ret = -ENODEV;
goto out;
}
memcpy(region, &private->sch->schib, sizeof(*region));
if (copy_to_user(buf, (void *)region + pos, count)) {
ret = -EFAULT;
goto out;
}
ret = count;
out:
mutex_unlock(&private->io_mutex);
return ret;
}
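
/* The SCHIB region is exposed read-only, so all writes are rejected */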
static ssize_t vfio_ccw_schib_region_write(struct vfio_ccw_private *private,
const char __user *buf, size_t count,
loff_t *ppos)
{
return -EINVAL;
}
static void vfio_ccw_schib_region_release(struct vfio_ccw_private *private,
struct vfio_ccw_region *region)
{
}
static const struct vfio_ccw_regops vfio_ccw_schib_region_ops = {
.read = vfio_ccw_schib_region_read,
.write = vfio_ccw_schib_region_write,
.release = vfio_ccw_schib_region_release,
};
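
/* Register the read-only SCHIB region as a vfio_ccw device region */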
int vfio_ccw_register_schib_dev_regions(struct vfio_ccw_private *private)
{
return vfio_ccw_register_dev_region(private,
VFIO_REGION_SUBTYPE_CCW_SCHIB,
&vfio_ccw_schib_region_ops,
sizeof(struct ccw_schib_region),
VFIO_REGION_INFO_FLAG_READ,
private->schib_region);
}
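
/*
 * Read handler for the CRW region: dequeue the oldest pending channel
 * report word (CRW), stage it in the region buffer, and copy the
 * requested range out to userspace.
 */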
static ssize_t vfio_ccw_crw_region_read(struct vfio_ccw_private *private,
char __user *buf, size_t count,
loff_t *ppos)
{
unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
struct ccw_crw_region *region;
struct vfio_ccw_crw *crw;
int ret;
if (pos + count > sizeof(*region))
return -EINVAL;
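
	/* Take the oldest pending CRW off the list, if one is queued */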
crw = list_first_entry_or_null(&private->crw,
struct vfio_ccw_crw, next);
if (crw)
list_del(&crw->next);
mutex_lock(&private->io_mutex);
region = private->region[i].data;
if (crw)
	memcpy(&region->crw, &crw->crw, sizeof(region->crw));
if (copy_to_user(buf, (void *)region + pos, count))
ret = -EFAULT;
else
ret = count;
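
	/* The staged CRW has been consumed; clear it out of the region */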
region->crw = 0;
mutex_unlock(&private->io_mutex);
kfree(crw);
/* Notify the guest if more CRWs are on our queue */
if (!list_empty(&private->crw) && private->crw_trigger)
eventfd_signal(private->crw_trigger, 1);
return ret;
}
static ssize_t vfio_ccw_crw_region_write(struct vfio_ccw_private *private,
const char __user *buf, size_t count,
loff_t *ppos)
{
return -EINVAL;
}
static void vfio_ccw_crw_region_release(struct vfio_ccw_private *private,
struct vfio_ccw_region *region)
{
}
static const struct vfio_ccw_regops vfio_ccw_crw_region_ops = {
.read = vfio_ccw_crw_region_read,
.write = vfio_ccw_crw_region_write,
.release = vfio_ccw_crw_region_release,
};
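
/* Register the read-only CRW region as a vfio_ccw device region */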
int vfio_ccw_register_crw_dev_regions(struct vfio_ccw_private *private)
{
return vfio_ccw_register_dev_region(private,
VFIO_REGION_SUBTYPE_CCW_CRW,
&vfio_ccw_crw_region_ops,
sizeof(struct ccw_crw_region),
VFIO_REGION_INFO_FLAG_READ,
private->crw_region);
}