path: root/drivers/xen/hypervisor.c
// SPDX-License-Identifier: MIT License
/*
 * hypervisor.c
 *
 * Communication to/from hypervisor.
 *
 * Copyright (c) 2002-2003, K A Fraser
 * Copyright (c) 2005, Grzegorz Milos, gm281@cam.ac.uk, Intel Research Cambridge
 * Copyright (c) 2020, EPAM Systems Inc.
 */
#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <memalign.h>

#include <asm/io.h>
#include <asm/armv8/mmu.h>
#include <asm/xen/system.h>

#include <linux/bug.h>

#include <xen/hvm.h>
#include <xen/events.h>
#include <xen/gnttab.h>
#include <xen/xenbus.h>
#include <xen/interface/memory.h>

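/* Event channels in the idx'th selector word that are pending and not masked */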
#define active_evtchns(cpu, sh, idx)	\
	((sh)->evtchn_pending[idx] &	\
	 ~(sh)->evtchn_mask[idx])

int in_callback;

/*
 * Shared page for communicating with the hypervisor.
 * Events flags go here, for example.
 */
struct shared_info *HYPERVISOR_shared_info;

static const char *param_name(int op)
{
#define PARAM(x) [HVM_PARAM_##x] = #x
	static const char *const names[] = {
		PARAM(CALLBACK_IRQ),
		PARAM(STORE_PFN),
		PARAM(STORE_EVTCHN),
		PARAM(PAE_ENABLED),
		PARAM(IOREQ_PFN),
		PARAM(VPT_ALIGN),
		PARAM(CONSOLE_PFN),
		PARAM(CONSOLE_EVTCHN),
	};
#undef PARAM

	if (op >= ARRAY_SIZE(names))
		return "unknown";

	if (!names[op])
		return "reserved";

	return names[op];
}

/**
 * hvm_get_parameter_maintain_dcache - obtain an HVM parameter value
 * @idx: HVM parameter index
 * @value: Value to fill in
 *
 * According to the Xen on ARM ABI (xen/include/public/arch-arm.h),
 * all memory shared with other entities in the system (including the
 * hypervisor and other guests) must reside in memory mapped as Normal
 * Inner Write-Back Outer Write-Back Inner-Shareable.
 *
 * Thus, page attributes must be set identically by all entities
 * working with that page.
 *
 * Before MMU setup the data cache is turned off, so the guest's view
 * of this memory differs from the hypervisor's; manual data cache
 * maintenance around the hypercall is therefore required.
 */
int hvm_get_parameter_maintain_dcache(int idx, uint64_t *value)
{
	struct xen_hvm_param xhv;
	int ret;

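	/*
	 * With the data cache off, make sure no stale cache lines cover
	 * @xhv before its fields are written and before the result is
	 * read back after the hypercall.
	 */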
	invalidate_dcache_range((unsigned long)&xhv,
				(unsigned long)&xhv + sizeof(xhv));
	xhv.domid = DOMID_SELF;
	xhv.index = idx;
	invalidate_dcache_range((unsigned long)&xhv,
				(unsigned long)&xhv + sizeof(xhv));

	ret = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
	if (ret < 0) {
		pr_err("Cannot get hvm parameter %s (%d): %d!\n",
			   param_name(idx), idx, ret);
		BUG();
	}
	invalidate_dcache_range((unsigned long)&xhv,
				(unsigned long)&xhv + sizeof(xhv));

	*value = xhv.value;

	return ret;
}

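/**
 * hvm_get_parameter - obtain an HVM parameter value
 * @idx: HVM parameter index
 * @value: Value to fill in
 *
 * Same as hvm_get_parameter_maintain_dcache(), but without the manual
 * cache maintenance, for use once the MMU and data cache are enabled.
 * Illustrative use, e.g. to locate the console ring:
 *
 *	uint64_t pfn;
 *
 *	hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &pfn);
 */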
int hvm_get_parameter(int idx, uint64_t *value)
{
	struct xen_hvm_param xhv;
	int ret;

	xhv.domid = DOMID_SELF;
	xhv.index = idx;
	ret = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
	if (ret < 0) {
		pr_err("Cannot get hvm parameter %s (%d): %d!\n",
			   param_name(idx), idx, ret);
		BUG();
	}

	*value = xhv.value;

	return ret;
}

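/*
 * Allocate a page-aligned page and ask Xen to back it with the
 * shared_info frame by adding it to the physmap with
 * XENMAPSPACE_shared_info.
 */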
struct shared_info *map_shared_info(void *p)
{
	struct xen_add_to_physmap xatp;

	HYPERVISOR_shared_info = (struct shared_info *)memalign(PAGE_SIZE,
								PAGE_SIZE);
	if (!HYPERVISOR_shared_info)
		BUG();

	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp) != 0)
		BUG();

	return HYPERVISOR_shared_info;
}

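/*
 * Undo map_shared_info(): remove the shared_info frame from the
 * physmap, plug the resulting hole with a freshly populated RAM page,
 * then hand the page back to the allocator.
 */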
void unmap_shared_info(void)
{
	xen_pfn_t shared_info_pfn = virt_to_pfn(HYPERVISOR_shared_info);
	struct xen_remove_from_physmap xrfp = {0};
	struct xen_memory_reservation reservation = {0};
	xen_ulong_t nr_exts = 1;

	xrfp.domid = DOMID_SELF;
	xrfp.gpfn = shared_info_pfn;
	if (HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrfp) != 0)
		panic("Failed to unmap HYPERVISOR_shared_info\n");

	/*
	 * Removing the page from the physmap leaves a hole in the address
	 * space at HYPERVISOR_shared_info. To safely free the memory
	 * allocated with memalign() and to avoid faults on later accesses
	 * to this page, fill the 4KB hole via XENMEM_populate_physmap
	 * before jumping to Linux.
	 */
	reservation.domid = DOMID_SELF;
	reservation.extent_order = 0;
	reservation.address_bits = 0;
	set_xen_guest_handle(reservation.extent_start, &shared_info_pfn);
	reservation.nr_extents = nr_exts;
	if (HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation) != nr_exts)
		panic("Failed to populate memory on HYPERVISOR_shared_info addr\n");

	/* Now we can return this to memory allocator */
	free(HYPERVISOR_shared_info);
}

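/*
 * Event channel upcall handler: walk the two-level pending bitmap.
 * Each bit set in the per-vCPU selector word picks a word of
 * evtchn_pending to scan; every pending, unmasked port found there is
 * dispatched through do_event().
 */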
void do_hypervisor_callback(struct pt_regs *regs)
{
	unsigned long l1, l2, l1i, l2i;
	unsigned int port;
	int cpu = 0;
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = &s->vcpu_info[cpu];

	in_callback = 1;

	vcpu_info->evtchn_upcall_pending = 0;
	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);

	while (l1 != 0) {
		l1i = __ffs(l1);
		l1 &= ~(1UL << l1i);

		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
			l2i = __ffs(l2);
			l2 &= ~(1UL << l2i);

			port = (l1i * (sizeof(unsigned long) * 8)) + l2i;
			do_event(port, regs);
		}
	}

	in_callback = 0;
}

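/*
 * Synchronously process pending upcalls. Where XEN_HAVE_PV_UPCALL_MASK
 * is available, upcalls are masked while the callback runs and the
 * previous mask is restored afterwards.
 */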
void force_evtchn_callback(void)
{
#ifdef XEN_HAVE_PV_UPCALL_MASK
	int save;
#endif
	struct vcpu_info *vcpu;

	vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];
#ifdef XEN_HAVE_PV_UPCALL_MASK
	save = vcpu->evtchn_upcall_mask;
#endif

	while (vcpu->evtchn_upcall_pending) {
#ifdef XEN_HAVE_PV_UPCALL_MASK
		vcpu->evtchn_upcall_mask = 1;
#endif
		do_hypervisor_callback(NULL);
#ifdef XEN_HAVE_PV_UPCALL_MASK
		vcpu->evtchn_upcall_mask = save;
#endif
	}
}

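/* Prevent further upcalls for @port by setting its mask bit */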
void mask_evtchn(uint32_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;

	synch_set_bit(port, &s->evtchn_mask[0]);
}

void unmask_evtchn(uint32_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = &s->vcpu_info[smp_processor_id()];

	synch_clear_bit(port, &s->evtchn_mask[0]);

	/*
	 * Just like a real IO-APIC we 'lose the interrupt edge' if the
	 * channel is masked.
	 */
	if (synch_test_bit(port, &s->evtchn_pending[0]) &&
	    !synch_test_and_set_bit(port / (sizeof(unsigned long) * 8),
				    &vcpu_info->evtchn_pending_sel)) {
		vcpu_info->evtchn_upcall_pending = 1;
#ifdef XEN_HAVE_PV_UPCALL_MASK
		if (!vcpu_info->evtchn_upcall_mask)
#endif
			force_evtchn_callback();
	}
}

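/* Acknowledge an event by clearing its pending bit */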
void clear_evtchn(uint32_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;

	synch_clear_bit(port, &s->evtchn_pending[0]);
}

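/*
 * Bring up the Xen transports in dependency order: the shared info
 * page first, then event channels, xenbus and the grant table.
 */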
int xen_init(void)
{
	debug("%s\n", __func__);

	map_shared_info(NULL);
	init_events();
	init_xenbus();
	init_gnttab();

	return 0;
}

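/* Tear everything down in reverse initialization order */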
void xen_fini(void)
{
	debug("%s\n", __func__);

	fini_gnttab();
	fini_xenbus();
	fini_events();
	unmap_shared_info();
}