path: root/arch/arm64/kvm/pmu.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Arm Limited
 * Author: Andrew Murray <Andrew.Murray@arm.com>
 */
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/kvm_hyp.h>

/*
 * Given the perf event attributes and system type, determine
 * if we are going to need to switch counters at guest entry/exit.
 */
static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
{
	/*
	 * With VHE the guest kernel runs at EL1 and the host at EL2.
	 * If EL0 (user) is excluded there is no reason to switch
	 * counters.
	 */
	if (has_vhe() && attr->exclude_user)
		return false;

	/* Only switch if attributes are different */
	return (attr->exclude_host != attr->exclude_guest);
}
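
/*
 * Illustration (derived from the check above, not from any caller): an
 * event that counts in only one world, e.g. exclude_host = 1 and
 * exclude_guest = 0 for a guest-only event, requires counter state to
 * be switched at guest entry/exit, whereas an event with
 * exclude_host == exclude_guest counts (or not) in both worlds and
 * needs no switching.
 */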

/*
 * Record events that may need to be switched at guest entry/exit
 * time.
 */
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);

	if (!ctx || !kvm_pmu_switch_needed(attr))
		return;

	if (!attr->exclude_host)
		ctx->pmu_events.events_host |= set;
	if (!attr->exclude_guest)
		ctx->pmu_events.events_guest |= set;
}

/*
 * Stop tracking events
 */
void kvm_clr_pmu_events(u32 clr)
{
	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);

	if (!ctx)
		return;

	ctx->pmu_events.events_host &= ~clr;
	ctx->pmu_events.events_guest &= ~clr;
}
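
/*
 * Usage sketch (illustrative only; the real callers live in the host
 * PMU driver, outside this file): when the host enables event counter
 * <idx> it is expected to pass a single-counter mask, e.g.
 *
 *	kvm_set_pmu_events(BIT(idx), attr);
 *
 * and to clear the same bit again when the counter is released:
 *
 *	kvm_clr_pmu_events(BIT(idx));
 *
 * The accumulated events_host/events_guest masks are consumed by
 * kvm_vcpu_pmu_restore_{guest,host}() below.
 */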

#define PMEVTYPER_READ_CASE(idx)				\
	case idx:						\
		return read_sysreg(pmevtyper##idx##_el0)

#define PMEVTYPER_WRITE_CASE(idx)				\
	case idx:						\
		write_sysreg(val, pmevtyper##idx##_el0);	\
		break

#define PMEVTYPER_CASES(readwrite)				\
	PMEVTYPER_##readwrite##_CASE(0);			\
	PMEVTYPER_##readwrite##_CASE(1);			\
	PMEVTYPER_##readwrite##_CASE(2);			\
	PMEVTYPER_##readwrite##_CASE(3);			\
	PMEVTYPER_##readwrite##_CASE(4);			\
	PMEVTYPER_##readwrite##_CASE(5);			\
	PMEVTYPER_##readwrite##_CASE(6);			\
	PMEVTYPER_##readwrite##_CASE(7);			\
	PMEVTYPER_##readwrite##_CASE(8);			\
	PMEVTYPER_##readwrite##_CASE(9);			\
	PMEVTYPER_##readwrite##_CASE(10);			\
	PMEVTYPER_##readwrite##_CASE(11);			\
	PMEVTYPER_##readwrite##_CASE(12);			\
	PMEVTYPER_##readwrite##_CASE(13);			\
	PMEVTYPER_##readwrite##_CASE(14);			\
	PMEVTYPER_##readwrite##_CASE(15);			\
	PMEVTYPER_##readwrite##_CASE(16);			\
	PMEVTYPER_##readwrite##_CASE(17);			\
	PMEVTYPER_##readwrite##_CASE(18);			\
	PMEVTYPER_##readwrite##_CASE(19);			\
	PMEVTYPER_##readwrite##_CASE(20);			\
	PMEVTYPER_##readwrite##_CASE(21);			\
	PMEVTYPER_##readwrite##_CASE(22);			\
	PMEVTYPER_##readwrite##_CASE(23);			\
	PMEVTYPER_##readwrite##_CASE(24);			\
	PMEVTYPER_##readwrite##_CASE(25);			\
	PMEVTYPER_##readwrite##_CASE(26);			\
	PMEVTYPER_##readwrite##_CASE(27);			\
	PMEVTYPER_##readwrite##_CASE(28);			\
	PMEVTYPER_##readwrite##_CASE(29);			\
	PMEVTYPER_##readwrite##_CASE(30)

/*
 * Read a value directly from PMEVTYPER<idx> where idx is 0-30,
 * or from PMCCFILTR_EL0 when idx is ARMV8_PMU_CYCLE_IDX (31).
 * The sysreg accessors need the register name at compile time, which
 * is why PMEVTYPER_CASES expands to one case per counter index.
 */
static u64 kvm_vcpu_pmu_read_evtype_direct(int idx)
{
	switch (idx) {
	PMEVTYPER_CASES(READ);
	case ARMV8_PMU_CYCLE_IDX:
		return read_sysreg(pmccfiltr_el0);
	default:
		WARN_ON(1);
	}

	return 0;
}

/*
 * Write a value directly to PMEVTYPER<idx> where idx is 0-30,
 * or to PMCCFILTR_EL0 when idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val)
{
	switch (idx) {
	PMEVTYPER_CASES(WRITE);
	case ARMV8_PMU_CYCLE_IDX:
		write_sysreg(val, pmccfiltr_el0);
		break;
	default:
		WARN_ON(1);
	}
}

/*
 * Modify ARMv8 PMU events to include EL0 counting
 */
static void kvm_vcpu_pmu_enable_el0(unsigned long events)
{
	u64 typer;
	u32 counter;

	for_each_set_bit(counter, &events, 32) {
		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
		typer &= ~ARMV8_PMU_EXCLUDE_EL0;
		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
	}
}

/*
 * Modify ARMv8 PMU events to exclude EL0 counting
 */
static void kvm_vcpu_pmu_disable_el0(unsigned long events)
{
	u64 typer;
	u32 counter;

	for_each_set_bit(counter, &events, 32) {
		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
		typer |= ARMV8_PMU_EXCLUDE_EL0;
		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
	}
}
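
/*
 * Note: ARMV8_PMU_EXCLUDE_EL0 is the EL0 filter bit (the "U" bit) of
 * PMEVTYPER<n>_EL0/PMCCFILTR_EL0; setting it stops the counter from
 * counting at EL0 and clearing it re-enables EL0 counting. The two
 * helpers above therefore read-modify-write only that bit and leave
 * the event selection untouched.
 */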

/*
 * On VHE ensure that only guest events have EL0 counting enabled.
 * This is called from both vcpu_{load,put} and the sysreg handling.
 * Since the latter is preemptible, special care must be taken to
 * disable preemption.
 */
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_host_data *host;
	u32 events_guest, events_host;

	if (!has_vhe())
		return;

	preempt_disable();
	host = this_cpu_ptr_hyp_sym(kvm_host_data);
	events_guest = host->pmu_events.events_guest;
	events_host = host->pmu_events.events_host;

	kvm_vcpu_pmu_enable_el0(events_guest);
	kvm_vcpu_pmu_disable_el0(events_host);
	preempt_enable();
}

/*
 * On VHE ensure that only host events have EL0 counting enabled
 */
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
	struct kvm_host_data *host;
	u32 events_guest, events_host;

	if (!has_vhe())
		return;

	host = this_cpu_ptr_hyp_sym(kvm_host_data);
	events_guest = host->pmu_events.events_guest;
	events_host = host->pmu_events.events_host;

	kvm_vcpu_pmu_enable_el0(events_host);
	kvm_vcpu_pmu_disable_el0(events_guest);
}
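
/*
 * Overall flow, as a sketch based on the comments above (the callers
 * live outside this file): on the way into the guest the vcpu_load
 * path calls kvm_vcpu_pmu_restore_guest() so that only guest events
 * count at EL0, and on the way back out the vcpu_put path calls
 * kvm_vcpu_pmu_restore_host() to hand EL0 counting back to the host
 * events. On non-VHE systems both helpers return early and the
 * counter switch is handled in the hyp world-switch code instead.
 */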