aboutsummaryrefslogtreecommitdiff
path: root/kernel/events/hw_breakpoint_test.c
blob: c57610f52bb4d591495a8ea2d6b99698e9bd5f71 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit test for hw_breakpoint constraints accounting logic.
 *
 * Copyright (C) 2022, Google LLC.
 */

#include <kunit/test.h>
#include <linux/cpumask.h>
#include <linux/hw_breakpoint.h>
#include <linux/kthread.h>
#include <linux/perf_event.h>
#include <asm/hw_breakpoint.h>

/*
 * Skip the current test case unless the system provides at least @slots
 * data-breakpoint slots; some cases need several slots to exercise
 * constraint transitions.
 */
#define TEST_REQUIRES_BP_SLOTS(test, slots)						\
	do {										\
		if ((slots) > get_test_bp_slots()) {					\
			kunit_skip((test), "Requires breakpoint slots: %d > %d", slots,	\
				   get_test_bp_slots());				\
		}									\
	} while (0)

/* Expect that registering the breakpoint in @expr fails with -ENOSPC (no free slot). */
#define TEST_EXPECT_NOSPC(expr) KUNIT_EXPECT_EQ(test, -ENOSPC, PTR_ERR(expr))

/* Upper bound on breakpoints a test case may hold at once (size of test_bps[]). */
#define MAX_TEST_BREAKPOINTS 512

/* One byte per breakpoint index, so each test breakpoint watches a distinct address. */
static char break_vars[MAX_TEST_BREAKPOINTS];
/* Successfully registered events, indexed by the id passed to register_test_bp(). */
static struct perf_event *test_bps[MAX_TEST_BREAKPOINTS];
/* Lazily created secondary task target; see get_other_task(). */
static struct task_struct *__other_task;

/*
 * Register a 1-byte read/write data breakpoint on break_vars[idx] for the
 * given @cpu/@tsk target combination.
 *
 * Returns the new perf event, an ERR_PTR() on registration failure, or NULL
 * if @idx is out of range (with a WARN).
 */
static struct perf_event *register_test_bp(int cpu, struct task_struct *tsk, int idx)
{
	struct perf_event_attr attr = {};

	/* Guard against walking off the end of break_vars[]/test_bps[]. */
	if (WARN_ON(idx < 0 || idx >= MAX_TEST_BREAKPOINTS))
		return NULL;

	hw_breakpoint_init(&attr);
	attr.bp_type = HW_BREAKPOINT_RW;
	attr.bp_len = HW_BREAKPOINT_LEN_1;
	attr.bp_addr = (unsigned long)&break_vars[idx];

	return perf_event_create_kernel_counter(&attr, cpu, tsk, NULL, NULL);
}

/*
 * Unregister the breakpoint at *@bp and clear the slot. WARNs (and leaves
 * the slot untouched) if *@bp is an error pointer or NULL.
 */
static void unregister_test_bp(struct perf_event **bp)
{
	struct perf_event *event = *bp;

	/* Short-circuit keeps the original check order: IS_ERR first. */
	if (WARN_ON(IS_ERR(event)) || WARN_ON(!event))
		return;

	unregister_hw_breakpoint(event);
	*bp = NULL;
}

/* Number of per-CPU data-breakpoint slots; queried once and cached. */
static int get_test_bp_slots(void)
{
	static int slots;

	if (slots == 0)
		slots = hw_breakpoint_slots(TYPE_DATA);

	return slots;
}

/*
 * Register one breakpoint at index *@id for @cpu/@tsk, asserting success,
 * store it in test_bps[], and advance *@id to the next free index.
 */
static void fill_one_bp_slot(struct kunit *test, int *id, int cpu, struct task_struct *tsk)
{
	const int idx = *id;
	struct perf_event *event = register_test_bp(cpu, tsk, idx);

	KUNIT_ASSERT_NOT_NULL(test, event);
	KUNIT_ASSERT_FALSE(test, IS_ERR(event));
	KUNIT_ASSERT_NULL(test, test_bps[idx]);
	test_bps[idx] = event;
	*id = idx + 1;
}

/*
 * Fills up the given @cpu/@tsk with breakpoints, only leaving @skip slots free.
 *
 * Returns true if this can be called again, continuing at @id.
 */
static bool fill_bp_slots(struct kunit *test, int *id, int cpu, struct task_struct *tsk, int skip)
{
	const int to_fill = get_test_bp_slots() - skip;

	for (int i = 0; i < to_fill; ++i)
		fill_one_bp_slot(test, id, cpu, tsk);

	/* Another full round must still fit within test_bps[]. */
	return *id + get_test_bp_slots() <= MAX_TEST_BREAKPOINTS;
}

/*
 * Thread function for the dummy task used as a second breakpoint target.
 * The task is only created, never explicitly woken to do work (see
 * get_other_task()), so this does nothing.
 */
static int dummy_kthread(void *arg)
{
	return 0;
}

/*
 * Return a task distinct from current to use as a breakpoint target,
 * creating (but not waking) a dummy kthread on first use. Cached in
 * __other_task; torn down in test_exit().
 */
static struct task_struct *get_other_task(struct kunit *test)
{
	if (!__other_task) {
		struct task_struct *tsk;

		tsk = kthread_create(dummy_kthread, NULL, "hw_breakpoint_dummy_task");
		KUNIT_ASSERT_FALSE(test, IS_ERR(tsk));
		__other_task = tsk;
	}

	return __other_task;
}

/*
 * Return the @num'th online CPU (0-based). If fewer than @num + 1 CPUs are
 * online, returns the last one visited by the iterator.
 */
static int get_test_cpu(int num)
{
	int cpu;

	WARN_ON(num < 0);

	for_each_online_cpu(cpu) {
		if (num <= 0)
			break;
		num--;
	}

	return cpu;
}

/* ===== Test cases ===== */

/* Saturate one CPU's slots; then neither task nor CPU targets may fit there. */
static void test_one_cpu(struct kunit *test)
{
	int idx = 0;

	/* Use up every slot on the first online CPU with CPU-target breakpoints. */
	fill_bp_slots(test, &idx, get_test_cpu(0), NULL, 0);
	/* A task target on all CPUs (cpu == -1) also needs a slot on the full CPU. */
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	/* Another CPU-target breakpoint on the same CPU cannot fit either. */
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
}

/* Verify that slot accounting on one CPU does not consume slots on another. */
static void test_many_cpus(struct kunit *test)
{
	int idx = 0;
	int cpu;

	/* Test that CPUs are independent. */
	for_each_online_cpu(cpu) {
		/* Filling this CPU must succeed even though earlier CPUs are full... */
		bool do_continue = fill_bp_slots(test, &idx, cpu, NULL, 0);

		/* ...and only then does this CPU itself report -ENOSPC. */
		TEST_EXPECT_NOSPC(register_test_bp(cpu, NULL, idx));
		/* Stop before test_bps[] runs out of room for another CPU's worth. */
		if (!do_continue)
			break;
	}
}

/* Fill one task's slots across all CPUs (cpu == -1) and check nothing else fits. */
static void test_one_task_on_all_cpus(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, -1, current, 0);
	/* The task is full: no task-wide, per-CPU task, or CPU-only target fits. */
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/* Remove one and adding back CPU-target should work. */
	unregister_test_bp(&test_bps[0]);
	fill_one_bp_slot(test, &idx, get_test_cpu(0), NULL);
}

/* Two tasks with all-CPU targets: each gets its own full set of slots. */
static void test_two_tasks_on_all_cpus(struct kunit *test)
{
	int idx = 0;

	/* Test that tasks are independent. */
	fill_bp_slots(test, &idx, -1, current, 0);
	fill_bp_slots(test, &idx, -1, get_other_task(test), 0);

	/* With both tasks full, no target of any flavor can be registered. */
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(-1, get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/* Remove one from first task and adding back CPU-target should not work. */
	unregister_test_bp(&test_bps[0]);
	/* The second task still occupies all slots on every CPU. */
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
}

/* One task pinned to a single CPU: its constraints stay CPU-local. */
static void test_one_task_on_one_cpu(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, get_test_cpu(0), current, 0);
	/* Task-wide target still collides with the task's full CPU. */
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/*
	 * Remove one and adding back CPU-target should work; this case is
	 * special vs. above because the task's constraints are CPU-dependent.
	 */
	unregister_test_bp(&test_bps[0]);
	fill_one_bp_slot(test, &idx, get_test_cpu(0), NULL);
}

/*
 * One task with a mix of one CPU-pinned and several all-CPU breakpoints;
 * exercises the transition between CPU-dependent and CPU-independent
 * pinned counts. Needs at least 3 slots.
 */
static void test_one_task_mixed(struct kunit *test)
{
	int idx = 0;

	TEST_REQUIRES_BP_SLOTS(test, 3);

	/* One breakpoint pinned to CPU 0, the rest (all but one slot) task-wide. */
	fill_one_bp_slot(test, &idx, get_test_cpu(0), current);
	fill_bp_slots(test, &idx, -1, current, 1);
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));

	/* Transition from CPU-dependent pinned count to CPU-independent. */
	unregister_test_bp(&test_bps[0]);
	unregister_test_bp(&test_bps[1]);
	/* Freeing two of the task's slots must free exactly two on CPU 0... */
	fill_one_bp_slot(test, &idx, get_test_cpu(0), NULL);
	fill_one_bp_slot(test, &idx, get_test_cpu(0), NULL);
	/* ...and no more. */
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
}

/* Two tasks both pinned to the same CPU: only that CPU becomes saturated. */
static void test_two_tasks_on_one_cpu(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, get_test_cpu(0), current, 0);
	fill_bp_slots(test, &idx, get_test_cpu(0), get_other_task(test), 0);

	/* Nothing targeting CPU 0 — task-wide or otherwise — can fit anymore. */
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(-1, get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/* Can still create breakpoints on some other CPU. */
	fill_bp_slots(test, &idx, get_test_cpu(1), NULL, 0);
}

/* One task pinned to a CPU plus one all-CPU task: every CPU ends up full. */
static void test_two_tasks_on_one_all_cpus(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, get_test_cpu(0), current, 0);
	fill_bp_slots(test, &idx, -1, get_other_task(test), 0);

	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(-1, get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/* Cannot create breakpoints on some other CPU either. */
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(1), NULL, idx));
}

/*
 * Transition a task between all-CPU-only and mixed (all-CPU + one-CPU)
 * breakpoint targets, in both directions. Needs at least 3 slots.
 */
static void test_task_on_all_and_one_cpu(struct kunit *test)
{
	int tsk_on_cpu_idx, cpu_idx;
	int idx = 0;

	TEST_REQUIRES_BP_SLOTS(test, 3);

	/* Fill the task with all-CPU targets, leaving 2 slots free. */
	fill_bp_slots(test, &idx, -1, current, 2);
	/* Transitioning from only all CPU breakpoints to mixed. */
	tsk_on_cpu_idx = idx;
	fill_one_bp_slot(test, &idx, get_test_cpu(0), current);
	fill_one_bp_slot(test, &idx, -1, current);

	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));

	/* We should still be able to use up another CPU's slots. */
	cpu_idx = idx;
	fill_one_bp_slot(test, &idx, get_test_cpu(1), NULL);
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(1), NULL, idx));

	/* Transitioning back to task target on all CPUs. */
	unregister_test_bp(&test_bps[tsk_on_cpu_idx]);
	/* Still have a CPU target breakpoint in get_test_cpu(1). */
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	/* Remove it and try again. */
	unregister_test_bp(&test_bps[cpu_idx]);
	fill_one_bp_slot(test, &idx, -1, current);

	/* Everything is saturated again on all CPUs. */
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(1), NULL, idx));
}

/* All constraint-accounting test cases registered with the suite below. */
static struct kunit_case hw_breakpoint_test_cases[] = {
	KUNIT_CASE(test_one_cpu),
	KUNIT_CASE(test_many_cpus),
	KUNIT_CASE(test_one_task_on_all_cpus),
	KUNIT_CASE(test_two_tasks_on_all_cpus),
	KUNIT_CASE(test_one_task_on_one_cpu),
	KUNIT_CASE(test_one_task_mixed),
	KUNIT_CASE(test_two_tasks_on_one_cpu),
	KUNIT_CASE(test_two_tasks_on_one_all_cpus),
	KUNIT_CASE(test_task_on_all_and_one_cpu),
	{},
};

/* Per-test setup: skip when the environment cannot support the tests. */
static int test_init(struct kunit *test)
{
	/* Most test cases want 2 distinct CPUs. */
	if (num_online_cpus() < 2)
		kunit_skip(test, "not enough cpus");

	/* Want the system to not use breakpoints elsewhere. */
	if (hw_breakpoint_is_used())
		kunit_skip(test, "hw breakpoint already in use");

	return 0;
}

/*
 * Per-test teardown: release every breakpoint and the dummy task, then
 * check that the accounting state has returned to "nothing in use".
 */
static void test_exit(struct kunit *test)
{
	/* Unregister any breakpoints a (possibly failed) test case left behind. */
	for (int i = 0; i < MAX_TEST_BREAKPOINTS; ++i) {
		if (test_bps[i])
			unregister_test_bp(&test_bps[i]);
	}

	if (__other_task) {
		kthread_stop(__other_task);
		__other_task = NULL;
	}

	/* Verify that internal state agrees that no breakpoints are in use. */
	KUNIT_EXPECT_FALSE(test, hw_breakpoint_is_used());
}

/* KUnit suite wiring: per-test init/exit around each case above. */
static struct kunit_suite hw_breakpoint_test_suite = {
	.name = "hw_breakpoint",
	.test_cases = hw_breakpoint_test_cases,
	.init = test_init,
	.exit = test_exit,
};

kunit_test_suites(&hw_breakpoint_test_suite);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marco Elver <elver@google.com>");