author	Paul E. McKenney	2016-01-12 14:15:40 -0800
committer	Paul E. McKenney	2016-03-31 13:37:39 -0700
commit	6b558c4c7a4ba410e39dbcb9d4c2b6e928c09308 (patch)
tree	db3bcb367b47fd09843616d1eef8d25f880fc4d5 /kernel/rcu
parent	bdea9e347783c2724997db7c5d5b45a301e2dc90 (diff)
rcutorture: Bind rcuperf reader/writer kthreads to CPUs
This commit forces more deterministic behavior by binding rcuperf's rcu_perf_reader() and rcu_perf_writer() kthreads to their respective CPUs.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/rcuperf.c	5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index 9d54a57bee7d..7a1edf417d18 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -328,8 +328,10 @@ rcu_perf_reader(void *arg)
 {
 	unsigned long flags;
 	int idx;
+	long me = (long)arg;
 
 	VERBOSE_PERFOUT_STRING("rcu_perf_reader task started");
+	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
 	set_user_nice(current, MAX_NICE);
 	atomic_inc(&n_rcu_perf_reader_started);
 
@@ -362,6 +364,7 @@ rcu_perf_writer(void *arg)
 	WARN_ON(rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp);
 	WARN_ON(rcu_gp_is_normal() && gp_exp);
 	WARN_ON(!wdpp);
+	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
 	t = ktime_get_mono_fast_ns();
 	if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
 		t_rcu_perf_writer_started = t;
@@ -594,7 +597,7 @@ rcu_perf_init(void)
 		goto unwind;
 	}
 	for (i = 0; i < nrealreaders; i++) {
-		firsterr = torture_create_kthread(rcu_perf_reader, NULL,
+		firsterr = torture_create_kthread(rcu_perf_reader, (void *)i,
 						  reader_tasks[i]);
 		if (firsterr)
 			goto unwind;
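
To see the binding pattern from this commit outside the rcuperf diff, here is a minimal, self-contained sketch. It is not part of the commit; the module name, thread function, and the fixed index 1 are hypothetical. It shows a kthread receiving its index through the void * argument and pinning itself with set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)) before doing any work, which is the same binding the patch adds to rcu_perf_reader() and rcu_perf_writer().

/* bind_demo.c -- hypothetical example module, not from the kernel tree. */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *bound_task;

static int bound_thread_fn(void *arg)
{
	long me = (long)arg;	/* index passed as a pointer-sized integer, as in the patch */

	/* Pin this kthread to one CPU; the modulo wraps if the index exceeds nr_cpu_ids. */
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		/* Safe here: the thread is bound to a single CPU, so it cannot migrate. */
		pr_info("bind_demo: thread %ld running on CPU %d\n", me, smp_processor_id());
		msleep(1000);
	}
	return 0;
}

static int __init bind_demo_init(void)
{
	/* Same idiom as rcu_perf_init(): smuggle the index through the void * argument. */
	bound_task = kthread_run(bound_thread_fn, (void *)1L, "bind_demo/1");
	return PTR_ERR_OR_ZERO(bound_task);
}

static void __exit bind_demo_exit(void)
{
	kthread_stop(bound_task);
}

module_init(bind_demo_init);
module_exit(bind_demo_exit);
MODULE_LICENSE("GPL");

Passing the index as a cast long, rather than allocating a per-thread structure, keeps the kthread creation loop trivial, and the modulo by nr_cpu_ids lets more readers/writers than CPUs share CPUs deterministically rather than fail.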