path: root/kernel
author	Will Deacon	2021-07-30 12:24:32 +0100
committer	Peter Zijlstra	2021-08-20 12:32:59 +0200
commit	234a503e670be01f72841be9fcf68dfb89a1fa8b (patch)
tree	ee867fb4f0dc0afd94d240eb949e4d569b85e4d8 /kernel
parent	97c0054dbe2c3c59d1156fd233f2d44e91981c8e (diff)
sched: Reject CPU affinity changes based on task_cpu_possible_mask()
Reject explicit requests to change the affinity mask of a task via
set_cpus_allowed_ptr() if the requested mask is not a subset of the
mask returned by task_cpu_possible_mask(). This ensures that the
'cpus_mask' for a given task cannot contain CPUs which are incapable
of executing it, except in cases where the affinity is forced.

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <Valentin.Schneider@arm.com>
Reviewed-by: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20210730112443.23245-6-will@kernel.org
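[Editor's note: on most systems task_cpu_possible_mask() is simply
cpu_possible_mask, so this check changes nothing there; it only takes
effect on asymmetric systems, such as arm64 machines where only some
CPUs can execute 32-bit tasks. A minimal userspace sketch of the
observable behaviour, assuming the calling task cannot run on CPU 4
(the CPU numbers here are hypothetical):

	#define _GNU_SOURCE
	#include <errno.h>
	#include <sched.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		cpu_set_t mask;

		CPU_ZERO(&mask);
		CPU_SET(0, &mask);
		CPU_SET(4, &mask);	/* hypothetical CPU the task cannot execute on */

		/* pid 0 means the calling thread */
		if (sched_setaffinity(0, sizeof(mask), &mask) == -1)
			/* with this patch: EINVAL, since the requested mask
			 * is not a subset of task_cpu_possible_mask() */
			printf("sched_setaffinity: %s\n", strerror(errno));

		return 0;
	}

Before this patch, such a request could leave CPUs in 'cpus_mask'
that are incapable of executing the task.]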
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/core.c	9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b9d4bae922a8..8cec0d24c88c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2709,7 +2709,9 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 				  const struct cpumask *new_mask,
 				  u32 flags)
 {
+	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
 	const struct cpumask *cpu_valid_mask = cpu_active_mask;
+	bool kthread = p->flags & PF_KTHREAD;
 	unsigned int dest_cpu;
 	struct rq_flags rf;
 	struct rq *rq;
@@ -2718,7 +2720,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 	rq = task_rq_lock(p, &rf);
 	update_rq_clock(rq);
 
-	if (p->flags & PF_KTHREAD || is_migration_disabled(p)) {
+	if (kthread || is_migration_disabled(p)) {
 		/*
 		 * Kernel threads are allowed on online && !active CPUs,
 		 * however, during cpu-hot-unplug, even these might get pushed
@@ -2732,6 +2734,11 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 		cpu_valid_mask = cpu_online_mask;
 	}
 
+	if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	/*
 	 * Must re-check here, to close a race against __kthread_bind(),
 	 * sched_setaffinity() is not guaranteed to observe the flag.
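[Editor's note: the rejection test hinges on cpumask_subset(a, b),
which is true iff every CPU set in a is also set in b. A small
userspace model of the new check — the typedef and masks below are
invented for illustration, not kernel code:

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy cpumask: one bit per CPU, 8 CPUs total (illustrative only). */
	typedef unsigned char cpumask_t;

	/* True iff every CPU set in @sub is also set in @super. */
	static bool cpumask_subset(cpumask_t sub, cpumask_t super)
	{
		return (sub & ~super) == 0;
	}

	int main(void)
	{
		/* Suppose only CPUs 0-3 can execute this (e.g. 32-bit) task. */
		cpumask_t cpu_allowed_mask = 0x0f;

		/* CPUs 1-2: a subset of the possible mask -> request accepted */
		printf("%d\n", cpumask_subset(0x06, cpu_allowed_mask));	/* 1 */

		/* CPUs 3-4: CPU 4 cannot run the task -> rejected (-EINVAL) */
		printf("%d\n", cpumask_subset(0x18, cpu_allowed_mask));	/* 0 */

		return 0;
	}

Note the check is skipped for kthreads, which may legitimately be
bound to online-but-inactive CPUs, as the surrounding code explains.]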