author    | Tejun Heo | 2013-01-07 08:51:07 -0800
committer | Tejun Heo | 2013-01-07 08:51:07 -0800
commit    | efeb77b2f13deb0503e65ad2b243495b6de75173 (patch)
tree      | 2b0db442e04561e7993301247ae8d8a10855a721 /kernel
parent    | c8f699bb56aeae951e02fe2a46c9ada022535770 (diff)
cpuset: introduce CS_ONLINE
Add CS_ONLINE, which is set from css_online() and cleared from
css_offline(). This will enable using the generic cgroup iterators
while allowing cpuset to be decoupled from cgroup's internal locking.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
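The pattern this prepares for is roughly the following: walk the hierarchy with a generic cgroup iterator and simply skip cpusets whose css_online() has not yet run (or whose css_offline() has already started). The sketch below is illustrative only and not part of this patch; the iterator macro cgroup_for_each_descendant_pre(), the cgroup_cs() accessor, and the helper name walk_online_descendants() are assumptions based on the cgroup/cpuset code of this period.

/*
 * Illustrative sketch (not part of this patch): visit every descendant
 * of @root_cs with a generic cgroup iterator, skipping cpusets that are
 * not fully online.  The walk runs under rcu_read_lock(), as the
 * descendant iterators require.
 */
static void walk_online_descendants(struct cpuset *root_cs)
{
	struct cgroup *pos_cgrp;
	struct cpuset *cp;

	rcu_read_lock();
	cgroup_for_each_descendant_pre(pos_cgrp, root_cs->css.cgroup) {
		cp = cgroup_cs(pos_cgrp);

		/* not yet onlined, or already being torn down */
		if (!is_cpuset_online(cp))
			continue;

		/* ... operate on cp ... */
	}
	rcu_read_unlock();
}

Note that top_cpuset never passes through css_online(), which is why the patch also sets CS_ONLINE in its static initializer.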
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/cpuset.c | 11
1 file changed, 10 insertions, 1 deletion
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1d7a611ff771..e857887bd246 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -138,6 +138,7 @@ static inline bool task_has_mempolicy(struct task_struct *task)
 
 /* bits in struct cpuset flags field */
 typedef enum {
+	CS_ONLINE,
 	CS_CPU_EXCLUSIVE,
 	CS_MEM_EXCLUSIVE,
 	CS_MEM_HARDWALL,
@@ -154,6 +155,11 @@ enum hotplug_event {
 };
 
 /* convenient tests for these bits */
+static inline bool is_cpuset_online(const struct cpuset *cs)
+{
+	return test_bit(CS_ONLINE, &cs->flags);
+}
+
 static inline int is_cpu_exclusive(const struct cpuset *cs)
 {
 	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
@@ -190,7 +196,8 @@ static inline int is_spread_slab(const struct cpuset *cs)
 }
 
 static struct cpuset top_cpuset = {
-	.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
+	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
+		  (1 << CS_MEM_EXCLUSIVE)),
 };
 
 /*
@@ -1822,6 +1829,7 @@ static int cpuset_css_online(struct cgroup *cgrp)
 	if (!parent)
 		return 0;
 
+	set_bit(CS_ONLINE, &cs->flags);
 	if (is_spread_page(parent))
 		set_bit(CS_SPREAD_PAGE, &cs->flags);
 	if (is_spread_slab(parent))
@@ -1871,6 +1879,7 @@ static void cpuset_css_offline(struct cgroup *cgrp)
 		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
 
 	number_of_cpusets--;
+	clear_bit(CS_ONLINE, &cs->flags);
 
 	cgroup_unlock();
 }