| author | Andi Kleen | 2006-06-26 13:59:20 +0200 |
| --- | --- | --- |
| committer | Linus Torvalds | 2006-06-26 10:48:21 -0700 |
| commit | da5311258d2afb96fc592c8b11e818facfa46dc3 (patch) | |
| tree | cdd51f187028cb2cd3da716cb7c1b3df88902ed6 /include | |
| parent | 75bd665cc9f4d0f67164d9145e446ae554d4ed3f (diff) | |
[PATCH] x86_64: Fix race in cpu_local_* on preemptible kernels
When a process changes CPUs while doing the non-atomic cpu_local_*
operations, it might operate on the local_t of a different CPU.
Fix that by disabling preemption.
Pointed out by Christoph Lameter
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
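
For context, here is a minimal sketch of the race and the fix (illustrative only, not from the patch; `counter` is a hypothetical per-CPU variable, and the snippet assumes a kernel build context):

```c
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, counter);	/* hypothetical counter */

static void racy_inc(void)			/* pre-patch behaviour */
{
	local_t *l = &__get_cpu_var(counter);	/* address of CPU A's copy */
	/*
	 * On a preemptible kernel the task can migrate to CPU B here,
	 * so local_inc() becomes a plain (non-LOCK) read-modify-write
	 * of CPU A's counter issued from CPU B, racing with CPU A's
	 * own local operations.
	 */
	local_inc(l);
}

static void fixed_inc(void)			/* what the patch does */
{
	preempt_disable();			/* no migration ... */
	local_inc(&__get_cpu_var(counter));
	preempt_enable();			/* ... until here */
}
```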
Diffstat (limited to 'include')
| -rw-r--r-- | include/asm-i386/local.h | 26 |
| -rw-r--r-- | include/asm-x86_64/local.h | 26 |

2 files changed, 40 insertions(+), 12 deletions(-)
diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h
index e67fa08260fe..3b4998c51d08 100644
--- a/include/asm-i386/local.h
+++ b/include/asm-i386/local.h
@@ -55,12 +55,26 @@ static __inline__ void local_sub(long i, local_t *v)
  * much more efficient than these naive implementations. Note they take
  * a variable, not an address.
  */
-#define cpu_local_read(v)	local_read(&__get_cpu_var(v))
-#define cpu_local_set(v, i)	local_set(&__get_cpu_var(v), (i))
-#define cpu_local_inc(v)	local_inc(&__get_cpu_var(v))
-#define cpu_local_dec(v)	local_dec(&__get_cpu_var(v))
-#define cpu_local_add(i, v)	local_add((i), &__get_cpu_var(v))
-#define cpu_local_sub(i, v)	local_sub((i), &__get_cpu_var(v))
+
+/* Need to disable preemption for the cpu local counters otherwise we could
+   still access a variable of a previous CPU in a non atomic way. */
+#define cpu_local_wrap_v(v)		\
+	({ local_t res__;		\
+	   preempt_disable();		\
+	   res__ = (v);			\
+	   preempt_enable();		\
+	   res__; })
+#define cpu_local_wrap(v)		\
+	({ preempt_disable();		\
+	   v;				\
+	   preempt_enable(); })		\
+
+#define cpu_local_read(v)    cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
+#define cpu_local_set(v, i)  cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
+#define cpu_local_inc(v)     cpu_local_wrap(local_inc(&__get_cpu_var(v)))
+#define cpu_local_dec(v)     cpu_local_wrap(local_dec(&__get_cpu_var(v)))
+#define cpu_local_add(i, v)  cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
+#define cpu_local_sub(i, v)  cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
 
 #define __cpu_local_inc(v)	cpu_local_inc(v)
 #define __cpu_local_dec(v)	cpu_local_dec(v)
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
index cd17945bf218..e769e6200225 100644
--- a/include/asm-x86_64/local.h
+++ b/include/asm-x86_64/local.h
@@ -59,12 +59,26 @@ static inline void local_sub(long i, local_t *v)
  * This could be done better if we moved the per cpu data directly
  * after GS.
  */
-#define cpu_local_read(v)	local_read(&__get_cpu_var(v))
-#define cpu_local_set(v, i)	local_set(&__get_cpu_var(v), (i))
-#define cpu_local_inc(v)	local_inc(&__get_cpu_var(v))
-#define cpu_local_dec(v)	local_dec(&__get_cpu_var(v))
-#define cpu_local_add(i, v)	local_add((i), &__get_cpu_var(v))
-#define cpu_local_sub(i, v)	local_sub((i), &__get_cpu_var(v))
+
+/* Need to disable preemption for the cpu local counters otherwise we could
+   still access a variable of a previous CPU in a non atomic way. */
+#define cpu_local_wrap_v(v)		\
+	({ local_t res__;		\
+	   preempt_disable();		\
+	   res__ = (v);			\
+	   preempt_enable();		\
+	   res__; })
+#define cpu_local_wrap(v)		\
+	({ preempt_disable();		\
+	   v;				\
+	   preempt_enable(); })		\
+
+#define cpu_local_read(v)    cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
+#define cpu_local_set(v, i)  cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
+#define cpu_local_inc(v)     cpu_local_wrap(local_inc(&__get_cpu_var(v)))
+#define cpu_local_dec(v)     cpu_local_wrap(local_dec(&__get_cpu_var(v)))
+#define cpu_local_add(i, v)  cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
+#define cpu_local_sub(i, v)  cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
 
 #define __cpu_local_inc(v)	cpu_local_inc(v)
 #define __cpu_local_dec(v)	cpu_local_dec(v)
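
Two wrapper macros are used because the operations differ in whether they return a value: `cpu_local_wrap_v()` is a GCC statement expression whose result is the value read (its last expression, `res__`), while `cpu_local_wrap()` brackets operations used only for their side effects. A hypothetical caller (not from the patch, assuming a kernel build context) might look like:

```c
#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, nr_events);	/* hypothetical counter */

void note_event(void)
{
	/*
	 * Expands to preempt_disable(); local_inc(...); preempt_enable();
	 * so the per-CPU address lookup and the increment are guaranteed
	 * to happen on the same CPU.
	 */
	cpu_local_inc(nr_events);
}

void note_events(int n)
{
	cpu_local_add(n, nr_events);	/* same guarantee for add */
}
```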