author     Roman Zippel     2006-03-09 17:33:53 -0800
committer  Linus Torvalds   2006-03-09 19:47:38 -0800
commit     7b61fcda8a640bb87be23f9f09c1f24357b5c6e1 (patch)
tree       691c42a07df609934cb1cba15cf94b9c04978654 /include
parent     b707dbe6c52e143a9afea06aa8f84103135ca873 (diff)
[PATCH] m68k: fix cmpxchg compile errors if CONFIG_RMW_INSNS=n
We require that all archs implement atomic_cmpxchg(), for the generic
version of atomic_add_unless().
Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
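For context, the generic atomic_add_unless() is built on top of atomic_cmpxchg(), which is why every arch must provide it. The sketch below shows the usual cmpxchg-loop form of that macro as it appears in atomic headers of this era; it is illustrative, and the exact wrapping and whitespace may differ from the tree:

	#define atomic_add_unless(v, a, u)				\
	({								\
		int c, old;						\
		c = atomic_read(v);					\
		while (c != (u) &&					\
		       (old = atomic_cmpxchg((v), c, c + (a))) != c)	\
			c = old;					\
		c != (u);						\
	})

The loop retries until either the counter already equals u (no add performed) or the cmpxchg succeeds, and the result tells the caller whether the addition happened.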
Diffstat (limited to 'include')
 include/asm-m68k/atomic.h | 35 ++++++++++++++++++++++++++++++++---
 1 file changed, 32 insertions(+), 3 deletions(-)
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index a4a84d5c65d5..862e497c2645 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -55,6 +55,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
 }
 
 #ifdef CONFIG_RMW_INSNS
+
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	int t, tmp;
@@ -82,7 +83,12 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 			: "g" (i), "2" (atomic_read(v)));
 	return t;
 }
+
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 #else /* !CONFIG_RMW_INSNS */
+
 static inline int atomic_add_return(int i, atomic_t * v)
 {
 	unsigned long flags;
@@ -110,6 +116,32 @@ static inline int atomic_sub_return(int i, atomic_t * v)
 
 	return t;
 }
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	unsigned long flags;
+	int prev;
+
+	local_irq_save(flags);
+	prev = atomic_read(v);
+	if (prev == old)
+		atomic_set(v, new);
+	local_irq_restore(flags);
+	return prev;
+}
+
+static inline int atomic_xchg(atomic_t *v, int new)
+{
+	unsigned long flags;
+	int prev;
+
+	local_irq_save(flags);
+	prev = atomic_read(v);
+	atomic_set(v, new);
+	local_irq_restore(flags);
+	return prev;
+}
+
 #endif /* !CONFIG_RMW_INSNS */
 
 #define atomic_dec_return(v)	atomic_sub_return(1, (v))
@@ -139,9 +171,6 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
 	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
 }
 
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
 #define atomic_add_unless(v, a, u)				\
 ({								\
 	int c, old;						\
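The new !CONFIG_RMW_INSNS fallbacks simply disable local interrupts around a plain read-modify-write of the counter, which is sufficient because mainline m68k is uniprocessor. As a purely illustrative example of the caller pattern these primitives enable, the helper below is hypothetical and not part of this patch; it only shows the cmpxchg-loop idiom that atomic_cmpxchg() makes available on every arch:

	#include <asm/atomic.h>

	/*
	 * Hypothetical helper, for illustration only: decrement v
	 * unless that would take the counter below zero.
	 */
	static inline int example_dec_if_positive(atomic_t *v)
	{
		int old, dec;

		do {
			old = atomic_read(v);
			dec = old - 1;
			if (dec < 0)
				break;		/* leave the counter untouched */
		} while (atomic_cmpxchg(v, old, dec) != old);

		return dec;	/* a negative result means no decrement happened */
	}

With or without CONFIG_RMW_INSNS, such callers see the same atomic_cmpxchg() semantics; only the implementation (CAS instruction vs. irq-disable critical section) differs.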