author	Richard Kuo	2013-11-25 13:49:26 -0600
committer	Richard Kuo	2014-04-04 18:20:02 -0500
commit	b10fa7b62d4201602dac45290b43defd4a980a12 (patch)
tree	4a8f1acc0d5ac4f4f41e54b0f29230abd781f654
parent	17ca896dae8272a3dadf6343dca9daa5c2e6ebb5 (diff)
Hexagon: fix atomic_set
Normal writes in our architecture don't invalidate lock reservations.

Signed-off-by: Richard Kuo <rkuo@codeaurora.org>
-rw-r--r--	arch/hexagon/include/asm/atomic.h	15
1 file changed, 14 insertions, 1 deletion
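Why the plain store was broken: Hexagon implements its atomic read-modify-write operations as load-locked/store-conditional (memw_locked) loops, and an ordinary store does not invalidate a reservation held by another hardware thread. The old "(v)->counter = (i)" atomic_set could therefore be silently overwritten by a concurrent RMW whose store-conditional still succeeds. Below is a minimal, hypothetical host-side C model of that interleaving, not Hexagon code; load_locked(), store_conditional() and plain_atomic_set() are illustrative names that only emulate the reservation behaviour the commit message describes.

	#include <stdio.h>
	#include <stdbool.h>

	/*
	 * Hypothetical single-process model of an LL/SC reservation, used only
	 * to illustrate the race.  On Hexagon, an ordinary store does NOT
	 * clear a pending reservation.
	 */
	static int counter;
	static bool reservation_valid;

	static int load_locked(void)		/* models "r6 = memw_locked(...)" */
	{
		reservation_valid = true;
		return counter;
	}

	static bool store_conditional(int val)	/* models "memw_locked(...) = r6" */
	{
		if (!reservation_valid)
			return false;		/* reservation lost: caller retries */
		counter = val;
		return true;
	}

	/* The old atomic_set(): a plain store that leaves the reservation intact. */
	static void plain_atomic_set(int i)
	{
		counter = i;
	}

	int main(void)
	{
		counter = 0;

		/* CPU A: atomic_add(1, &v) starts; load-locked reads 0 */
		int tmp = load_locked() + 1;

		/* CPU B: old atomic_set(&v, 100) runs as a plain store */
		plain_atomic_set(100);

		/* CPU A: the store-conditional still succeeds because the plain
		 * store never invalidated the reservation; the 100 is lost. */
		bool ok = store_conditional(tmp);

		printf("SC %s, counter = %d (the 100 from atomic_set is gone)\n",
		       ok ? "succeeded" : "failed", counter);
		return 0;
	}

With the patched atomic_set below, the store itself goes through a memw_locked load/store pair, so it participates in the same reservation protocol: either atomic_set sees its own reservation lost and retries, or its completed locked store causes the competing store-conditional to fail and retry, closing the window shown above.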
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 7aae4cb2a29a..17dc63780c06 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -26,7 +26,20 @@
#include <asm/cmpxchg.h>
#define ATOMIC_INIT(i) { (i) }
-#define atomic_set(v, i) ((v)->counter = (i))
+
+/* Normal writes in our arch don't clear lock reservations */
+
+static inline void atomic_set(atomic_t *v, int new)
+{
+	asm volatile(
+		"1:	r6 = memw_locked(%0);\n"
+		"	memw_locked(%0,p0) = %1;\n"
+		"	if (!P0) jump 1b;\n"
+		:
+		: "r" (&v->counter), "r" (new)
+		: "memory", "p0", "r6"
+	);
+}
/**
* atomic_read - reads a word, atomically