author		Dave Martin		2012-08-17 16:07:02 +0100
committer	Nicolas Pitre		2013-04-24 10:37:01 -0400
commit		1ae98561b16f305e43151405f226727c00ee52bc (patch)
tree		f48c7fa44fdd147340f00d4a51555cbd7c476e27 /arch
parent		9762f12d3e05c8d6a0651f4c7e76ae72ce27fc3a (diff)
ARM: mcpm_head.S: vlock-based first man election
Instead of requiring the first man to be elected in advance (which
can be suboptimal in some situations), this patch uses a per-cluster
mutex to co-ordinate selection of the first man.

This should also make it more feasible to reuse this code path for
asynchronous cluster resume (as in CPUidle scenarios).

We must ensure that the vlock data doesn't share a cacheline with
anything else, or dirty cache eviction could corrupt it.

Signed-off-by: Dave Martin <dave.martin@linaro.org>
Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Reviewed-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
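For readers unfamiliar with the voting-lock idea, the following is a rough C
sketch of a first-man election along these lines. The names (struct vlock,
vlock_trylock, NR_CPUS, OWNER_NONE) and the protocol details are illustrative
assumptions; the actual vlock.S added by this series is written in assembly
and must work with the caches disabled, so it cannot rely on C11 atomics the
way this simplification does.

/*
 * Simplified voting-lock sketch: each contender announces that it is
 * voting, proposes itself as owner if nobody owns the lock yet, then
 * waits for all voting to finish and checks who won.  Illustrative
 * only; not the kernel's vlock.S implementation.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define NR_CPUS     4		/* assumed number of CPUs per cluster */
#define OWNER_NONE  0xff

struct vlock {
	_Atomic unsigned char owner;		/* winning CPU id, or OWNER_NONE */
	_Atomic unsigned char voting[NR_CPUS];	/* per-CPU "I am voting" flags */
};

/* Try to become the first man.  Returns true if this CPU won the election. */
static bool vlock_trylock(struct vlock *v, unsigned int cpu)
{
	/* Announce that we are taking part in this round of voting. */
	atomic_store(&v->voting[cpu], 1);

	/* If somebody already owns the lock, withdraw and fail. */
	if (atomic_load(&v->owner) != OWNER_NONE) {
		atomic_store(&v->voting[cpu], 0);
		return false;
	}

	/* Cast our vote: propose ourselves as owner. */
	atomic_store(&v->owner, (unsigned char)cpu);

	/* Our vote is in; stop voting. */
	atomic_store(&v->voting[cpu], 0);

	/* Wait until every other contender has finished voting too. */
	for (unsigned int i = 0; i < NR_CPUS; i++)
		while (atomic_load(&v->voting[i]))
			;	/* spin */

	/* All votes are in; the recorded owner is the winner. */
	return atomic_load(&v->owner) == cpu;
}

static void vlock_unlock(struct vlock *v)
{
	atomic_store(&v->owner, OWNER_NONE);
}

In the patch below, the winner goes on to perform cluster setup while the
losers branch to mcpm_setup_wait and simply wait for CLUSTER_UP.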
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/common/Makefile	 2
-rw-r--r--	arch/arm/common/mcpm_head.S	41
-rw-r--r--	arch/arm/kernel/asm-offsets.c	 1
3 files changed, 38 insertions, 6 deletions
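The cacheline concern mentioned in the commit message is handled in the .bss
changes below by aligning first_man_locks to CACHE_WRITEBACK_ORDER both before
and after the array. A rough C equivalent of the same idea is sketched here;
GRANULE, VLOCK_SIZE and MAX_NR_CLUSTERS are assumed example values, not the
kernel's definitions.

/*
 * Illustration only: keep the lock storage in its own cache-writeback
 * granule.  Aligning the start and rounding the size up to a whole
 * number of granules means no unrelated variable can share a granule
 * with the locks, so a dirty-line eviction elsewhere cannot overwrite
 * lock state that was written while the caches were off.
 */
#define GRANULE          64	/* assumed writeback granule size */
#define VLOCK_SIZE       8	/* assumed per-cluster lock footprint */
#define MAX_NR_CLUSTERS  2
#define ROUND_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

static char first_man_locks[ROUND_UP(VLOCK_SIZE * MAX_NR_CLUSTERS, GRANULE)]
	__attribute__((aligned(GRANULE)));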
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index b070671033ae..9ec273188ccb 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -11,4 +11,4 @@ obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o
obj-$(CONFIG_SHARP_SCOOP) += scoop.o
obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
-obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o
+obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o vlock.o
diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S
index 7d729bd72674..8178705c4b24 100644
--- a/arch/arm/common/mcpm_head.S
+++ b/arch/arm/common/mcpm_head.S
@@ -16,6 +16,8 @@
#include <linux/linkage.h>
#include <asm/mcpm.h>
+#include "vlock.h"
+
.if MCPM_SYNC_CLUSTER_CPUS
.error "cpus must be the first member of struct mcpm_sync_struct"
.endif
@@ -69,10 +71,11 @@ ENTRY(mcpm_entry_point)
* position independent way.
*/
adr r5, 3f
- ldmia r5, {r6, r7, r8}
+ ldmia r5, {r6, r7, r8, r11}
add r6, r5, r6 @ r6 = mcpm_entry_vectors
ldr r7, [r5, r7] @ r7 = mcpm_power_up_setup_phys
add r8, r5, r8 @ r8 = mcpm_sync
+ add r11, r5, r11 @ r11 = first_man_locks
mov r0, #MCPM_SYNC_CLUSTER_SIZE
mla r8, r0, r10, r8 @ r8 = sync cluster base
@@ -86,13 +89,22 @@ ENTRY(mcpm_entry_point)
@ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
@ state, because there is at least one active CPU (this CPU).
- @ Note: the following is racy as another CPU might be testing
- @ the same flag at the same moment. That'll be fixed later.
+ mov r0, #VLOCK_SIZE
+ mla r11, r0, r10, r11 @ r11 = cluster first man lock
+ mov r0, r11
+ mov r1, r9 @ cpu
+ bl vlock_trylock @ implies DMB
+
+ cmp r0, #0 @ failed to get the lock?
+ bne mcpm_setup_wait @ wait for cluster setup if so
+
ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
cmp r0, #CLUSTER_UP @ cluster already up?
bne mcpm_setup @ if not, set up the cluster
- @ Otherwise, skip setup:
+ @ Otherwise, release the first man lock and skip setup:
+ mov r0, r11
+ bl vlock_unlock
b mcpm_setup_complete
mcpm_setup:
@@ -142,6 +154,19 @@ mcpm_setup_leave:
dsb
sev
+ mov r0, r11
+ bl vlock_unlock @ implies DMB
+ b mcpm_setup_complete
+
+ @ In the contended case, non-first men wait here for cluster setup
+ @ to complete:
+mcpm_setup_wait:
+ ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+ cmp r0, #CLUSTER_UP
+ wfene
+ bne mcpm_setup_wait
+ dmb
+
mcpm_setup_complete:
@ If a platform-specific CPU setup hook is needed, it is
@ called from here.
@@ -173,11 +198,17 @@ mcpm_entry_gated:
3: .word mcpm_entry_vectors - .
.word mcpm_power_up_setup_phys - 3b
.word mcpm_sync - 3b
+ .word first_man_locks - 3b
ENDPROC(mcpm_entry_point)
.bss
- .align 5
+
+ .align CACHE_WRITEBACK_ORDER
+ .type first_man_locks, #object
+first_man_locks:
+ .space VLOCK_SIZE * MAX_NR_CLUSTERS
+ .align CACHE_WRITEBACK_ORDER
.type mcpm_entry_vectors, #object
ENTRY(mcpm_entry_vectors)
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 1bed82a0a9e0..3f088225e71c 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -150,6 +150,7 @@ int main(void)
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
BLANK();
+ DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER);
DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
BLANK();
#ifdef CONFIG_KVM_ARM_HOST