author    Catalin Marinas  2010-09-13 16:03:21 +0100
committer Russell King     2010-11-04 15:44:31 +0000
commit    247055aa21ffef1c49dd64710d5e94c2aee19b58 (patch)
tree      e9e026b96597d080de4c16bb88c17b0495c61904 /arch/arm/mm
parent    ff8b16d7e15a8ba2a6086645614a483e048e3fbf (diff)
ARM: 6384/1: Remove the domain switching on ARMv6k/v7 CPUs
This patch removes the domain switching functionality via the set_fs and __switch_to functions on cores that have a TLS register.

Currently, the ioremap and vmalloc areas share the same level 1 page tables and therefore have the same domain (DOMAIN_KERNEL). When the kernel domain is modified from Client to Manager (via __set_fs or the __switch_to function), the XN (eXecute Never) bit is overridden and newer CPUs can speculatively prefetch the ioremap'ed memory.

Linux performs the kernel domain switching to allow user-specific functions (copy_to/from_user, get/put_user etc.) to access kernel memory. In order for these functions to work with the kernel domain set to Client, the patch changes the LDRT/STRT and related instructions to the LDR/STR ones.

The access rights of user pages are also changed to kernel read-only rather than read/write so that the copy-on-write mechanism still works. CPU_USE_DOMAINS gets disabled only if the hardware has a TLS register (CPU_32v6K is defined), since writing the TLS value to the high vectors page isn't possible otherwise.

The user addresses passed to the kernel are checked by the access_ok() function so that they do not point to the kernel space.

Tested-by: Anton Vorontsov <cbouatmailru@gmail.com>
Cc: Tony Lindgren <tony@atomide.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
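As a concrete illustration of the LDRT/STRT to LDR/STR change described above, the following kernel-context sketch shows how a user-access helper can select the instruction form at build time. The TUSER() macro and read_user_byte() helper are illustrative names only, not the exact helpers added by this series; user pointers are still range-checked with access_ok() before any such helper runs.

/*
 * Minimal sketch, assuming CONFIG_CPU_USE_DOMAINS as introduced by this
 * patch.  With domain switching, uaccess must use the unprivileged
 * LDRT/STRT forms so that user permissions are still enforced; without
 * it, the kernel domain stays in Client mode and plain LDR/STR already
 * respects the user page permissions.
 */
#ifdef CONFIG_CPU_USE_DOMAINS
#define TUSER(instr)	#instr "t"		/* e.g. "ldrbt", "strbt" */
#else
#define TUSER(instr)	#instr			/* e.g. "ldrb", "strb" */
#endif

/* Stripped-down __get_user()-style helper, without the exception-table fixup. */
static inline unsigned char read_user_byte(const void __user *addr)
{
	unsigned long val;

	asm("	" TUSER(ldrb) "	%0, [%1]" : "=r" (val) : "r" (addr));
	return val;
}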
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig        8
-rw-r--r--  arch/arm/mm/mmu.c          6
-rw-r--r--  arch/arm/mm/proc-macros.S  7
-rw-r--r--  arch/arm/mm/proc-v7.S      5
4 files changed, 21 insertions, 5 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 4414a01e1e8a..6d05f79a8cd2 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -599,6 +599,14 @@ config CPU_CP15_MPU
help
Processor has the CP15 register, which has MPU related registers.
+config CPU_USE_DOMAINS
+ bool
+ depends on MMU
+ default y if !CPU_32v6K
+ help
+ This option enables or disables the use of domain switching
+ via the set_fs() function.
+
#
# CPU supports 36-bit I/O
#
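The help text above is brief, so for context this is roughly the mechanism the new option gates (a simplified sketch based on the ARM uaccess and domain headers of this era, not the literal code): set_fs(KERNEL_DS) used to promote DOMAIN_KERNEL to Manager so the uaccess helpers could write through read-only user mappings. With CONFIG_CPU_USE_DOMAINS=n, modify_domain() compiles away, the kernel domain stays in Client mode, and the XN bit on the vmalloc/ioremap area remains effective.

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
	/* becomes a no-op when CONFIG_CPU_USE_DOMAINS is not set */
	modify_domain(DOMAIN_KERNEL, fs == KERNEL_DS ?
		      DOMAIN_MANAGER : DOMAIN_CLIENT);
}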
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 72ad3e1f56cf..79c01f540cbe 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -24,6 +24,7 @@
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
+#include <asm/traps.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -914,12 +915,11 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
{
struct map_desc map;
unsigned long addr;
- void *vectors;
/*
* Allocate the vector page early.
*/
- vectors = early_alloc(PAGE_SIZE);
+ vectors_page = early_alloc(PAGE_SIZE);
for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
@@ -959,7 +959,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
* location (0xffff0000). If we aren't using high-vectors, also
* create a mapping at the low-vectors virtual address.
*/
- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+ map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
map.virtual = 0xffff0000;
map.length = PAGE_SIZE;
map.type = MT_HIGH_VECTORS;
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 7d63beaf9745..337f10256cd6 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -99,6 +99,10 @@
* 110x 0 1 0 r/w r/o
* 11x0 0 1 0 r/w r/o
* 1111 0 1 1 r/w r/w
+ *
+ * If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed:
+ * 110x 1 1 1 r/o r/o
+ * 11x0 1 1 1 r/o r/o
*/
.macro armv6_mt_table pfx
\pfx\()_mt_table:
@@ -138,8 +142,11 @@
tst r1, #L_PTE_USER
orrne r3, r3, #PTE_EXT_AP1
+#ifdef CONFIG_CPU_USE_DOMAINS
+ @ allow kernel read/write access to read-only user pages
tstne r3, #PTE_EXT_APX
bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+#endif
tst r1, #L_PTE_EXEC
orreq r3, r3, #PTE_EXT_XN
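For readers more comfortable with C than with the assembler above, the CONFIG_CPU_USE_DOMAINS-guarded sequence in both set_pte_ext hunks corresponds roughly to the following, where hw_pte and linux_pte are illustrative names standing in for r3 and r1:

	if (linux_pte & L_PTE_USER) {
		hw_pte |= PTE_EXT_AP1;			/* grant user access */
#ifdef CONFIG_CPU_USE_DOMAINS
		/* allow kernel read/write access to read-only user pages */
		if (hw_pte & PTE_EXT_APX)
			hw_pte &= ~(PTE_EXT_APX | PTE_EXT_AP0);
#endif
	}

Without CONFIG_CPU_USE_DOMAINS the APX/AP0 bits are left alone, so read-only user pages are also read-only for the kernel, matching the two new rows in the permission table above.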
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 53cbe2225153..cfc11afab1fb 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -148,8 +148,11 @@ ENTRY(cpu_v7_set_pte_ext)
tst r1, #L_PTE_USER
orrne r3, r3, #PTE_EXT_AP1
+#ifdef CONFIG_CPU_USE_DOMAINS
+ @ allow kernel read/write access to read-only user pages
tstne r3, #PTE_EXT_APX
bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+#endif
tst r1, #L_PTE_EXEC
orreq r3, r3, #PTE_EXT_XN
@@ -273,8 +276,6 @@ __v7_setup:
ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
- mov r10, #0x1f @ domains 0, 1 = manager
- mcr p15, 0, r10, c3, c0, 0 @ load domain access register
/*
* Memory region attributes with SCTLR.TRE=1
*