author	Linus Torvalds	2012-09-13 09:05:22 +0800
committer	Linus Torvalds	2012-09-13 09:05:22 +0800
commit	8507876aaada8a2cf68ebccdf1d056465dd1fc11 (patch)
tree	ae8a028afcf5131dcf99d2fb31564524f05507e4 /arch
parent	22b4e63ebe062e2e3d4a3a2b468e47ca9575d598 (diff)
parent	beafa0de3d3e0d0ece7638cded879815f359f1cb (diff)
Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM fixes from Russell King:
 "It's been a while... so there's a little more here than normal.
  Mostly updates from Will for the breakpoint stuff, and plugging a few
  holes in the user access functions which crept in when domain support
  was disabled for ARMv7 CPUs."

* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
  ARM: 7529/1: delay: set loops_per_jiffy when moving to timer-based loop
  ARM: 7528/1: uaccess: annotate [__]{get,put}_user functions with might_fault()
  ARM: 7527/1: uaccess: explicitly check __user pointer when !CPU_USE_DOMAINS
  ARM: 7526/1: traps: send SIGILL if get_user fails on undef handling path
  ARM: 7521/1: Fix semihosting Kconfig text
  ARM: 7513/1: Make sure dtc is built before running it
  ARM: 7512/1: Fix XIP build due to PHYS_OFFSET definition moving
  ARM: 7499/1: mm: Fix vmalloc overlap check for !HIGHMEM
  ARM: 7503/1: mm: only flush both pmd entries for classic MMU
  ARM: 7502/1: contextidr: avoid using bfi instruction during notifier
  ARM: 7501/1: decompressor: reset ttbcr for VMSA ARMv7 cores
  ARM: 7497/1: hw_breakpoint: allow single-byte watchpoints on all addresses
  ARM: 7496/1: hw_breakpoint: don't rely on dfsr to show watchpoint access type
  ARM: Fix ioremap() of address zero
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/Kconfig.debug	6
-rw-r--r--	arch/arm/Makefile	4
-rw-r--r--	arch/arm/boot/compressed/head.S	4
-rw-r--r--	arch/arm/include/asm/assembler.h	8
-rw-r--r--	arch/arm/include/asm/memory.h	3
-rw-r--r--	arch/arm/include/asm/tlb.h	4
-rw-r--r--	arch/arm/include/asm/uaccess.h	58
-rw-r--r--	arch/arm/kernel/hw_breakpoint.c	62
-rw-r--r--	arch/arm/kernel/traps.c	11
-rw-r--r--	arch/arm/lib/delay.c	1
-rw-r--r--	arch/arm/lib/getuser.S	23
-rw-r--r--	arch/arm/lib/putuser.S	6
-rw-r--r--	arch/arm/mm/context.c	7
-rw-r--r--	arch/arm/mm/mm.h	3
-rw-r--r--	arch/arm/mm/mmu.c	8
15 files changed, 151 insertions, 57 deletions
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index f15f82bf3a50..e968a52e4881 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -356,15 +356,15 @@ choice
is nothing connected to read from the DCC.
config DEBUG_SEMIHOSTING
- bool "Kernel low-level debug output via semihosting I"
+ bool "Kernel low-level debug output via semihosting I/O"
help
Semihosting enables code running on an ARM target to use
the I/O facilities on a host debugger/emulator through a
- simple SVC calls. The host debugger or emulator must have
+ simple SVC call. The host debugger or emulator must have
semihosting enabled for the special svc call to be trapped
otherwise the kernel will crash.
- This is known to work with OpenOCD, as wellas
+ This is known to work with OpenOCD, as well as
ARM's Fast Models, or any other controlling environment
that implements semihosting.
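For context, a minimal sketch (not from this patch; the helper name and constants are illustrative) of how a semihosted write reaches the host debugger: the operation number goes in r0, its argument in r1, and an ARM-state SVC with immediate 0x123456 is what the debugger must be configured to trap.

/* Hedged sketch: semihosting SYS_WRITE0 (op 0x04) prints a
 * NUL-terminated string via the host. Assumes an ARM-state target
 * with semihosting enabled in the debugger/emulator; otherwise the
 * SVC is delivered to the kernel and it crashes, as the help text
 * above warns.
 */
static void semihost_write0(const char *str)
{
	register unsigned long op asm("r0") = 0x04;	/* SYS_WRITE0 */
	register const char *arg asm("r1") = str;

	asm volatile("svc 0x123456"	/* trapped by the host */
		     : "+r" (op)
		     : "r" (arg)
		     : "memory", "cc");
}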
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 30eae87ead6d..a051dfbdd7db 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -284,10 +284,10 @@ zImage Image xipImage bootpImage uImage: vmlinux
zinstall uinstall install: vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
-%.dtb:
+%.dtb: scripts
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
-dtbs:
+dtbs: scripts
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
# We use MRPROPER_FILES and CLEAN_FILES now
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index b8c64b80bafc..81769c1341fa 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -659,10 +659,14 @@ __armv7_mmu_cache_on:
#ifdef CONFIG_CPU_ENDIAN_BE8
orr r0, r0, #1 << 25 @ big-endian page tables
#endif
+ mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg
orrne r0, r0, #1 @ MMU enabled
movne r1, #0xfffffffd @ domain 0 = client
+ bic r6, r6, #1 << 31 @ 32-bit translation system
+ bic r6, r6, #3 << 0 @ use only ttbr0
mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
mcrne p15, 0, r1, c3, c0, 0 @ load domain access control
+ mcrne p15, 0, r6, c2, c0, 2 @ load ttb control
#endif
mcr p15, 0, r0, c7, c5, 4 @ ISB
mcr p15, 0, r0, c1, c0, 0 @ load control register
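A rough C model of the two TTBCR fields being cleared above (assuming the ARMv7 VMSA register layout; the macro and function names are mine, not from the patch):

/* TTBCR.EAE (bit 31, on LPAE-capable cores) selects long-descriptor
 * translation; a non-zero TTBCR.N hands part of the address space to
 * TTBR1. Clearing both keeps the decompressor on plain 32-bit
 * short-descriptor tables rooted at TTBR0, whatever state the boot
 * environment left the register in.
 */
#define TTBCR_EAE	(1UL << 31)
#define TTBCR_N		(3UL << 0)

static unsigned long ttbcr_for_decompressor(unsigned long ttbcr)
{
	return ttbcr & ~(TTBCR_EAE | TTBCR_N);
}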
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 03fb93621d0d..5c8b3bf4d825 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -320,4 +320,12 @@
.size \name , . - \name
.endm
+ .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+ adds \tmp, \addr, #\size - 1
+ sbcccs \tmp, \tmp, \limit
+ bcs \bad
+#endif
+ .endm
+
#endif /* __ASM_ASSEMBLER_H__ */
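A hedged C equivalent of what check_uaccess computes (the helper name is illustrative): the access passes only if addr + size - 1 neither wraps around zero nor exceeds the limit, which the uaccess.h callers below load with addr_limit - 1.

static inline int uaccess_ok(unsigned long addr, unsigned long size,
			     unsigned long limit)
{
	unsigned long last = addr + size - 1;	/* adds sets C on wrap */

	return last >= addr && last <= limit;	/* sbcccs + bcs path */
}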
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index e965f1b560f1..5f6ddcc56452 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -187,6 +187,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
#endif
#endif
+#endif /* __ASSEMBLY__ */
#ifndef PHYS_OFFSET
#ifdef PLAT_PHYS_OFFSET
@@ -196,6 +197,8 @@ static inline unsigned long __phys_to_virt(unsigned long x)
#endif
#endif
+#ifndef __ASSEMBLY__
+
/*
* PFNs are used to describe any physical page; this means
* PFN 0 == physical address 0.
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 314d4664eae7..99a19512ee26 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -199,6 +199,9 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
{
pgtable_page_dtor(pte);
+#ifdef CONFIG_ARM_LPAE
+ tlb_add_flush(tlb, addr);
+#else
/*
* With the classic ARM MMU, a pte page has two corresponding pmd
* entries, each covering 1MB.
@@ -206,6 +209,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
addr &= PMD_MASK;
tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
tlb_add_flush(tlb, addr + SZ_1M);
+#endif
tlb_remove_page(tlb, pte);
}
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 479a6352e0b5..77bd79f2ffdb 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -101,28 +101,39 @@ extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
-#define __get_user_x(__r2,__p,__e,__s,__i...) \
+#define __GUP_CLOBBER_1 "lr", "cc"
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define __GUP_CLOBBER_2 "ip", "lr", "cc"
+#else
+#define __GUP_CLOBBER_2 "lr", "cc"
+#endif
+#define __GUP_CLOBBER_4 "lr", "cc"
+
+#define __get_user_x(__r2,__p,__e,__l,__s) \
__asm__ __volatile__ ( \
__asmeq("%0", "r0") __asmeq("%1", "r2") \
+ __asmeq("%3", "r1") \
"bl __get_user_" #__s \
: "=&r" (__e), "=r" (__r2) \
- : "0" (__p) \
- : __i, "cc")
+ : "0" (__p), "r" (__l) \
+ : __GUP_CLOBBER_##__s)
-#define get_user(x,p) \
+#define __get_user_check(x,p) \
({ \
+ unsigned long __limit = current_thread_info()->addr_limit - 1; \
register const typeof(*(p)) __user *__p asm("r0") = (p);\
register unsigned long __r2 asm("r2"); \
+ register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
case 1: \
- __get_user_x(__r2, __p, __e, 1, "lr"); \
- break; \
+ __get_user_x(__r2, __p, __e, __l, 1); \
+ break; \
case 2: \
- __get_user_x(__r2, __p, __e, 2, "r3", "lr"); \
+ __get_user_x(__r2, __p, __e, __l, 2); \
break; \
case 4: \
- __get_user_x(__r2, __p, __e, 4, "lr"); \
+ __get_user_x(__r2, __p, __e, __l, 4); \
break; \
default: __e = __get_user_bad(); break; \
} \
@@ -130,42 +141,57 @@ extern int __get_user_4(void *);
__e; \
})
+#define get_user(x,p) \
+ ({ \
+ might_fault(); \
+ __get_user_check(x,p); \
+ })
+
extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);
-#define __put_user_x(__r2,__p,__e,__s) \
+#define __put_user_x(__r2,__p,__e,__l,__s) \
__asm__ __volatile__ ( \
__asmeq("%0", "r0") __asmeq("%2", "r2") \
+ __asmeq("%3", "r1") \
"bl __put_user_" #__s \
: "=&r" (__e) \
- : "0" (__p), "r" (__r2) \
+ : "0" (__p), "r" (__r2), "r" (__l) \
: "ip", "lr", "cc")
-#define put_user(x,p) \
+#define __put_user_check(x,p) \
({ \
+ unsigned long __limit = current_thread_info()->addr_limit - 1; \
register const typeof(*(p)) __r2 asm("r2") = (x); \
register const typeof(*(p)) __user *__p asm("r0") = (p);\
+ register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
case 1: \
- __put_user_x(__r2, __p, __e, 1); \
+ __put_user_x(__r2, __p, __e, __l, 1); \
break; \
case 2: \
- __put_user_x(__r2, __p, __e, 2); \
+ __put_user_x(__r2, __p, __e, __l, 2); \
break; \
case 4: \
- __put_user_x(__r2, __p, __e, 4); \
+ __put_user_x(__r2, __p, __e, __l, 4); \
break; \
case 8: \
- __put_user_x(__r2, __p, __e, 8); \
+ __put_user_x(__r2, __p, __e, __l, 8); \
break; \
default: __e = __put_user_bad(); break; \
} \
__e; \
})
+#define put_user(x,p) \
+ ({ \
+ might_fault(); \
+ __put_user_check(x,p); \
+ })
+
#else /* CONFIG_MMU */
/*
@@ -219,6 +245,7 @@ do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
unsigned long __gu_val; \
__chk_user_ptr(ptr); \
+ might_fault(); \
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \
case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \
@@ -300,6 +327,7 @@ do { \
unsigned long __pu_addr = (unsigned long)(ptr); \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
+ might_fault(); \
switch (sizeof(*(ptr))) { \
case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \
case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \
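A short usage sketch (hypothetical helper, standard kernel API): with might_fault() added, both accessors now assert that they may sleep, so calls from atomic context trip the debug check instead of failing silently.

/* Copy one word between user pointers. Both accessors may fault and
 * sleep, hence must not run with preemption or interrupts disabled
 * under CONFIG_DEBUG_ATOMIC_SLEEP.
 */
static int copy_user_word(u32 __user *dst, u32 __user *src)
{
	u32 val;

	if (get_user(val, src))
		return -EFAULT;
	return put_user(val, dst);
}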
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index ba386bd94107..281bf3301241 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -159,6 +159,12 @@ static int debug_arch_supported(void)
arch >= ARM_DEBUG_ARCH_V7_1;
}
+/* Can we determine the watchpoint access type from the fsr? */
+static int debug_exception_updates_fsr(void)
+{
+ return 0;
+}
+
/* Determine number of WRP registers available. */
static int get_num_wrp_resources(void)
{
@@ -604,13 +610,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
/* Aligned */
break;
case 1:
- /* Allow single byte watchpoint. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
- break;
case 2:
/* Allow halfword watchpoints and breakpoints. */
if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
+ case 3:
+ /* Allow single byte watchpoint. */
+ if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ break;
default:
ret = -EINVAL;
goto out;
@@ -619,18 +626,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
info->address &= ~alignment_mask;
info->ctrl.len <<= offset;
- /*
- * Currently we rely on an overflow handler to take
- * care of single-stepping the breakpoint when it fires.
- * In the case of userspace breakpoints on a core with V7 debug,
- * we can use the mismatch feature as a poor-man's hardware
- * single-step, but this only works for per-task breakpoints.
- */
- if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
- !core_has_mismatch_brps() || !bp->hw.bp_target)) {
- pr_warning("overflow handler required but none found\n");
- ret = -EINVAL;
+ if (!bp->overflow_handler) {
+ /*
+ * Mismatch breakpoints are required for single-stepping
+ * breakpoints.
+ */
+ if (!core_has_mismatch_brps())
+ return -EINVAL;
+
+ /* We don't allow mismatch breakpoints in kernel space. */
+ if (arch_check_bp_in_kernelspace(bp))
+ return -EPERM;
+
+ /*
+ * Per-cpu breakpoints are not supported by our stepping
+ * mechanism.
+ */
+ if (!bp->hw.bp_target)
+ return -EINVAL;
+
+ /*
+ * We only support specific access types if the fsr
+ * reports them.
+ */
+ if (!debug_exception_updates_fsr() &&
+ (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
+ info->ctrl.type == ARM_BREAKPOINT_STORE))
+ return -EINVAL;
}
+
out:
return ret;
}
@@ -706,10 +730,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
goto unlock;
/* Check that the access type matches. */
- access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
- HW_BREAKPOINT_R;
- if (!(access & hw_breakpoint_type(wp)))
- goto unlock;
+ if (debug_exception_updates_fsr()) {
+ access = (fsr & ARM_FSR_ACCESS_MASK) ?
+ HW_BREAKPOINT_W : HW_BREAKPOINT_R;
+ if (!(access & hw_breakpoint_type(wp)))
+ goto unlock;
+ }
/* We have a winner. */
info->trigger = addr;
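To see what the reordered fall-through in arch_validate_hwbkpt_settings buys, here is a standalone mirror of the alignment switch (simplified: lengths are byte counts rather than ARM_BREAKPOINT_LEN_* encodings, and the name is mine): byte offsets 1 and 2 accept halfword or single-byte watchpoints, while offset 3, previously rejected, now accepts single-byte ones.

static int wp_len_ok(unsigned int offset, unsigned int len)
{
	switch (offset) {
	case 0:
		return 1;		/* aligned: any length */
	case 1:
	case 2:
		if (len == 2)		/* halfword */
			return 1;
		/* fall through */
	case 3:
		return len == 1;	/* single byte, any address */
	default:
		return 0;
	}
}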
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index f7945218b8c6..b0179b89a04c 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -420,20 +420,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
#endif
instr = *(u32 *) pc;
} else if (thumb_mode(regs)) {
- get_user(instr, (u16 __user *)pc);
+ if (get_user(instr, (u16 __user *)pc))
+ goto die_sig;
if (is_wide_instruction(instr)) {
unsigned int instr2;
- get_user(instr2, (u16 __user *)pc+1);
+ if (get_user(instr2, (u16 __user *)pc+1))
+ goto die_sig;
instr <<= 16;
instr |= instr2;
}
- } else {
- get_user(instr, (u32 __user *)pc);
+ } else if (get_user(instr, (u32 __user *)pc)) {
+ goto die_sig;
}
if (call_undef_hook(regs, instr) == 0)
return;
+die_sig:
#ifdef CONFIG_DEBUG_USER
if (user_debug & UDBG_UNDEFINED) {
printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index d6dacc69254e..395d5fbb8fa2 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -59,6 +59,7 @@ void __init init_current_timer_delay(unsigned long freq)
{
pr_info("Switching to timer-based delay loop\n");
lpj_fine = freq / HZ;
+ loops_per_jiffy = lpj_fine;
arm_delay_ops.delay = __timer_delay;
arm_delay_ops.const_udelay = __timer_const_udelay;
arm_delay_ops.udelay = __timer_udelay;
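A worked example of the scaling this one-liner keeps honest (numbers illustrative, not from the patch): with a 1 MHz timer and HZ = 100, lpj_fine = 1000000 / 100 = 10000 ticks per jiffy. udelay() derives its count from loops_per_jiffy, so once delays count timer ticks rather than CPU loops, the two must agree.

/* Hedged sketch: leaving loops_per_jiffy at the old CPU-loop
 * calibration would make timer-based udelay() scale wrongly.
 */
static void timer_delay_calibrate(unsigned long freq, unsigned long hz)
{
	unsigned long lpj = freq / hz;	/* e.g. 1000000 / 100 = 10000 */

	loops_per_jiffy = lpj;		/* what this patch adds */
}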
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 11093a7c3e32..9b06bb41fca6 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -16,8 +16,9 @@
* __get_user_X
*
* Inputs: r0 contains the address
+ * r1 contains the address limit, which must be preserved
* Outputs: r0 is the error code
- * r2, r3 contains the zero-extended value
+ * r2 contains the zero-extended value
* lr corrupted
*
* No other registers must be altered. (see <asm/uaccess.h>
@@ -27,33 +28,39 @@
* Note also that it is intended that __get_user_bad is not global.
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
ENTRY(__get_user_1)
+ check_uaccess r0, 1, r1, r2, __get_user_bad
1: TUSER(ldrb) r2, [r0]
mov r0, #0
mov pc, lr
ENDPROC(__get_user_1)
ENTRY(__get_user_2)
-#ifdef CONFIG_THUMB2_KERNEL
-2: TUSER(ldrb) r2, [r0]
-3: TUSER(ldrb) r3, [r0, #1]
+ check_uaccess r0, 2, r1, r2, __get_user_bad
+#ifdef CONFIG_CPU_USE_DOMAINS
+rb .req ip
+2: ldrbt r2, [r0], #1
+3: ldrbt rb, [r0], #0
#else
-2: TUSER(ldrb) r2, [r0], #1
-3: TUSER(ldrb) r3, [r0]
+rb .req r0
+2: ldrb r2, [r0]
+3: ldrb rb, [r0, #1]
#endif
#ifndef __ARMEB__
- orr r2, r2, r3, lsl #8
+ orr r2, r2, rb, lsl #8
#else
- orr r2, r3, r2, lsl #8
+ orr r2, rb, r2, lsl #8
#endif
mov r0, #0
mov pc, lr
ENDPROC(__get_user_2)
ENTRY(__get_user_4)
+ check_uaccess r0, 4, r1, r2, __get_user_bad
4: TUSER(ldr) r2, [r0]
mov r0, #0
mov pc, lr
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 7db25990c589..3d73dcb959b0 100644
--- a/arch/arm/lib/putuser.S
+++ b/arch/arm/lib/putuser.S
@@ -16,6 +16,7 @@
* __put_user_X
*
* Inputs: r0 contains the address
+ * r1 contains the address limit, which must be preserved
* r2, r3 contains the value
* Outputs: r0 is the error code
* lr corrupted
@@ -27,16 +28,19 @@
* Note also that it is intended that __put_user_bad is not global.
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
ENTRY(__put_user_1)
+ check_uaccess r0, 1, r1, ip, __put_user_bad
1: TUSER(strb) r2, [r0]
mov r0, #0
mov pc, lr
ENDPROC(__put_user_1)
ENTRY(__put_user_2)
+ check_uaccess r0, 2, r1, ip, __put_user_bad
mov ip, r2, lsr #8
#ifdef CONFIG_THUMB2_KERNEL
#ifndef __ARMEB__
@@ -60,12 +64,14 @@ ENTRY(__put_user_2)
ENDPROC(__put_user_2)
ENTRY(__put_user_4)
+ check_uaccess r0, 4, r1, ip, __put_user_bad
4: TUSER(str) r2, [r0]
mov r0, #0
mov pc, lr
ENDPROC(__put_user_4)
ENTRY(__put_user_8)
+ check_uaccess r0, 8, r1, ip, __put_user_bad
#ifdef CONFIG_THUMB2_KERNEL
5: TUSER(str) r2, [r0]
6: TUSER(str) r3, [r0, #4]
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 119bc52ab93e..4e07eec1270d 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -63,10 +63,11 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
pid = task_pid_nr(thread->task) << ASID_BITS;
asm volatile(
" mrc p15, 0, %0, c13, c0, 1\n"
- " bfi %1, %0, #0, %2\n"
- " mcr p15, 0, %1, c13, c0, 1\n"
+ " and %0, %0, %2\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 0, %0, c13, c0, 1\n"
: "=r" (contextidr), "+r" (pid)
- : "I" (ASID_BITS));
+ : "I" (~ASID_MASK));
isb();
return NOTIFY_OK;
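What the new three-instruction sequence computes, as hedged C (the helper name is mine; ASID_MASK is the kernel's): keep the current hardware ASID in the low bits of CONTEXTIDR and merge in the shifted PID, using and/orr rather than bfi, which also stops the asm from clobbering its pid operand.

static u32 merge_contextidr(u32 contextidr, u32 pid)
{
	/* pid was already shifted left by ASID_BITS by the caller */
	return (contextidr & ~ASID_MASK) | pid;
}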
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 6776160618ef..a8ee92da3544 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -55,6 +55,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
/* permanent static mappings from iotable_init() */
#define VM_ARM_STATIC_MAPPING 0x40000000
+/* empty mapping */
+#define VM_ARM_EMPTY_MAPPING 0x20000000
+
/* mapping type (attributes) for permanent static mappings */
#define VM_ARM_MTYPE(mt) ((mt) << 20)
#define VM_ARM_MTYPE_MASK (0x1f << 20)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4c2d0451e84a..c2fa21d0103e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -807,7 +807,7 @@ static void __init pmd_empty_section_gap(unsigned long addr)
vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
vm->addr = (void *)addr;
vm->size = SECTION_SIZE;
- vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+ vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
vm->caller = pmd_empty_section_gap;
vm_area_add_early(vm);
}
@@ -820,7 +820,7 @@ static void __init fill_pmd_gaps(void)
/* we're still single threaded hence no lock needed here */
for (vm = vmlist; vm; vm = vm->next) {
- if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+ if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
continue;
addr = (unsigned long)vm->addr;
if (addr < next)
@@ -961,8 +961,8 @@ void __init sanity_check_meminfo(void)
* Check whether this memory bank would partially overlap
* the vmalloc area.
*/
- if (__va(bank->start + bank->size) > vmalloc_min ||
- __va(bank->start + bank->size) < __va(bank->start)) {
+ if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
+ __va(bank->start + bank->size - 1) <= __va(bank->start)) {
unsigned long newsize = vmalloc_min - __va(bank->start);
printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
"to -%.8llx (vmalloc region overlap).\n",