From 638591cd7b601d403ed703d55062b48c32ea8cfb Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Thu, 10 Jan 2013 12:20:15 +0100 Subject: ARM: 7626/1: arm/crypto: Make asm SHA-1 and AES code Thumb-2 compatible This patch fixes aes-armv4.S and sha1-armv4-large.S to work natively in Thumb. This allows ARM/Thumb interworking workarounds to be removed. I also take the opportunity to convert some explicit assembler directives for exported functions to the standard ENTRY()/ENDPROC(). For the code itself: * In sha1_block_data_order, use of TEQ with sp is deprecated in ARMv7 and not supported in Thumb. For the branches back to .L_00_15 and .L_40_59, the TEQ is converted to a CMP, under the assumption that clobbering the C flag here will not cause incorrect behaviour. For the first branch back to .L_20_39_or_60_79 the C flag is important, so sp is moved temporarily into another register so that TEQ can be used for the comparison. * In the AES code, most forms of register-indexed addressing with shifts and rotates are not permitted for loads and stores in Thumb, so the address calculation is done using a separate instruction for the Thumb case. The resulting code is unlikely to be optimally scheduled, but it should not have a large impact given the overall size of the code. I haven't run any benchmarks. Signed-off-by: Dave Martin Tested-by: David McCullough (ARM only) Acked-by: David McCullough Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/crypto/aes-armv4.S | 64 ++++++++++++-------------------------- arch/arm/crypto/sha1-armv4-large.S | 24 ++++++-------- 2 files changed, 29 insertions(+), 59 deletions(-) (limited to 'arch') diff --git a/arch/arm/crypto/aes-armv4.S b/arch/arm/crypto/aes-armv4.S index e59b1d505d6c..19d6cd6f29f9 100644 --- a/arch/arm/crypto/aes-armv4.S +++ b/arch/arm/crypto/aes-armv4.S @@ -34,8 +34,9 @@ @ A little glue here to select the correct code below for the ARM CPU @ that is being targetted. +#include + .text -.code 32 .type AES_Te,%object .align 5 @@ -145,10 +146,8 @@ AES_Te: @ void AES_encrypt(const unsigned char *in, unsigned char *out, @ const AES_KEY *key) { -.global AES_encrypt -.type AES_encrypt,%function .align 5 -AES_encrypt: +ENTRY(AES_encrypt) sub r3,pc,#8 @ AES_encrypt stmdb sp!,{r1,r4-r12,lr} mov r12,r0 @ inp @@ -239,15 +238,8 @@ AES_encrypt: strb r6,[r12,#14] strb r3,[r12,#15] #endif -#if __ARM_ARCH__>=5 ldmia sp!,{r4-r12,pc} -#else - ldmia sp!,{r4-r12,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet - .word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size AES_encrypt,.-AES_encrypt +ENDPROC(AES_encrypt) .type _armv4_AES_encrypt,%function .align 2 @@ -386,10 +378,8 @@ _armv4_AES_encrypt: ldr pc,[sp],#4 @ pop and return .size _armv4_AES_encrypt,.-_armv4_AES_encrypt -.global private_AES_set_encrypt_key -.type private_AES_set_encrypt_key,%function .align 5 -private_AES_set_encrypt_key: +ENTRY(private_AES_set_encrypt_key) _armv4_AES_set_encrypt_key: sub r3,pc,#8 @ AES_set_encrypt_key teq r0,#0 @@ -658,15 +648,11 @@ _armv4_AES_set_encrypt_key: .Ldone: mov r0,#0 ldmia sp!,{r4-r12,lr} -.Labrt: tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet - .word 0xe12fff1e @ interoperable with Thumb ISA:-) -.size private_AES_set_encrypt_key,.-private_AES_set_encrypt_key +.Labrt: mov pc,lr +ENDPROC(private_AES_set_encrypt_key) -.global private_AES_set_decrypt_key -.type private_AES_set_decrypt_key,%function .align 5 -private_AES_set_decrypt_key: +ENTRY(private_AES_set_decrypt_key) str lr,[sp,#-4]! 
@ push lr #if 0 @ kernel does both of these in setkey so optimise this bit out by @@ -748,15 +734,8 @@ private_AES_set_decrypt_key: bne .Lmix mov r0,#0 -#if __ARM_ARCH__>=5 ldmia sp!,{r4-r12,pc} -#else - ldmia sp!,{r4-r12,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet - .word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size private_AES_set_decrypt_key,.-private_AES_set_decrypt_key +ENDPROC(private_AES_set_decrypt_key) .type AES_Td,%object .align 5 @@ -862,10 +841,8 @@ AES_Td: @ void AES_decrypt(const unsigned char *in, unsigned char *out, @ const AES_KEY *key) { -.global AES_decrypt -.type AES_decrypt,%function .align 5 -AES_decrypt: +ENTRY(AES_decrypt) sub r3,pc,#8 @ AES_decrypt stmdb sp!,{r1,r4-r12,lr} mov r12,r0 @ inp @@ -956,15 +933,8 @@ AES_decrypt: strb r6,[r12,#14] strb r3,[r12,#15] #endif -#if __ARM_ARCH__>=5 ldmia sp!,{r4-r12,pc} -#else - ldmia sp!,{r4-r12,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet - .word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size AES_decrypt,.-AES_decrypt +ENDPROC(AES_decrypt) .type _armv4_AES_decrypt,%function .align 2 @@ -1064,7 +1034,9 @@ _armv4_AES_decrypt: and r9,lr,r1,lsr#8 ldrb r7,[r10,r7] @ Td4[s1>>0] - ldrb r1,[r10,r1,lsr#24] @ Td4[s1>>24] + ARM( ldrb r1,[r10,r1,lsr#24] ) @ Td4[s1>>24] + THUMB( add r1,r10,r1,lsr#24 ) @ Td4[s1>>24] + THUMB( ldrb r1,[r1] ) ldrb r8,[r10,r8] @ Td4[s1>>16] eor r0,r7,r0,lsl#24 ldrb r9,[r10,r9] @ Td4[s1>>8] @@ -1077,7 +1049,9 @@ _armv4_AES_decrypt: ldrb r8,[r10,r8] @ Td4[s2>>0] and r9,lr,r2,lsr#16 - ldrb r2,[r10,r2,lsr#24] @ Td4[s2>>24] + ARM( ldrb r2,[r10,r2,lsr#24] ) @ Td4[s2>>24] + THUMB( add r2,r10,r2,lsr#24 ) @ Td4[s2>>24] + THUMB( ldrb r2,[r2] ) eor r0,r0,r7,lsl#8 ldrb r9,[r10,r9] @ Td4[s2>>16] eor r1,r8,r1,lsl#16 @@ -1090,7 +1064,9 @@ _armv4_AES_decrypt: and r9,lr,r3 @ i2 ldrb r9,[r10,r9] @ Td4[s3>>0] - ldrb r3,[r10,r3,lsr#24] @ Td4[s3>>24] + ARM( ldrb r3,[r10,r3,lsr#24] ) @ Td4[s3>>24] + THUMB( add r3,r10,r3,lsr#24 ) @ Td4[s3>>24] + THUMB( ldrb r3,[r3] ) eor r0,r0,r7,lsl#16 ldr r7,[r11,#0] eor r1,r1,r8,lsl#8 diff --git a/arch/arm/crypto/sha1-armv4-large.S b/arch/arm/crypto/sha1-armv4-large.S index 7050ab133b9d..92c6eed7aac9 100644 --- a/arch/arm/crypto/sha1-armv4-large.S +++ b/arch/arm/crypto/sha1-armv4-large.S @@ -51,13 +51,12 @@ @ Profiler-assisted and platform-specific optimization resulted in 10% @ improvement on Cortex A8 core and 12.2 cycles per byte. -.text +#include -.global sha1_block_data_order -.type sha1_block_data_order,%function +.text .align 2 -sha1_block_data_order: +ENTRY(sha1_block_data_order) stmdb sp!,{r4-r12,lr} add r2,r1,r2,lsl#6 @ r2 to point at the end of r1 ldmia r0,{r3,r4,r5,r6,r7} @@ -194,7 +193,7 @@ sha1_block_data_order: eor r10,r10,r7,ror#2 @ F_00_19(B,C,D) str r9,[r14,#-4]! 
add r3,r3,r10 @ E+=F_00_19(B,C,D) - teq r14,sp + cmp r14,sp bne .L_00_15 @ [((11+4)*5+2)*3] #if __ARM_ARCH__<7 ldrb r10,[r1,#2] @@ -374,7 +373,9 @@ sha1_block_data_order: @ F_xx_xx add r3,r3,r9 @ E+=X[i] add r3,r3,r10 @ E+=F_20_39(B,C,D) - teq r14,sp @ preserve carry + ARM( teq r14,sp ) @ preserve carry + THUMB( mov r11,sp ) + THUMB( teq r14,r11 ) @ preserve carry bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4] bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes @@ -466,7 +467,7 @@ sha1_block_data_order: add r3,r3,r9 @ E+=X[i] add r3,r3,r10 @ E+=F_40_59(B,C,D) add r3,r3,r11,ror#2 - teq r14,sp + cmp r14,sp bne .L_40_59 @ [+((12+5)*5+2)*4] ldr r8,.LK_60_79 @@ -485,19 +486,12 @@ sha1_block_data_order: teq r1,r2 bne .Lloop @ [+18], total 1307 -#if __ARM_ARCH__>=5 ldmia sp!,{r4-r12,pc} -#else - ldmia sp!,{r4-r12,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet - .word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif .align 2 .LK_00_19: .word 0x5a827999 .LK_20_39: .word 0x6ed9eba1 .LK_40_59: .word 0x8f1bbcdc .LK_60_79: .word 0xca62c1d6 -.size sha1_block_data_order,.-sha1_block_data_order +ENDPROC(sha1_block_data_order) .asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by " .align 2 -- cgit v1.2.3 From 0a301110b7bd33ef10164c184fe2c1d8c4c3ab6b Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 14 Jan 2013 15:54:28 +0000 Subject: ARM: smp: remove wrapper functions Remove some silly wrapper functions which aren't really required: platform_smp_prepare_cpus platform_secondary_init platform_cpu_die This simplifies the code. Signed-off-by: Russell King --- arch/arm/kernel/smp.c | 31 ++++++++----------------------- 1 file changed, 8 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 84f4cbf652e5..365c8d92e2eb 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -125,18 +125,6 @@ void __init smp_init_cpus(void) smp_ops.smp_init_cpus(); } -static void __init platform_smp_prepare_cpus(unsigned int max_cpus) -{ - if (smp_ops.smp_prepare_cpus) - smp_ops.smp_prepare_cpus(max_cpus); -} - -static void __cpuinit platform_secondary_init(unsigned int cpu) -{ - if (smp_ops.smp_secondary_init) - smp_ops.smp_secondary_init(cpu); -} - int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) { if (smp_ops.smp_boot_secondary) @@ -154,12 +142,6 @@ static int platform_cpu_kill(unsigned int cpu) return 1; } -static void platform_cpu_die(unsigned int cpu) -{ - if (smp_ops.cpu_die) - smp_ops.cpu_die(cpu); -} - static int platform_cpu_disable(unsigned int cpu) { if (smp_ops.cpu_disable) @@ -257,7 +239,8 @@ void __ref cpu_die(void) * actual CPU shutdown procedure is at least platform (if not * CPU) specific. */ - platform_cpu_die(cpu); + if (smp_ops.cpu_die) + smp_ops.cpu_die(cpu); /* * Do not return to the idle loop - jump back to the secondary @@ -324,7 +307,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void) /* * Give the platform a chance to do its own initialisation. */ - platform_secondary_init(cpu); + if (smp_ops.smp_secondary_init) + smp_ops.smp_secondary_init(cpu); notify_cpu_starting(cpu); @@ -399,8 +383,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) /* * Initialise the present map, which describes the set of CPUs * actually populated at the present time. A platform should - * re-initialize the map in platform_smp_prepare_cpus() if - * present != possible (e.g. physical hotplug). + * re-initialize the map in the platforms smp_prepare_cpus() + * if present != possible (e.g. physical hotplug). 
*/ init_cpu_present(cpu_possible_mask); @@ -408,7 +392,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) * Initialise the SCU if there are more than one CPU * and let them know where to start. */ - platform_smp_prepare_cpus(max_cpus); + if (smp_ops.smp_prepare_cpus) + smp_ops.smp_prepare_cpus(max_cpus); } } -- cgit v1.2.3 From 20e260b6f4f717c100620122f626a2c06a4cfd72 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 24 Jan 2013 14:47:38 +0100 Subject: ARM: 7632/1: spinlock: avoid exclusive accesses on unlock() path When unlocking a spinlock, all we need to do is increment the owner field of the lock. Since only one CPU can be performing an unlock() operation for a given lock, this doesn't need to be exclusive. This patch simplifies arch_spin_unlock to use non-exclusive accesses when updating the owner field of the lock. Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/spinlock.h | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index b4ca707d0a69..6220e9fdf4c7 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -119,22 +119,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) static inline void arch_spin_unlock(arch_spinlock_t *lock) { - unsigned long tmp; - u32 slock; - smp_mb(); - - __asm__ __volatile__( -" mov %1, #1\n" -"1: ldrex %0, [%2]\n" -" uadd16 %0, %0, %1\n" -" strex %1, %0, [%2]\n" -" teq %1, #0\n" -" bne 1b" - : "=&r" (slock), "=&r" (tmp) - : "r" (&lock->slock) - : "cc"); - + lock->tickets.owner++; dsb_sev(); } -- cgit v1.2.3 From 4e79a62d84b9a3b84c609d73382415c71d911a4c Mon Sep 17 00:00:00 2001 From: Barry Song Date: Mon, 4 Feb 2013 07:46:57 +0100 Subject: ARM: 7639/1: cache-l2x0: add missed dummy outer_resume entry Commit 91c2ebb90b1890a (ARM: 7114/1: cache-l2x0: add resume entry for l2 in secure mode) added resume entry for l2 in secure mode, but it missed the dummy entry when CONFIG_CACHE_L2X0 is not set. (Commit text edited by rmk.) Signed-off-by: Barry Song Signed-off-by: Russell King --- arch/arm/include/asm/outercache.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h index 53426c66352a..12f71a190422 100644 --- a/arch/arm/include/asm/outercache.h +++ b/arch/arm/include/asm/outercache.h @@ -92,6 +92,7 @@ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end) static inline void outer_flush_all(void) { } static inline void outer_inv_all(void) { } static inline void outer_disable(void) { } +static inline void outer_resume(void) { } #endif -- cgit v1.2.3 From e3e92a7be6936dff1de80e66b0b683d54e9e02d8 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 28 Jan 2013 21:58:22 +0100 Subject: ARM: 7635/1: versatile: fix the PCI IRQ regression The PCI IRQs were regressing due to two things: - The PCI glue layer was using an hard-coded IRQ 27 offset. This caused the immediate regression. - The SIC IRQ mask was inverted (i.e. a bit was indeed set to one for each valid IRQ on the SIC, but accidentally inverted in the init call). This has been around forever, but we have been saved by some other forgiving code that would reserve IRQ descriptors in this range, as the versatile is non-sparse. When the IRQs were bumped up 32 steps so as to avoid using IRQ zero and avoid touching the 16 legacy IRQs, things broke. 
Introduce an explicit valid mask for the IRQs that are active on the PIC/SIC, and pass that. Use the BIT() macro from to make sure we hit the right bits, readily defined in . Reported-by: Tetsuo Handa Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/mach-versatile/core.c | 15 ++++++++++++++- arch/arm/mach-versatile/pci.c | 11 ++++++----- 2 files changed, 20 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index 5d5929450366..a78827b70270 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -65,16 +66,28 @@ #define VA_VIC_BASE __io_address(VERSATILE_VIC_BASE) #define VA_SIC_BASE __io_address(VERSATILE_SIC_BASE) +/* These PIC IRQs are valid in each configuration */ +#define PIC_VALID_ALL BIT(SIC_INT_KMI0) | BIT(SIC_INT_KMI1) | \ + BIT(SIC_INT_SCI3) | BIT(SIC_INT_UART3) | \ + BIT(SIC_INT_CLCD) | BIT(SIC_INT_TOUCH) | \ + BIT(SIC_INT_KEYPAD) | BIT(SIC_INT_DoC) | \ + BIT(SIC_INT_USB) | BIT(SIC_INT_PCI0) | \ + BIT(SIC_INT_PCI1) | BIT(SIC_INT_PCI2) | \ + BIT(SIC_INT_PCI3) #if 1 #define IRQ_MMCI0A IRQ_VICSOURCE22 #define IRQ_AACI IRQ_VICSOURCE24 #define IRQ_ETH IRQ_VICSOURCE25 #define PIC_MASK 0xFFD00000 +#define PIC_VALID PIC_VALID_ALL #else #define IRQ_MMCI0A IRQ_SIC_MMCI0A #define IRQ_AACI IRQ_SIC_AACI #define IRQ_ETH IRQ_SIC_ETH #define PIC_MASK 0 +#define PIC_VALID PIC_VALID_ALL | BIT(SIC_INT_MMCI0A) | \ + BIT(SIC_INT_MMCI1A) | BIT(SIC_INT_AACI) | \ + BIT(SIC_INT_ETH) #endif /* Lookup table for finding a DT node that represents the vic instance */ @@ -102,7 +115,7 @@ void __init versatile_init_irq(void) VERSATILE_SIC_BASE); fpga_irq_init(VA_SIC_BASE, "SIC", IRQ_SIC_START, - IRQ_VICSOURCE31, ~PIC_MASK, np); + IRQ_VICSOURCE31, PIC_VALID, np); /* * Interrupts on secondary controller from 0 to 8 are routed to diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c index 2f84f4094f13..e92e5e0705bc 100644 --- a/arch/arm/mach-versatile/pci.c +++ b/arch/arm/mach-versatile/pci.c @@ -23,6 +23,7 @@ #include #include +#include #include #include @@ -327,12 +328,12 @@ static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) int irq; /* slot, pin, irq - * 24 1 27 - * 25 1 28 - * 26 1 29 - * 27 1 30 + * 24 1 IRQ_SIC_PCI0 + * 25 1 IRQ_SIC_PCI1 + * 26 1 IRQ_SIC_PCI2 + * 27 1 IRQ_SIC_PCI3 */ - irq = 27 + ((slot - 24 + pin - 1) & 3); + irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3); return irq; } -- cgit v1.2.3 From 5d1c20bce5bc1a7a94bca1324451532c28c89e36 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 31 Jan 2013 19:19:30 +0100 Subject: ARM: 7637/1: memory: use SZ_ constants for defining the virtual memory layout Parts of the virtual memory layout (mainly the modules area) are described using open-coded immediate values. Use the SZ_ definitions from linux/sizes.h instead to make the code clearer. 
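A minimal sanity check of this substitution, assuming the stock definitions in linux/sizes.h (illustrative only, not part of the patch itself):

    /* BUILD_BUG_ON lives in linux/bug.h; the constants below are the values
     * the open-coded immediates had before this change.
     */
    #include <linux/sizes.h>
    #include <linux/bug.h>

    static inline void memory_layout_sanity_check(void)
    {
            BUILD_BUG_ON(SZ_16M != 0x01000000);      /* old TASK_SIZE / MODULES_VADDR offset */
            BUILD_BUG_ON(SZ_8M != 0x00800000);       /* old Thumb-2 MODULES_VADDR offset */
            BUILD_BUG_ON((1UL << 26) != 0x04000000); /* old TASK_SIZE_26 value */
    }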
Acked-by: Nicolas Pitre Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 73cf03aa981e..924320f4f22a 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -36,23 +36,23 @@ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area */ #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) -#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000)) +#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M)) #define TASK_UNMAPPED_BASE (UL(CONFIG_PAGE_OFFSET) / 3) /* * The maximum size of a 26-bit user space task. */ -#define TASK_SIZE_26 UL(0x04000000) +#define TASK_SIZE_26 (UL(1) << 26) /* * The module space lives between the addresses given by TASK_SIZE * and PAGE_OFFSET - it must be within 32MB of the kernel text. */ #ifndef CONFIG_THUMB2_KERNEL -#define MODULES_VADDR (PAGE_OFFSET - 16*1024*1024) +#define MODULES_VADDR (PAGE_OFFSET - SZ_16M) #else /* smaller range for Thumb-2 symbols relocation (2^24)*/ -#define MODULES_VADDR (PAGE_OFFSET - 8*1024*1024) +#define MODULES_VADDR (PAGE_OFFSET - SZ_8M) #endif #if TASK_SIZE > MODULES_VADDR -- cgit v1.2.3 From 48dc369d21b41f6400d15cdaf9411e2e6fd62323 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Sat, 9 Feb 2013 06:28:04 +0100 Subject: ARM: 7644/1: vmregion: remove vmregion code entirely Now, there is no user for vmregion. So remove it. Acked-by: Nicolas Pitre Tested-by: Santosh Shilimkar Signed-off-by: Joonsoo Kim Signed-off-by: Russell King --- arch/arm/mm/Makefile | 2 +- arch/arm/mm/vmregion.c | 205 ------------------------------------------------- arch/arm/mm/vmregion.h | 31 -------- 3 files changed, 1 insertion(+), 237 deletions(-) delete mode 100644 arch/arm/mm/vmregion.c delete mode 100644 arch/arm/mm/vmregion.h (limited to 'arch') diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 8a9c4cb50a93..4e333fa2756f 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ iomap.o obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \ - mmap.o pgd.o mmu.o vmregion.o + mmap.o pgd.o mmu.o ifneq ($(CONFIG_MMU),y) obj-y += nommu.o diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c deleted file mode 100644 index a631016e1f8f..000000000000 --- a/arch/arm/mm/vmregion.c +++ /dev/null @@ -1,205 +0,0 @@ -#include -#include -#include -#include -#include -#include - -#include "vmregion.h" - -/* - * VM region handling support. - * - * This should become something generic, handling VM region allocations for - * vmalloc and similar (ioremap, module space, etc). - * - * I envisage vmalloc()'s supporting vm_struct becoming: - * - * struct vm_struct { - * struct vmregion region; - * unsigned long flags; - * struct page **pages; - * unsigned int nr_pages; - * unsigned long phys_addr; - * }; - * - * get_vm_area() would then call vmregion_alloc with an appropriate - * struct vmregion head (eg): - * - * struct vmregion vmalloc_head = { - * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list), - * .vm_start = VMALLOC_START, - * .vm_end = VMALLOC_END, - * }; - * - * However, vmalloc_head.vm_start is variable (typically, it is dependent on - * the amount of RAM found at boot time.) I would imagine that get_vm_area() - * would have to initialise this each time prior to calling vmregion_alloc(). 
- */ - -struct arm_vmregion * -arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, - size_t size, gfp_t gfp, const void *caller) -{ - unsigned long start = head->vm_start, addr = head->vm_end; - unsigned long flags; - struct arm_vmregion *c, *new; - - if (head->vm_end - head->vm_start < size) { - printk(KERN_WARNING "%s: allocation too big (requested %#x)\n", - __func__, size); - goto out; - } - - new = kmalloc(sizeof(struct arm_vmregion), gfp); - if (!new) - goto out; - - new->caller = caller; - - spin_lock_irqsave(&head->vm_lock, flags); - - addr = rounddown(addr - size, align); - list_for_each_entry_reverse(c, &head->vm_list, vm_list) { - if (addr >= c->vm_end) - goto found; - addr = rounddown(c->vm_start - size, align); - if (addr < start) - goto nospc; - } - - found: - /* - * Insert this entry after the one we found. - */ - list_add(&new->vm_list, &c->vm_list); - new->vm_start = addr; - new->vm_end = addr + size; - new->vm_active = 1; - - spin_unlock_irqrestore(&head->vm_lock, flags); - return new; - - nospc: - spin_unlock_irqrestore(&head->vm_lock, flags); - kfree(new); - out: - return NULL; -} - -static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr) -{ - struct arm_vmregion *c; - - list_for_each_entry(c, &head->vm_list, vm_list) { - if (c->vm_active && c->vm_start == addr) - goto out; - } - c = NULL; - out: - return c; -} - -struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr) -{ - struct arm_vmregion *c; - unsigned long flags; - - spin_lock_irqsave(&head->vm_lock, flags); - c = __arm_vmregion_find(head, addr); - spin_unlock_irqrestore(&head->vm_lock, flags); - return c; -} - -struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr) -{ - struct arm_vmregion *c; - unsigned long flags; - - spin_lock_irqsave(&head->vm_lock, flags); - c = __arm_vmregion_find(head, addr); - if (c) - c->vm_active = 0; - spin_unlock_irqrestore(&head->vm_lock, flags); - return c; -} - -void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c) -{ - unsigned long flags; - - spin_lock_irqsave(&head->vm_lock, flags); - list_del(&c->vm_list); - spin_unlock_irqrestore(&head->vm_lock, flags); - - kfree(c); -} - -#ifdef CONFIG_PROC_FS -static int arm_vmregion_show(struct seq_file *m, void *p) -{ - struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list); - - seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end, - c->vm_end - c->vm_start); - if (c->caller) - seq_printf(m, " %pS", (void *)c->caller); - seq_putc(m, '\n'); - return 0; -} - -static void *arm_vmregion_start(struct seq_file *m, loff_t *pos) -{ - struct arm_vmregion_head *h = m->private; - spin_lock_irq(&h->vm_lock); - return seq_list_start(&h->vm_list, *pos); -} - -static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos) -{ - struct arm_vmregion_head *h = m->private; - return seq_list_next(p, &h->vm_list, pos); -} - -static void arm_vmregion_stop(struct seq_file *m, void *p) -{ - struct arm_vmregion_head *h = m->private; - spin_unlock_irq(&h->vm_lock); -} - -static const struct seq_operations arm_vmregion_ops = { - .start = arm_vmregion_start, - .stop = arm_vmregion_stop, - .next = arm_vmregion_next, - .show = arm_vmregion_show, -}; - -static int arm_vmregion_open(struct inode *inode, struct file *file) -{ - struct arm_vmregion_head *h = PDE(inode)->data; - int ret = seq_open(file, &arm_vmregion_ops); - if (!ret) { - struct seq_file *m = file->private_data; - 
m->private = h; - } - return ret; -} - -static const struct file_operations arm_vmregion_fops = { - .open = arm_vmregion_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h) -{ - proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h); - return 0; -} -#else -int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h) -{ - return 0; -} -#endif diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h deleted file mode 100644 index 0f5a5f2a2c7b..000000000000 --- a/arch/arm/mm/vmregion.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef VMREGION_H -#define VMREGION_H - -#include -#include - -struct page; - -struct arm_vmregion_head { - spinlock_t vm_lock; - struct list_head vm_list; - unsigned long vm_start; - unsigned long vm_end; -}; - -struct arm_vmregion { - struct list_head vm_list; - unsigned long vm_start; - unsigned long vm_end; - int vm_active; - const void *caller; -}; - -struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *); -struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long); -struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long); -void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *); - -int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *); - -#endif -- cgit v1.2.3 From ed8fd2186a4e4f3b98434093b56f9b793d48443e Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Sat, 9 Feb 2013 06:28:05 +0100 Subject: ARM: 7645/1: ioremap: introduce an infrastructure for static mapped area In current implementation, we used ARM-specific flag, that is, VM_ARM_STATIC_MAPPING, for distinguishing ARM specific static mapped area. The purpose of static mapped area is to re-use static mapped area when entire physical address range of the ioremap request can be covered by this area. This implementation causes needless overhead for some cases. For example, assume that there is only one static mapped area and vmlist has 300 areas. Every time we call ioremap, we check 300 areas for deciding whether it is matched or not. Moreover, even if there is no static mapped area and vmlist has 300 areas, every time we call ioremap, we check 300 areas in now. If we construct a extra list for static mapped area, we can eliminate above mentioned overhead. With a extra list, if there is one static mapped area, we just check only one area and proceed next operation quickly. In fact, it is not a critical problem, because ioremap is not frequently used. But reducing overhead is better idea. Another reason for doing this work is for removing architecture dependency on vmalloc layer. I think that vmlist and vmlist_lock is internal data structure for vmalloc layer. Some codes for debugging and stat inevitably use vmlist and vmlist_lock. But it is preferable that they are used as least as possible in outside of vmalloc.c Now, I introduce an ARM-specific infrastructure for static mapped area. In the following patch, we will use this and resolve above mentioned problem. 
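A rough usage sketch for the new helpers (the function name, addresses and the MT_DEVICE memory type below are invented for illustration and assume asm/mach/map.h, linux/sizes.h and the local mm.h are in scope; the real user is iotable_init() in the follow-up patch):

    /* Hypothetical early-boot registration of one fixed mapping, so that a
     * later ioremap() of the same physical range can reuse it instead of
     * allocating a fresh virtual area.
     */
    static struct static_vm example_svm;

    static void __init example_add_static_io(void)
    {
            struct vm_struct *vm = &example_svm.vm;

            vm->addr      = (void *)0xf8000000;      /* made-up fixed virtual address */
            vm->phys_addr = 0x40000000;              /* made-up device physical address */
            vm->size      = SZ_1M;
            vm->flags     = VM_IOREMAP | VM_ARM_STATIC_MAPPING | VM_ARM_MTYPE(MT_DEVICE);
            vm->caller    = example_add_static_io;

            add_static_vm_early(&example_svm);       /* kept in ascending VA order on static_vmlist */
    }

    /* Later lookups then walk only the (short) static_vmlist, e.g.:
     *      svm = find_static_vm_paddr(paddr, size, MT_DEVICE);
     *      if (svm)
     *              addr = (unsigned long)svm->vm.addr + (paddr - svm->vm.phys_addr);
     */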
Reviewed-by: Nicolas Pitre Tested-by: Santosh Shilimkar Signed-off-by: Joonsoo Kim Signed-off-by: Russell King --- arch/arm/mm/ioremap.c | 64 +++++++++++++++++++++++++++++++++++++++++++++++++++ arch/arm/mm/mm.h | 12 ++++++++++ 2 files changed, 76 insertions(+) (limited to 'arch') diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 88fd86cf3d9a..904c15e86063 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -39,6 +39,70 @@ #include #include "mm.h" + +LIST_HEAD(static_vmlist); + +static struct static_vm *find_static_vm_paddr(phys_addr_t paddr, + size_t size, unsigned int mtype) +{ + struct static_vm *svm; + struct vm_struct *vm; + + list_for_each_entry(svm, &static_vmlist, list) { + vm = &svm->vm; + if (!(vm->flags & VM_ARM_STATIC_MAPPING)) + continue; + if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype)) + continue; + + if (vm->phys_addr > paddr || + paddr + size - 1 > vm->phys_addr + vm->size - 1) + continue; + + return svm; + } + + return NULL; +} + +struct static_vm *find_static_vm_vaddr(void *vaddr) +{ + struct static_vm *svm; + struct vm_struct *vm; + + list_for_each_entry(svm, &static_vmlist, list) { + vm = &svm->vm; + + /* static_vmlist is ascending order */ + if (vm->addr > vaddr) + break; + + if (vm->addr <= vaddr && vm->addr + vm->size > vaddr) + return svm; + } + + return NULL; +} + +void __init add_static_vm_early(struct static_vm *svm) +{ + struct static_vm *curr_svm; + struct vm_struct *vm; + void *vaddr; + + vm = &svm->vm; + vm_area_add_early(vm); + vaddr = vm->addr; + + list_for_each_entry(curr_svm, &static_vmlist, list) { + vm = &curr_svm->vm; + + if (vm->addr > vaddr) + break; + } + list_add_tail(&svm->list, &curr_svm->list); +} + int ioremap_page(unsigned long virt, unsigned long phys, const struct mem_type *mtype) { diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index a8ee92da3544..d5a4e9ad8f0f 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -1,4 +1,6 @@ #ifdef CONFIG_MMU +#include +#include /* the upper-most page table pointer */ extern pmd_t *top_pmd; @@ -65,6 +67,16 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page /* consistent regions used by dma_alloc_attrs() */ #define VM_ARM_DMA_CONSISTENT 0x20000000 + +struct static_vm { + struct vm_struct vm; + struct list_head list; +}; + +extern struct list_head static_vmlist; +extern struct static_vm *find_static_vm_vaddr(void *vaddr); +extern __init void add_static_vm_early(struct static_vm *svm); + #endif #ifdef CONFIG_ZONE_DMA -- cgit v1.2.3 From 101eeda38c0ab8a4f916176e325d9e036d981a24 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Sat, 9 Feb 2013 06:28:06 +0100 Subject: ARM: 7646/1: mm: use static_vm for managing static mapped areas A static mapped area is ARM-specific, so it is better not to use generic vmalloc data structure, that is, vmlist and vmlist_lock for managing static mapped area. And it causes some needless overhead and reducing this overhead is better idea. Now, we have newly introduced static_vm infrastructure. With it, we don't need to iterate all mapped areas. Instead, we just iterate static mapped areas. It helps to reduce an overhead of finding matched area. And architecture dependency on vmalloc layer is removed, so it will help to maintainability for vmalloc layer. 
Reviewed-by: Nicolas Pitre Acked-by: Rob Herring Tested-by: Santosh Shilimkar Signed-off-by: Joonsoo Kim Signed-off-by: Russell King --- arch/arm/mm/ioremap.c | 71 +++++++++++++++++++++------------------------------ arch/arm/mm/mmu.c | 34 ++++++++++++------------ 2 files changed, 46 insertions(+), 59 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 904c15e86063..04d9006eab1f 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -261,13 +261,14 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, const struct mem_type *type; int err; unsigned long addr; - struct vm_struct * area; + struct vm_struct *area; + phys_addr_t paddr = __pfn_to_phys(pfn); #ifndef CONFIG_ARM_LPAE /* * High mappings must be supersection aligned */ - if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK)) + if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK)) return NULL; #endif @@ -283,24 +284,16 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, /* * Try to reuse one of the static mapping whenever possible. */ - read_lock(&vmlist_lock); - for (area = vmlist; area; area = area->next) { - if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) - break; - if (!(area->flags & VM_ARM_STATIC_MAPPING)) - continue; - if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype)) - continue; - if (__phys_to_pfn(area->phys_addr) > pfn || - __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1) - continue; - /* we can drop the lock here as we know *area is static */ - read_unlock(&vmlist_lock); - addr = (unsigned long)area->addr; - addr += __pfn_to_phys(pfn) - area->phys_addr; - return (void __iomem *) (offset + addr); + if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) { + struct static_vm *svm; + + svm = find_static_vm_paddr(paddr, size, mtype); + if (svm) { + addr = (unsigned long)svm->vm.addr; + addr += paddr - svm->vm.phys_addr; + return (void __iomem *) (offset + addr); + } } - read_unlock(&vmlist_lock); /* * Don't allow RAM to be mapped - this causes problems with ARMv6+ @@ -312,21 +305,21 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, if (!area) return NULL; addr = (unsigned long)area->addr; - area->phys_addr = __pfn_to_phys(pfn); + area->phys_addr = paddr; #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) if (DOMAIN_IO == 0 && (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) || cpu_is_xsc3()) && pfn >= 0x100000 && - !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) { + !((paddr | size | addr) & ~SUPERSECTION_MASK)) { area->flags |= VM_ARM_SECTION_MAPPING; err = remap_area_supersections(addr, pfn, size, type); - } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) { + } else if (!((paddr | size | addr) & ~PMD_MASK)) { area->flags |= VM_ARM_SECTION_MAPPING; err = remap_area_sections(addr, pfn, size, type); } else #endif - err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn), + err = ioremap_page_range(addr, addr + size, paddr, __pgprot(type->prot_pte)); if (err) { @@ -410,34 +403,28 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached) void __iounmap(volatile void __iomem *io_addr) { void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); - struct vm_struct *vm; + struct static_vm *svm; + + /* If this is a static mapping, we must leave it alone */ + svm = find_static_vm_vaddr(addr); + if (svm) + return; - read_lock(&vmlist_lock); - for (vm = vmlist; vm; vm = vm->next) { - if (vm->addr > addr) - break; - if (!(vm->flags & 
VM_IOREMAP)) - continue; - /* If this is a static mapping we must leave it alone */ - if ((vm->flags & VM_ARM_STATIC_MAPPING) && - (vm->addr <= addr) && (vm->addr + vm->size > addr)) { - read_unlock(&vmlist_lock); - return; - } #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) + { + struct vm_struct *vm; + + vm = find_vm_area(addr); + /* * If this is a section based mapping we need to handle it * specially as the VM subsystem does not know how to handle * such a beast. */ - if ((vm->addr == addr) && - (vm->flags & VM_ARM_SECTION_MAPPING)) { + if (vm && (vm->flags & VM_ARM_SECTION_MAPPING)) unmap_area_sections((unsigned long)vm->addr, vm->size); - break; - } -#endif } - read_unlock(&vmlist_lock); +#endif vunmap(addr); } diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 9f0610243bd6..a35b314d270d 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -757,21 +757,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr) { struct map_desc *md; struct vm_struct *vm; + struct static_vm *svm; if (!nr) return; - vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm)); + svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm)); for (md = io_desc; nr; md++, nr--) { create_mapping(md); + + vm = &svm->vm; vm->addr = (void *)(md->virtual & PAGE_MASK); vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); vm->phys_addr = __pfn_to_phys(md->pfn); vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; vm->flags |= VM_ARM_MTYPE(md->type); vm->caller = iotable_init; - vm_area_add_early(vm++); + add_static_vm_early(svm++); } } @@ -779,13 +782,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size, void *caller) { struct vm_struct *vm; + struct static_vm *svm; + + svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm)); - vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm)); + vm = &svm->vm; vm->addr = (void *)addr; vm->size = size; vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING; vm->caller = caller; - vm_area_add_early(vm); + add_static_vm_early(svm); } #ifndef CONFIG_ARM_LPAE @@ -810,14 +816,13 @@ static void __init pmd_empty_section_gap(unsigned long addr) static void __init fill_pmd_gaps(void) { + struct static_vm *svm; struct vm_struct *vm; unsigned long addr, next = 0; pmd_t *pmd; - /* we're still single threaded hence no lock needed here */ - for (vm = vmlist; vm; vm = vm->next) { - if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING))) - continue; + list_for_each_entry(svm, &static_vmlist, list) { + vm = &svm->vm; addr = (unsigned long)vm->addr; if (addr < next) continue; @@ -859,17 +864,12 @@ static void __init pci_reserve_io(void) { struct vm_struct *vm; unsigned long addr; + struct static_vm *svm; - /* we're still single threaded hence no lock needed here */ - for (vm = vmlist; vm; vm = vm->next) { - if (!(vm->flags & VM_ARM_STATIC_MAPPING)) - continue; - addr = (unsigned long)vm->addr; - addr &= ~(SZ_2M - 1); - if (addr == PCI_IO_VIRT_BASE) - return; + svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE); + if (svm) + return; - } vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io); } #else -- cgit v1.2.3 From bce2bd3cda4e73cd92cf08e017c073058c151ddc Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 14 Feb 2013 10:50:04 +0000 Subject: ARM: fix warnings introduced by previous patch 869486d5f51 (ARM: 7646/1: mm: use static_vm for managing static mapped areas) introduced new warnings: arch/arm/mm/mmu.c: In function 'pci_reserve_io': arch/arm/mm/mmu.c:888:16: warning: unused variable 'addr' 
arch/arm/mm/mmu.c:887:20: warning: unused variable 'vm' because it failed to delete the two local variables it no longer used. Fix this. Signed-off-by: Russell King --- arch/arm/mm/mmu.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index a35b314d270d..3da1d3e0af24 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -862,8 +862,6 @@ static void __init fill_pmd_gaps(void) #if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H) static void __init pci_reserve_io(void) { - struct vm_struct *vm; - unsigned long addr; struct static_vm *svm; svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE); -- cgit v1.2.3 From 276fd3c5f8a21f9d7d398383b818d5d0ac81db23 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Mon, 11 Feb 2013 08:44:47 +0100 Subject: ARM: 7647/1: pci: Keep pci_common_init() around after init When using deferred driver probing, PCI host controller drivers may actually require this function after the init stage. Signed-off-by: Thierry Reding Signed-off-by: Russell King --- arch/arm/kernel/bios32.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 379cf3292390..da7b0c99f296 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -413,7 +413,7 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) return irq; } -static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys) +static int pcibios_init_resources(int busnr, struct pci_sys_data *sys) { int ret; struct pci_host_bridge_window *window; @@ -445,7 +445,7 @@ static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys) return 0; } -static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head) +static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head) { struct pci_sys_data *sys = NULL; int ret; @@ -493,7 +493,7 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head) } } -void __init pci_common_init(struct hw_pci *hw) +void pci_common_init(struct hw_pci *hw) { struct pci_sys_data *sys; LIST_HEAD(head); -- cgit v1.2.3 From 352af7d4dd90bac3640ad2383e4e9f332d3a4537 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Mon, 11 Feb 2013 08:46:10 +0100 Subject: ARM: 7648/1: pci: Allow passing per-controller private data In order to allow drivers to specify private data for each controller, this commit adds a private_data field to the struct hw_pci. This field is an array of nr_controllers pointers that will be used to initialize the private_data field of the corresponding controller's pci_sys_data structure. 
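A sketch of how a host controller driver might use the new field (the port structure and functions below are invented; hw_pci, pci_sys_data and pci_common_init() are the real interfaces involved):

    /* Hypothetical driver with two controllers, each handed its own state
     * through hw_pci.private_data[nr] -> pci_sys_data.private_data.
     */
    struct my_pcie_port {
            void __iomem *base;
    };

    static struct my_pcie_port my_ports[2];
    static void *my_port_data[2] = { &my_ports[0], &my_ports[1] };

    static int my_pcie_setup(int nr, struct pci_sys_data *sys)
    {
            struct my_pcie_port *port = sys->private_data;  /* == my_port_data[nr] */

            /* ... program the controller described by 'port', add its resources ... */
            return 1;       /* > 0: this controller should be scanned */
    }

    static struct hw_pci my_pci = {
            .nr_controllers = 2,
            .private_data   = my_port_data,
            .setup          = my_pcie_setup,
            /* .ops and .map_irq filled in as usual */
    };

    /* probe path:  pci_common_init(&my_pci); */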
Signed-off-by: Thierry Reding Signed-off-by: Russell King --- arch/arm/include/asm/mach/pci.h | 1 + arch/arm/kernel/bios32.c | 3 +++ 2 files changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index db9fedb57f2c..5cf2e979b4be 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h @@ -23,6 +23,7 @@ struct hw_pci { #endif struct pci_ops *ops; int nr_controllers; + void **private_data; int (*setup)(int nr, struct pci_sys_data *); struct pci_bus *(*scan)(int nr, struct pci_sys_data *); void (*preinit)(void); diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index da7b0c99f296..a1f73b502ef0 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -464,6 +464,9 @@ static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head) sys->map_irq = hw->map_irq; INIT_LIST_HEAD(&sys->resources); + if (hw->private_data) + sys->private_data = hw->private_data[nr]; + ret = hw->setup(nr, sys); if (ret > 0) { -- cgit v1.2.3 From 9520a5bece13b7382f4b0059180f61530c423c81 Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Mon, 11 Feb 2013 12:25:06 +0100 Subject: ARM: 7649/1: mm: mm->context.id fix for big-endian Since the new ASID code in b5466f8728527a05a493cc4abe9e6f034a1bbaab ("ARM: mm: remove IPI broadcasting on ASID rollover") was changed to use 64bit operations it has broken the BE operation due to an issue with the MM code accessing sub-fields of mm->context.id. When running in BE mode we see the values in mm->context.id are stored with the highest value first, so the LDR in the arch/arm/mm/proc-macros.S reads the wrong part of this field. To resolve this, change the LDR in the mmid macro to load from +4. Acked-by: Will Deacon Signed-off-by: Ben Dooks Signed-off-by: Russell King --- arch/arm/mm/context.c | 3 +++ arch/arm/mm/proc-macros.S | 5 +++++ 2 files changed, 8 insertions(+) (limited to 'arch') diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index bc4a5e9ebb78..7a0511191f6b 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -34,6 +34,9 @@ * The ASID is used to tag entries in the CPU caches and TLBs. * The context ID is used by debuggers and trace logic, and * should be unique within all running processes. + * + * In big endian operation, the two 32 bit words are swapped if accesed by + * non 64-bit operations. */ #define ASID_FIRST_VERSION (1ULL << ASID_BITS) #define NUM_USER_ASIDS (ASID_FIRST_VERSION - 1) diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index eb6aa73bc8b7..f9a0aa725ea9 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -38,9 +38,14 @@ /* * mmid - get context id from mm pointer (mm->context.id) + * note, this field is 64bit, so in big-endian the two words are swapped too. */ .macro mmid, rd, rn +#ifdef __ARMEB__ + ldr \rd, [\rn, #MM_CONTEXT_ID + 4 ] +#else ldr \rd, [\rn, #MM_CONTEXT_ID] +#endif .endm /* -- cgit v1.2.3 From 251019fb97b67067c34292bb89c19e712f91971c Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Mon, 11 Feb 2013 12:25:05 +0100 Subject: ARM: 7650/1: mm: replace direct access to mm->context.id with new macro The mmid macro is meant to be used to get the mm->context.id data from the mm structure, but it seems to have been missed in a cuple of files. 
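The word-ordering issue behind this patch and 7649/1 is easy to see with a small standalone C illustration (userspace, not kernel code): the ASID sits in the low 32 bits of the 64-bit context.id, and a 32-bit load of those bits needs offset 0 on little-endian ARM but offset 4 on big-endian ARM, which is what the mmid macro now encodes.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Where do the low 32 bits of a 64-bit value live in memory? */
            union {
                    uint64_t id;
                    uint32_t w[2];
            } u = { .id = 0x1122334455667788ULL };

            /* little-endian: w[0] == 0x55667788, the low half (like the ASID)
             * big-endian:    w[0] == 0x11223344, the low half is at w[1] (+4 bytes)
             */
            printf("w[0]=%#x w[1]=%#x\n", (unsigned)u.w[0], (unsigned)u.w[1]);
            return 0;
    }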
Acked-by: Will Deacon Signed-off-by: Ben Dooks Signed-off-by: Russell King --- arch/arm/mm/proc-v6.S | 2 +- arch/arm/mm/proc-v7-2level.S | 2 +- arch/arm/mm/proc-v7-3level.S | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 09c5233f4dfc..bcaaa8de9325 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -101,7 +101,7 @@ ENTRY(cpu_v6_dcache_clean_area) ENTRY(cpu_v6_switch_mm) #ifdef CONFIG_MMU mov r2, #0 - ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id + mmid r1, r1 @ get mm->context.id ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) ALT_UP(orr r0, r0, #TTB_FLAGS_UP) mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S index 6d98c13ab827..78f520bc0e99 100644 --- a/arch/arm/mm/proc-v7-2level.S +++ b/arch/arm/mm/proc-v7-2level.S @@ -40,7 +40,7 @@ ENTRY(cpu_v7_switch_mm) #ifdef CONFIG_MMU mov r2, #0 - ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id + mmid r1, r1 @ get mm->context.id ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) ALT_UP(orr r0, r0, #TTB_FLAGS_UP) #ifdef CONFIG_ARM_ERRATA_430973 diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S index 7b56386f9496..50bf1dafc9ea 100644 --- a/arch/arm/mm/proc-v7-3level.S +++ b/arch/arm/mm/proc-v7-3level.S @@ -47,7 +47,7 @@ */ ENTRY(cpu_v7_switch_mm) #ifdef CONFIG_MMU - ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id + mmid r1, r1 @ get mm->context.id and r3, r1, #0xff mov r3, r3, lsl #(48 - 32) @ ASID mcrr p15, 0, r0, r3, c2 @ set TTB 0 -- cgit v1.2.3 From b28748fb5d21d5d64c9ce31579ffbbd41f317042 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 17 Feb 2013 14:40:33 +0000 Subject: ARM: add SCHED_HRTICK config option As we don't include kernel/Kconfig.hz as this defines HZ values unsuitable for ARM platforms, add the SCHED_HRTICK to properly configure the scheduler for hrtimer operation. Signed-off-by: Russell King --- arch/arm/Kconfig | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 67874b82a4ed..a192a5c1984c 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1655,6 +1655,9 @@ config HZ default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE default 100 +config SCHED_HRTICK + def_bool HIGH_RES_TIMERS + config THUMB2_KERNEL bool "Compile the kernel in Thumb-2 mode" depends on CPU_V7 && !CPU_V6 && !CPU_V6K -- cgit v1.2.3