Diffstat (limited to 'arch/arm/mm')
-rw-r--r--   arch/arm/mm/Kconfig              |  18
-rw-r--r--   arch/arm/mm/Makefile             |   2
-rw-r--r--   arch/arm/mm/cache-feroceon-l2.c  |  54
-rw-r--r--   arch/arm/mm/cache-xsc3l2.c       | 107
-rw-r--r--   arch/arm/mm/dma-mapping.c        |  72
-rw-r--r--   arch/arm/mm/flush.c              |   2
-rw-r--r--   arch/arm/mm/highmem.c            | 116
-rw-r--r--   arch/arm/mm/init.c               |  21
-rw-r--r--   arch/arm/mm/mm.h                 |   3
-rw-r--r--   arch/arm/mm/mmu.c                |  41
-rw-r--r--   arch/arm/mm/proc-mohawk.S        | 416
11 files changed, 798 insertions(+), 54 deletions(-)
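The patch below wires up ARM highmem support: arch/arm/mm/highmem.c implements kmap()/kmap_atomic() on top of per-CPU fixmap slots, the Feroceon and XSC3 L2 range operations gain a fixmap-based way to reach pages that have no permanent kernel mapping, DMA cache maintenance becomes page-based, and proc-mohawk.S adds support for the Marvell PJ1 (Mohawk) core. For orientation, here is a minimal driver-style sketch of the kmap_atomic() interface being implemented; fill_buffer() and its arguments are illustrative only and not part of this patch:

/*
 * Sketch: copying data into a page that may live in highmem, using the
 * kmap_atomic() interface implemented by arch/arm/mm/highmem.c below.
 * The KM_USER0 slot choice and the fill_buffer() helper are illustrative.
 */
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

static void fill_buffer(struct page *buf_page, const u8 *src, size_t len)
{
	/* Map the page on this CPU; lowmem pages return their linear address. */
	u8 *dst = kmap_atomic(buf_page, KM_USER0);

	memcpy(dst, src, min_t(size_t, len, PAGE_SIZE));

	/* Tear the temporary mapping down; no sleeping was allowed in between. */
	kunmap_atomic(dst, KM_USER0);
}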
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index bc3331863d9d..20979564e7ee 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -358,6 +358,17 @@ config CPU_XSC3 select CPU_TLB_V4WBI if MMU select IO_36 +# Marvell PJ1 (Mohawk) +config CPU_MOHAWK + bool + select CPU_32v5 + select CPU_ABRT_EV5T + select CPU_PABRT_NOIFAR + select CPU_CACHE_VIVT + select CPU_CP15_MMU + select CPU_TLB_V4WBI if MMU + select CPU_COPY_V4WB if MMU + # Feroceon config CPU_FEROCEON bool @@ -600,7 +611,7 @@ comment "Processor Features" config ARM_THUMB bool "Support Thumb user binaries" - depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_V6 || CPU_V7 || CPU_FEROCEON + depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V7 || CPU_FEROCEON default y help Say Y if you want to include kernel support for running user space @@ -684,7 +695,7 @@ config CPU_CACHE_ROUND_ROBIN config CPU_BPREDICT_DISABLE bool "Disable branch prediction" - depends on CPU_ARM1020 || CPU_V6 || CPU_XSC3 || CPU_V7 || CPU_FA526 + depends on CPU_ARM1020 || CPU_V6 || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526 help Say Y here to disable branch prediction. If unsure, say N. @@ -735,7 +746,8 @@ config CACHE_FEROCEON_L2_WRITETHROUGH config CACHE_L2X0 bool "Enable the L2x0 outer cache controller" - depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || REALVIEW_EB_A9MP + depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \ + REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 default y select OUTER_CACHE help diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 40f941c2245c..63e3f6dd0e21 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_MODULES) += proc-syms.o obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o obj-$(CONFIG_DISCONTIGMEM) += discontig.o +obj-$(CONFIG_HIGHMEM) += highmem.o obj-$(CONFIG_CPU_ABRT_NOMMU) += abort-nommu.o obj-$(CONFIG_CPU_ABRT_EV4) += abort-ev4.o @@ -74,6 +75,7 @@ obj-$(CONFIG_CPU_SA110) += proc-sa110.o obj-$(CONFIG_CPU_SA1100) += proc-sa1100.o obj-$(CONFIG_CPU_XSCALE) += proc-xscale.o obj-$(CONFIG_CPU_XSC3) += proc-xsc3.o +obj-$(CONFIG_CPU_MOHAWK) += proc-mohawk.o obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o obj-$(CONFIG_CPU_V6) += proc-v6.o obj-$(CONFIG_CPU_V7) += proc-v7.o diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c index 80cd207cbaea..d6dd83826f8a 100644 --- a/arch/arm/mm/cache-feroceon-l2.c +++ b/arch/arm/mm/cache-feroceon-l2.c @@ -14,8 +14,12 @@ #include <linux/init.h> #include <asm/cacheflush.h> +#include <asm/kmap_types.h> +#include <asm/fixmap.h> +#include <asm/pgtable.h> +#include <asm/tlbflush.h> #include <plat/cache-feroceon-l2.h> - +#include "mm.h" /* * Low-level cache maintenance operations. @@ -34,14 +38,36 @@ * The range operations require two successive cp15 writes, in * between which we don't want to be preempted. */ + +static inline unsigned long l2_start_va(unsigned long paddr) +{ +#ifdef CONFIG_HIGHMEM + /* + * Let's do our own fixmap stuff in a minimal way here. 
+ * Because range ops can't be done on physical addresses, + * we simply install a virtual mapping for it only for the + * TLB lookup to occur, hence no need to flush the untouched + * memory mapping. This is protected with the disabling of + * interrupts by the caller. + */ + unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id(); + unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0); + local_flush_tlb_kernel_page(vaddr); + return vaddr + (paddr & ~PAGE_MASK); +#else + return __phys_to_virt(paddr); +#endif +} + static inline void l2_clean_pa(unsigned long addr) { __asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr)); } -static inline void l2_clean_mva_range(unsigned long start, unsigned long end) +static inline void l2_clean_pa_range(unsigned long start, unsigned long end) { - unsigned long flags; + unsigned long va_start, va_end, flags; /* * Make sure 'start' and 'end' reference the same page, as @@ -51,17 +77,14 @@ static inline void l2_clean_mva_range(unsigned long start, unsigned long end) BUG_ON((start ^ end) >> PAGE_SHIFT); raw_local_irq_save(flags); + va_start = l2_start_va(start); + va_end = va_start + (end - start); __asm__("mcr p15, 1, %0, c15, c9, 4\n\t" "mcr p15, 1, %1, c15, c9, 5" - : : "r" (start), "r" (end)); + : : "r" (va_start), "r" (va_end)); raw_local_irq_restore(flags); } -static inline void l2_clean_pa_range(unsigned long start, unsigned long end) -{ - l2_clean_mva_range(__phys_to_virt(start), __phys_to_virt(end)); -} - static inline void l2_clean_inv_pa(unsigned long addr) { __asm__("mcr p15, 1, %0, c15, c10, 3" : : "r" (addr)); @@ -72,9 +95,9 @@ static inline void l2_inv_pa(unsigned long addr) __asm__("mcr p15, 1, %0, c15, c11, 3" : : "r" (addr)); } -static inline void l2_inv_mva_range(unsigned long start, unsigned long end) +static inline void l2_inv_pa_range(unsigned long start, unsigned long end) { - unsigned long flags; + unsigned long va_start, va_end, flags; /* * Make sure 'start' and 'end' reference the same page, as @@ -84,17 +107,14 @@ static inline void l2_inv_mva_range(unsigned long start, unsigned long end) BUG_ON((start ^ end) >> PAGE_SHIFT); raw_local_irq_save(flags); + va_start = l2_start_va(start); + va_end = va_start + (end - start); __asm__("mcr p15, 1, %0, c15, c11, 4\n\t" "mcr p15, 1, %1, c15, c11, 5" - : : "r" (start), "r" (end)); + : : "r" (va_start), "r" (va_end)); raw_local_irq_restore(flags); } -static inline void l2_inv_pa_range(unsigned long start, unsigned long end) -{ - l2_inv_mva_range(__phys_to_virt(start), __phys_to_virt(end)); -} - /* * Linux primitives. 
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c index 464de893a988..5d180cb0bd94 100644 --- a/arch/arm/mm/cache-xsc3l2.c +++ b/arch/arm/mm/cache-xsc3l2.c @@ -17,12 +17,14 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> -#include <linux/spinlock.h> -#include <linux/io.h> - #include <asm/system.h> #include <asm/cputype.h> #include <asm/cacheflush.h> +#include <asm/kmap_types.h> +#include <asm/fixmap.h> +#include <asm/pgtable.h> +#include <asm/tlbflush.h> +#include "mm.h" #define CR_L2 (1 << 26) @@ -47,21 +49,11 @@ static inline void xsc3_l2_clean_mva(unsigned long addr) __asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr)); } -static inline void xsc3_l2_clean_pa(unsigned long addr) -{ - xsc3_l2_clean_mva(__phys_to_virt(addr)); -} - static inline void xsc3_l2_inv_mva(unsigned long addr) { __asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr)); } -static inline void xsc3_l2_inv_pa(unsigned long addr) -{ - xsc3_l2_inv_mva(__phys_to_virt(addr)); -} - static inline void xsc3_l2_inv_all(void) { unsigned long l2ctype, set_way; @@ -79,50 +71,103 @@ static inline void xsc3_l2_inv_all(void) dsb(); } +#ifdef CONFIG_HIGHMEM +#define l2_map_save_flags(x) raw_local_save_flags(x) +#define l2_map_restore_flags(x) raw_local_irq_restore(x) +#else +#define l2_map_save_flags(x) ((x) = 0) +#define l2_map_restore_flags(x) ((void)(x)) +#endif + +static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va, + unsigned long flags) +{ +#ifdef CONFIG_HIGHMEM + unsigned long va = prev_va & PAGE_MASK; + unsigned long pa_offset = pa << (32 - PAGE_SHIFT); + if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) { + /* + * Switching to a new page. Because cache ops are + * using virtual addresses only, we must put a mapping + * in place for it. We also enable interrupts for a + * short while and disable them again to protect this + * mapping. + */ + unsigned long idx; + raw_local_irq_restore(flags); + idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id(); + va = __fix_to_virt(FIX_KMAP_BEGIN + idx); + raw_local_irq_restore(flags | PSR_I_BIT); + set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0); + local_flush_tlb_kernel_page(va); + } + return va + (pa_offset >> (32 - PAGE_SHIFT)); +#else + return __phys_to_virt(pa); +#endif +} + static void xsc3_l2_inv_range(unsigned long start, unsigned long end) { + unsigned long vaddr, flags; + if (start == 0 && end == -1ul) { xsc3_l2_inv_all(); return; } + vaddr = -1; /* to force the first mapping */ + l2_map_save_flags(flags); + /* * Clean and invalidate partial first cache line. */ if (start & (CACHE_LINE_SIZE - 1)) { - xsc3_l2_clean_pa(start & ~(CACHE_LINE_SIZE - 1)); - xsc3_l2_inv_pa(start & ~(CACHE_LINE_SIZE - 1)); + vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags); + xsc3_l2_clean_mva(vaddr); + xsc3_l2_inv_mva(vaddr); start = (start | (CACHE_LINE_SIZE - 1)) + 1; } /* - * Clean and invalidate partial last cache line. + * Invalidate all full cache lines between 'start' and 'end'. */ - if (start < end && (end & (CACHE_LINE_SIZE - 1))) { - xsc3_l2_clean_pa(end & ~(CACHE_LINE_SIZE - 1)); - xsc3_l2_inv_pa(end & ~(CACHE_LINE_SIZE - 1)); - end &= ~(CACHE_LINE_SIZE - 1); + while (start < (end & ~(CACHE_LINE_SIZE - 1))) { + vaddr = l2_map_va(start, vaddr, flags); + xsc3_l2_inv_mva(vaddr); + start += CACHE_LINE_SIZE; } /* - * Invalidate all full cache lines between 'start' and 'end'. + * Clean and invalidate partial last cache line. 
*/ - while (start < end) { - xsc3_l2_inv_pa(start); - start += CACHE_LINE_SIZE; + if (start < end) { + vaddr = l2_map_va(start, vaddr, flags); + xsc3_l2_clean_mva(vaddr); + xsc3_l2_inv_mva(vaddr); } + l2_map_restore_flags(flags); + dsb(); } static void xsc3_l2_clean_range(unsigned long start, unsigned long end) { + unsigned long vaddr, flags; + + vaddr = -1; /* to force the first mapping */ + l2_map_save_flags(flags); + start &= ~(CACHE_LINE_SIZE - 1); while (start < end) { - xsc3_l2_clean_pa(start); + vaddr = l2_map_va(start, vaddr, flags); + xsc3_l2_clean_mva(vaddr); start += CACHE_LINE_SIZE; } + l2_map_restore_flags(flags); + dsb(); } @@ -148,18 +193,26 @@ static inline void xsc3_l2_flush_all(void) static void xsc3_l2_flush_range(unsigned long start, unsigned long end) { + unsigned long vaddr, flags; + if (start == 0 && end == -1ul) { xsc3_l2_flush_all(); return; } + vaddr = -1; /* to force the first mapping */ + l2_map_save_flags(flags); + start &= ~(CACHE_LINE_SIZE - 1); while (start < end) { - xsc3_l2_clean_pa(start); - xsc3_l2_inv_pa(start); + vaddr = l2_map_va(start, vaddr, flags); + xsc3_l2_clean_mva(vaddr); + xsc3_l2_inv_mva(vaddr); start += CACHE_LINE_SIZE; } + l2_map_restore_flags(flags); + dsb(); } diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index f1ef5613ccd4..510c179b0ac8 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -19,6 +19,7 @@ #include <linux/dma-mapping.h> #include <asm/memory.h> +#include <asm/highmem.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/sizes.h> @@ -517,6 +518,74 @@ void dma_cache_maint(const void *start, size_t size, int direction) } EXPORT_SYMBOL(dma_cache_maint); +static void dma_cache_maint_contiguous(struct page *page, unsigned long offset, + size_t size, int direction) +{ + void *vaddr; + unsigned long paddr; + void (*inner_op)(const void *, const void *); + void (*outer_op)(unsigned long, unsigned long); + + switch (direction) { + case DMA_FROM_DEVICE: /* invalidate only */ + inner_op = dmac_inv_range; + outer_op = outer_inv_range; + break; + case DMA_TO_DEVICE: /* writeback only */ + inner_op = dmac_clean_range; + outer_op = outer_clean_range; + break; + case DMA_BIDIRECTIONAL: /* writeback and invalidate */ + inner_op = dmac_flush_range; + outer_op = outer_flush_range; + break; + default: + BUG(); + } + + if (!PageHighMem(page)) { + vaddr = page_address(page) + offset; + inner_op(vaddr, vaddr + size); + } else { + vaddr = kmap_high_get(page); + if (vaddr) { + vaddr += offset; + inner_op(vaddr, vaddr + size); + kunmap_high(page); + } + } + + paddr = page_to_phys(page) + offset; + outer_op(paddr, paddr + size); +} + +void dma_cache_maint_page(struct page *page, unsigned long offset, + size_t size, int dir) +{ + /* + * A single sg entry may refer to multiple physically contiguous + * pages. But we still need to process highmem pages individually. + * If highmem is not configured then the bulk of this loop gets + * optimized out. 
+ */ + size_t left = size; + do { + size_t len = left; + if (PageHighMem(page) && len + offset > PAGE_SIZE) { + if (offset >= PAGE_SIZE) { + page += offset / PAGE_SIZE; + offset %= PAGE_SIZE; + } + len = PAGE_SIZE - offset; + } + dma_cache_maint_contiguous(page, offset, len, dir); + offset = 0; + page++; + left -= len; + } while (left); +} +EXPORT_SYMBOL(dma_cache_maint_page); + /** * dma_map_sg - map a set of SG buffers for streaming mode DMA * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices @@ -614,7 +683,8 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, continue; if (!arch_is_coherent()) - dma_cache_maint(sg_virt(s), s->length, dir); + dma_cache_maint_page(sg_page(s), s->offset, + s->length, dir); } } EXPORT_SYMBOL(dma_sync_sg_for_device); diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 0fa9bf388f0b..4e283481cee1 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -192,7 +192,7 @@ void flush_dcache_page(struct page *page) struct address_space *mapping = page_mapping(page); #ifndef CONFIG_SMP - if (mapping && !mapping_mapped(mapping)) + if (!PageHighMem(page) && mapping && !mapping_mapped(mapping)) set_bit(PG_dcache_dirty, &page->flags); else #endif diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c new file mode 100644 index 000000000000..a34954d9df7d --- /dev/null +++ b/arch/arm/mm/highmem.c @@ -0,0 +1,116 @@ +/* + * arch/arm/mm/highmem.c -- ARM highmem support + * + * Author: Nicolas Pitre + * Created: september 8, 2008 + * Copyright: Marvell Semiconductors Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/highmem.h> +#include <linux/interrupt.h> +#include <asm/fixmap.h> +#include <asm/cacheflush.h> +#include <asm/tlbflush.h> +#include "mm.h" + +void *kmap(struct page *page) +{ + might_sleep(); + if (!PageHighMem(page)) + return page_address(page); + return kmap_high(page); +} +EXPORT_SYMBOL(kmap); + +void kunmap(struct page *page) +{ + BUG_ON(in_interrupt()); + if (!PageHighMem(page)) + return; + kunmap_high(page); +} +EXPORT_SYMBOL(kunmap); + +void *kmap_atomic(struct page *page, enum km_type type) +{ + unsigned int idx; + unsigned long vaddr; + + pagefault_disable(); + if (!PageHighMem(page)) + return page_address(page); + + idx = type + KM_TYPE_NR * smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); +#ifdef CONFIG_DEBUG_HIGHMEM + /* + * With debugging enabled, kunmap_atomic forces that entry to 0. + * Make sure it was indeed properly unmapped. + */ + BUG_ON(!pte_none(*(TOP_PTE(vaddr)))); +#endif + set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0); + /* + * When debugging is off, kunmap_atomic leaves the previous mapping + * in place, so this TLB flush ensures the TLB is updated with the + * new mapping. 
+ */ + local_flush_tlb_kernel_page(vaddr); + + return (void *)vaddr; +} +EXPORT_SYMBOL(kmap_atomic); + +void kunmap_atomic(void *kvaddr, enum km_type type) +{ + unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; + unsigned int idx = type + KM_TYPE_NR * smp_processor_id(); + + if (kvaddr >= (void *)FIXADDR_START) { + __cpuc_flush_dcache_page((void *)vaddr); +#ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); + set_pte_ext(TOP_PTE(vaddr), __pte(0), 0); + local_flush_tlb_kernel_page(vaddr); +#else + (void) idx; /* to kill a warning */ +#endif + } + pagefault_enable(); +} +EXPORT_SYMBOL(kunmap_atomic); + +void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) +{ + unsigned int idx; + unsigned long vaddr; + + pagefault_disable(); + + idx = type + KM_TYPE_NR * smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); +#ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(!pte_none(*(TOP_PTE(vaddr)))); +#endif + set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0); + local_flush_tlb_kernel_page(vaddr); + + return (void *)vaddr; +} + +struct page *kmap_atomic_to_page(const void *ptr) +{ + unsigned long vaddr = (unsigned long)ptr; + pte_t *pte; + + if (vaddr < FIXADDR_START) + return virt_to_page(ptr); + + pte = TOP_PTE(vaddr); + return pte_page(*pte); +} diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 80fd3b69ae1f..8277802ec859 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -15,6 +15,7 @@ #include <linux/mman.h> #include <linux/nodemask.h> #include <linux/initrd.h> +#include <linux/highmem.h> #include <asm/mach-types.h> #include <asm/sections.h> @@ -485,7 +486,7 @@ void __init mem_init(void) int i, node; #ifndef CONFIG_DISCONTIGMEM - max_mapnr = virt_to_page(high_memory) - mem_map; + max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; #endif /* this will put all unused low memory onto the freelists */ @@ -504,6 +505,19 @@ void __init mem_init(void) __phys_to_pfn(__pa(swapper_pg_dir)), NULL); #endif +#ifdef CONFIG_HIGHMEM + /* set highmem page free */ + for_each_online_node(node) { + for_each_nodebank (i, &meminfo, node) { + unsigned long start = bank_pfn_start(&meminfo.bank[i]); + unsigned long end = bank_pfn_end(&meminfo.bank[i]); + if (start >= max_low_pfn + PHYS_PFN_OFFSET) + totalhigh_pages += free_area(start, end, NULL); + } + } + totalram_pages += totalhigh_pages; +#endif + /* * Since our memory may not be contiguous, calculate the * real number of pages we have in this system @@ -521,9 +535,10 @@ void __init mem_init(void) initsize = __init_end - __init_begin; printk(KERN_NOTICE "Memory: %luKB available (%dK code, " - "%dK data, %dK init)\n", + "%dK data, %dK init, %luK highmem)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), - codesize >> 10, datasize >> 10, initsize >> 10); + codesize >> 10, datasize >> 10, initsize >> 10, + (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))); if (PAGE_SIZE >= 16384 && num_physpages <= 128) { extern int sysctl_overcommit_memory; diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 95bbe112965e..c4f6f05198e0 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -1,7 +1,6 @@ -/* the upper-most page table pointer */ - #ifdef CONFIG_MMU +/* the upper-most page table pointer */ extern pmd_t *top_pmd; #define TOP_PTE(x) pte_offset_kernel(top_pmd, x) diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index d4d082c5c2d4..1585814f8414 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -18,9 +18,11 @@ #include <asm/cputype.h> #include <asm/mach-types.h> #include 
<asm/sections.h> +#include <asm/cachetype.h> #include <asm/setup.h> #include <asm/sizes.h> #include <asm/tlb.h> +#include <asm/highmem.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> @@ -243,6 +245,10 @@ static struct mem_type mem_types[] = { .prot_sect = PMD_TYPE_SECT, .domain = DOMAIN_KERNEL, }, + [MT_MEMORY_NONCACHED] = { + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, + .domain = DOMAIN_KERNEL, + }, }; const struct mem_type *get_mem_type(unsigned int type) @@ -406,9 +412,28 @@ static void __init build_mem_type_table(void) kern_pgprot |= L_PTE_SHARED; vecs_pgprot |= L_PTE_SHARED; mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; + mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; #endif } + /* + * Non-cacheable Normal - intended for memory areas that must + * not cause dirty cache line writebacks when used + */ + if (cpu_arch >= CPU_ARCH_ARMv6) { + if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { + /* Non-cacheable Normal is XCB = 001 */ + mem_types[MT_MEMORY_NONCACHED].prot_sect |= + PMD_SECT_BUFFERED; + } else { + /* For both ARMv6 and non-TEX-remapping ARMv7 */ + mem_types[MT_MEMORY_NONCACHED].prot_sect |= + PMD_SECT_TEX(1); + } + } else { + mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; + } + for (i = 0; i < 16; i++) { unsigned long v = pgprot_val(protection_map[i]); protection_map[i] = __pgprot(v | user_pgprot); @@ -677,6 +702,10 @@ static void __init sanity_check_meminfo(void) if (meminfo.nr_banks >= NR_BANKS) { printk(KERN_CRIT "NR_BANKS too low, " "ignoring high memory\n"); + } else if (cache_is_vipt_aliasing()) { + printk(KERN_CRIT "HIGHMEM is not yet supported " + "with VIPT aliasing cache, " + "ignoring high memory\n"); } else { memmove(bank + 1, bank, (meminfo.nr_banks - i) * sizeof(*bank)); @@ -895,6 +924,17 @@ static void __init devicemaps_init(struct machine_desc *mdesc) flush_cache_all(); } +static void __init kmap_init(void) +{ +#ifdef CONFIG_HIGHMEM + pmd_t *pmd = pmd_off_k(PKMAP_BASE); + pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t)); + BUG_ON(!pmd_none(*pmd) || !pte); + __pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE); + pkmap_page_table = pte + PTRS_PER_PTE; +#endif +} + /* * paging_init() sets up the page tables, initialises the zone memory * maps, and sets up the zero page, bad page and bad page tables. @@ -908,6 +948,7 @@ void __init paging_init(struct machine_desc *mdesc) prepare_page_table(); bootmem_init(); devicemaps_init(mdesc); + kmap_init(); top_pmd = pmd_off_k(0xffff0000); diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S new file mode 100644 index 000000000000..540f5078496b --- /dev/null +++ b/arch/arm/mm/proc-mohawk.S @@ -0,0 +1,416 @@ +/* + * linux/arch/arm/mm/proc-mohawk.S: MMU functions for Marvell PJ1 core + * + * PJ1 (codename Mohawk) is a hybrid of the xscale3 and Marvell's own core. + * + * Heavily based on proc-arm926.S and proc-xsc3.S + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/linkage.h> +#include <linux/init.h> +#include <asm/assembler.h> +#include <asm/hwcap.h> +#include <asm/pgtable-hwdef.h> +#include <asm/pgtable.h> +#include <asm/page.h> +#include <asm/ptrace.h> +#include "proc-macros.S" + +/* + * This is the maximum size of an area which will be flushed. If the + * area is larger than this, then we flush the whole cache. + */ +#define CACHE_DLIMIT 32768 + +/* + * The cache line size of the L1 D cache. + */ +#define CACHE_DLINESIZE 32 + +/* + * cpu_mohawk_proc_init() + */ +ENTRY(cpu_mohawk_proc_init) + mov pc, lr + +/* + * cpu_mohawk_proc_fin() + */ +ENTRY(cpu_mohawk_proc_fin) + stmfd sp!, {lr} + mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE + msr cpsr_c, ip + bl mohawk_flush_kern_cache_all + mrc p15, 0, r0, c1, c0, 0 @ ctrl register + bic r0, r0, #0x1800 @ ...iz........... + bic r0, r0, #0x0006 @ .............ca. + mcr p15, 0, r0, c1, c0, 0 @ disable caches + ldmfd sp!, {pc} + +/* + * cpu_mohawk_reset(loc) + * + * Perform a soft reset of the system. Put the CPU into the + * same state as it would be if it had been reset, and branch + * to what would be the reset vector. + * + * loc: location to jump to for soft reset + * + * (same as arm926) + */ + .align 5 +ENTRY(cpu_mohawk_reset) + mov ip, #0 + mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs + mrc p15, 0, ip, c1, c0, 0 @ ctrl register + bic ip, ip, #0x0007 @ .............cam + bic ip, ip, #0x1100 @ ...i...s........ + mcr p15, 0, ip, c1, c0, 0 @ ctrl register + mov pc, r0 + +/* + * cpu_mohawk_do_idle() + * + * Called with IRQs disabled + */ + .align 5 +ENTRY(cpu_mohawk_do_idle) + mov r0, #0 + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer + mcr p15, 0, r0, c7, c0, 4 @ wait for interrupt + mov pc, lr + +/* + * flush_user_cache_all() + * + * Clean and invalidate all cache entries in a particular + * address space. + */ +ENTRY(mohawk_flush_user_cache_all) + /* FALLTHROUGH */ + +/* + * flush_kern_cache_all() + * + * Clean and invalidate the entire cache. + */ +ENTRY(mohawk_flush_kern_cache_all) + mov r2, #VM_EXEC + mov ip, #0 +__flush_whole_cache: + mcr p15, 0, ip, c7, c14, 0 @ clean & invalidate all D cache + tst r2, #VM_EXEC + mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache + mcrne p15, 0, ip, c7, c10, 0 @ drain write buffer + mov pc, lr + +/* + * flush_user_cache_range(start, end, flags) + * + * Clean and invalidate a range of cache entries in the + * specified address range. 
+ * + * - start - start address (inclusive) + * - end - end address (exclusive) + * - flags - vm_flags describing address space + * + * (same as arm926) + */ +ENTRY(mohawk_flush_user_cache_range) + mov ip, #0 + sub r3, r1, r0 @ calculate total size + cmp r3, #CACHE_DLIMIT + bgt __flush_whole_cache +1: tst r2, #VM_EXEC + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry + mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry + add r0, r0, #CACHE_DLINESIZE + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry + mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b + tst r2, #VM_EXEC + mcrne p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * coherent_kern_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start, end. If you have non-snooping + * Harvard caches, you need to implement this function. + * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(mohawk_coherent_kern_range) + /* FALLTHROUGH */ + +/* + * coherent_user_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start, end. If you have non-snooping + * Harvard caches, you need to implement this function. + * + * - start - virtual start address + * - end - virtual end address + * + * (same as arm926) + */ +ENTRY(mohawk_coherent_user_range) + bic r0, r0, #CACHE_DLINESIZE - 1 +1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry + mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b + mcr p15, 0, r0, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * flush_kern_dcache_page(void *page) + * + * Ensure no D cache aliasing occurs, either with itself or + * the I cache + * + * - addr - page aligned address + */ +ENTRY(mohawk_flush_kern_dcache_page) + add r1, r0, #PAGE_SZ +1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b + mov r0, #0 + mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache + mcr p15, 0, r0, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * dma_inv_range(start, end) + * + * Invalidate (discard) the specified virtual address range. + * May not write back any entries. If 'start' or 'end' + * are not cache line aligned, those lines must be written + * back. + * + * - start - virtual start address + * - end - virtual end address + * + * (same as v4wb) + */ +ENTRY(mohawk_dma_inv_range) + tst r0, #CACHE_DLINESIZE - 1 + mcrne p15, 0, r0, c7, c10, 1 @ clean D entry + tst r1, #CACHE_DLINESIZE - 1 + mcrne p15, 0, r1, c7, c10, 1 @ clean D entry + bic r0, r0, #CACHE_DLINESIZE - 1 +1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b + mcr p15, 0, r0, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * dma_clean_range(start, end) + * + * Clean the specified virtual address range. + * + * - start - virtual start address + * - end - virtual end address + * + * (same as v4wb) + */ +ENTRY(mohawk_dma_clean_range) + bic r0, r0, #CACHE_DLINESIZE - 1 +1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b + mcr p15, 0, r0, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * dma_flush_range(start, end) + * + * Clean and invalidate the specified virtual address range. 
+ * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(mohawk_dma_flush_range) + bic r0, r0, #CACHE_DLINESIZE - 1 +1: + mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b + mcr p15, 0, r0, c7, c10, 4 @ drain WB + mov pc, lr + +ENTRY(mohawk_cache_fns) + .long mohawk_flush_kern_cache_all + .long mohawk_flush_user_cache_all + .long mohawk_flush_user_cache_range + .long mohawk_coherent_kern_range + .long mohawk_coherent_user_range + .long mohawk_flush_kern_dcache_page + .long mohawk_dma_inv_range + .long mohawk_dma_clean_range + .long mohawk_dma_flush_range + +ENTRY(cpu_mohawk_dcache_clean_area) +1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry + add r0, r0, #CACHE_DLINESIZE + subs r1, r1, #CACHE_DLINESIZE + bhi 1b + mcr p15, 0, r0, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * cpu_mohawk_switch_mm(pgd) + * + * Set the translation base pointer to be as described by pgd. + * + * pgd: new page tables + */ + .align 5 +ENTRY(cpu_mohawk_switch_mm) + mov ip, #0 + mcr p15, 0, ip, c7, c14, 0 @ clean & invalidate all D cache + mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache + mcr p15, 0, ip, c7, c10, 4 @ drain WB + orr r0, r0, #0x18 @ cache the page table in L2 + mcr p15, 0, r0, c2, c0, 0 @ load page table pointer + mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs + mov pc, lr + +/* + * cpu_mohawk_set_pte_ext(ptep, pte, ext) + * + * Set a PTE and flush it out + */ + .align 5 +ENTRY(cpu_mohawk_set_pte_ext) + armv3_set_pte_ext + mov r0, r0 + mcr p15, 0, r0, c7, c10, 1 @ clean D entry + mcr p15, 0, r0, c7, c10, 4 @ drain WB + mov pc, lr + + __INIT + + .type __mohawk_setup, #function +__mohawk_setup: + mov r0, #0 + mcr p15, 0, r0, c7, c7 @ invalidate I,D caches + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer + mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs + orr r4, r4, #0x18 @ cache the page table in L2 + mcr p15, 0, r4, c2, c0, 0 @ load page table pointer + + mov r0, #0 @ don't allow CP access + mcr p15, 0, r0, c15, c1, 0 @ write CP access register + + adr r5, mohawk_crval + ldmia r5, {r5, r6} + mrc p15, 0, r0, c1, c0 @ get control register + bic r0, r0, r5 + orr r0, r0, r6 + mov pc, lr + + .size __mohawk_setup, . - __mohawk_setup + + /* + * R + * .RVI ZFRS BLDP WCAM + * .011 1001 ..00 0101 + * + */ + .type mohawk_crval, #object +mohawk_crval: + crval clear=0x00007f3f, mmuset=0x00003905, ucset=0x00001134 + + __INITDATA + +/* + * Purpose : Function pointers used to access above functions - all calls + * come through these + */ + .type mohawk_processor_functions, #object +mohawk_processor_functions: + .word v5t_early_abort + .word pabort_noifar + .word cpu_mohawk_proc_init + .word cpu_mohawk_proc_fin + .word cpu_mohawk_reset + .word cpu_mohawk_do_idle + .word cpu_mohawk_dcache_clean_area + .word cpu_mohawk_switch_mm + .word cpu_mohawk_set_pte_ext + .size mohawk_processor_functions, . - mohawk_processor_functions + + .section ".rodata" + + .type cpu_arch_name, #object +cpu_arch_name: + .asciz "armv5te" + .size cpu_arch_name, . - cpu_arch_name + + .type cpu_elf_name, #object +cpu_elf_name: + .asciz "v5" + .size cpu_elf_name, . - cpu_elf_name + + .type cpu_mohawk_name, #object +cpu_mohawk_name: + .asciz "Marvell 88SV331x" + .size cpu_mohawk_name, . 
- cpu_mohawk_name + + .align + + .section ".proc.info.init", #alloc, #execinstr + + .type __88sv331x_proc_info,#object +__88sv331x_proc_info: + .long 0x56158000 @ Marvell 88SV331x (MOHAWK) + .long 0xfffff000 + .long PMD_TYPE_SECT | \ + PMD_SECT_BUFFERABLE | \ + PMD_SECT_CACHEABLE | \ + PMD_BIT4 | \ + PMD_SECT_AP_WRITE | \ + PMD_SECT_AP_READ + .long PMD_TYPE_SECT | \ + PMD_BIT4 | \ + PMD_SECT_AP_WRITE | \ + PMD_SECT_AP_READ + b __mohawk_setup + .long cpu_arch_name + .long cpu_elf_name + .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP + .long cpu_mohawk_name + .long mohawk_processor_functions + .long v4wbi_tlb_fns + .long v4wb_user_fns + .long mohawk_cache_fns + .size __88sv331x_proc_info, . - __88sv331x_proc_info |
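On the dma-mapping.c change above: dma_cache_maint_page() walks a scatterlist segment one page at a time, so the inner (L1) maintenance can reuse an existing kmap mapping of a highmem page via kmap_high_get() while the outer (L2) maintenance always works on physical addresses. The following stand-alone sketch shows just the page-splitting arithmetic of that loop; PAGE_SIZE, maint_one() and maint_region() are illustrative names here, not kernel API:

/*
 * Stand-alone illustration (plain C, not kernel code) of how a
 * (page, offset, size) region is split into per-page chunks, mirroring
 * the loop in dma_cache_maint_page() above.  Every chunk is clamped to
 * the end of its page, as the patch does for highmem pages.
 */
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

static void maint_one(unsigned long page_idx, size_t offset, size_t len)
{
	printf("page %lu: offset %zu, len %zu\n", page_idx, offset, len);
}

static void maint_region(unsigned long page_idx, size_t offset, size_t size)
{
	size_t left = size;

	do {
		size_t len = left;

		if (len + offset > PAGE_SIZE) {
			if (offset >= PAGE_SIZE) {
				page_idx += offset / PAGE_SIZE;
				offset %= PAGE_SIZE;
			}
			len = PAGE_SIZE - offset;	/* clamp to end of page */
		}
		maint_one(page_idx, offset, len);
		offset = 0;
		page_idx++;
		left -= len;
	} while (left);
}

int main(void)
{
	/* e.g. 10000 bytes starting 3000 bytes into the first page:
	 * chunks of 1096, 4096, 4096 and 712 bytes. */
	maint_region(0, 3000, 10000);
	return 0;
}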