author     Alexander Graf   2013-08-29 00:41:59 +0200
committer  Alexander Graf   2013-08-29 00:41:59 +0200
commit     bf550fc93d9855872a95e69e4002256110d89858 (patch)
tree       10876bb4304bffe54c4160a132e7b8de6577ac4e /arch/arm/mm
parent     7e48c101e0c53e6095c5f4f5e63d14df50aae8fc (diff)
parent     cc2df20c7c4ce594c3e17e9cc260c330646012c8 (diff)
Merge remote-tracking branch 'origin/next' into kvm-ppc-next
Conflicts:
	mm/Kconfig

The CMA DMA split and the ZSWAP introduction were conflicting; fixed up manually.
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/dma-mapping.c | 40
-rw-r--r--  arch/arm/mm/init.c        | 58
-rw-r--r--  arch/arm/mm/mmap.c        |  2
-rw-r--r--  arch/arm/mm/mmu.c         |  2
4 files changed, 40 insertions(+), 62 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 19a19bd44447..dbddc07a3bbd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1328,6 +1328,15 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
if (gfp & GFP_ATOMIC)
return __iommu_alloc_atomic(dev, size, handle);
+ /*
+ * Following is a work-around (a.k.a. hack) to prevent pages
+ * with __GFP_COMP being passed to split_page() which cannot
+ * handle them. The real problem is that this flag probably
+ * should be 0 on ARM as it is not supported on this
+ * platform; see CONFIG_HUGETLBFS.
+ */
+ gfp &= ~(__GFP_COMP);
+
pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
if (!pages)
return NULL;
@@ -1386,16 +1395,17 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, struct dma_attrs *attrs)
{
- struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+ struct page **pages;
size = PAGE_ALIGN(size);
- if (!pages) {
- WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+ if (__in_atomic_pool(cpu_addr, size)) {
+ __iommu_free_atomic(dev, cpu_addr, handle, size);
return;
}
- if (__in_atomic_pool(cpu_addr, size)) {
- __iommu_free_atomic(dev, cpu_addr, handle, size);
+ pages = __iommu_get_pages(cpu_addr, attrs);
+ if (!pages) {
+ WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
return;
}
@@ -1650,13 +1660,27 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
{
struct dma_iommu_mapping *mapping = dev->archdata.mapping;
dma_addr_t dma_addr;
- int ret, len = PAGE_ALIGN(size + offset);
+ int ret, prot, len = PAGE_ALIGN(size + offset);
dma_addr = __alloc_iova(mapping, len);
if (dma_addr == DMA_ERROR_CODE)
return dma_addr;
- ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
+ switch (dir) {
+ case DMA_BIDIRECTIONAL:
+ prot = IOMMU_READ | IOMMU_WRITE;
+ break;
+ case DMA_TO_DEVICE:
+ prot = IOMMU_READ;
+ break;
+ case DMA_FROM_DEVICE:
+ prot = IOMMU_WRITE;
+ break;
+ default:
+ prot = 0;
+ }
+
+ ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
if (ret < 0)
goto fail;
@@ -1921,7 +1945,7 @@ void arm_iommu_detach_device(struct device *dev)
iommu_detach_device(mapping->domain, dev);
kref_put(&mapping->kref, release_iommu_mapping);
- mapping = NULL;
+ dev->archdata.mapping = NULL;
set_dma_ops(dev, NULL);
pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 2ffee02d1d5c..15225d829d71 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -583,9 +583,6 @@ static void __init free_highpages(void)
*/
void __init mem_init(void)
{
- unsigned long reserved_pages, free_pages;
- struct memblock_region *reg;
- int i;
#ifdef CONFIG_HAVE_TCM
/* These pointers are filled in on TCM detection */
extern u32 dtcm_end;
@@ -596,57 +593,16 @@ void __init mem_init(void)
/* this will put all unused low memory onto the freelists */
free_unused_memmap(&meminfo);
-
- totalram_pages += free_all_bootmem();
+ free_all_bootmem();
#ifdef CONFIG_SA1111
/* now that our DMA memory is actually so designated, we can free it */
- free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, 0, NULL);
+ free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif
free_highpages();
- reserved_pages = free_pages = 0;
-
- for_each_bank(i, &meminfo) {
- struct membank *bank = &meminfo.bank[i];
- unsigned int pfn1, pfn2;
- struct page *page, *end;
-
- pfn1 = bank_pfn_start(bank);
- pfn2 = bank_pfn_end(bank);
-
- page = pfn_to_page(pfn1);
- end = pfn_to_page(pfn2 - 1) + 1;
-
- do {
- if (PageReserved(page))
- reserved_pages++;
- else if (!page_count(page))
- free_pages++;
- page++;
- } while (page < end);
- }
-
- /*
- * Since our memory may not be contiguous, calculate the
- * real number of pages we have in this system
- */
- printk(KERN_INFO "Memory:");
- num_physpages = 0;
- for_each_memblock(memory, reg) {
- unsigned long pages = memblock_region_memory_end_pfn(reg) -
- memblock_region_memory_base_pfn(reg);
- num_physpages += pages;
- printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
- }
- printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
-
- printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
- nr_free_pages() << (PAGE_SHIFT-10),
- free_pages << (PAGE_SHIFT-10),
- reserved_pages << (PAGE_SHIFT-10),
- totalhigh_pages << (PAGE_SHIFT-10));
+ mem_init_print_info(NULL);
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
@@ -712,7 +668,7 @@ void __init mem_init(void)
BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
- if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
+ if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
extern int sysctl_overcommit_memory;
/*
* On a machine this small we won't get
@@ -729,12 +685,12 @@ void free_initmem(void)
extern char __tcm_start, __tcm_end;
poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
- free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link");
+ free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
#endif
poison_init_mem(__init_begin, __init_end - __init_begin);
if (!machine_is_integrator() && !machine_is_cintegrator())
- free_initmem_default(0);
+ free_initmem_default(-1);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -745,7 +701,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
{
if (!keep_initrd) {
poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
- free_reserved_area(start, end, 0, "initrd");
+ free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
}
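Note on the large mem_init() hunk above: the hand-rolled accounting that is
deleted counted reserved and free pages by walking each memory bank, then
converted page counts to megabytes with pages >> (20 - PAGE_SHIFT); the generic
mem_init_print_info() helper now produces the same report. A tiny standalone
sketch of that unit conversion, with PAGE_SHIFT fixed at 12 (4 KiB pages)
purely as an assumption for the demo:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages for this demo */

int main(void)
{
	unsigned long pages = 32768;	/* 32768 * 4 KiB = 128 MiB */

	/* bytes = pages << PAGE_SHIFT; MB = bytes >> 20; combined: */
	printf("%lu pages = %lu MB\n", pages, pages >> (20 - PAGE_SHIFT));
	return 0;
}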
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 10062ceadd1c..0c6356255fe3 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -181,11 +181,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
if (mmap_is_legacy()) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
} else {
mm->mmap_base = mmap_base(random_factor);
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- mm->unmap_area = arch_unmap_area_topdown;
}
}
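Note on the mmap.c hunk above: it only drops the per-mm unmap_area callback,
which was removed from mm_struct generically in this cycle; the choice between
the bottom-up legacy layout and the top-down layout stays. A simplified model
of the two base calculations follows. TASK_SIZE, TASK_UNMAPPED_BASE, MIN_GAP
and the random factor are placeholder values for illustration, not the
kernel's actual definitions:

#include <stdio.h>

/* Illustrative placeholders, not the kernel's real values. */
#define TASK_SIZE		0xbf000000UL	/* ~3 GB user address space */
#define TASK_UNMAPPED_BASE	(TASK_SIZE / 3)
#define MIN_GAP			(128UL << 20)	/* gap kept below the stack */

/* Top-down: the mmap base hangs below the stack gap; mappings grow down. */
static unsigned long mmap_base(unsigned long rnd)
{
	return TASK_SIZE - MIN_GAP - rnd;
}

int main(void)
{
	unsigned long rnd = 0x2000;	/* stand-in for the ASLR random factor */

	/* Legacy: mappings grow up from TASK_UNMAPPED_BASE. */
	printf("legacy  mmap_base: %#lx\n", TASK_UNMAPPED_BASE + rnd);
	printf("topdown mmap_base: %#lx\n", mmap_base(rnd));
	return 0;
}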
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index d7229d28c7f8..4f56617a2392 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -950,7 +950,7 @@ void __init debug_ll_io_init(void)
map.virtual &= PAGE_MASK;
map.length = PAGE_SIZE;
map.type = MT_DEVICE;
- create_mapping(&map);
+ iotable_init(&map, 1);
}
#endif
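Note on the mmu.c one-liner above: debug_ll_io_init() switches from a bare
create_mapping() call to iotable_init(), which installs the same page-table
entry but additionally registers the region as a static vm area, so the early
debug UART window is reserved in the vmalloc address space rather than being
silently reusable by later ioremap() calls. Below is a toy model of the
map_desc array that iotable_init() consumes; the struct layout mirrors
arch/arm/include/asm/mach/map.h, and every address and constant value here is
hypothetical:

#include <stdio.h>

struct map_desc {
	unsigned long virtual;	/* virtual address of the mapping */
	unsigned long pfn;	/* physical page frame number */
	unsigned long length;	/* size of the region */
	unsigned int type;	/* memory type, e.g. MT_DEVICE */
};

#define MT_DEVICE 0	/* stand-in value; the real constant lives in asm/mach/map.h */
#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Toy iotable_init(): report what would be mapped and reserved. */
static void iotable_init_model(const struct map_desc *io_desc, int nr)
{
	for (int i = 0; i < nr; i++)
		printf("map %#lx -> pfn %#lx, len %#lx, type %u (reserved in vmalloc space)\n",
		       io_desc[i].virtual & PAGE_MASK, io_desc[i].pfn,
		       io_desc[i].length, io_desc[i].type);
}

int main(void)
{
	struct map_desc map = {
		.virtual = 0xf8020000UL,	/* hypothetical debug UART virtual address */
		.pfn     = 0x20000UL,		/* hypothetical physical pfn */
		.length  = PAGE_SIZE,
		.type    = MT_DEVICE,
	};

	iotable_init_model(&map, 1);
	return 0;
}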