author    Aneesh Kumar K.V    2016-07-13 15:06:43 +0530
committer Michael Ellerman    2016-08-01 11:15:13 +1000
commit    5491ae7b6f48499b8892822cff371746f0b4102f (patch)
tree      a8bb6c61ba9276e1b64d4ad2ef1b6c10b8508929
parent    fbfa26d85418a155feacdb0f73cbf938f1027a8c (diff)
powerpc/mm/hugetlb: Add flush_hugetlb_tlb_range
Some archs like ppc64 need to do special things when flushing tlb for
hugepage. Add a new helper to flush hugetlb tlb range. This helps us
to avoid flushing the entire tlb mapping for the pid.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
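To see the shape of the change before reading the diff, here is a standalone, compile-able sketch of the opt-in pattern the patch uses: generic code falls back to flush_tlb_range() unless the architecture defines __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE and supplies its own helper. The struct and the printf bodies below are illustrative stand-ins, not kernel code.

#include <stdio.h>

struct vm_area_struct { int dummy; };	/* stand-in for the kernel type */

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	(void)vma;
	printf("generic flush_tlb_range: %#lx-%#lx\n", start, end);
}

/* An architecture that opts in, as ppc64 does in tlbflush.h below: */
#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	(void)vma;
	/* a real implementation flushes at the hugepage size here */
	printf("arch hugetlb flush: %#lx-%#lx\n", start, end);
}

/* Generic fallback, with the same shape as the mm/hugetlb.c hunk: */
#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#endif

int main(void)
{
	struct vm_area_struct vma = { 0 };

	/* resolves to the arch helper, since the macro is defined above */
	flush_hugetlb_tlb_range(&vma, 0x10000000UL, 0x10200000UL);
	return 0;
}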
-rw-r--r--  arch/powerpc/include/asm/book3s/64/tlbflush-radix.h   2
-rw-r--r--  arch/powerpc/include/asm/book3s/64/tlbflush.h        10
-rw-r--r--  arch/powerpc/mm/hugetlbpage-radix.c                  10
-rw-r--r--  mm/hugetlb.c                                         10
4 files changed, 31 insertions, 1 deletions
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 10eb0d1e3140..65037762b120 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -10,6 +10,8 @@ static inline int mmu_get_ap(int psize)
return mmu_psize_defs[psize].ap;
}

+extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
unsigned long end, int psize);
extern void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index 0790c4e92a64..146b269de8fa 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -16,6 +16,16 @@ static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
return hash__flush_tlb_range(vma, start, end);
}

+#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end)
+{
+ if (radix_enabled())
+ return radix__flush_hugetlb_tlb_range(vma, start, end);
+ return hash__flush_tlb_range(vma, start, end);
+}
+
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 1eca0deaf89b..35254a678456 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -25,6 +25,16 @@ void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long v
radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}

+void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ int psize;
+ struct hstate *hstate = hstate_file(vma->vm_file);
+
+ psize = hstate_get_psize(hstate);
+ radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
+}
+
/*
* A vairant of hugetlb_get_unmapped_area doing topdown search
* FIXME!! should we do as x86 does or non hugetlb area does ?
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f904246a8fd5..af2d88253bfc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3938,6 +3938,14 @@ same_page:
return i ? i : -EFAULT;
}

+#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+/*
+ * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
+ * implement this.
+ */
+#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#endif
+
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot)
{
@@ -3998,7 +4006,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
* once we release i_mmap_rwsem, another task can do the final put_page
* and that page table be reused and filled with junk.
*/
- flush_tlb_range(vma, start, end);
+ flush_hugetlb_tlb_range(vma, start, end);
mmu_notifier_invalidate_range(mm, start, end);
i_mmap_unlock_write(vma->vm_file->f_mapping);
mmu_notifier_invalidate_range_end(mm, start, end);
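As a rough, standalone illustration of why the call-site switch above matters: flushing a hugetlb range at hugepage granularity issues far fewer invalidations than walking it at 4K granularity, and avoids throwing away the whole PID's TLB mapping. The 2MB size (one of the radix hugepage sizes) and all names below are illustrative, not taken from the kernel.

#include <stdio.h>

/* count the invalidations a radix-style range flush would issue when
 * stepping through [start, end) at the given page size */
static unsigned long invalidations(unsigned long start, unsigned long end,
				   unsigned long page_size)
{
	unsigned long ops = 0;

	for (unsigned long addr = start; addr < end; addr += page_size)
		ops++;
	return ops;
}

int main(void)
{
	unsigned long start = 0x10000000UL;
	unsigned long end = start + (32UL << 20);	/* a 32MB range */

	printf("4K steps:  %lu\n", invalidations(start, end, 4UL << 10));	/* 8192 */
	printf("2MB steps: %lu\n", invalidations(start, end, 2UL << 20));	/* 16 */
	return 0;
}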