| field     | value                                          |
|-----------|------------------------------------------------|
| author    | Linus Torvalds, 2014-04-27 15:08:12 -0700      |
| committer | Linus Torvalds, 2014-04-27 15:08:12 -0700      |
| commit    | ac6c9e2bed093c4b60e313674fb7aec4f264c3d4       |
| tree      | 6eccadffcc04a7b66021ab3bd114cafcb52181c6 /arch |
| parent    | 33c0022f0e687b0161a9bb84a5671df932551e3a       |
| parent    | 1cf35d47712dd5dc4d62c6ce984f04ac6eab0408       |
Merge branch 'safe-dirty-tlb-flush'
This merges the patch to fix possible loss of the dirty bit on munmap() or
madvise(MADV_DONTNEED). If there are concurrent writers on other CPUs that
still have the unmapped/unneeded page in their TLBs, their writes to the page
could be lost if a third CPU raced with the TLB flush and did a
page_mkclean() before the page was fully written.
Admittedly, if you munmap() or madvise(MADV_DONTNEED) an area _while_ another
thread is still busy writing to it, you deserve all the lost writes you
could get. But we kernel people hold ourselves to higher quality
standards than "crazy people deserve to lose", because, well, we've seen
people do all kinds of crazy things.
So let's get it right, just because we can, and we don't have to worry
about it.
* safe-dirty-tlb-flush:
mm: split 'tlb_flush_mmu()' into tlb flushing and memory freeing parts
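For readers who just want the shape of the change before the per-architecture diff, here is a minimal userspace sketch of the split the merge message describes. The struct mmu_gather stand-in and the function bodies are illustrative stubs (heap allocations and a printf in place of real pages and a real TLB shootdown), not kernel code; only the function names and the call ordering inside tlb_flush_mmu() mirror the patch: invalidate stale TLB entries first, release the gathered pages second, so a racing page_mkclean() can no longer see a "clean" page that another CPU can still dirty through its TLB.

#include <stdio.h>
#include <stdlib.h>

/*
 * Stand-in for the kernel's struct mmu_gather: just a small batch of
 * "pages" (plain heap allocations here) waiting to be freed.
 */
struct mmu_gather {
	void *pages[8];
	unsigned int nr;
};

/* Step 1: invalidate stale TLB entries (modeled as a message here). */
static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	printf("flushing TLB for %u gathered page(s)\n", tlb->nr);
}

/* Step 2: only after the flush is it safe to release the gathered pages. */
static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	for (unsigned int i = 0; i < tlb->nr; i++)
		free(tlb->pages[i]);
	tlb->nr = 0;
}

/* The combined helper keeps the old behaviour, now in the safe order. */
static void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

int main(void)
{
	struct mmu_gather tlb = { .nr = 0 };

	tlb.pages[tlb.nr++] = malloc(4096);	/* pretend this page was just unmapped */
	tlb_flush_mmu(&tlb);
	return 0;
}

The real tlb_flush_mmu_tlbonly()/tlb_flush_mmu_free() pair is what each arch/*/include/asm/tlb.h below now provides, so generic code can flush the TLB at the point where the ordering matters and defer the freeing.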
Diffstat (limited to 'arch')
| mode       | file                        | lines changed |
|------------|-----------------------------|---------------|
| -rw-r--r-- | arch/arm/include/asm/tlb.h  | 12            |
| -rw-r--r-- | arch/ia64/include/asm/tlb.h | 42            |
| -rw-r--r-- | arch/s390/include/asm/tlb.h | 13            |
| -rw-r--r-- | arch/sh/include/asm/tlb.h   | 8             |
| -rw-r--r-- | arch/um/include/asm/tlb.h   | 16            |

5 files changed, 77 insertions, 14 deletions
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 0baf7f0d9394..f1a0dace3efe 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -98,15 +98,25 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 	}
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
 	free_pages_and_swap_cache(tlb->pages, tlb->nr);
 	tlb->nr = 0;
 	if (tlb->pages == tlb->local)
 		__tlb_alloc_page(tlb);
 }
 
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 static inline void
 tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index bc5efc7c3f3f..39d64e0df1de 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -91,18 +91,9 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 #define RR_RID_MASK	0x00000000ffffff00L
 #define RR_TO_RID(val)	((val >> 8) & 0xffffff)
 
-/*
- * Flush the TLB for address range START to END and, if not in fast mode, release the
- * freed pages that where gathered up to this point.
- */
 static inline void
-ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	unsigned long i;
-	unsigned int nr;
-
-	if (!tlb->need_flush)
-		return;
 	tlb->need_flush = 0;
 
 	if (tlb->fullmm) {
@@ -135,6 +126,14 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
 	}
 
+}
+
+static inline void
+ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	unsigned long i;
+	unsigned int nr;
+
 	/* lastly, release the freed pages */
 	nr = tlb->nr;
 
@@ -144,6 +143,19 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 		free_page_and_swap_cache(tlb->pages[i]);
 }
 
+/*
+ * Flush the TLB for address range START to END and, if not in fast mode, release the
+ * freed pages that where gathered up to this point.
+ */
+static inline void
+ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+	if (!tlb->need_flush)
+		return;
+	ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
+	ia64_tlb_flush_mmu_free(tlb);
+}
+
 static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 {
 	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
@@ -206,6 +218,16 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	return tlb->max - tlb->nr;
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu_free(tlb);
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index c544b6f05d95..a25f09fbaf36 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -59,12 +59,23 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 	tlb->batch = NULL;
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	__tlb_flush_mm_lazy(tlb->mm);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
 	tlb_table_flush(tlb);
 }
 
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
 				  unsigned long start, unsigned long end)
 {
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 362192ed12fe..62f80d2a9df9 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -86,6 +86,14 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 	}
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 }
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 29b0301c18aa..16eb63fac57d 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -59,13 +59,25 @@ extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 			       unsigned long end);
 
 static inline void
+tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
+}
+
+static inline void
+tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	init_tlb_gather(tlb);
+}
+
+static inline void
 tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	if (!tlb->need_flush)
 		return;
 
-	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
-	init_tlb_gather(tlb);
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
 }
 
 /* tlb_finish_mmu