author		Will Deacon	2020-09-30 13:20:40 +0100
committer	Will Deacon	2020-10-01 09:45:32 +0100
commit		6a1bdb173f9967b2329aab0f25bcba963f54e06b (patch)
tree		5f504bd8dd7dc5c1b67bedf76ca5726c503979fa
parent		e676594115f0bcc12d4791f9ee919e20d8d750ee (diff)
arm64: mm: Make flush_tlb_fix_spurious_fault() a no-op
Our use of broadcast TLB maintenance means that spurious page-faults
that have been handled already by another CPU do not require additional
TLB maintenance.
Make flush_tlb_fix_spurious_fault() a no-op and rely on the existing TLB
invalidation instead. Add an explicit flush_tlb_page() when making a page
dirty, as the TLB is permitted to cache the old read-only entry.
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20200728092220.GA21800@willie-the-truck
Signed-off-by: Will Deacon <will@kernel.org>
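
For context on why the no-op is safe: flush_tlb_fix_spurious_fault() is called from the generic fault path when ptep_set_access_flags() reports that the PTE needed no change, i.e. another CPU already handled the same fault. A simplified sketch of that caller follows, paraphrased from mm/memory.c of roughly this era; the function name handle_pte_fault_sketch() is made up for illustration and the real logic carries extra conditions (e.g. retried faults) that are omitted here.

#include <linux/mm.h>

/*
 * Simplified sketch of the generic write-fault path (paraphrased from
 * mm/memory.c; the struct vm_fault fields and helpers are real, but the
 * surrounding logic is abridged and the function name is hypothetical).
 */
static vm_fault_t handle_pte_fault_sketch(struct vm_fault *vmf)
{
	pte_t entry = pte_mkyoung(vmf->orig_pte);

	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
				  vmf->flags & FAULT_FLAG_WRITE)) {
		/* The PTE was updated; the arch code did any flushing needed. */
		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
	} else if (vmf->flags & FAULT_FLAG_WRITE) {
		/*
		 * Nothing changed: another CPU raced ahead and fixed the fault.
		 * With broadcast TLBI, its invalidation is already visible on
		 * this CPU, so arm64 can make this hook a no-op.
		 */
		flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
	}
	return 0;
}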
-rw-r--r--	arch/arm64/include/asm/pgtable.h	| 8
-rw-r--r--	arch/arm64/mm/fault.c			| 4
2 files changed, 11 insertions, 1 deletions
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index bc68da9f5706..02ad3105c14c 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -51,6 +51,14 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
+ * Outside of a few very special situations (e.g. hibernation), we always
+ * use broadcast TLB invalidation instructions, therefore a spurious page
+ * fault on one CPU which has been handled concurrently by another CPU
+ * does not need to perform additional invalidation.
+ */
+#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
+
+/*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index f07333e86c2f..a696a7921da4 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -218,7 +218,9 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 		pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
 	} while (pteval != old_pteval);
 
-	flush_tlb_fix_spurious_fault(vma, address);
+	/* Invalidate a stale read-only entry */
+	if (dirty)
+		flush_tlb_page(vma, address);
 	return 1;
 }
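
The "broadcast TLB maintenance" the commit message relies on comes from the TLBI *IS instructions used by the arm64 flush primitives: the IS (Inner Shareable) suffix makes the invalidation visible to every CPU in the inner-shareable domain, not just the local one. A condensed sketch of the per-page flush, paraphrased from arch/arm64/include/asm/tlbflush.h of this era (the __tlbi()/dsb() helpers are real, the function name flush_tlb_page_sketch() is made up, and some details are omitted):

#include <linux/mm_types.h>
#include <asm/tlbflush.h>

/*
 * Condensed sketch of the arm64 per-page TLB flush (paraphrased from
 * arch/arm64/include/asm/tlbflush.h; the helpers are real, the function
 * name is hypothetical).
 */
static inline void flush_tlb_page_sketch(struct vm_area_struct *vma,
					 unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));

	dsb(ishst);		/* order the PTE update before the TLBI */
	__tlbi(vale1is, addr);	/* "IS": invalidate on all CPUs, not just this one */
	__tlbi_user(vale1is, addr);
	dsb(ish);		/* wait for the broadcast invalidation to complete */
}

Because the dirty-path flush_tlb_page() added to ptep_set_access_flags() is itself broadcast, it reaches every CPU that might still be caching the stale read-only entry, which is what allows the per-CPU spurious-fault fix-up to disappear.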