author:    Sergey Matyukevich    2023-02-26 18:01:36 +0300
committer: Greg Kroah-Hartman    2023-03-22 13:34:00 +0100
commit:    eed0dc9c42eaab0c956a7089495b7679b5019227
tree:      a77153e89cf85ac23dc2811428d29eb6ab7fadf1 /arch/riscv/mm
parent:    d813d8e6d9a066cb01c033959d11f23dd85a32c0
Revert "riscv: mm: notify remote harts about mmu cache updates"
commit e921050022f1f12d5029d1487a7dfc46cde15523 upstream.
This reverts the remaining bits of commit 4bd1d80efb5a ("riscv: mm:
notify remote harts about mmu cache updates").

According to bug reports, the suggested approach to fix stale TLB
entries is not sufficient. It needs to be replaced by a more robust
solution.
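For context, the reverted scheme tracked harts with potentially stale TLB
entries in a per-mm tlb_stale_mask and deferred the flush until the mm was
switched in on such a hart. A minimal sketch of that context-switch path,
paraphrasing the lines deleted in the diff below (a recap, not new code):

	if (need_flush_tlb) {
		local_flush_tlb_all();
	} else if (cpumask_test_cpu(cpu, &mm->context.tlb_stale_mask)) {
		/* This hart may hold stale entries for this mm: flush only its ASID. */
		cpumask_clear_cpu(cpu, &mm->context.tlb_stale_mask);
		local_flush_tlb_all_asid(cntx & asid_mask);	/* sfence.vma x0, asid */
	}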
Fixes: 4bd1d80efb5a ("riscv: mm: notify remote harts about mmu cache updates")
Reported-by: Zong Li <zong.li@sifive.com>
Reported-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
Signed-off-by: Sergey Matyukevich <sergey.matyukevich@syntacore.com>
Cc: stable@vger.kernel.org
Reviewed-by: Guo Ren <guoren@kernel.org>
Link: https://lore.kernel.org/r/20230226150137.1919750-2-geomatsi@gmail.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch/riscv/mm')
-rw-r--r--  arch/riscv/mm/context.c  | 10
-rw-r--r--  arch/riscv/mm/tlbflush.c | 28
2 files changed, 17 insertions(+), 21 deletions(-)
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 80ce9caba8d2..7acbfbd14557 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -196,16 +196,6 @@ switch_mm_fast:
 
 	if (need_flush_tlb)
 		local_flush_tlb_all();
-#ifdef CONFIG_SMP
-	else {
-		cpumask_t *mask = &mm->context.tlb_stale_mask;
-
-		if (cpumask_test_cpu(cpu, mask)) {
-			cpumask_clear_cpu(cpu, mask);
-			local_flush_tlb_all_asid(cntx & asid_mask);
-		}
-	}
-#endif
 }
 
 static void set_mm_noasid(struct mm_struct *mm)
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index ce7dfc81bb3f..37ed760d007c 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -5,7 +5,23 @@
 #include <linux/sched.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
+
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+	__asm__ __volatile__ ("sfence.vma x0, %0"
+			:
+			: "r" (asid)
+			: "memory");
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+		unsigned long asid)
+{
+	__asm__ __volatile__ ("sfence.vma %0, %1"
+			:
+			: "r" (addr), "r" (asid)
+			: "memory");
+}
 
 void flush_tlb_all(void)
 {
@@ -15,7 +31,6 @@ void flush_tlb_all(void)
 static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 				  unsigned long size, unsigned long stride)
 {
-	struct cpumask *pmask = &mm->context.tlb_stale_mask;
 	struct cpumask *cmask = mm_cpumask(mm);
 	unsigned int cpuid;
 	bool broadcast;
@@ -29,15 +44,6 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 	if (static_branch_unlikely(&use_asid_allocator)) {
 		unsigned long asid = atomic_long_read(&mm->context.id);
 
-		/*
-		 * TLB will be immediately flushed on harts concurrently
-		 * executing this MM context. TLB flush on other harts
-		 * is deferred until this MM context migrates there.
-		 */
-		cpumask_setall(pmask);
-		cpumask_clear_cpu(cpuid, pmask);
-		cpumask_andnot(pmask, pmask, cmask);
-
 		if (broadcast) {
 			sbi_remote_sfence_vma_asid(cmask, start, size, asid);
 		} else if (size <= stride) {
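The helpers reinstated in tlbflush.c map directly onto the RISC-V sfence.vma
instruction: rs1 selects a virtual address (x0 meaning all addresses) and rs2
selects an ASID (x0 meaning all address spaces). As a usage sketch only,
assuming the caller holds a valid mm (example_flush_one_page is hypothetical
and not part of this patch):

	/*
	 * Flush one stale translation for a single address space on the
	 * local hart: sfence.vma addr, asid invalidates only TLB entries
	 * matching both the virtual address and the ASID.
	 */
	static void example_flush_one_page(struct mm_struct *mm, unsigned long addr)
	{
		unsigned long asid = atomic_long_read(&mm->context.id);

		local_flush_tlb_page_asid(addr, asid);
	}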