author		Thomas Gleixner <tglx@linutronix.de>	2020-04-21 11:20:32 +0200
committer	Borislav Petkov <bp@suse.de>		2020-04-26 11:00:05 +0200
commit		2faf153bb7346b7dfc895f916edf93a86297ec0a
tree		763c31123cdf2cb76222cc28ffdb6c9df795f377 /arch/x86
parent		9020d3956317d052cdddd43e55acdd2970344192
x86/tlb: Move __flush_tlb() out of line
cpu_tlbstate is exported because various TLB-related functions need
access to it, but cpu_tlbstate is sensitive information which should
only be accessed by well-contained kernel functions and not be directly
exposed to modules.
As a first step, move __flush_tlb() out of line and hide the native
function. The latter can be static when CONFIG_PARAVIRT is disabled.
Consolidate the namespace while at it and remove the pointless extra
wrapper in the paravirt code.
No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200421092559.246130908@linutronix.de
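
The shape of the new interface is easy to see in isolation. A minimal sketch, condensed from the tlb.c and tlbflush.h hunks below (function body elided; everything named here comes from the diff):

/*
 * With CONFIG_PARAVIRT=n the native function can be static and
 * __flush_tlb_local resolves to it directly; with CONFIG_PARAVIRT=y it
 * stays global so pv_ops.mmu.flush_tlb_user can point at it.
 */
#ifdef CONFIG_PARAVIRT
# define STATIC_NOPV
#else
# define STATIC_NOPV		static
# define __flush_tlb_local	native_flush_tlb_local
#endif

STATIC_NOPV void native_flush_tlb_local(void)
{
	/* invalidate the user ASID and reload CR3; see the tlb.c hunk */
}

/*
 * The sole out-of-line entry point. Callers, including modules, go
 * through this and never touch cpu_tlbstate directly.
 */
void flush_tlb_local(void)
{
	__flush_tlb_local();
}
EXPORT_SYMBOL_GPL(flush_tlb_local);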
Diffstat (limited to 'arch/x86')
 arch/x86/include/asm/paravirt.h    |  4
 arch/x86/include/asm/tlbflush.h    | 29
 arch/x86/kernel/cpu/mtrr/generic.c |  4
 arch/x86/kernel/paravirt.c         |  7
 arch/x86/mm/mem_encrypt.c          |  2
 arch/x86/mm/tlb.c                  | 33
 arch/x86/platform/uv/tlb_uv.c      |  2
 7 files changed, 45 insertions(+), 36 deletions(-)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 694d8daf4983..f412450668d8 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -47,7 +47,9 @@ static inline void slow_down_io(void)
 #endif
 }
 
-static inline void __flush_tlb(void)
+void native_flush_tlb_local(void);
+
+static inline void __flush_tlb_local(void)
 {
 	PVOP_VCALL0(mmu.flush_tlb_user);
 }
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index d804030079da..fe1fd02904ba 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -140,12 +140,13 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
 	return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
 }
 
+void flush_tlb_local(void);
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-#define __flush_tlb() __native_flush_tlb()
-#define __flush_tlb_global() __native_flush_tlb_global()
-#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
+#define __flush_tlb_global()		__native_flush_tlb_global()
+#define __flush_tlb_one_user(addr)	__native_flush_tlb_one_user(addr)
 #endif
 
 struct tlb_context {
@@ -371,24 +372,6 @@ static inline void invalidate_user_asid(u16 asid)
 }
 
 /*
- * flush the entire current user mapping
- */
-static inline void __native_flush_tlb(void)
-{
-	/*
-	 * Preemption or interrupts must be disabled to protect the access
-	 * to the per CPU variable and to prevent being preempted between
-	 * read_cr3() and write_cr3().
-	 */
-	WARN_ON_ONCE(preemptible());
-
-	invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
-
-	/* If current->mm == NULL then the read_cr3() "borrows" an mm */
-	native_write_cr3(__native_read_cr3());
-}
-
-/*
  * flush everything
  */
 static inline void __native_flush_tlb_global(void)
@@ -461,7 +444,7 @@ static inline void __flush_tlb_all(void)
 		/*
 		 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
 		 */
-		__flush_tlb();
+		flush_tlb_local();
 	}
 }
 
@@ -537,8 +520,6 @@ struct flush_tlb_info {
 	bool			freed_tables;
 };
 
-#define local_flush_tlb() __flush_tlb()
-
 #define flush_tlb_mm(mm)						\
 		flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)
 
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 51b9190c628b..23ad8e953dfb 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -761,7 +761,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
 
 	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-	__flush_tlb();
+	flush_tlb_local();
 
 	/* Save MTRR state */
 	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
@@ -778,7 +778,7 @@ static void post_set(void) __releases(set_atomicity_lock)
 {
 	/* Flush TLBs (no need to flush caches - they are disabled) */
 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-	__flush_tlb();
+	flush_tlb_local();
 
 	/* Intel (P6) standard MTRRs */
 	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index c131ba4e70ef..4cb3d822ea09 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -160,11 +160,6 @@ unsigned paravirt_patch_insns(void *insn_buff, unsigned len,
 	return insn_len;
 }
 
-static void native_flush_tlb(void)
-{
-	__native_flush_tlb();
-}
-
 /*
  * Global pages have to be flushed a bit differently. Not a real
  * performance problem because this does not happen often.
@@ -359,7 +354,7 @@ struct paravirt_patch_template pv_ops = {
 #endif /* CONFIG_PARAVIRT_XXL */
 
 	/* Mmu ops. */
-	.mmu.flush_tlb_user	= native_flush_tlb,
+	.mmu.flush_tlb_user	= native_flush_tlb_local,
 	.mmu.flush_tlb_kernel	= native_flush_tlb_global,
 	.mmu.flush_tlb_one_user	= native_flush_tlb_one_user,
 	.mmu.flush_tlb_others	= native_flush_tlb_others,
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index a03614bd3e1a..4a781cf99e92 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -134,7 +134,7 @@ static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
 		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
 	} while (size);
 
-	__native_flush_tlb();
+	flush_tlb_local();
 }
 
 void __init sme_unmap_bootdata(char *real_mode_data)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 3d9d81951962..06116480c343 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -18,6 +18,13 @@
 
 #include "mm_internal.h"
 
+#ifdef CONFIG_PARAVIRT
+# define STATIC_NOPV
+#else
+# define STATIC_NOPV			static
+# define __flush_tlb_local		native_flush_tlb_local
+#endif
+
 /*
  *	TLB flushing, formerly SMP-only
  *		c/o Linus Torvalds.
@@ -645,7 +652,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 		trace_tlb_flush(reason, nr_invalidate);
 	} else {
 		/* Full flush. */
-		local_flush_tlb();
+		flush_tlb_local();
 		if (local)
 			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 		trace_tlb_flush(reason, TLB_FLUSH_ALL);
@@ -884,6 +891,30 @@ unsigned long __get_current_cr3_fast(void)
 EXPORT_SYMBOL_GPL(__get_current_cr3_fast);
 
 /*
+ * Flush the entire current user mapping
+ */
+STATIC_NOPV void native_flush_tlb_local(void)
+{
+	/*
+	 * Preemption or interrupts must be disabled to protect the access
+	 * to the per CPU variable and to prevent being preempted between
+	 * read_cr3() and write_cr3().
+	 */
+	WARN_ON_ONCE(preemptible());
+
+	invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
+
+	/* If current->mm == NULL then the read_cr3() "borrows" an mm */
+	native_write_cr3(__native_read_cr3());
+}
+
+void flush_tlb_local(void)
+{
+	__flush_tlb_local();
+}
+EXPORT_SYMBOL_GPL(flush_tlb_local);
+
+/*
  * arch_tlbbatch_flush() performs a full TLB flush regardless of the active mm.
  * This means that the 'struct flush_tlb_info' that describes which mappings to
  * flush is actually fixed. We therefore set a single fixed struct and use it in
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 1fd321f37f1b..6af766c47dd2 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -293,7 +293,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
 	 * This must be a normal message, or retry of a normal message
 	 */
 	if (msg->address == TLB_FLUSH_ALL) {
-		local_flush_tlb();
+		flush_tlb_local();
 		stat->d_alltlb++;
 	} else {
 		__flush_tlb_one_user(msg->address);
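
Caller-side note: flush_tlb_local() keeps the precondition of the old inline, namely that preemption or interrupts are disabled while it runs; native_flush_tlb_local() warns otherwise via WARN_ON_ONCE(preemptible()). A hypothetical call site, sketched for illustration only (not part of the patch):

/*
 * Hypothetical example: a caller in preemptible context must disable
 * preemption around the flush so the per-CPU cpu_tlbstate access and
 * the CR3 read/write pair stay on one CPU.
 */
static void example_flush_this_cpu(void)
{
	preempt_disable();
	flush_tlb_local();
	preempt_enable();
}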