author     Linus Torvalds   2019-07-19 12:15:33 -0700
committer  Linus Torvalds   2019-07-19 12:15:33 -0700
commit     a84d2d2906f983fb80f5dcc3e8e7c3ad70aa9f0d (patch)
tree       26d5ca2e6f2e799d858a18bccaa7c3f42f7d3fe3 /arch/csky/include
parent     b5d72dda8976e878be47415b94bca8465d1fa22d (diff)
parent     bdfeb0ccea1a12b58299b95eb0f28e2aa26de4c2 (diff)
Merge tag 'csky-for-linus-5.3-rc1' of git://github.com/c-sky/csky-linux
Pull arch/csky updates from Guo Ren:
"This round of csky subsystem gives two features (ASID algorithm
update, Perf pmu record support) and some fixups.
ASID updates:
- Revert mmu ASID mechanism
- Add new asid lib code from arm
- Use generic asid algorithm to implement switch_mm
- Improve tlb operation with help of asid
Perf pmu record support:
- Init pmu as a device
- Add count-width property for csky pmu
- Add pmu interrupt support
- Fix perf record in kernel/user space
- dt-bindings: Add csky PMU bindings
Fixes:
- Fixup no panic in kernel for some traps
- Fixup some error count in 810 & 860.
- Fixup abiv1 memset error"
* tag 'csky-for-linus-5.3-rc1' of git://github.com/c-sky/csky-linux:
csky: Fixup abiv1 memset error
csky: Improve tlb operation with help of asid
csky: Use generic asid algorithm to implement switch_mm
csky: Add new asid lib code from arm
csky: Revert mmu ASID mechanism
dt-bindings: csky: Add csky PMU bindings
dt-bindings: interrupt-controller: Update csky mpintc
csky: Fixup some error count in 810 & 860.
csky: Fix perf record in kernel/user space
csky: Add pmu interrupt support
csky: Add count-width property for csky pmu
csky: Init pmu as a device
csky: Fixup no panic in kernel for some traps
csky: Select intc & timer drivers
Diffstat (limited to 'arch/csky/include')
-rw-r--r--   arch/csky/include/asm/asid.h          78
-rw-r--r--   arch/csky/include/asm/mmu.h            2
-rw-r--r--   arch/csky/include/asm/mmu_context.h  114
-rw-r--r--   arch/csky/include/asm/pgtable.h        2
4 files changed, 91 insertions, 105 deletions
diff --git a/arch/csky/include/asm/asid.h b/arch/csky/include/asm/asid.h
new file mode 100644
index 000000000000..ac08b0ffbe1f
--- /dev/null
+++ b/arch/csky/include/asm/asid.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ASM_ASID_H
+#define __ASM_ASM_ASID_H
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+
+struct asid_info
+{
+	atomic64_t	generation;
+	unsigned long	*map;
+	atomic64_t __percpu	*active;
+	u64 __percpu		*reserved;
+	u32			bits;
+	/* Lock protecting the structure */
+	raw_spinlock_t		lock;
+	/* Which CPU requires context flush on next call */
+	cpumask_t		flush_pending;
+	/* Number of ASID allocated by context (shift value) */
+	unsigned int		ctxt_shift;
+	/* Callback to locally flush the context. */
+	void			(*flush_cpu_ctxt_cb)(void);
+};
+
+#define NUM_ASIDS(info)			(1UL << ((info)->bits))
+#define NUM_CTXT_ASIDS(info)		(NUM_ASIDS(info) >> (info)->ctxt_shift)
+
+#define active_asid(info, cpu)	*per_cpu_ptr((info)->active, cpu)
+
+void asid_new_context(struct asid_info *info, atomic64_t *pasid,
+		      unsigned int cpu, struct mm_struct *mm);
+
+/*
+ * Check the ASID is still valid for the context. If not generate a new ASID.
+ *
+ * @pasid: Pointer to the current ASID batch
+ * @cpu: current CPU ID. Must have been acquired throught get_cpu()
+ */
+static inline void asid_check_context(struct asid_info *info,
+				      atomic64_t *pasid, unsigned int cpu,
+				      struct mm_struct *mm)
+{
+	u64 asid, old_active_asid;
+
+	asid = atomic64_read(pasid);
+
+	/*
+	 * The memory ordering here is subtle.
+	 * If our active_asid is non-zero and the ASID matches the current
+	 * generation, then we update the active_asid entry with a relaxed
+	 * cmpxchg. Racing with a concurrent rollover means that either:
+	 *
+	 * - We get a zero back from the cmpxchg and end up waiting on the
+	 *   lock. Taking the lock synchronises with the rollover and so
+	 *   we are forced to see the updated generation.
+	 *
+	 * - We get a valid ASID back from the cmpxchg, which means the
+	 *   relaxed xchg in flush_context will treat us as reserved
+	 *   because atomic RmWs are totally ordered for a given location.
+	 */
+	old_active_asid = atomic64_read(&active_asid(info, cpu));
+	if (old_active_asid &&
+	    !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
+	    atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
+				     old_active_asid, asid))
+		return;
+
+	asid_new_context(info, pasid, cpu, mm);
+}
+
+int asid_allocator_init(struct asid_info *info,
+			u32 bits, unsigned int asid_per_ctxt,
+			void (*flush_cpu_ctxt_cb)(void));
+
+#endif
diff --git a/arch/csky/include/asm/mmu.h b/arch/csky/include/asm/mmu.h
index cb344675ccc4..b382a14ea4ec 100644
--- a/arch/csky/include/asm/mmu.h
+++ b/arch/csky/include/asm/mmu.h
@@ -5,7 +5,7 @@
 #define __ASM_CSKY_MMU_H
 
 typedef struct {
-	unsigned long asid[NR_CPUS];
+	atomic64_t asid;
 	void *vdso;
 } mm_context_t;
 
diff --git a/arch/csky/include/asm/mmu_context.h b/arch/csky/include/asm/mmu_context.h
index 734db3a122e1..0285b0ad18b6 100644
--- a/arch/csky/include/asm/mmu_context.h
+++ b/arch/csky/include/asm/mmu_context.h
@@ -16,122 +16,32 @@
 
 #define TLBMISS_HANDLER_SETUP_PGD(pgd) \
 	setup_pgd(__pa(pgd), false)
+
 #define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \
 	setup_pgd(__pa(pgd), true)
 
-#define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
-#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
-#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
+#define ASID_MASK		((1 << CONFIG_CPU_ASID_BITS) - 1)
+#define cpu_asid(mm)		(atomic64_read(&mm->context.asid) & ASID_MASK)
 
-#define ASID_FIRST_VERSION	(1 << CONFIG_CPU_ASID_BITS)
-#define ASID_INC		0x1
-#define ASID_MASK		(ASID_FIRST_VERSION - 1)
-#define ASID_VERSION_MASK	~ASID_MASK
+#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.asid, 0); 0; })
+#define activate_mm(prev,next)		switch_mm(prev, next, current)
 
 #define destroy_context(mm)		do {} while (0)
 #define enter_lazy_tlb(mm, tsk)		do {} while (0)
 #define deactivate_mm(tsk, mm)		do {} while (0)
 
-/*
- * All unused by hardware upper bits will be considered
- * as a software asid extension.
- */
-static inline void
-get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
-{
-	unsigned long asid = asid_cache(cpu);
-
-	asid += ASID_INC;
-	if (!(asid & ASID_MASK)) {
-		flush_tlb_all();	/* start new asid cycle */
-		if (!asid)		/* fix version if needed */
-			asid = ASID_FIRST_VERSION;
-	}
-	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
-}
-
-/*
- * Initialize the context related info for a new mm_struct
- * instance.
- */
-static inline int
-init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
-	int i;
-
-	for_each_online_cpu(i)
-		cpu_context(i, mm) = 0;
-	return 0;
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-			struct task_struct *tsk)
-{
-	unsigned int cpu = smp_processor_id();
-	unsigned long flags;
-
-	local_irq_save(flags);
-	/* Check if our ASID is of an older version and thus invalid */
-	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
-		get_new_mmu_context(next, cpu);
-	write_mmu_entryhi(cpu_asid(cpu, next));
-	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
-
-	/*
-	 * Mark current->active_mm as not "active" anymore.
-	 * We don't want to mislead possible IPI tlb flush routines.
-	 */
-	cpumask_clear_cpu(cpu, mm_cpumask(prev));
-	cpumask_set_cpu(cpu, mm_cpumask(next));
+void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
 
-	local_irq_restore(flags);
-}
-
-/*
- * After we have set current->mm to a new value, this activates
- * the context for the new mm so we see the new mappings.
- */
 static inline void
-activate_mm(struct mm_struct *prev, struct mm_struct *next)
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	  struct task_struct *tsk)
 {
-	unsigned long flags;
-	int cpu = smp_processor_id();
-
-	local_irq_save(flags);
+	unsigned int cpu = smp_processor_id();
 
-	/* Unconditionally get a new ASID. */
-	get_new_mmu_context(next, cpu);
+	if (prev != next)
+		check_and_switch_context(next, cpu);
 
-	write_mmu_entryhi(cpu_asid(cpu, next));
 	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
-
-	/* mark mmu ownership change */
-	cpumask_clear_cpu(cpu, mm_cpumask(prev));
-	cpumask_set_cpu(cpu, mm_cpumask(next));
-
-	local_irq_restore(flags);
-}
-
-/*
- * If mm is currently active_mm, we can't really drop it. Instead,
- * we will get a new one for it.
- */
-static inline void
-drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
-		get_new_mmu_context(mm, cpu);
-		write_mmu_entryhi(cpu_asid(cpu, mm));
-	} else {
-		/* will get a new context next time */
-		cpu_context(cpu, mm) = 0;
-	}
-
-	local_irq_restore(flags);
+	write_mmu_entryhi(next->context.asid.counter);
 }
-
 #endif /* __ASM_CSKY_MMU_CONTEXT_H */
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index dcea277c09ae..c429a6f347de 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -290,8 +290,6 @@ static inline pte_t *pte_offset(pmd_t *dir, unsigned long address)
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern void paging_init(void);
 
-extern void show_jtlb_table(void);
-
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		      pte_t *pte);
 
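For context, here is a minimal sketch of how an architecture is expected to glue the new asm/asid.h allocator into the check_and_switch_context() hook declared in mmu_context.h above, assuming a setup similar to the arm-derived code this series imports. The asid_info instance, the asid_flush_cpu_ctxt() name and the use of flush_tlb_all() are illustrative assumptions; the actual csky glue lives under arch/csky/mm/ and is outside this diffstat.

/*
 * Illustrative sketch, not the actual arch/csky/mm implementation.
 * Shows how the asm/asid.h API above is meant to be wired up.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm_types.h>
#include <asm/asid.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

static struct asid_info asid_info;

/* Invalidate the TLB when the allocator rolls over to a new generation. */
static void asid_flush_cpu_ctxt(void)
{
	flush_tlb_all();	/* a local-only flush would also suffice */
}

/* Called from switch_mm() whenever 'mm' might hold a stale ASID. */
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	asid_check_context(&asid_info, &mm->context.asid, cpu, mm);
}

/* Boot-time setup: CONFIG_CPU_ASID_BITS-wide ASIDs, one ASID per context. */
static int asids_init(void)
{
	if (asid_allocator_init(&asid_info, CONFIG_CPU_ASID_BITS, 1,
				asid_flush_cpu_ctxt))
		panic("Unable to initialize ASID allocator\n");

	return 0;
}
early_initcall(asids_init);

With this wiring, the new switch_mm() in mmu_context.h only pays the cheap fast path: the relaxed cmpxchg in asid_check_context() succeeds unless a generation rollover has happened, and only then does asid_new_context() take the lock and hand out a fresh ASID.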