From f163f0302ab69722c052519f4014814bf10026a9 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Wed, 8 Jun 2022 16:40:21 +0200
Subject: context_tracking: Rename context_tracking_user_enter/exit() to
 user_enter/exit_callable()

context_tracking_user_enter() and context_tracking_user_exit() are
ASM callable versions of user_enter() and user_exit() for architectures
that didn't manage to check the context tracking static key from ASM.
Change those function names to better reflect their purpose.

[ frederic: Apply Max Filippov feedback. ]

Signed-off-by: Frederic Weisbecker
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Neeraj Upadhyay
Cc: Uladzislau Rezki
Cc: Joel Fernandes
Cc: Boqun Feng
Cc: Nicolas Saenz Julienne
Cc: Marcelo Tosatti
Cc: Xiongfeng Wang
Cc: Yu Liao
Cc: Phil Auld
Cc: Paul Gortmaker
Cc: Alex Belits
Signed-off-by: Paul E. McKenney
Reviewed-by: Nicolas Saenz Julienne
Tested-by: Nicolas Saenz Julienne
---
 arch/arm/kernel/entry-header.S | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
(limited to 'arch/arm')

diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 5865621bf691..95def2b38d1c 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -369,10 +369,10 @@ ALT_UP_B(.L1_\@)
 #ifdef CONFIG_CONTEXT_TRACKING
 	.if	\save
 	stmdb   sp!, {r0-r3, ip, lr}
-	bl	context_tracking_user_exit
+	bl	user_exit_callable
 	ldmia	sp!, {r0-r3, ip, lr}
 	.else
-	bl	context_tracking_user_exit
+	bl	user_exit_callable
 	.endif
 #endif
 	.endm
@@ -381,10 +381,10 @@ ALT_UP_B(.L1_\@)
 #ifdef CONFIG_CONTEXT_TRACKING
 	.if	\save
 	stmdb   sp!, {r0-r3, ip, lr}
-	bl	context_tracking_user_enter
+	bl	user_enter_callable
 	ldmia	sp!, {r0-r3, ip, lr}
 	.else
-	bl	context_tracking_user_enter
+	bl	user_enter_callable
 	.endif
 #endif
 	.endm
--
cgit v1.2.3
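For orientation between the two patches: the renamed helpers are plain C
wrappers that assembly entry code can branch to when it cannot test the
context-tracking static key itself. The sketch below is illustrative only
(the real definitions live in kernel/context_tracking.c, which this
arch/arm-limited view does not show); it assumes the user_enter()/user_exit()
helpers declared in <linux/context_tracking.h>.

#include <linux/context_tracking.h>

/* Rough shape of the ASM-callable wrappers after the rename (sketch only). */
void user_exit_callable(void)
{
	user_exit();	/* leaving user mode: switch to kernel context */
}

void user_enter_callable(void)
{
	user_enter();	/* about to return to user mode */
}

Architecture entry code then simply does "bl user_exit_callable" /
"bl user_enter_callable" around the user/kernel boundary, as the hunks
above show.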
From 24a9c54182b3758801b8ca6c8c237cc2ff654732 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Wed, 8 Jun 2022 16:40:24 +0200
Subject: context_tracking: Split user tracking Kconfig

Context tracking is going to be used not only to track user transitions
but also idle/IRQs/NMIs. The user tracking part will then become a
separate feature. Prepare Kconfig for that.

[ frederic: Apply Max Filippov feedback. ]

Signed-off-by: Frederic Weisbecker
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Neeraj Upadhyay
Cc: Uladzislau Rezki
Cc: Joel Fernandes
Cc: Boqun Feng
Cc: Nicolas Saenz Julienne
Cc: Marcelo Tosatti
Cc: Xiongfeng Wang
Cc: Yu Liao
Cc: Phil Auld
Cc: Paul Gortmaker
Cc: Alex Belits
Signed-off-by: Paul E. McKenney
Reviewed-by: Nicolas Saenz Julienne
Tested-by: Nicolas Saenz Julienne
---
 .../time/context-tracking/arch-support.txt   |  6 ++---
 arch/Kconfig                                 |  4 +--
 arch/arm/Kconfig                             |  2 +-
 arch/arm/kernel/entry-common.S               |  4 +--
 arch/arm/kernel/entry-header.S               |  4 +--
 arch/arm64/Kconfig                           |  2 +-
 arch/csky/Kconfig                            |  2 +-
 arch/csky/kernel/entry.S                     |  4 +--
 arch/loongarch/Kconfig                       |  2 +-
 arch/mips/Kconfig                            |  2 +-
 arch/powerpc/Kconfig                         |  2 +-
 arch/powerpc/include/asm/context_tracking.h  |  2 +-
 arch/riscv/Kconfig                           |  2 +-
 arch/riscv/kernel/entry.S                    |  6 ++---
 arch/sparc/Kconfig                           |  2 +-
 arch/sparc/kernel/rtrap_64.S                 |  2 +-
 arch/x86/Kconfig                             |  4 +--
 arch/xtensa/Kconfig                          |  2 +-
 arch/xtensa/kernel/entry.S                   |  4 +--
 include/linux/context_tracking.h             | 12 ++++-----
 include/linux/context_tracking_state.h       |  4 +--
 init/Kconfig                                 |  4 +--
 kernel/context_tracking.c                    |  6 ++++-
 kernel/sched/core.c                          |  2 +-
 kernel/time/Kconfig                          | 31 ++++++++++++++--------
 25 files changed, 65 insertions(+), 52 deletions(-)
(limited to 'arch/arm')

diff --git a/Documentation/features/time/context-tracking/arch-support.txt b/Documentation/features/time/context-tracking/arch-support.txt
index c9e0a16290e6..e59071a49090 100644
--- a/Documentation/features/time/context-tracking/arch-support.txt
+++ b/Documentation/features/time/context-tracking/arch-support.txt
@@ -1,7 +1,7 @@
 #
-#         Feature name: context-tracking
-#         Kconfig:       HAVE_CONTEXT_TRACKING
-#         description:   arch supports context tracking for NO_HZ_FULL
+#         Feature name: user-context-tracking
+#         Kconfig:       HAVE_CONTEXT_TRACKING_USER
+#         description:   arch supports user context tracking for NO_HZ_FULL
 #
     -----------------------
     |         arch |status|
diff --git a/arch/Kconfig b/arch/Kconfig
index fcf9a41a4ef5..154b7b78da09 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -774,7 +774,7 @@ config HAVE_ARCH_WITHIN_STACK_FRAMES
	  and similar) by implementing an inline arch_within_stack_frames(),
	  which is used by CONFIG_HARDENED_USERCOPY.

-config HAVE_CONTEXT_TRACKING
+config HAVE_CONTEXT_TRACKING_USER
	bool
	help
	  Provide kernel/user boundaries probes necessary for subsystems
@@ -785,7 +785,7 @@ config HAVE_CONTEXT_TRACKING
	  protected inside rcu_irq_enter/rcu_irq_exit() but preemption or signal
	  handling on irq exit still need to be protected.

-config HAVE_CONTEXT_TRACKING_OFFSTACK
+config HAVE_CONTEXT_TRACKING_USER_OFFSTACK
	bool
	help
	  Architecture neither relies on exception_enter()/exception_exit()
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 7630ba9cb6cc..9acc6aac5912 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -84,7 +84,7 @@ config ARM
	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARM_LPAE
	select HAVE_ARM_SMCCC if CPU_V7
	select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32
-	select HAVE_CONTEXT_TRACKING
+	select HAVE_CONTEXT_TRACKING_USER
	select HAVE_C_RECORDMCOUNT
	select HAVE_BUILDTIME_MCOUNT_SORT
	select HAVE_DEBUG_KMEMLEAK if !XIP_KERNEL
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 7aa3ded4af92..37a0125fc926 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -28,7 +28,7 @@
 #include "entry-header.S"

 saved_psr	.req	r8
-#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
+#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING_USER)
 saved_pc	.req	r9
 #define TRACE(x...) x
 #else
@@ -38,7 +38,7 @@ saved_pc	.req	lr

	.section .entry.text,"ax",%progbits
	.align	5
-#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING) || \
+#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING_USER) || \
	IS_ENABLED(CONFIG_DEBUG_RSEQ))
 /*
  * This is the fast syscall return path. We do as little as possible here,
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 95def2b38d1c..99411fa91350 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -366,7 +366,7 @@ ALT_UP_B(.L1_\@)
 * between user and kernel mode.
 */
	.macro ct_user_exit, save = 1
-#ifdef CONFIG_CONTEXT_TRACKING
+#ifdef CONFIG_CONTEXT_TRACKING_USER
	.if	\save
	stmdb   sp!, {r0-r3, ip, lr}
	bl	user_exit_callable
@@ -378,7 +378,7 @@ ALT_UP_B(.L1_\@)
	.endm

	.macro ct_user_enter, save = 1
-#ifdef CONFIG_CONTEXT_TRACKING
+#ifdef CONFIG_CONTEXT_TRACKING_USER
	.if	\save
	stmdb   sp!, {r0-r3, ip, lr}
	bl	user_enter_callable
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1652a9800ebe..7c5dd2af9ca9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -174,7 +174,7 @@ config ARM64
	select HAVE_C_RECORDMCOUNT
	select HAVE_CMPXCHG_DOUBLE
	select HAVE_CMPXCHG_LOCAL
-	select HAVE_CONTEXT_TRACKING
+	select HAVE_CONTEXT_TRACKING_USER
	select HAVE_DEBUG_KMEMLEAK
	select HAVE_DMA_CONTIGUOUS
	select HAVE_DYNAMIC_FTRACE
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 21d72b078eef..f55ba1745f7b 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -42,7 +42,7 @@ config CSKY
	select HAVE_ARCH_AUDITSYSCALL
	select HAVE_ARCH_MMAP_RND_BITS
	select HAVE_ARCH_SECCOMP_FILTER
-	select HAVE_CONTEXT_TRACKING
+	select HAVE_CONTEXT_TRACKING_USER
	select HAVE_VIRT_CPU_ACCOUNTING_GEN
	select HAVE_DEBUG_BUGVERBOSE
	select HAVE_DEBUG_KMEMLEAK
diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S
index bc734d17c16f..547b4cd1b24b 100644
--- a/arch/csky/kernel/entry.S
+++ b/arch/csky/kernel/entry.S
@@ -19,7 +19,7 @@
	.endm

	.macro	context_tracking
-#ifdef CONFIG_CONTEXT_TRACKING
+#ifdef CONFIG_CONTEXT_TRACKING_USER
	mfcr	a0, epsr
	btsti	a0, 31
	bt	1f
@@ -159,7 +159,7 @@ ret_from_exception:
	and	r10, r9
	cmpnei	r10, 0
	bt	exit_work
-#ifdef CONFIG_CONTEXT_TRACKING
+#ifdef CONFIG_CONTEXT_TRACKING_USER
	jbsr	user_enter_callable
 #endif
1:
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 1920d52653b4..130dc65f3c85 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -76,7 +76,7 @@ config LOONGARCH
	select HAVE_ARCH_TRACEHOOK
	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
	select HAVE_ASM_MODVERSIONS
-	select HAVE_CONTEXT_TRACKING
+	select HAVE_CONTEXT_TRACKING_USER
	select HAVE_COPY_THREAD_TLS
	select HAVE_DEBUG_STACKOVERFLOW
	select HAVE_DMA_CONTIGUOUS
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index db09d45d59ec..9457894db237 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -56,7 +56,7 @@ config MIPS
	select HAVE_ARCH_TRACEHOOK
	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES
	select HAVE_ASM_MODVERSIONS
-	select HAVE_CONTEXT_TRACKING
+	select HAVE_CONTEXT_TRACKING_USER
	select HAVE_TIF_NOHZ
	select HAVE_C_RECORDMCOUNT
	select HAVE_DEBUG_KMEMLEAK
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c2ce2e60c8f0..874c8d81284a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -202,7 +202,7 @@ config PPC
	select HAVE_ARCH_SECCOMP_FILTER
	select HAVE_ARCH_TRACEHOOK
	select HAVE_ASM_MODVERSIONS
-	select HAVE_CONTEXT_TRACKING		if PPC64
+	select HAVE_CONTEXT_TRACKING_USER	if PPC64
	select HAVE_C_RECORDMCOUNT
	select HAVE_DEBUG_KMEMLEAK
	select HAVE_DEBUG_STACKOVERFLOW
diff --git a/arch/powerpc/include/asm/context_tracking.h b/arch/powerpc/include/asm/context_tracking.h
index f2682b28b050..4b63931c49e0 100644
--- a/arch/powerpc/include/asm/context_tracking.h
+++ b/arch/powerpc/include/asm/context_tracking.h
@@ -2,7 +2,7 @@
 #ifndef _ASM_POWERPC_CONTEXT_TRACKING_H
 #define _ASM_POWERPC_CONTEXT_TRACKING_H

-#ifdef CONFIG_CONTEXT_TRACKING
+#ifdef CONFIG_CONTEXT_TRACKING_USER
 #define SCHEDULE_USER bl	schedule_user
 #else
 #define SCHEDULE_USER bl	schedule
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 32ffef9f6e5b..29b46f217345 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -86,7 +86,7 @@ config RISCV
	select HAVE_ARCH_THREAD_STRUCT_WHITELIST
	select HAVE_ARCH_VMAP_STACK if MMU && 64BIT
	select HAVE_ASM_MODVERSIONS
-	select HAVE_CONTEXT_TRACKING
+	select HAVE_CONTEXT_TRACKING_USER
	select HAVE_DEBUG_KMEMLEAK
	select HAVE_DMA_CONTIGUOUS if MMU
	select HAVE_EBPF_JIT if MMU
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 12f6bba57e33..b9eda3fcbd6d 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -111,7 +111,7 @@ _save_context:
	call __trace_hardirqs_off
 #endif

-#ifdef CONFIG_CONTEXT_TRACKING
+#ifdef CONFIG_CONTEXT_TRACKING_USER
	/* If previous state is in user mode, call user_exit_callable(). */
	li   a0, SR_PP
	and a0, s1, a0
@@ -176,7 +176,7 @@ handle_syscall:
	 */
	csrs CSR_STATUS, SR_IE
 #endif
-#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
+#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING_USER)
	/* Recover a0 - a7 for system calls */
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
@@ -269,7 +269,7 @@ resume_userspace:
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending

-#ifdef CONFIG_CONTEXT_TRACKING
+#ifdef CONFIG_CONTEXT_TRACKING_USER
	call user_enter_callable
 #endif
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index ba449c47effd..9232411a8821 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -71,7 +71,7 @@ config SPARC64
	select HAVE_DYNAMIC_FTRACE
	select HAVE_FTRACE_MCOUNT_RECORD
	select HAVE_SYSCALL_TRACEPOINTS
-	select HAVE_CONTEXT_TRACKING
+	select HAVE_CONTEXT_TRACKING_USER
	select HAVE_TIF_NOHZ
	select HAVE_DEBUG_KMEMLEAK
	select IOMMU_HELPER
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index c5fd4b450d9b..eef102765a7e 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -15,7 +15,7 @@
 #include
 #include

-#ifdef CONFIG_CONTEXT_TRACKING
+#ifdef CONFIG_CONTEXT_TRACKING_USER
 # define SCHEDULE_USER schedule_user
 #else
 # define SCHEDULE_USER schedule
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index be0b95e51df6..b0a6dbbb760b 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -186,8 +186,8 @@ config X86
	select HAVE_ASM_MODVERSIONS
	select HAVE_CMPXCHG_DOUBLE
	select HAVE_CMPXCHG_LOCAL
-	select HAVE_CONTEXT_TRACKING		if X86_64
-	select HAVE_CONTEXT_TRACKING_OFFSTACK	if HAVE_CONTEXT_TRACKING
+	select HAVE_CONTEXT_TRACKING_USER		if X86_64
+	select HAVE_CONTEXT_TRACKING_USER_OFFSTACK	if HAVE_CONTEXT_TRACKING_USER
	select HAVE_C_RECORDMCOUNT
	select HAVE_OBJTOOL_MCOUNT		if HAVE_OBJTOOL
	select HAVE_BUILDTIME_MCOUNT_SORT
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 0b0f0172cced..7927fed7bc83 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -33,7 +33,7 @@ config XTENSA
	select HAVE_ARCH_KCSAN
	select HAVE_ARCH_SECCOMP_FILTER
	select HAVE_ARCH_TRACEHOOK
-	select HAVE_CONTEXT_TRACKING
+	select HAVE_CONTEXT_TRACKING_USER
	select HAVE_DEBUG_KMEMLEAK
	select HAVE_DMA_CONTIGUOUS
	select HAVE_EXIT_THREAD
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index d72bcafae90c..fb67d85116e4 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -455,7 +455,7 @@ KABI_W	or a3, a3, a2
	abi_call	trace_hardirqs_off
1:
 #endif
-#ifdef CONFIG_CONTEXT_TRACKING
+#ifdef CONFIG_CONTEXT_TRACKING_USER
	l32i	abi_tmp0, a1, PT_PS
	bbci.l	abi_tmp0, PS_UM_BIT, 1f
	abi_call	user_exit_callable
@@ -544,7 +544,7 @@ common_exception_return:
	j	.Lrestore_state

.Lexit_tif_loop_user:
-#ifdef CONFIG_CONTEXT_TRACKING
+#ifdef CONFIG_CONTEXT_TRACKING_USER
	abi_call	user_enter_callable
 #endif
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 63259fece7c7..e35ae66b4794 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -10,7 +10,7 @@

 #include

-#ifdef CONFIG_CONTEXT_TRACKING
+#ifdef CONFIG_CONTEXT_TRACKING_USER
 extern void ct_cpu_track_user(int cpu);

 /* Called with interrupts disabled.  */
@@ -52,7 +52,7 @@ static inline enum ctx_state exception_enter(void)
 {
	enum ctx_state prev_ctx;

-	if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) ||
+	if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) ||
	    !context_tracking_enabled())
		return 0;

@@ -65,7 +65,7 @@ static inline enum ctx_state exception_enter(void)

 static inline void exception_exit(enum ctx_state prev_ctx)
 {
-	if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) &&
+	if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
	    context_tracking_enabled()) {
		if (prev_ctx != CONTEXT_KERNEL)
			ct_user_enter(prev_ctx);
@@ -109,14 +109,14 @@ static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
 static __always_inline bool context_tracking_guest_enter(void) { return false; }
 static inline void context_tracking_guest_exit(void) { }

-#endif /* !CONFIG_CONTEXT_TRACKING */
+#endif /* !CONFIG_CONTEXT_TRACKING_USER */

 #define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))

-#ifdef CONFIG_CONTEXT_TRACKING_FORCE
+#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
 extern void context_tracking_init(void);
 #else
 static inline void context_tracking_init(void) { }
-#endif /* CONFIG_CONTEXT_TRACKING_FORCE */
+#endif /* CONFIG_CONTEXT_TRACKING_USER_FORCE */

 #endif
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index edc7b46376a6..2b46afe105a9 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -22,7 +22,7 @@ struct context_tracking {
	} state;
 };

-#ifdef CONFIG_CONTEXT_TRACKING
+#ifdef CONFIG_CONTEXT_TRACKING_USER
 extern struct static_key_false context_tracking_key;
 DECLARE_PER_CPU(struct context_tracking, context_tracking);

@@ -45,6 +45,6 @@ static inline bool context_tracking_enabled_this_cpu(void)
 static __always_inline bool context_tracking_enabled(void) { return false; }
 static __always_inline bool context_tracking_enabled_cpu(int cpu) { return false; }
 static __always_inline bool context_tracking_enabled_this_cpu(void) { return false; }
-#endif /* CONFIG_CONTEXT_TRACKING */
+#endif /* CONFIG_CONTEXT_TRACKING_USER */

 #endif
diff --git a/init/Kconfig b/init/Kconfig
index c7900e8975f1..06454d19e2f0 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -494,11 +494,11 @@ config VIRT_CPU_ACCOUNTING_NATIVE

 config VIRT_CPU_ACCOUNTING_GEN
	bool "Full dynticks CPU time accounting"
-	depends on HAVE_CONTEXT_TRACKING
+	depends on HAVE_CONTEXT_TRACKING_USER
	depends on HAVE_VIRT_CPU_ACCOUNTING_GEN
	depends on GENERIC_CLOCKEVENTS
	select VIRT_CPU_ACCOUNTING
-	select CONTEXT_TRACKING
+	select CONTEXT_TRACKING_USER
	help
	  Select this option to enable task and CPU time accounting on full
	  dynticks systems. This accounting is implemented by watching every
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index d361bd52e4e1..f3dec1be2bf6 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -22,6 +22,8 @@
 #include
 #include

+#ifdef CONFIG_CONTEXT_TRACKING_USER
+
 #define CREATE_TRACE_POINTS
 #include

@@ -252,7 +254,7 @@ void __init ct_cpu_track_user(int cpu)
	initialized = true;
 }

-#ifdef CONFIG_CONTEXT_TRACKING_FORCE
+#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
 void __init context_tracking_init(void)
 {
	int cpu;
@@ -261,3 +263,5 @@ void __init context_tracking_init(void)
		ct_cpu_track_user(cpu);
 }
 #endif
+
+#endif /* #ifdef CONFIG_CONTEXT_TRACKING_USER */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index da0bf6fe9ecd..883167a57bf9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6559,7 +6559,7 @@ void __sched schedule_idle(void)
	} while (need_resched());
 }

-#if defined(CONFIG_CONTEXT_TRACKING) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK)
+#if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
 asmlinkage __visible void __sched schedule_user(void)
 {
	/*
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 27b7868b5c30..41f99bcfe9e6 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -73,6 +73,9 @@ config TIME_KUNIT_TEST

	  If unsure, say N.

+config CONTEXT_TRACKING
+	bool
+
 if GENERIC_CLOCKEVENTS
 menu "Timers subsystem"
@@ -111,7 +114,7 @@ config NO_HZ_FULL
	# NO_HZ_COMMON dependency
	# We need at least one periodic CPU for timekeeping
	depends on SMP
-	depends on HAVE_CONTEXT_TRACKING
+	depends on HAVE_CONTEXT_TRACKING_USER
	# VIRT_CPU_ACCOUNTING_GEN dependency
	depends on HAVE_VIRT_CPU_ACCOUNTING_GEN
	select NO_HZ_COMMON
@@ -137,31 +140,37 @@ config NO_HZ_FULL

 endchoice

-config CONTEXT_TRACKING
-	bool
+config CONTEXT_TRACKING_USER
+	bool
+	depends on HAVE_CONTEXT_TRACKING_USER
+	select CONTEXT_TRACKING
+	help
+	  Track transitions between kernel and user on behalf of RCU and
+	  tickless cputime accounting. The former case relies on context
+	  tracking to enter/exit RCU extended quiescent states.

-config CONTEXT_TRACKING_FORCE
-	bool "Force context tracking"
-	depends on CONTEXT_TRACKING
+config CONTEXT_TRACKING_USER_FORCE
+	bool "Force user context tracking"
+	depends on CONTEXT_TRACKING_USER
	default y if !NO_HZ_FULL
	help
	  The major pre-requirement for full dynticks to work is to
-	  support the context tracking subsystem. But there are also
+	  support the user context tracking subsystem. But there are also
	  other dependencies to provide in order to make the full
	  dynticks working.

	  This option stands for testing when an arch implements the
-	  context tracking backend but doesn't yet fulfill all the
+	  user context tracking backend but doesn't yet fulfill all the
	  requirements to make the full dynticks feature working.

	  Without the full dynticks, there is no way to test the support
-	  for context tracking and the subsystems that rely on it: RCU
+	  for user context tracking and the subsystems that rely on it: RCU
	  userspace extended quiescent state and tickless cputime
	  accounting. This option copes with the absence of the full
-	  dynticks subsystem by forcing the context tracking on all
+	  dynticks subsystem by forcing the user context tracking on all
	  CPUs in the system.

	  Say Y only if you're working on the development of an
-	  architecture backend for the context tracking.
+	  architecture backend for the user context tracking.

	  Say N otherwise, this option brings an overhead that you
	  don't want in production.
--
cgit v1.2.3
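The practical effect of the Kconfig split for architecture code can be
summarized in C. This is an illustrative sketch only, not part of the
series, and arch_return_to_user() is a hypothetical helper name; it mirrors
what the csky and riscv entry.S hunks above do in assembly. After the split,
call sites test CONFIG_CONTEXT_TRACKING_USER, while CONFIG_CONTEXT_TRACKING
becomes an internal umbrella option selected by it.

#include <linux/context_tracking.h>

static void arch_return_to_user(void)	/* hypothetical example helper */
{
#ifdef CONFIG_CONTEXT_TRACKING_USER
	/* Tell context tracking (and thus RCU) we are heading to userspace. */
	user_enter_callable();
#endif
}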
From e67198cc05b8ecbb7b8e2d8ef9fb5c8d26821873 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Wed, 8 Jun 2022 16:40:25 +0200
Subject: context_tracking: Take idle eqs entrypoints over RCU

The RCU dynticks counter is going to be merged into the context tracking
subsystem. Start with moving the idle extended quiescent states
entrypoints to context tracking. For now those are dumb redirections to
existing RCU calls.

[ paulmck: Apply kernel test robot feedback. ]

Signed-off-by: Frederic Weisbecker
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Neeraj Upadhyay
Cc: Uladzislau Rezki
Cc: Joel Fernandes
Cc: Boqun Feng
Cc: Nicolas Saenz Julienne
Cc: Marcelo Tosatti
Cc: Xiongfeng Wang
Cc: Yu Liao
Cc: Phil Auld
Cc: Paul Gortmaker
Cc: Alex Belits
Signed-off-by: Paul E. McKenney
Reviewed-by: Nicolas Saenz Julienne
Tested-by: Nicolas Saenz Julienne
---
 Documentation/RCU/stallwarn.rst   |  4 ++--
 arch/arm/mach-imx/cpuidle-imx6q.c |  5 +++--
 drivers/acpi/processor_idle.c     |  5 +++--
 drivers/cpuidle/cpuidle.c         |  9 +++++----
 include/linux/context_tracking.h  |  8 ++++++++
 include/linux/rcupdate.h          |  2 +-
 kernel/context_tracking.c         | 15 +++++++++++++++
 kernel/locking/lockdep.c          |  2 +-
 kernel/rcu/Kconfig                |  2 ++
 kernel/rcu/tree.c                 |  2 --
 kernel/rcu/update.c               |  2 +-
 kernel/sched/idle.c               | 10 +++++-----
 kernel/sched/sched.h              |  1 +
 kernel/time/Kconfig               |  6 ++++++
 14 files changed, 53 insertions(+), 20 deletions(-)
(limited to 'arch/arm')

diff --git a/Documentation/RCU/stallwarn.rst b/Documentation/RCU/stallwarn.rst
index 794837eb519b..b95bda7755fa 100644
--- a/Documentation/RCU/stallwarn.rst
+++ b/Documentation/RCU/stallwarn.rst
@@ -97,8 +97,8 @@ warnings:
	which will include additional debugging information.

 -	A low-level kernel issue that either fails to invoke one of the
-	variants of rcu_user_enter(), rcu_user_exit(), rcu_idle_enter(),
-	rcu_idle_exit(), rcu_irq_enter(), or rcu_irq_exit() on the one
+	variants of rcu_user_enter(), rcu_user_exit(), ct_idle_enter(),
+	ct_idle_exit(), rcu_irq_enter(), or rcu_irq_exit() on the one
	hand, or that invokes one of them too many times on the other.
	Historically, the most frequent issue has been an omission of
	either irq_enter() or irq_exit(), which in turn invoke
diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index 094337dc1bc7..d086cbae09c3 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -3,6 +3,7 @@
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 */

+#include
 #include
 #include
 #include
@@ -24,9 +25,9 @@ static int imx6q_enter_wait(struct cpuidle_device *dev,
		imx6_set_lpm(WAIT_UNCLOCKED);
	raw_spin_unlock(&cpuidle_lock);

-	rcu_idle_enter();
+	ct_idle_enter();
	cpu_do_idle();
-	rcu_idle_exit();
+	ct_idle_exit();

	raw_spin_lock(&cpuidle_lock);
	if (num_idle_cpus-- == num_online_cpus())
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 6a5572a1a80c..1401d193a2df 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -23,6 +23,7 @@
 #include
 #include
 #include
+#include

 /*
  * Include the apic definitions for x86 to have the APIC timer related defines
@@ -647,11 +648,11 @@ static int acpi_idle_enter_bm(struct cpuidle_driver *drv,
		raw_spin_unlock(&c3_lock);
	}

-	rcu_idle_enter();
+	ct_idle_enter();

	acpi_idle_do_entry(cx);

-	rcu_idle_exit();
+	ct_idle_exit();

	/* Re-enable bus master arbitration */
	if (dis_bm) {
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index ef2ea1b12cd8..62dd956025f3 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -23,6 +23,7 @@
 #include
 #include
 #include
+#include
 #include

 #include "cpuidle.h"
@@ -150,12 +151,12 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
	 */
	stop_critical_timings();
	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
-		rcu_idle_enter();
+		ct_idle_enter();
	target_state->enter_s2idle(dev, drv, index);
	if (WARN_ON_ONCE(!irqs_disabled()))
		local_irq_disable();
	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
-		rcu_idle_exit();
+		ct_idle_exit();
	tick_unfreeze();
	start_critical_timings();

@@ -233,10 +234,10 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,

	stop_critical_timings();
	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
-		rcu_idle_enter();
+		ct_idle_enter();
	entered_state = target_state->enter(dev, drv, index);
	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
-		rcu_idle_exit();
+		ct_idle_exit();
	start_critical_timings();

	sched_clock_idle_wakeup_event();
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index e35ae66b4794..01abadb2f993 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -119,4 +119,12 @@ extern void context_tracking_init(void);
 static inline void context_tracking_init(void) { }
 #endif /* CONFIG_CONTEXT_TRACKING_USER_FORCE */

+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+extern void ct_idle_enter(void);
+extern void ct_idle_exit(void);
+#else
+static inline void ct_idle_enter(void) { }
+static inline void ct_idle_exit(void) { }
+#endif /* !CONFIG_CONTEXT_TRACKING_IDLE */
+
 #endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 1a32036c918c..6ebe754501c3 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -128,7 +128,7 @@ static inline void rcu_nocb_flush_deferred_wakeup(void) { }
 * @a: Code that RCU needs to pay attention to.
 *
 * RCU read-side critical sections are forbidden in the inner idle loop,
- * that is, between the rcu_idle_enter() and the rcu_idle_exit() -- RCU
+ * that is, between the ct_idle_enter() and the ct_idle_exit() -- RCU
 * will happily ignore any such read-side critical sections. However,
 * things like powertop need tracepoints in the inner idle loop.
 *
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index f3dec1be2bf6..c0b3798d4e94 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -22,6 +22,21 @@
 #include
 #include

+
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+noinstr void ct_idle_enter(void)
+{
+	rcu_idle_enter();
+}
+EXPORT_SYMBOL_GPL(ct_idle_enter);
+
+void ct_idle_exit(void)
+{
+	rcu_idle_exit();
+}
+EXPORT_SYMBOL_GPL(ct_idle_exit);
+#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */
+
 #ifdef CONFIG_CONTEXT_TRACKING_USER

 #define CREATE_TRACE_POINTS
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index f06b91ca6482..5ea690cb4f7a 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -6570,7 +6570,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)

	/*
	 * If a CPU is in the RCU-free window in idle (ie: in the section
-	 * between rcu_idle_enter() and rcu_idle_exit(), then RCU
+	 * between ct_idle_enter() and ct_idle_exit(), then RCU
	 * considers that CPU to be in an "extended quiescent state",
	 * which means that RCU will be completely ignoring that CPU.
	 * Therefore, rcu_read_lock() and friends have absolutely no
diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
index 1c630e573548..3fa24e63d6f9 100644
--- a/kernel/rcu/Kconfig
+++ b/kernel/rcu/Kconfig
@@ -8,6 +8,8 @@ menu "RCU Subsystem"
 config TREE_RCU
	bool
	default y if SMP
+	# Dynticks-idle tracking
+	select CONTEXT_TRACKING_IDLE
	help
	  This option selects the RCU implementation that is
	  designed for very large SMP system with hundreds or
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9a5edab5558c..051fed0844b6 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -664,7 +664,6 @@ void noinstr rcu_idle_enter(void)
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled());
	rcu_eqs_enter(false);
 }
-EXPORT_SYMBOL_GPL(rcu_idle_enter);

 #ifdef CONFIG_NO_HZ_FULL

@@ -904,7 +903,6 @@ void noinstr rcu_idle_exit(void)
	rcu_eqs_exit(false);
	raw_local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(rcu_idle_exit);

 #ifdef CONFIG_NO_HZ_FULL
 /**
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index fc7fef575606..147214b2cd68 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -85,7 +85,7 @@ module_param(rcu_normal_after_boot, int, 0444);
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of view (ie:
- * that we are in the section between rcu_idle_enter() and rcu_idle_exit())
+ * that we are in the section between ct_idle_enter() and ct_idle_exit())
 * then rcu_read_lock_held() sets ``*ret`` to false even if the CPU did an
 * rcu_read_lock(). The reason for this is that RCU ignores CPUs that are
 * in such a section, considering these as in extended quiescent state,
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 328cccbee444..f26ab2675f7d 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -53,14 +53,14 @@ static noinline int __cpuidle cpu_idle_poll(void)
 {
	trace_cpu_idle(0, smp_processor_id());
	stop_critical_timings();
-	rcu_idle_enter();
+	ct_idle_enter();
	local_irq_enable();

	while (!tif_need_resched() &&
	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();

-	rcu_idle_exit();
+	ct_idle_exit();
	start_critical_timings();
	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());

@@ -98,12 +98,12 @@ void __cpuidle default_idle_call(void)
	 *
	 * Trace IRQs enable here, then switch off RCU, and have
	 * arch_cpu_idle() use raw_local_irq_enable(). Note that
-	 * rcu_idle_enter() relies on lockdep IRQ state, so switch that
+	 * ct_idle_enter() relies on lockdep IRQ state, so switch that
	 * last -- this is very similar to the entry code.
	 */
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
-	rcu_idle_enter();
+	ct_idle_enter();
	lockdep_hardirqs_on(_THIS_IP_);

	arch_cpu_idle();

@@ -116,7 +116,7 @@ void __cpuidle default_idle_call(void)
	 */
	raw_local_irq_disable();
	lockdep_hardirqs_off(_THIS_IP_);
-	rcu_idle_exit();
+	ct_idle_exit();
	lockdep_hardirqs_on(_THIS_IP_);
	raw_local_irq_enable();
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 47b89a0fc6e5..0cfe2d0af294 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 41f99bcfe9e6..a41753be1a2b 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -76,6 +76,12 @@ config TIME_KUNIT_TEST
 config CONTEXT_TRACKING
	bool

+config CONTEXT_TRACKING_IDLE
+	bool
+	select CONTEXT_TRACKING
+	help
+	  Tracks idle state on behalf of RCU.
+
 if GENERIC_CLOCKEVENTS
 menu "Timers subsystem"
--
cgit v1.2.3
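As a recap of the new idle entry points: the calling convention visible in
the cpuidle hunks above is to bracket the low-power entry, with interrupts
disabled, between ct_idle_enter() and ct_idle_exit(), which for now simply
forward to rcu_idle_enter()/rcu_idle_exit(). The sketch below is
illustrative only; imx6q_like_idle() is a made-up name modeled on the imx6q
driver change, and cpu_do_idle() is the ARM helper used in that hunk.

#include <linux/context_tracking.h>
#include <linux/cpuidle.h>

static int imx6q_like_idle(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	ct_idle_enter();	/* RCU may treat this CPU as quiescent from here */
	cpu_do_idle();		/* architecture low-power wait */
	ct_idle_exit();		/* make the CPU visible to RCU again */

	return index;
}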