author     Linus Torvalds  2023-06-27 14:03:21 -0700
committer  Linus Torvalds  2023-06-27 14:03:21 -0700
commit     ed3b7923a816ded62dccef377c9ee346c7d3b1b4 (patch)
tree       41d46fc399c231088a370e7b3e488a93342fa681 /drivers/clocksource
parent     e8f75c0270d930ef675fee22d74d1a3250e96962 (diff)
parent     ebb83d84e49b54369b0db67136a5fe1087124dcc (diff)
Merge tag 'sched-core-2023-06-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
"Scheduler SMP load-balancer improvements:
- Avoid unnecessary migrations within SMT domains on hybrid systems.
Problem:
On hybrid CPU systems (processors with a mixture of
higher-frequency SMT cores and lower-frequency non-SMT cores),
under the old code lower-priority CPUs pulled tasks from the
higher-priority cores if more than one SMT sibling was busy -
resulting in many unnecessary task migrations.
Solution:
The new code improves the load balancer to recognize SMT cores
with more than one busy sibling and allows lower-priority CPUs
to pull tasks, which avoids superfluous migrations and lets
lower-priority cores inspect all SMT siblings for the busiest
queue (a simplified sketch of the pull rule follows this
section).
- Implement the 'runnable boosting' feature in the EAS balancer:
factor CPU contention into frequency selection, EAS max-util and
load-balance busiest-CPU selection.
This improves CPU utilization for certain workloads, while leaving
other key workloads unchanged.
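A minimal standalone C sketch of the pull rule above -- the struct
and function names here (cpu_info, can_pull_from) are hypothetical;
the real logic lives in the asym-packing paths of kernel/sched/fair.c:

    #include <stdbool.h>

    struct cpu_info {
            int priority;           /* asym-packing priority of the CPU */
            int busy_siblings;      /* busy hardware threads in its core */
    };

    /*
     * A lower-priority CPU may pull a task from a higher-priority SMT
     * core only when more than one sibling of that core is busy; with
     * a single busy sibling the task stays where it is.
     */
    static bool can_pull_from(const struct cpu_info *dst,
                              const struct cpu_info *busiest)
    {
            if (dst->priority >= busiest->priority)
                    return true;                 /* ordinary pull */

            return busiest->busy_siblings > 1;   /* crowded core only */
    }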
Scheduler infrastructure improvements:
- Rewrite the scheduler topology setup code by consolidating it into
the build_sched_topology() helper function and building it
dynamically.
- Resolve the local_clock() vs. noinstr complications by rewriting
the code: provide separate sched_clock_noinstr() and
local_clock_noinstr() functions for use from noinstr code, and
make sure it is all instrumentation-safe (the pattern is sketched
below).
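A minimal sketch of that noinstr/notrace split (the same pattern
appears in the arm_arch_timer.c hunk further down); raw_read_counter(),
read_counter() and __read_hw_counter() are illustrative stand-ins, not
kernel API:

    /* Callable from noinstr code; the caller must already hold off
     * preemption. */
    static noinstr u64 raw_read_counter(void)
    {
            return __read_hw_counter();
    }

    /* Instrumentation-safe wrapper for everybody else. */
    static notrace u64 read_counter(void)
    {
            u64 val;

            preempt_disable_notrace();
            val = __read_hw_counter();
            preempt_enable_notrace();

            return val;
    }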
Fixes:
- Fix a kthread_park() race with wait_woken()
- Fix misc wait_task_inactive() bugs unearthed by the -rt merge:
- Fix UP PREEMPT bug by unifying the SMP and UP implementations
- Fix task_struct::saved_state handling
- Fix various rq clock update bugs, unearthed by turning on the rq
clock debugging code.
- Fix the PSI WINDOW_MIN_US trigger limit, which was easy to hit
by creating enough cgroups, by removing the warning and
restricting small-window triggers to PSI file write-permission
or CAP_SYS_RESOURCE (see the sketch after this list).
- Propagate SMT flags in the topology when removing a degenerate domain
- Fix grub_reclaim() calculation bug in the deadline scheduler code
- Avoid resetting the min update period when it is unnecessary, in
psi_trigger_destroy().
- Don't balance a task to its current running CPU in load_balance(),
which was possible on certain NUMA topologies with overlapping
groups.
- Fix the sched-debug printing of rq->nr_uninterruptible
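The PSI trigger fix above amounts to a permission check of roughly
the following shape -- an illustrative sketch, not the exact
psi_trigger_create() code, and UNPRIV_WINDOW_MIN_US is a hypothetical
name:

    /*
     * Reject out-of-range windows outright; small windows are
     * additionally reserved for privileged writers instead of
     * emitting a warning.
     */
    if (window_us < WINDOW_MIN_US ||
        (window_us < UNPRIV_WINDOW_MIN_US && !capable(CAP_SYS_RESOURCE)))
            return ERR_PTR(-EINVAL);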
Cleanups:
- Address various -Wmissing-prototypes warnings, as a preparation
to (maybe) enable this warning in the future (a generic example
follows the quoted summary).
- Remove unused code
- Mark more functions __init
- Fix shadow-variable warnings"
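As an illustration of the -Wmissing-prototypes cleanups mentioned in
the summary (a generic example, not taken from any commit in this
series): a function with external linkage needs a prototype in scope
at its definition, or static linkage.

    /* foo.h: prototype visible to callers and to the definition */
    unsigned long foo_compute(unsigned long x);

    /* foo.c */
    #include "foo.h"

    unsigned long foo_compute(unsigned long x)      /* no warning */
    {
            return 2 * x;
    }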
* tag 'sched-core-2023-06-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (50 commits)
sched/core: Avoid multiple calling update_rq_clock() in __cfsb_csd_unthrottle()
sched/core: Avoid double calling update_rq_clock() in __balance_push_cpu_stop()
sched/core: Fixed missing rq clock update before calling set_rq_offline()
sched/deadline: Update GRUB description in the documentation
sched/deadline: Fix bandwidth reclaim equation in GRUB
sched/wait: Fix a kthread_park race with wait_woken()
sched/topology: Mark set_sched_topology() __init
sched/fair: Rename variable cpu_util eff_util
arm64/arch_timer: Fix MMIO byteswap
sched/fair, cpufreq: Introduce 'runnable boosting'
sched/fair: Refactor CPU utilization functions
cpuidle: Use local_clock_noinstr()
sched/clock: Provide local_clock_noinstr()
x86/tsc: Provide sched_clock_noinstr()
clocksource: hyper-v: Provide noinstr sched_clock()
clocksource: hyper-v: Adjust hv_read_tsc_page_tsc() to avoid special casing U64_MAX
x86/vdso: Fix gettimeofday masking
math64: Always inline u128 version of mul_u64_u64_shr()
s390/time: Provide sched_clock_noinstr()
loongarch: Provide noinstr sched_clock_read()
...
Diffstat (limited to 'drivers/clocksource')
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 54
-rw-r--r--  drivers/clocksource/hyperv_timer.c   | 42
2 files changed, 66 insertions, 30 deletions
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index e09d4427f604..e733a2a1927a 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -191,22 +191,40 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
 	return val;
 }
 
-static notrace u64 arch_counter_get_cntpct_stable(void)
+static noinstr u64 raw_counter_get_cntpct_stable(void)
 {
 	return __arch_counter_get_cntpct_stable();
 }
 
-static notrace u64 arch_counter_get_cntpct(void)
+static notrace u64 arch_counter_get_cntpct_stable(void)
+{
+	u64 val;
+	preempt_disable_notrace();
+	val = __arch_counter_get_cntpct_stable();
+	preempt_enable_notrace();
+	return val;
+}
+
+static noinstr u64 arch_counter_get_cntpct(void)
 {
 	return __arch_counter_get_cntpct();
 }
 
-static notrace u64 arch_counter_get_cntvct_stable(void)
+static noinstr u64 raw_counter_get_cntvct_stable(void)
 {
 	return __arch_counter_get_cntvct_stable();
 }
 
-static notrace u64 arch_counter_get_cntvct(void)
+static notrace u64 arch_counter_get_cntvct_stable(void)
+{
+	u64 val;
+	preempt_disable_notrace();
+	val = __arch_counter_get_cntvct_stable();
+	preempt_enable_notrace();
+	return val;
+}
+
+static noinstr u64 arch_counter_get_cntvct(void)
 {
 	return __arch_counter_get_cntvct();
 }
@@ -753,14 +771,14 @@ static int arch_timer_set_next_event_phys(unsigned long evt,
 	return 0;
 }
 
-static u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo)
+static noinstr u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo)
 {
 	u32 cnt_lo, cnt_hi, tmp_hi;
 
 	do {
-		cnt_hi = readl_relaxed(t->base + offset_lo + 4);
-		cnt_lo = readl_relaxed(t->base + offset_lo);
-		tmp_hi = readl_relaxed(t->base + offset_lo + 4);
+		cnt_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
+		cnt_lo = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo));
+		tmp_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
 	} while (cnt_hi != tmp_hi);
 
 	return ((u64) cnt_hi << 32) | cnt_lo;
@@ -1060,7 +1078,7 @@ bool arch_timer_evtstrm_available(void)
 	return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
 }
 
-static u64 arch_counter_get_cntvct_mem(void)
+static noinstr u64 arch_counter_get_cntvct_mem(void)
 {
 	return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO);
 }
@@ -1074,6 +1092,7 @@ struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
 
 static void __init arch_counter_register(unsigned type)
 {
+	u64 (*scr)(void);
 	u64 start_count;
 	int width;
 
@@ -1083,21 +1102,28 @@ static void __init arch_counter_register(unsigned type)
 
 		if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
 		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
-			if (arch_timer_counter_has_wa())
+			if (arch_timer_counter_has_wa()) {
 				rd = arch_counter_get_cntvct_stable;
-			else
+				scr = raw_counter_get_cntvct_stable;
+			} else {
 				rd = arch_counter_get_cntvct;
+				scr = arch_counter_get_cntvct;
+			}
 		} else {
-			if (arch_timer_counter_has_wa())
+			if (arch_timer_counter_has_wa()) {
 				rd = arch_counter_get_cntpct_stable;
-			else
+				scr = raw_counter_get_cntpct_stable;
+			} else {
 				rd = arch_counter_get_cntpct;
+				scr = arch_counter_get_cntpct;
+			}
 		}
 
 		arch_timer_read_counter = rd;
 		clocksource_counter.vdso_clock_mode = vdso_default;
 	} else {
 		arch_timer_read_counter = arch_counter_get_cntvct_mem;
+		scr = arch_counter_get_cntvct_mem;
 	}
 
 	width = arch_counter_get_width();
@@ -1113,7 +1139,7 @@ static void __init arch_counter_register(unsigned type)
 	timecounter_init(&arch_timer_kvm_info.timecounter,
 			 &cyclecounter, start_count);
 
-	sched_clock_register(arch_timer_read_counter, width, arch_timer_rate);
+	sched_clock_register(scr, width, arch_timer_rate);
 }
 
 static void arch_timer_stop(struct clock_event_device *clk)
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 9fc008c16636..e56307a81f4d 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -365,6 +365,20 @@ void hv_stimer_global_cleanup(void)
 }
 EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
 
+static __always_inline u64 read_hv_clock_msr(void)
+{
+	/*
+	 * Read the partition counter to get the current tick count. This count
+	 * is set to 0 when the partition is created and is incremented in 100
+	 * nanosecond units.
+	 *
+	 * Use hv_raw_get_register() because this function is used from
+	 * noinstr. Notable; while HV_REGISTER_TIME_REF_COUNT is a synthetic
+	 * register it doesn't need the GHCB path.
+	 */
+	return hv_raw_get_register(HV_REGISTER_TIME_REF_COUNT);
+}
+
 /*
  * Code and definitions for the Hyper-V clocksources. Two
  * clocksources are defined: one that reads the Hyper-V defined MSR, and
@@ -393,14 +407,20 @@ struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
 }
 EXPORT_SYMBOL_GPL(hv_get_tsc_page);
 
-static u64 notrace read_hv_clock_tsc(void)
+static __always_inline u64 read_hv_clock_tsc(void)
 {
-	u64 current_tick = hv_read_tsc_page(hv_get_tsc_page());
+	u64 cur_tsc, time;
 
-	if (current_tick == U64_MAX)
-		current_tick = hv_get_register(HV_REGISTER_TIME_REF_COUNT);
+	/*
+	 * The Hyper-V Top-Level Function Spec (TLFS), section Timers,
+	 * subsection Refererence Counter, guarantees that the TSC and MSR
+	 * times are in sync and monotonic. Therefore we can fall back
+	 * to the MSR in case the TSC page indicates unavailability.
+	 */
+	if (!hv_read_tsc_page_tsc(tsc_page, &cur_tsc, &time))
+		time = read_hv_clock_msr();
 
-	return current_tick;
+	return time;
 }
 
 static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
@@ -408,7 +428,7 @@ static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
 	return read_hv_clock_tsc();
 }
 
-static u64 notrace read_hv_sched_clock_tsc(void)
+static u64 noinstr read_hv_sched_clock_tsc(void)
 {
 	return (read_hv_clock_tsc() - hv_sched_clock_offset) *
 		(NSEC_PER_SEC / HV_CLOCK_HZ);
@@ -460,16 +480,6 @@ static struct clocksource hyperv_cs_tsc = {
 #endif
 };
 
-static u64 notrace read_hv_clock_msr(void)
-{
-	/*
-	 * Read the partition counter to get the current tick count. This count
-	 * is set to 0 when the partition is created and is incremented in
-	 * 100 nanosecond units.
-	 */
-	return hv_get_register(HV_REGISTER_TIME_REF_COUNT);
-}
-
 static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
 {
 	return read_hv_clock_msr();
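A note on the arch_counter_get_cnt_mem() hunk above: readl_relaxed()
is instrumented and thus unsafe on a noinstr path, so the patch reads
through __raw_readl() with an explicit endianness conversion. The
surrounding loop is the classic hi/lo/hi technique for reading a
64-bit counter exposed as two 32-bit registers: re-read the high word
until it is stable, so a carry between the reads cannot yield a torn
value. A standalone sketch of the technique, with read_reg32() as a
hypothetical raw MMIO accessor:

    #include <stdint.h>

    extern uint32_t read_reg32(unsigned long off);  /* hypothetical */

    static uint64_t read_counter64(unsigned long lo_off)
    {
            uint32_t lo, hi, tmp_hi;

            do {
                    hi     = read_reg32(lo_off + 4);  /* high word  */
                    lo     = read_reg32(lo_off);      /* low word   */
                    tmp_hi = read_reg32(lo_off + 4);  /* high again */
            } while (hi != tmp_hi);  /* retry if the low word wrapped */

            return ((uint64_t)hi << 32) | lo;
    }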