author    | Ingo Molnar | 2021-03-06 13:00:58 +0100
committer | Ingo Molnar | 2021-03-06 13:00:58 +0100
commit    | a500fc918f7b8dc3dff2e6c74f3e73e856c18248 (patch)
tree      | 9f580b803731bec6b598ec280f56abc6b13e1a97 /kernel/smp.c
parent    | d43f17a1da25373580ebb466de7d0641acbf6fd6 (diff)
parent    | bdb1050ee1faaec1e78c15de8b1959176f26c655 (diff)
Merge branch 'locking/core' into x86/mm, to resolve conflict
There's a non-trivial conflict between the parallel TLB flush
framework and the IPI flush debugging code - merge them
manually.
Conflicts:
kernel/smp.c
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/smp.c')
-rw-r--r-- | kernel/smp.c | 274
1 file changed, 263 insertions, 11 deletions
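A note on the locking/core side being merged in: the IPI flush debugging code is compile-gated behind CONFIG_CSD_LOCK_WAIT_DEBUG and runtime-gated behind two static keys that the diff below wires to an early boot parameter. A minimal usage sketch, using only names that appear in the diff (behavior read directly from the csdlock_debug() handler added below):

    # kernel command line
    csdlock_debug=1      # enable csdlock_debug_enabled: CSD-lock wait timeout reporting
    csdlock_debug=ext    # also enable csdlock_debug_extended: per-CPU cfd_seq event tracing

Any non-zero value enables the basic reporting; the literal string "ext" also enables the extended sequence-count tracing and implies the basic reporting, since the handler sets val = 1 in that case.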
diff --git a/kernel/smp.c b/kernel/smp.c
index af0d51da84a2..e21074900006 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -24,14 +24,70 @@
 #include <linux/sched/clock.h>
 #include <linux/nmi.h>
 #include <linux/sched/debug.h>
+#include <linux/jump_label.h>
 
 #include "smpboot.h"
 #include "sched/smp.h"
 
 #define CSD_TYPE(_csd)	((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)
 
+#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
+union cfd_seq_cnt {
+	u64		val;
+	struct {
+		u64	src:16;
+		u64	dst:16;
+#define CFD_SEQ_NOCPU	0xffff
+		u64	type:4;
+#define CFD_SEQ_QUEUE	0
+#define CFD_SEQ_IPI	1
+#define CFD_SEQ_NOIPI	2
+#define CFD_SEQ_PING	3
+#define CFD_SEQ_PINGED	4
+#define CFD_SEQ_HANDLE	5
+#define CFD_SEQ_DEQUEUE	6
+#define CFD_SEQ_IDLE	7
+#define CFD_SEQ_GOTIPI	8
+#define CFD_SEQ_HDLEND	9
+		u64	cnt:28;
+	} u;
+};
+
+static char *seq_type[] = {
+	[CFD_SEQ_QUEUE]		= "queue",
+	[CFD_SEQ_IPI]		= "ipi",
+	[CFD_SEQ_NOIPI]		= "noipi",
+	[CFD_SEQ_PING]		= "ping",
+	[CFD_SEQ_PINGED]	= "pinged",
+	[CFD_SEQ_HANDLE]	= "handle",
+	[CFD_SEQ_DEQUEUE]	= "dequeue (src CPU 0 == empty)",
+	[CFD_SEQ_IDLE]		= "idle",
+	[CFD_SEQ_GOTIPI]	= "gotipi",
+	[CFD_SEQ_HDLEND]	= "hdlend (src CPU 0 == early)",
+};
+
+struct cfd_seq_local {
+	u64	ping;
+	u64	pinged;
+	u64	handle;
+	u64	dequeue;
+	u64	idle;
+	u64	gotipi;
+	u64	hdlend;
+};
+#endif
+
+struct cfd_percpu {
+	call_single_data_t	csd;
+#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
+	u64	seq_queue;
+	u64	seq_ipi;
+	u64	seq_noipi;
+#endif
+};
+
 struct call_function_data {
-	call_single_data_t	__percpu *csd;
+	struct cfd_percpu	__percpu *pcpu;
 	cpumask_var_t		cpumask;
 	cpumask_var_t		cpumask_ipi;
 };
@@ -54,8 +110,8 @@ int smpcfd_prepare_cpu(unsigned int cpu)
 		free_cpumask_var(cfd->cpumask);
 		return -ENOMEM;
 	}
-	cfd->csd = alloc_percpu(call_single_data_t);
-	if (!cfd->csd) {
+	cfd->pcpu = alloc_percpu(struct cfd_percpu);
+	if (!cfd->pcpu) {
 		free_cpumask_var(cfd->cpumask);
 		free_cpumask_var(cfd->cpumask_ipi);
 		return -ENOMEM;
@@ -70,7 +126,7 @@ int smpcfd_dead_cpu(unsigned int cpu)
 
 	free_cpumask_var(cfd->cpumask);
 	free_cpumask_var(cfd->cpumask_ipi);
-	free_percpu(cfd->csd);
+	free_percpu(cfd->pcpu);
 	return 0;
 }
 
@@ -102,15 +158,60 @@ void __init call_function_init(void)
 
 #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
 
+static DEFINE_STATIC_KEY_FALSE(csdlock_debug_enabled);
+static DEFINE_STATIC_KEY_FALSE(csdlock_debug_extended);
+
+static int __init csdlock_debug(char *str)
+{
+	unsigned int val = 0;
+
+	if (str && !strcmp(str, "ext")) {
+		val = 1;
+		static_branch_enable(&csdlock_debug_extended);
+	} else
+		get_option(&str, &val);
+
+	if (val)
+		static_branch_enable(&csdlock_debug_enabled);
+
+	return 0;
+}
+early_param("csdlock_debug", csdlock_debug);
+
 static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
 static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
 static DEFINE_PER_CPU(void *, cur_csd_info);
+static DEFINE_PER_CPU(struct cfd_seq_local, cfd_seq_local);
 
 #define CSD_LOCK_TIMEOUT	(5ULL * NSEC_PER_SEC)
 static atomic_t csd_bug_count = ATOMIC_INIT(0);
+static u64 cfd_seq;
+
+#define CFD_SEQ(s, d, t, c)	\
+	(union cfd_seq_cnt){ .u.src = s, .u.dst = d, .u.type = t, .u.cnt = c }
+
+static u64 cfd_seq_inc(unsigned int src, unsigned int dst, unsigned int type)
+{
+	union cfd_seq_cnt new, old;
+
+	new = CFD_SEQ(src, dst, type, 0);
+
+	do {
+		old.val = READ_ONCE(cfd_seq);
+		new.u.cnt = old.u.cnt + 1;
+	} while (cmpxchg(&cfd_seq, old.val, new.val) != old.val);
+
+	return old.val;
+}
+
+#define cfd_seq_store(var, src, dst, type)				\
+	do {								\
+		if (static_branch_unlikely(&csdlock_debug_extended))	\
+			var = cfd_seq_inc(src, dst, type);		\
+	} while (0)
 
 /* Record current CSD work for current CPU, NULL to erase. */
-static void csd_lock_record(call_single_data_t *csd)
+static void __csd_lock_record(call_single_data_t *csd)
 {
 	if (!csd) {
 		smp_mb(); /* NULL cur_csd after unlock. */
@@ -125,7 +226,13 @@ static void csd_lock_record(call_single_data_t *csd)
 		  /* Or before unlock, as the case may be. */
 }
 
-static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
+static __always_inline void csd_lock_record(call_single_data_t *csd)
+{
+	if (static_branch_unlikely(&csdlock_debug_enabled))
+		__csd_lock_record(csd);
+}
+
+static int csd_lock_wait_getcpu(call_single_data_t *csd)
 {
 	unsigned int csd_type;
 
@@ -135,12 +242,86 @@ static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
 	return -1;
 }
 
+static void cfd_seq_data_add(u64 val, unsigned int src, unsigned int dst,
+			     unsigned int type, union cfd_seq_cnt *data,
+			     unsigned int *n_data, unsigned int now)
+{
+	union cfd_seq_cnt new[2];
+	unsigned int i, j, k;
+
+	new[0].val = val;
+	new[1] = CFD_SEQ(src, dst, type, new[0].u.cnt + 1);
+
+	for (i = 0; i < 2; i++) {
+		if (new[i].u.cnt <= now)
+			new[i].u.cnt |= 0x80000000U;
+		for (j = 0; j < *n_data; j++) {
+			if (new[i].u.cnt == data[j].u.cnt) {
+				/* Direct read value trumps generated one. */
+				if (i == 0)
+					data[j].val = new[i].val;
+				break;
+			}
+			if (new[i].u.cnt < data[j].u.cnt) {
+				for (k = *n_data; k > j; k--)
+					data[k].val = data[k - 1].val;
+				data[j].val = new[i].val;
+				(*n_data)++;
+				break;
+			}
+		}
+		if (j == *n_data) {
+			data[j].val = new[i].val;
+			(*n_data)++;
+		}
+	}
+}
+
+static const char *csd_lock_get_type(unsigned int type)
+{
+	return (type >= ARRAY_SIZE(seq_type)) ? "?" : seq_type[type];
+}
+
+static void csd_lock_print_extended(call_single_data_t *csd, int cpu)
+{
+	struct cfd_seq_local *seq = &per_cpu(cfd_seq_local, cpu);
+	unsigned int srccpu = csd->node.src;
+	struct call_function_data *cfd = per_cpu_ptr(&cfd_data, srccpu);
+	struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
+	unsigned int now;
+	union cfd_seq_cnt data[2 * ARRAY_SIZE(seq_type)];
+	unsigned int n_data = 0, i;
+
+	data[0].val = READ_ONCE(cfd_seq);
+	now = data[0].u.cnt;
+
+	cfd_seq_data_add(pcpu->seq_queue, srccpu, cpu, CFD_SEQ_QUEUE, data, &n_data, now);
+	cfd_seq_data_add(pcpu->seq_ipi, srccpu, cpu, CFD_SEQ_IPI, data, &n_data, now);
+	cfd_seq_data_add(pcpu->seq_noipi, srccpu, cpu, CFD_SEQ_NOIPI, data, &n_data, now);
+
+	cfd_seq_data_add(per_cpu(cfd_seq_local.ping, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PING, data, &n_data, now);
+	cfd_seq_data_add(per_cpu(cfd_seq_local.pinged, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED, data, &n_data, now);
+
+	cfd_seq_data_add(seq->idle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_IDLE, data, &n_data, now);
+	cfd_seq_data_add(seq->gotipi, CFD_SEQ_NOCPU, cpu, CFD_SEQ_GOTIPI, data, &n_data, now);
+	cfd_seq_data_add(seq->handle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HANDLE, data, &n_data, now);
+	cfd_seq_data_add(seq->dequeue, CFD_SEQ_NOCPU, cpu, CFD_SEQ_DEQUEUE, data, &n_data, now);
+	cfd_seq_data_add(seq->hdlend, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HDLEND, data, &n_data, now);
+
+	for (i = 0; i < n_data; i++) {
+		pr_alert("\tcsd: cnt(%07x): %04x->%04x %s\n",
+			 data[i].u.cnt & ~0x80000000U, data[i].u.src,
+			 data[i].u.dst, csd_lock_get_type(data[i].u.type));
+	}
+	pr_alert("\tcsd: cnt now: %07x\n", now);
+}
+
 /*
  * Complain if too much time spent waiting.  Note that only
  * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
  * so waiting on other types gets much less information.
  */
-static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
+static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
 {
 	int cpu = -1;
 	int cpux;
@@ -184,6 +365,8 @@ static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 t
 			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
 	}
 	if (cpu >= 0) {
+		if (static_branch_unlikely(&csdlock_debug_extended))
+			csd_lock_print_extended(csd, cpu);
 		if (!trigger_single_cpu_backtrace(cpu))
 			dump_cpu_task(cpu);
 		if (!cpu_cur_csd) {
@@ -204,7 +387,7 @@ static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 t
  * previous function call. For multi-cpu calls its even more interesting
  * as we'll have to ensure no other cpu is observing our csd.
  */
-static __always_inline void csd_lock_wait(call_single_data_t *csd)
+static void __csd_lock_wait(call_single_data_t *csd)
 {
 	int bug_id = 0;
 	u64 ts0, ts1;
@@ -218,7 +401,36 @@ static __always_inline void csd_lock_wait(call_single_data_t *csd)
 	smp_acquire__after_ctrl_dep();
 }
 
+static __always_inline void csd_lock_wait(call_single_data_t *csd)
+{
+	if (static_branch_unlikely(&csdlock_debug_enabled)) {
+		__csd_lock_wait(csd);
+		return;
+	}
+
+	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
+}
+
+static void __smp_call_single_queue_debug(int cpu, struct llist_node *node)
+{
+	unsigned int this_cpu = smp_processor_id();
+	struct cfd_seq_local *seq = this_cpu_ptr(&cfd_seq_local);
+	struct call_function_data *cfd = this_cpu_ptr(&cfd_data);
+	struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
+
+	cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
+	if (llist_add(node, &per_cpu(call_single_queue, cpu))) {
+		cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
+		cfd_seq_store(seq->ping, this_cpu, cpu, CFD_SEQ_PING);
+		send_call_function_single_ipi(cpu);
+		cfd_seq_store(seq->pinged, this_cpu, cpu, CFD_SEQ_PINGED);
+	} else {
+		cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
+	}
+}
 #else
+#define cfd_seq_store(var, src, dst, type)
+
 static void csd_lock_record(call_single_data_t *csd)
 {
 }
@@ -256,6 +468,19 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
 
 void __smp_call_single_queue(int cpu, struct llist_node *node)
 {
+#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
+	if (static_branch_unlikely(&csdlock_debug_extended)) {
+		unsigned int type;
+
+		type = CSD_TYPE(container_of(node, call_single_data_t,
+					     node.llist));
+		if (type == CSD_TYPE_SYNC || type == CSD_TYPE_ASYNC) {
+			__smp_call_single_queue_debug(cpu, node);
+			return;
+		}
+	}
+#endif
+
 	/*
 	 * The list addition should be visible before sending the IPI
 	 * handler locks the list to pull the entry off it because of
@@ -314,6 +539,8 @@ static int generic_exec_single(int cpu, call_single_data_t *csd)
  */
 void generic_smp_call_function_single_interrupt(void)
 {
+	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->gotipi, CFD_SEQ_NOCPU,
+		      smp_processor_id(), CFD_SEQ_GOTIPI);
 	flush_smp_call_function_queue(true);
 }
 
@@ -341,7 +568,13 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 	lockdep_assert_irqs_disabled();
 
 	head = this_cpu_ptr(&call_single_queue);
+	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->handle, CFD_SEQ_NOCPU,
+		      smp_processor_id(), CFD_SEQ_HANDLE);
 	entry = llist_del_all(head);
+	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->dequeue,
+		      /* Special meaning of source cpu: 0 == queue empty */
+		      entry ? CFD_SEQ_NOCPU : 0,
+		      smp_processor_id(), CFD_SEQ_DEQUEUE);
 	entry = llist_reverse_order(entry);
 
 	/* There shouldn't be any pending callbacks on an offline CPU. */
@@ -400,8 +633,12 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 		}
 	}
 
-	if (!entry)
+	if (!entry) {
+		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend,
+			      0, smp_processor_id(),
+			      CFD_SEQ_HDLEND);
 		return;
+	}
 
 	/*
 	 * Second; run all !SYNC callbacks.
@@ -439,6 +676,9 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 	 */
 	if (entry)
 		sched_ttwu_pending(entry);
+
+	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend, CFD_SEQ_NOCPU,
+		      smp_processor_id(), CFD_SEQ_HDLEND);
 }
 
 void flush_smp_call_function_from_idle(void)
@@ -448,6 +688,8 @@ void flush_smp_call_function_from_idle(void)
 	if (llist_empty(this_cpu_ptr(&call_single_queue)))
 		return;
 
+	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU,
+		      smp_processor_id(), CFD_SEQ_IDLE);
 	local_irq_save(flags);
 	flush_smp_call_function_queue(true);
 	if (local_softirq_pending())
@@ -667,7 +909,8 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 
 		cpumask_clear(cfd->cpumask_ipi);
 		for_each_cpu(cpu, cfd->cpumask) {
-			call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
+			struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
+			call_single_data_t *csd = &pcpu->csd;
 
 			if (cond_func && !cond_func(cpu, info))
 				continue;
@@ -681,13 +924,20 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 			csd->node.src = smp_processor_id();
 			csd->node.dst = cpu;
 #endif
+			cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
 			if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
 				__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
 				nr_cpus++;
 				last_cpu = cpu;
+
+				cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
+			} else {
+				cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
 			}
 		}
 
+		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->ping, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PING);
+
 		/*
 		 * Choose the most efficient way to send an IPI. Note that the
 		 * number of CPUs might be zero due to concurrent changes to the
@@ -697,6 +947,8 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 			send_call_function_single_ipi(last_cpu);
 		else if (likely(nr_cpus > 1))
 			arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
+
+		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->pinged, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED);
 	}
 
 	if (run_local && (!cond_func || cond_func(this_cpu, info))) {
@@ -711,7 +963,7 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 		for_each_cpu(cpu, cfd->cpumask) {
 			call_single_data_t *csd;
 
-			csd = per_cpu_ptr(cfd->csd, cpu);
+			csd = &per_cpu_ptr(cfd->pcpu, cpu)->csd;
 			csd_lock_wait(csd);
 		}
 	}