author		Bart Van Assche	2019-04-03 16:35:52 -0700
committer	Ingo Molnar	2019-04-10 13:45:59 +0200
commit		90c1cba2b3b3851c151229f61801919b2904d437 (patch)
tree		57e97c7d41521ec536fa3d546e4e7e1639aa1520 /kernel
parent		771acc7e4a6e5dba779cb1a7fd851a164bc81033 (diff)
locking/lockdep: Zap lock classes even with lock debugging disabled
The following commit:
a0b0fd53e1e6 ("locking/lockdep: Free lock classes that are no longer in use")
changed the behavior of lockdep_free_key_range() from
unconditionally zapping lock classes to zapping lock classes only if
debug_locks == true. Not zapping lock classes when debug_locks == false
leaves dangling pointers in several lockdep data structures, e.g.
lock_class::name in the all_lock_classes list.
The shell command "cat /proc/lockdep" causes the kernel to iterate the
all_lock_classes list. Hence the "unable to handle kernel paging request"
crash that Shenghui encountered by running that command.
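The failure mode is a plain use-after-free through a global list that
outlives one of the objects it references. A minimal user-space sketch of
the same hazard (all names hypothetical; this shows the shape of the bug,
not the lockdep code itself):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for struct lock_class: the entry stays on a global list
 * while its ->name points into memory owned by someone else. */
struct klass {
	struct klass *next;
	const char *name;	/* dangles once the owner is freed */
};

static struct klass *all_classes;

static void register_class(const char *name)
{
	struct klass *c = malloc(sizeof(*c));
	c->name = name;		/* borrowed pointer, not a copy */
	c->next = all_classes;
	all_classes = c;
}

int main(void)
{
	char *module_name = strdup("example_module");

	register_class(module_name);

	/* The backing memory goes away, but the class is never zapped,
	 * so all_classes still references the freed string. */
	free(module_name);

	/* Iterating the list -- what reading /proc/lockdep does --
	 * now dereferences freed memory: undefined behavior here,
	 * a paging fault in the kernel. */
	for (struct klass *c = all_classes; c; c = c->next)
		printf("%s\n", c->name);
	return 0;
}

Zapping a class on free is what severs the list entry from the memory
being released; skipping the zap when debug_locks is false leaves the
stale reference in place.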
Since the new behavior can cause "cat /proc/lockdep" to crash, restore the
pre-v5.1 behavior.

This patch prevents "cat /proc/lockdep" from triggering the following crash
with debug_locks == false:
BUG: unable to handle kernel paging request at fffffbfff40ca448
RIP: 0010:__asan_load1+0x28/0x50
Call Trace:
string+0xac/0x180
vsnprintf+0x23e/0x820
seq_vprintf+0x82/0xc0
seq_printf+0x92/0xb0
print_name+0x34/0xb0
l_show+0x184/0x200
seq_read+0x59e/0x6c0
proc_reg_read+0x11f/0x170
__vfs_read+0x4d/0x90
vfs_read+0xc5/0x1f0
ksys_read+0xab/0x130
__x64_sys_read+0x43/0x50
do_syscall_64+0x71/0x210
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Reported-by: shenghui <shhuiw@foxmail.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Fixes: a0b0fd53e1e6 ("locking/lockdep: Free lock classes that are no longer in use") # v5.1-rc1.
Link: https://lkml.kernel.org/r/20190403233552.124673-1-bvanassche@acm.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/locking/lockdep.c | 29 ++++++++++++-----------------
1 file changed, 12 insertions(+), 17 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 34cdcbedda49..e16766ff184b 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4689,8 +4689,8 @@ static void free_zapped_rcu(struct rcu_head *ch)
 		return;
 
 	raw_local_irq_save(flags);
-	if (!graph_lock())
-		goto out_irq;
+	arch_spin_lock(&lockdep_lock);
+	current->lockdep_recursion = 1;
 
 	/* closed head */
 	pf = delayed_free.pf + (delayed_free.index ^ 1);
@@ -4702,8 +4702,8 @@ static void free_zapped_rcu(struct rcu_head *ch)
 	 */
 	call_rcu_zapped(delayed_free.pf + delayed_free.index);
 
-	graph_unlock();
-out_irq:
+	current->lockdep_recursion = 0;
+	arch_spin_unlock(&lockdep_lock);
 	raw_local_irq_restore(flags);
 }
 
@@ -4744,21 +4744,17 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 {
 	struct pending_free *pf;
 	unsigned long flags;
-	int locked;
 
 	init_data_structures_once();
 
 	raw_local_irq_save(flags);
-	locked = graph_lock();
-	if (!locked)
-		goto out_irq;
-
+	arch_spin_lock(&lockdep_lock);
+	current->lockdep_recursion = 1;
 	pf = get_pending_free();
 	__lockdep_free_key_range(pf, start, size);
 	call_rcu_zapped(pf);
-
-	graph_unlock();
-out_irq:
+	current->lockdep_recursion = 0;
+	arch_spin_unlock(&lockdep_lock);
 	raw_local_irq_restore(flags);
 
 	/*
@@ -4911,9 +4907,8 @@ void lockdep_unregister_key(struct lock_class_key *key)
 		return;
 
 	raw_local_irq_save(flags);
-	if (!graph_lock())
-		goto out_irq;
-
+	arch_spin_lock(&lockdep_lock);
+	current->lockdep_recursion = 1;
 	pf = get_pending_free();
 	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
 		if (k == key) {
@@ -4925,8 +4920,8 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	WARN_ON_ONCE(!found);
 	__lockdep_free_key_range(pf, key, 1);
 	call_rcu_zapped(pf);
-	graph_unlock();
-out_irq:
+	current->lockdep_recursion = 0;
+	arch_spin_unlock(&lockdep_lock);
 	raw_local_irq_restore(flags);
 
 	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
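The pattern common to all three hunks: stop relying on graph_lock(), which
returns 0 once debug_locks has been cleared (so callers bailed out via
goto out_irq and never zapped anything), and instead take lockdep_lock
directly with arch_spin_lock() while setting the recursion guard by hand,
so the zap path runs unconditionally. A minimal user-space sketch of the
old versus new pattern (a pthread mutex standing in for arch_spinlock_t;
all names hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel's state. */
static pthread_mutex_t graph_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool debug_locks = false;	/* debugging already turned off */
static __thread int lockdep_recursion;	/* per-task recursion guard    */

/* Old pattern: the helper refuses to lock once debugging is off,
 * so callers skipped the cleanup entirely. */
static bool graph_lock(void)
{
	if (!debug_locks)
		return false;
	pthread_mutex_lock(&graph_mutex);
	return true;
}

static void zap_classes(void) { puts("classes zapped"); }

static void free_key_range_old(void)
{
	if (!graph_lock())
		return;			/* bug: classes never zapped */
	zap_classes();
	pthread_mutex_unlock(&graph_mutex);
}

/* New pattern: take the lock unconditionally and set the recursion
 * guard by hand, so cleanup runs even with debug_locks == false. */
static void free_key_range_new(void)
{
	pthread_mutex_lock(&graph_mutex);
	lockdep_recursion = 1;
	zap_classes();
	lockdep_recursion = 0;
	pthread_mutex_unlock(&graph_mutex);
}

int main(void)
{
	free_key_range_old();		/* prints nothing */
	free_key_range_new();		/* prints "classes zapped" */
	return 0;
}

In the kernel, setting current->lockdep_recursion also keeps lockdep from
recursing into itself while lockdep_lock is held, which is what
graph_lock() used to arrange on its success path.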