author    Linus Torvalds  2019-05-06 13:50:15 -0700
committer Linus Torvalds  2019-05-06 13:50:15 -0700
commit    007dc78fea62610bf06829e38f1d8c69b6ea5af6
tree      683af90696ed7a237dedd48030bfd649e5822955 /include
parent    2f1835dffa949f560dfa3ed63c0bfc10944b461c
parent    d671002be6bdd7f77a771e23bf3e95d1f16775e6
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"Here are the locking changes in this cycle:
- rwsem unification and simpler micro-optimizations to prepare for
more intrusive (and more lucrative) scalability improvements in
v5.3 (Waiman Long)
- Lockdep irq state tracking flag usage cleanups (Frederic
Weisbecker)
- static key improvements (Jakub Kicinski, Peter Zijlstra)
- misc updates, cleanups and smaller fixes"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (26 commits)
locking/lockdep: Remove unnecessary unlikely()
locking/static_key: Don't take sleeping locks in __static_key_slow_dec_deferred()
locking/static_key: Factor out the fast path of static_key_slow_dec()
locking/static_key: Add support for deferred static branches
locking/lockdep: Test all incompatible scenarios at once in check_irq_usage()
locking/lockdep: Avoid bogus Clang warning
locking/lockdep: Generate LOCKF_ bit composites
locking/lockdep: Use expanded masks on find_usage_*() functions
locking/lockdep: Map remaining magic numbers to lock usage mask names
locking/lockdep: Move valid_state() inside CONFIG_TRACE_IRQFLAGS && CONFIG_PROVE_LOCKING
locking/rwsem: Prevent unneeded warning during locking selftest
locking/rwsem: Optimize rwsem structure for uncontended lock acquisition
locking/rwsem: Enable lock event counting
locking/lock_events: Don't show pvqspinlock events on bare metal
locking/lock_events: Make lock_events available for all archs & other locks
locking/qspinlock_stat: Introduce generic lockevent_*() counting APIs
locking/rwsem: Enhance DEBUG_RWSEMS_WARN_ON() macro
locking/rwsem: Add debug check for __down_read*()
locking/rwsem: Micro-optimize rwsem_try_read_lock_unqueued()
locking/rwsem: Move rwsem internal function declarations to rwsem-xadd.h
...
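Note: the static_key commits above add deferred static branches; the new API lands in include/linux/jump_label_ratelimit.h in the diff below. A minimal usage sketch follows, assuming a hypothetical key named fast_path_key and a one-second (HZ) timeout — neither is taken from this merge:

#include <linux/jump_label_ratelimit.h>

/* Hypothetical key: branch is false by default, disable is rate-limited to ~1s. */
static DEFINE_STATIC_KEY_DEFERRED_FALSE(fast_path_key, HZ);

static void fast_path_enable(void)
{
	/* Enabling patches the branch immediately. */
	static_branch_deferred_inc(&fast_path_key);
}

static void fast_path_disable(void)
{
	/*
	 * The decrement (and any text patching it triggers) is deferred by
	 * the key's timeout, so rapid enable/disable cycles stay cheap.
	 */
	static_branch_slow_dec_deferred(&fast_path_key);
}

static bool fast_path_active(void)
{
	return static_branch_unlikely(&fast_path_key.key);
}

Per the hunk below, static_key_deferred_flush() accepts the same deferred structures if a pending decrement must be forced before teardown.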
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/rwsem.h            140
-rw-r--r--  include/linux/jump_label_ratelimit.h    64
-rw-r--r--  include/linux/lockdep.h                  2
-rw-r--r--  include/linux/rwsem-spinlock.h          47
-rw-r--r--  include/linux/rwsem.h                   37
5 files changed, 78 insertions, 212 deletions
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
deleted file mode 100644
index 93e67a055a4d..000000000000
--- a/include/asm-generic/rwsem.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_RWSEM_H
-#define _ASM_GENERIC_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
-#endif
-
-#ifdef __KERNEL__
-
-/*
- * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
- * Adapted largely from include/asm-i386/rwsem.h
- * by Paul Mackerras <paulus@samba.org>.
- */
-
-/*
- * the semaphore definition
- */
-#ifdef CONFIG_64BIT
-# define RWSEM_ACTIVE_MASK		0xffffffffL
-#else
-# define RWSEM_ACTIVE_MASK		0x0000ffffL
-#endif
-
-#define RWSEM_UNLOCKED_VALUE		0x00000000L
-#define RWSEM_ACTIVE_BIAS		0x00000001L
-#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
-#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
-		rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_killable(struct rw_semaphore *sem)
-{
-	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
-		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
-			return -EINTR;
-	}
-
-	return 0;
-}
-
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	while ((tmp = atomic_long_read(&sem->count)) >= 0) {
-		if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
-				tmp + RWSEM_ACTIVE_READ_BIAS)) {
-			return 1;
-		}
-	}
-	return 0;
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
-					     &sem->count);
-	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
-		rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
-					     &sem->count);
-	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
-		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
-			return -EINTR;
-	return 0;
-}
-
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
-			RWSEM_ACTIVE_WRITE_BIAS);
-	return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = atomic_long_dec_return_release(&sem->count);
-	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
-		rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-	if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
-						    &sem->count) < 0))
-		rwsem_wake(sem);
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	/*
-	 * When downgrading from exclusive to shared ownership,
-	 * anything inside the write-locked region cannot leak
-	 * into the read side. In contrast, anything in the
-	 * read-locked region is ok to be re-ordered into the
-	 * write side. As such, rely on RELEASE semantics.
-	 */
-	tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
-	if (tmp < 0)
-		rwsem_downgrade_wake(sem);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_GENERIC_RWSEM_H */
diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
index a49f2b45b3f0..42710d5949ba 100644
--- a/include/linux/jump_label_ratelimit.h
+++ b/include/linux/jump_label_ratelimit.h
@@ -12,21 +12,79 @@ struct static_key_deferred {
 	struct delayed_work work;
 };
 
-extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
-extern void static_key_deferred_flush(struct static_key_deferred *key);
+struct static_key_true_deferred {
+	struct static_key_true key;
+	unsigned long timeout;
+	struct delayed_work work;
+};
+
+struct static_key_false_deferred {
+	struct static_key_false key;
+	unsigned long timeout;
+	struct delayed_work work;
+};
+
+#define static_key_slow_dec_deferred(x)					\
+	__static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout)
+#define static_branch_slow_dec_deferred(x)				\
+	__static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout)
+
+#define static_key_deferred_flush(x)					\
+	__static_key_deferred_flush((x), &(x)->work)
+
+extern void
+__static_key_slow_dec_deferred(struct static_key *key,
+			       struct delayed_work *work,
+			       unsigned long timeout);
+extern void __static_key_deferred_flush(void *key, struct delayed_work *work);
 extern void
 jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
 
+extern void jump_label_update_timeout(struct work_struct *work);
+
+#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl)			\
+	struct static_key_true_deferred name = {			\
+		.key =		{ STATIC_KEY_INIT_TRUE },		\
+		.timeout =	(rl),					\
+		.work =	__DELAYED_WORK_INITIALIZER((name).work,		\
+						   jump_label_update_timeout, \
+						   0),			\
+	}
+
+#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl)			\
+	struct static_key_false_deferred name = {			\
+		.key =		{ STATIC_KEY_INIT_FALSE },		\
+		.timeout =	(rl),					\
+		.work =	__DELAYED_WORK_INITIALIZER((name).work,		\
+						   jump_label_update_timeout, \
+						   0),			\
+	}
+
+#define static_branch_deferred_inc(x)	static_branch_inc(&(x)->key)
+
 #else	/* !CONFIG_JUMP_LABEL */
 struct static_key_deferred {
 	struct static_key key;
 };
+struct static_key_true_deferred {
+	struct static_key_true key;
+};
+struct static_key_false_deferred {
+	struct static_key_false key;
+};
+#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl)	\
+	struct static_key_true_deferred name = { STATIC_KEY_TRUE_INIT }
+#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl)	\
+	struct static_key_false_deferred name = { STATIC_KEY_FALSE_INIT }
+
+#define static_branch_slow_dec_deferred(x)	static_branch_dec(&(x)->key)
+
 static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
 {
 	STATIC_KEY_CHECK_USE(key);
 	static_key_slow_dec(&key->key);
 }
-static inline void static_key_deferred_flush(struct static_key_deferred *key)
+static inline void static_key_deferred_flush(void *key)
 {
 	STATIC_KEY_CHECK_USE(key);
 }
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 6f165d625320..6e2377e6c1d6 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -476,7 +476,7 @@ struct pin_cookie { };
 
 #define NIL_COOKIE (struct pin_cookie){ }
 
-#define lockdep_pin_lock(l)			({ struct pin_cookie cookie; cookie; })
+#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
 #define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
 #define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
 
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
deleted file mode 100644
index e47568363e5e..000000000000
--- a/include/linux/rwsem-spinlock.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* rwsem-spinlock.h: fallback C implementation
- *
- * Copyright (c) 2001   David Howells (dhowells@redhat.com).
- * - Derived partially from ideas by Andrea Arcangeli <andrea@suse.de>
- * - Derived also from comments by Linus
- */
-
-#ifndef _LINUX_RWSEM_SPINLOCK_H
-#define _LINUX_RWSEM_SPINLOCK_H
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include linux/rwsem-spinlock.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-/*
- * the rw-semaphore definition
- * - if count is 0 then there are no active readers or writers
- * - if count is +ve then that is the number of active readers
- * - if count is -1 then there is one active writer
- * - if wait_list is not empty, then there are processes waiting for the semaphore
- */
-struct rw_semaphore {
-	__s32			count;
-	raw_spinlock_t		wait_lock;
-	struct list_head	wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map dep_map;
-#endif
-};
-
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-
-extern void __down_read(struct rw_semaphore *sem);
-extern int __must_check __down_read_killable(struct rw_semaphore *sem);
-extern int __down_read_trylock(struct rw_semaphore *sem);
-extern void __down_write(struct rw_semaphore *sem);
-extern int __must_check __down_write_killable(struct rw_semaphore *sem);
-extern int __down_write_trylock(struct rw_semaphore *sem);
-extern void __up_read(struct rw_semaphore *sem);
-extern void __up_write(struct rw_semaphore *sem);
-extern void __downgrade_write(struct rw_semaphore *sem);
-extern int rwsem_is_locked(struct rw_semaphore *sem);
-
-#endif /* __KERNEL__ */
-#endif /* _LINUX_RWSEM_SPINLOCK_H */
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 67dbb57508b1..2ea18a3def04 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -20,25 +20,30 @@
 #include <linux/osq_lock.h>
 #endif
 
-struct rw_semaphore;
-
-#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
-#include <linux/rwsem-spinlock.h> /* use a generic implementation */
-#define __RWSEM_INIT_COUNT(name)	.count = RWSEM_UNLOCKED_VALUE
-#else
-/* All arch specific implementations share the same struct */
+/*
+ * For an uncontended rwsem, count and owner are the only fields a task
+ * needs to touch when acquiring the rwsem. So they are put next to each
+ * other to increase the chance that they will share the same cacheline.
+ *
+ * In a contended rwsem, the owner is likely the most frequently accessed
+ * field in the structure as the optimistic waiter that holds the osq lock
+ * will spin on owner. For an embedded rwsem, other hot fields in the
+ * containing structure should be moved further away from the rwsem to
+ * reduce the chance that they will share the same cacheline causing
+ * cacheline bouncing problem.
+ */
 struct rw_semaphore {
 	atomic_long_t count;
-	struct list_head wait_list;
-	raw_spinlock_t wait_lock;
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
-	struct optimistic_spin_queue osq; /* spinner MCS lock */
 	/*
 	 * Write owner. Used as a speculative check to see
 	 * if the owner is running on the cpu.
 	 */
 	struct task_struct *owner;
+	struct optimistic_spin_queue osq; /* spinner MCS lock */
 #endif
+	raw_spinlock_t wait_lock;
+	struct list_head wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
 #endif
@@ -50,24 +55,14 @@ struct rw_semaphore {
  */
 #define RWSEM_OWNER_UNKNOWN	((struct task_struct *)-2L)
 
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-/* Include the arch specific part */
-#include <asm/rwsem.h>
-
 /* In all implementations count != 0 means locked */
 static inline int rwsem_is_locked(struct rw_semaphore *sem)
 {
 	return atomic_long_read(&sem->count) != 0;
 }
 
+#define RWSEM_UNLOCKED_VALUE		0L
 #define __RWSEM_INIT_COUNT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
 
-#endif
-
 /* Common initializer macros and functions */
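Note: the new layout comment in include/linux/rwsem.h above also gives guidance for code that embeds an rwsem: keep other frequently written fields off the rwsem's cacheline, since optimistic spinners poll sem->owner. A sketch of one way an embedding structure might follow that advice; struct sample_object and its fields are hypothetical, not part of this merge:

#include <linux/atomic.h>
#include <linux/cache.h>
#include <linux/rwsem.h>

/* Hypothetical container laid out per the rwsem.h comment. */
struct sample_object {
	/*
	 * Frequently dirtied fields on their own cacheline, away from the
	 * rwsem, so waiters spinning on sem->owner do not bounce them.
	 */
	unsigned long		hot_flags ____cacheline_aligned_in_smp;
	atomic_long_t		hot_counter;

	/* The rwsem keeps count and owner together on its own cacheline. */
	struct rw_semaphore	sem ____cacheline_aligned_in_smp;
};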