author    | Linus Torvalds | 2018-08-13 12:23:39 -0700
committer | Linus Torvalds | 2018-08-13 12:23:39 -0700
commit    | de5d1b39ea0b38a9f4dfb08966042b7b91e2df30 (patch)
tree      | 3591bdac4fe6756b4e3dc68b2ed1c792c4104218 /net
parent    | 1c594774283a7cfe6dc0f8ffdfb2dbfc497502c4 (diff)
parent    | fd2efaa4eb5317c3a86357a83a7d456a1b86a0ac (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking/atomics update from Thomas Gleixner:
"The locking, atomics and memory model brains delivered:
- A larger update to the atomics code which reworks the ordering
barriers, consolidates the atomic primitives, provides the new
atomic64_fetch_add_unless() primitive and cleans up the include
hell.
- Simplify cmpxchg() instrumentation and add instrumentation for
xchg() and cmpxchg_double().
- Updates to the memory model and documentation"
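For context on the "add unless" primitive mentioned above: it adds a value to an atomic counter unless the counter currently holds an excluded value, and returns the previously observed value so the caller can tell whether the add happened. A minimal userspace sketch of those semantics, using C11 atomics rather than the kernel's implementation (illustration only; fetch_add_unless() here is a name invented for the example):

    #include <stdatomic.h>
    #include <stdio.h>

    /*
     * Sketch of the fetch_add_unless() semantics described in the pull
     * request: add 'a' to '*v' unless '*v' currently equals 'u', and
     * return the value observed before the (attempted) addition.
     */
    static int fetch_add_unless(atomic_int *v, int a, int u)
    {
        int c = atomic_load(v);

        /* Retry the CAS until it succeeds or the excluded value is seen. */
        while (c != u && !atomic_compare_exchange_weak(v, &c, c + a))
            ;
        return c;
    }

    int main(void)
    {
        atomic_int refs = 1;

        /* Prints 1 (old value); refs becomes 2: the add happened. */
        printf("%d\n", fetch_add_unless(&refs, 1, 0));

        atomic_store(&refs, 0);

        /* Prints 0 and leaves refs untouched: the add was refused. */
        printf("%d\n", fetch_add_unless(&refs, 1, 0));
        return 0;
    }

Built with -std=c11, this prints 1 then 0: the first call takes the reference, the second refuses because the counter already holds the excluded value.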
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (48 commits)
locking/atomics: Rework ordering barriers
locking/atomics: Instrument cmpxchg_double*()
locking/atomics: Instrument xchg()
locking/atomics: Simplify cmpxchg() instrumentation
locking/atomics/x86: Reduce arch_cmpxchg64*() instrumentation
tools/memory-model: Rename litmus tests to comply to norm7
tools/memory-model/Documentation: Fix typo, smb->smp
sched/Documentation: Update wake_up() & co. memory-barrier guarantees
locking/spinlock, sched/core: Clarify requirements for smp_mb__after_spinlock()
sched/core: Use smp_mb() in wake_woken_function()
tools/memory-model: Add informal LKMM documentation to MAINTAINERS
locking/atomics/Documentation: Describe atomic_set() as a write operation
tools/memory-model: Make scripts executable
tools/memory-model: Remove ACCESS_ONCE() from model
tools/memory-model: Remove ACCESS_ONCE() from recipes
locking/memory-barriers.txt/kokr: Update Korean translation to fix broken DMA vs. MMIO ordering example
MAINTAINERS: Add Daniel Lustig as an LKMM reviewer
tools/memory-model: Fix ISA2+pooncelock+pooncelock+pombonce name
tools/memory-model: Add litmus test for full multicopy atomicity
locking/refcount: Always allow checked forms
...
Diffstat (limited to 'net')
-rw-r--r-- | net/atm/pppoatm.c        | 2
-rw-r--r-- | net/rxrpc/call_object.c  | 2
-rw-r--r-- | net/rxrpc/conn_object.c  | 4
-rw-r--r-- | net/rxrpc/local_object.c | 2
-rw-r--r-- | net/rxrpc/peer_object.c  | 2
5 files changed, 6 insertions, 6 deletions
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index af8c4b38b746..d84227d75717 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -244,7 +244,7 @@ static int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
 	 * the packet count limit, so...
 	 */
 	if (atm_may_send(pvcc->atmvcc, size) &&
-	    atomic_inc_not_zero_hint(&pvcc->inflight, NONE_INFLIGHT))
+	    atomic_inc_not_zero(&pvcc->inflight))
 		return 1;
 
 	/*
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index f6734d8cb01a..9486293fef5c 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -415,7 +415,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
 bool rxrpc_queue_call(struct rxrpc_call *call)
 {
 	const void *here = __builtin_return_address(0);
-	int n = __atomic_add_unless(&call->usage, 1, 0);
+	int n = atomic_fetch_add_unless(&call->usage, 1, 0);
 	if (n == 0)
 		return false;
 	if (rxrpc_queue_work(&call->processor))
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 4c77a78a252a..77440a356b14 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -266,7 +266,7 @@ void rxrpc_kill_connection(struct rxrpc_connection *conn)
 bool rxrpc_queue_conn(struct rxrpc_connection *conn)
 {
 	const void *here = __builtin_return_address(0);
-	int n = __atomic_add_unless(&conn->usage, 1, 0);
+	int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
 	if (n == 0)
 		return false;
 	if (rxrpc_queue_work(&conn->processor))
@@ -309,7 +309,7 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
 	const void *here = __builtin_return_address(0);
 
 	if (conn) {
-		int n = __atomic_add_unless(&conn->usage, 1, 0);
+		int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
 		if (n > 0)
 			trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
 		else
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index b493e6b62740..777c3ed4cfc0 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -305,7 +305,7 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
 	const void *here = __builtin_return_address(0);
 
 	if (local) {
-		int n = __atomic_add_unless(&local->usage, 1, 0);
+		int n = atomic_fetch_add_unless(&local->usage, 1, 0);
 		if (n > 0)
 			trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
 		else
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 24ec7cdcf332..1dc7648e3eff 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -406,7 +406,7 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
 	const void *here = __builtin_return_address(0);
 
 	if (peer) {
-		int n = __atomic_add_unless(&peer->usage, 1, 0);
+		int n = atomic_fetch_add_unless(&peer->usage, 1, 0);
 		if (n > 0)
 			trace_rxrpc_peer(peer, rxrpc_peer_got, n + 1, here);
 		else
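All of the net/ changes above follow one pattern: take a reference on an object only if its usage count has not already dropped to zero. The rxrpc helpers now spell this atomic_fetch_add_unless(&obj->usage, 1, 0) and test the returned old value, while pppoatm uses the boolean form, atomic_inc_not_zero(), with the _hint variant dropped. A self-contained userspace sketch of that "get a reference only if still live" pattern (illustration only; struct obj and get_obj_maybe() are names invented for the example, not kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Toy object with a usage count, standing in for the rxrpc objects above. */
    struct obj {
        atomic_int usage;
    };

    /*
     * Bump usage unless it is already zero; return true if a reference was
     * taken.  The kernel expresses this via atomic_fetch_add_unless() /
     * atomic_inc_not_zero() rather than an open-coded CAS loop.
     */
    static bool get_obj_maybe(struct obj *o)
    {
        int c = atomic_load(&o->usage);

        while (c != 0 && !atomic_compare_exchange_weak(&o->usage, &c, c + 1))
            ;
        return c != 0;   /* old value; zero means the object was already dead */
    }

    int main(void)
    {
        struct obj live = { .usage = 1 };
        struct obj dead = { .usage = 0 };

        printf("live: %s\n", get_obj_maybe(&live) ? "got ref" : "refused");
        printf("dead: %s\n", get_obj_maybe(&dead) ? "got ref" : "refused");
        return 0;
    }

The point of the pattern, which these conversions preserve unchanged, is that a count that has reached zero is never resurrected: once an object starts dying, no new references can be taken on it.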