author:    Maxim Mikityanskiy <maximmi@nvidia.com>  2021-06-01 15:07:59 +0300
committer: David S. Miller <davem@davemloft.net>    2021-06-01 15:58:05 -0700
commit:    05fc8b6cbd4f979a6f25759c4a17dd5f657f7ecd
tree:      79d371e6863c76af7cba97aa92fb4b6972cf899f /net/tls
parent:    f336d0b93ae978f12c5e27199f828da89b91e56a
net/tls: Replace TLS_RX_SYNC_RUNNING with RCU
RCU synchronization is guaranteed to finish in finite time, unlike a
busy loop that polls a flag. This patch is a preparation for the bugfix
in the next patch, where the same synchronize_net() call will also be
used to sync with the TX datapath.
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
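For readers unfamiliar with the pattern the message describes: the datapath dereferences a published pointer inside an rcu_read_lock()/rcu_read_unlock() critical section, and the teardown path unpublishes the pointer and then waits one RCU grace period, after which no reader can still be using the old value. Below is a minimal, self-contained sketch of that pattern as a toy kernel module; the struct and function names (cfg, cfg_read, cfg_teardown) are illustrative inventions, not taken from this patch.

#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct cfg {
	int val;
};

static struct cfg __rcu *active_cfg;

/* Reader side: cheap, never blocks the writer. It sees either the
 * still-valid object or NULL, never a freed one.
 */
static int cfg_read(void)
{
	struct cfg *c;
	int v = -1;

	rcu_read_lock();
	c = rcu_dereference(active_cfg);
	if (c)
		v = c->val;
	rcu_read_unlock();
	return v;
}

/* Writer side: unpublish first, then wait for the grace period.
 * Unlike the old TLS_RX_SYNC_RUNNING busy loop, synchronize_rcu()
 * is guaranteed to return in finite time.
 */
static void cfg_teardown(void)
{
	struct cfg *c = rcu_dereference_protected(active_cfg, true);

	RCU_INIT_POINTER(active_cfg, NULL);
	synchronize_rcu();	/* every reader that saw 'c' has exited */
	kfree(c);
}

static int __init cfg_init(void)
{
	struct cfg *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return -ENOMEM;
	c->val = 42;
	rcu_assign_pointer(active_cfg, c);	/* publish */
	pr_info("cfg_read() = %d\n", cfg_read());
	return 0;
}

static void __exit cfg_exit(void)
{
	cfg_teardown();
}

module_init(cfg_init);
module_exit(cfg_exit);
MODULE_LICENSE("GPL");

This mirrors the patch below: READ_ONCE(tls_ctx->netdev) under rcu_read_lock() plays the role of the rcu_dereference() above, and WRITE_ONCE(ctx->netdev, NULL) followed by synchronize_net() plays the role of the unpublish-then-wait sequence in cfg_teardown().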
Diffstat (limited to 'net/tls')
 net/tls/tls_device.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 76a6f8c2eec4..171752cd6910 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -680,15 +680,13 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
 	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
 	struct net_device *netdev;
 
-	if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
-		return;
-
 	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
+	rcu_read_lock();
 	netdev = READ_ONCE(tls_ctx->netdev);
 	if (netdev)
 		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
 						   TLS_OFFLOAD_CTX_DIR_RX);
-	clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
+	rcu_read_unlock();
 	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
 }
 
@@ -1300,9 +1298,7 @@ static int tls_device_down(struct net_device *netdev)
 		netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
 						TLS_OFFLOAD_CTX_DIR_RX);
 		WRITE_ONCE(ctx->netdev, NULL);
-		smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
-		while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
-			usleep_range(10, 200);
+		synchronize_net();
 		dev_put(netdev);
 		list_del_init(&ctx->list);
 
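A note on the helper chosen, since a bare synchronize_rcu() would also pair correctly with the rcu_read_lock() above: synchronize_net() is the networking wrapper for RCU synchronization, and (from memory of net/core/dev.c around this kernel version, not from this patch) it is approximately the following, opting into the expedited grace period when the caller holds RTNL:

void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}

tls_device_down() runs from the netdevice notifier, i.e. under RTNL, so the expedited path keeps device teardown from stalling behind a full grace period, while still guaranteeing that tls_device_resync_rx() can no longer be mid-dereference of the stale netdev pointer once synchronize_net() returns.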