author | Long Li | 2019-10-16 13:51:55 -0700
---|---|---
committer | Steve French | 2019-11-25 01:16:30 -0600
commit | c21ce58eab1eda4c66507897207e20c82e62a5ac (patch) |
tree | 102ac3e83e2af040b243246c8c771e4cef60c69d /fs |
parent | 87bc2376fffae6821869c988edbb45a14527b258 (diff) |
cifs: smbd: Only queue work for error recovery on memory registration
It's not necessary to queue an invalidated memory registration on the work
queue: all we need to do is unmap the SG list and make the MR usable again.
This saves CPU cycles on the normal data path, since memory registration
errors are rare and normally happen only during reconnection.
Signed-off-by: Long Li <longli@microsoft.com>
Cc: stable@vger.kernel.org
Signed-off-by: Steve French <stfrench@microsoft.com>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/cifs/smbdirect.c | 26
1 file changed, 15 insertions, 11 deletions
```diff
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index d91f2f60e2df..5b1b97e9e0c9 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -2271,12 +2271,7 @@ static void smbd_mr_recovery_work(struct work_struct *work)
 	int rc;
 
 	list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
-		if (smbdirect_mr->state == MR_INVALIDATED)
-			ib_dma_unmap_sg(
-				info->id->device, smbdirect_mr->sgl,
-				smbdirect_mr->sgl_count,
-				smbdirect_mr->dir);
-		else if (smbdirect_mr->state == MR_ERROR) {
+		if (smbdirect_mr->state == MR_ERROR) {
 
 			/* recover this MR entry */
 			rc = ib_dereg_mr(smbdirect_mr->mr);
@@ -2604,11 +2599,20 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
 		 */
 		smbdirect_mr->state = MR_INVALIDATED;
 
-	/*
-	 * Schedule the work to do MR recovery for future I/Os
-	 * MR recovery is slow and we don't want it to block the current I/O
-	 */
-	queue_work(info->workqueue, &info->mr_recovery_work);
+	if (smbdirect_mr->state == MR_INVALIDATED) {
+		ib_dma_unmap_sg(
+			info->id->device, smbdirect_mr->sgl,
+			smbdirect_mr->sgl_count,
+			smbdirect_mr->dir);
+		smbdirect_mr->state = MR_READY;
+		if (atomic_inc_return(&info->mr_ready_count) == 1)
+			wake_up_interruptible(&info->wait_mr);
+	} else
+		/*
+		 * Schedule the work to do MR recovery for future I/Os MR
+		 * recovery is slow and don't want it to block current I/O
+		 */
+		queue_work(info->workqueue, &info->mr_recovery_work);
 
 done:
 	if (atomic_dec_and_test(&info->mr_used_count))
```
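For readability, the net effect of the second hunk on smbd_deregister_mr() is sketched below as a condensed C excerpt of the diff above. This is an illustration of the new control flow, not a standalone build unit: the surrounding invalidation and error handling earlier in the function are omitted, and the comments are editorial rather than taken from the source.

```c
	if (smbdirect_mr->state == MR_INVALIDATED) {
		/*
		 * Fast path: the MR has already been invalidated, so just
		 * unmap its SG list and return it to the ready pool; no
		 * deferred recovery work is needed.
		 */
		ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgl,
				smbdirect_mr->sgl_count, smbdirect_mr->dir);
		smbdirect_mr->state = MR_READY;

		/* Wake a waiter once the first MR becomes available. */
		if (atomic_inc_return(&info->mr_ready_count) == 1)
			wake_up_interruptible(&info->wait_mr);
	} else {
		/*
		 * Slow path (MR_ERROR): recovery is expensive, so defer it
		 * to the work queue rather than block the current I/O.
		 */
		queue_work(info->workqueue, &info->mr_recovery_work);
	}
```

This keeps the cheap ib_dma_unmap_sg() call on the caller's context and reserves smbd_mr_recovery_work(), which now handles only MR_ERROR, for the rare failure case.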