author    Chuck Lever         2005-06-22 17:16:28 +0000
committer Trond Myklebust     2005-06-22 16:07:32 -0400
commit    ae3884621bf5b4caff7785b9a417f262202965b2 (patch)
tree      47cc8b29485d45f1967521a26151957e09c9012c /net
parent    20e5ac828dfd23b9080159c62a34f32d2dcd92fc (diff)
[PATCH] RPC: kick off socket connect operations faster
Make the socket transport kick the event queue to start socket connects
immediately. This should improve responsiveness of applications that are
sensitive to slow mount operations (like automounters).

We are now also careful to cancel the connect worker before destroying
the xprt. This eliminates a race where xprt_destroy can finish before the
connect worker is even allowed to run.

Test-plan:
Destructive testing (unplugging the network temporarily). Connectathon
with UDP and TCP. Hard-code impossibly small connect timeout.

Version: Fri, 29 Apr 2005 15:32:01 -0400

Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
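For context, a minimal sketch of the pattern this patch applies: queue the
connect worker immediately on a first connect, let synchronous callers flush
the shared workqueue so the connect gets a chance to run, and cancel plus
flush on teardown so a queued worker cannot outlive the object. This is a
sketch, not the patch itself; it assumes the 2005-era workqueue API, where
delayed and immediate work share struct work_struct, and the my_transport
names are hypothetical.

#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/slab.h>

struct my_transport {
	struct work_struct connect_worker;	/* like xprt->sock_connect */
	int connected;				/* like xprt->sock != NULL */
};
/* connect_worker is assumed to be set up with INIT_WORK elsewhere;
 * its signature changed across kernel versions, so it is elided here. */

/* Kick a connect: delay re-connects, but start a first connect at once. */
static void my_transport_connect(struct my_transport *t, int async)
{
	if (t->connected)
		/* the real code uses RPC_REESTABLISH_TIMEOUT here */
		schedule_delayed_work(&t->connect_worker, 5 * HZ);
	else {
		schedule_work(&t->connect_worker);
		/* synchronous callers wait for the shared queue to drain */
		if (!async)
			flush_scheduled_work();
	}
}

/* Teardown: make sure the worker cannot run after the object is freed. */
static void my_transport_destroy(struct my_transport *t)
{
	/* remove a pending (not yet running) delayed work item... */
	cancel_delayed_work(&t->connect_worker);
	/* ...and wait out any instance that is already executing */
	flush_scheduled_work();
	kfree(t);
}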
Diffstat (limited to 'net')
-rw-r--r--    net/sunrpc/xprt.c    9
1 file changed, 8 insertions, 1 deletion
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 2b8789cf8db1..eca92405948f 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -569,8 +569,11 @@ void xprt_connect(struct rpc_task *task)
if (xprt->sock != NULL)
schedule_delayed_work(&xprt->sock_connect,
RPC_REESTABLISH_TIMEOUT);
- else
+ else {
schedule_work(&xprt->sock_connect);
+ if (!RPC_IS_ASYNC(task))
+ flush_scheduled_work();
+ }
}
return;
out_write:
@@ -1685,6 +1688,10 @@ xprt_shutdown(struct rpc_xprt *xprt)
rpc_wake_up(&xprt->backlog);
wake_up(&xprt->cong_wait);
del_timer_sync(&xprt->timer);
+
+ /* synchronously wait for connect worker to finish */
+ cancel_delayed_work(&xprt->sock_connect);
+ flush_scheduled_work();
}
/*