Diffstat (limited to 'include')
-rw-r--r--   include/net/sock.h   10
-rw-r--r--   include/net/tcp.h     2
2 files changed, 8 insertions, 4 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index 56df440a950b..8ab05146a447 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1160,6 +1160,10 @@ static inline void sk_set_socket(struct sock *sk, struct socket *sock)
sk->sk_socket = sock;
}
+static inline wait_queue_head_t *sk_sleep(struct sock *sk)
+{
+ return sk->sk_sleep;
+}
/* Detach socket from process context.
* Announce socket dead, detach it from wait queue and inode.
* Note that parent inode held reference count on this struct sock,
@@ -1346,8 +1350,8 @@ static inline int sk_has_allocations(const struct sock *sk)
* tp->rcv_nxt check sock_def_readable
* ... {
* schedule ...
- * if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- * wake_up_interruptible(sk->sk_sleep)
+ * if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+ * wake_up_interruptible(sk_sleep(sk))
* ...
* }
*
@@ -1368,7 +1372,7 @@ static inline int sk_has_sleeper(struct sock *sk)
* This memory barrier is paired in the sock_poll_wait.
*/
smp_mb__after_lock();
- return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
+ return sk_sleep(sk) && waitqueue_active(sk_sleep(sk));
}
/**
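For illustration, the new sk_sleep() accessor simply replaces direct sk->sk_sleep dereferences, so a wakeup callback following the pattern described in the sk_has_sleeper() comment above would now look roughly like the sketch below. my_data_ready is a hypothetical function used only as an example and is loosely modeled on the sock_def_readable() path mentioned in that comment; it is not part of this commit.

#include <net/sock.h>

/* Hypothetical data-ready callback: wake any sleeper parked on the
 * socket's wait queue.  The only difference from the older pattern is
 * that the wait queue is reached through sk_sleep(sk) rather than by
 * touching sk->sk_sleep directly.
 */
static void my_data_ready(struct sock *sk, int len)
{
	read_lock(&sk->sk_callback_lock);
	/* sk_has_sleeper() does the barrier-paired waitqueue_active() test */
	if (sk_has_sleeper(sk))
		wake_up_interruptible(sk_sleep(sk));
	read_unlock(&sk->sk_callback_lock);
}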
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 70c5159f4b36..b7d83d204a93 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -939,7 +939,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
tp->ucopy.memory = 0;
} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
- wake_up_interruptible_sync_poll(sk->sk_sleep,
+ wake_up_interruptible_sync_poll(sk_sleep(sk),
POLLIN | POLLRDNORM | POLLRDBAND);
if (!inet_csk_ack_scheduled(sk))
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
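The wake_up_interruptible_sync_poll(sk_sleep(sk), ...) call above only has an effect if a reader is already queued on the socket's wait queue. As a rough, hypothetical sketch of that sleeping side (my_wait_for_data and its simplified condition are not from this commit; the real receive path goes through helpers such as sk_wait_data()), a reader would register on the same sk_sleep(sk) queue like this:

#include <linux/sched.h>
#include <linux/wait.h>
#include <net/sock.h>

/* Hypothetical, simplified wait loop: park on sk_sleep(sk) until data
 * arrives or the timeout expires.  The prepare_to_wait() /
 * schedule_timeout() / finish_wait() sequence is what the wakeup in
 * tcp_prequeue() is aimed at.
 */
static long my_wait_for_data(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	if (skb_queue_empty(&sk->sk_receive_queue))
		timeo = schedule_timeout(timeo);
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}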