 drivers/crypto/chelsio/chtls/chtls_io.c |  2
 drivers/isdn/capi/capi.c                |  2
 drivers/nvme/host/tcp.c                 |  2
 include/linux/skbuff.h                  | 33
 net/atm/common.c                        |  2
 net/bluetooth/af_bluetooth.c            |  4
 net/caif/caif_socket.c                  |  2
 net/core/datagram.c                     |  8
 net/core/sock.c                         |  2
 net/decnet/af_decnet.c                  |  2
 net/ipv4/tcp.c                          |  4
 net/ipv4/udp.c                          |  8
 net/nfc/llcp_sock.c                     |  4
 net/phonet/socket.c                     |  4
 net/sctp/socket.c                       |  6
 net/tipc/socket.c                       |  4
 net/unix/af_unix.c                      |  6
 net/vmw_vsock/af_vsock.c                |  2
 18 files changed, 56 insertions(+), 41 deletions(-)
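Before the patch body, a note on the pattern it introduces. skb_queue_empty_lockless() reads list->next with READ_ONCE(), and the queue writers (__skb_insert(), __skb_queue_splice(), __skb_unlink()) publish their pointer updates with WRITE_ONCE(), so poll() handlers and busy-poll loops that test queue emptiness without holding the queue lock never see a torn or compiler-cached pointer. Below is a rough userspace sketch of that pairing, not kernel code: the names queue_head, queue_empty_lockless and queue_tail_insert are invented here, and C11 relaxed atomics stand in for READ_ONCE()/WRITE_ONCE().

/* Sketch only: models the READ_ONCE/WRITE_ONCE pairing of this patch in
 * userspace.  All names are illustrative, not kernel APIs.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
        _Atomic(struct node *) next;
        _Atomic(struct node *) prev;
};

struct queue_head {
        struct node node;       /* sentinel: points to itself when empty */
};

static void queue_init(struct queue_head *q)
{
        atomic_store_explicit(&q->node.next, &q->node, memory_order_relaxed);
        atomic_store_explicit(&q->node.prev, &q->node, memory_order_relaxed);
}

/* Reader side: may run without the queue lock, like a poll() handler. */
static bool queue_empty_lockless(struct queue_head *q)
{
        /* one relaxed load, the userspace analogue of READ_ONCE(list->next) */
        return atomic_load_explicit(&q->node.next, memory_order_relaxed) ==
               &q->node;
}

/* Writer side: runs under the queue lock; every pointer a lockless reader
 * may inspect is published with a single atomic store (WRITE_ONCE).
 */
static void queue_tail_insert(struct queue_head *q, struct node *n)
{
        struct node *prev = atomic_load_explicit(&q->node.prev,
                                                 memory_order_relaxed);

        atomic_store_explicit(&n->next, &q->node, memory_order_relaxed);
        atomic_store_explicit(&n->prev, prev, memory_order_relaxed);
        atomic_store_explicit(&prev->next, n, memory_order_relaxed);
        atomic_store_explicit(&q->node.prev, n, memory_order_relaxed);
}

int main(void)
{
        struct queue_head q;
        struct node a;

        queue_init(&q);
        printf("empty before insert: %d\n", (int)queue_empty_lockless(&q));
        queue_tail_insert(&q, &a);
        printf("empty after insert:  %d\n", (int)queue_empty_lockless(&q));
        return 0;
}

The lockless check can still return a momentarily stale answer; that is acceptable for poll()-style callers, which treat it as a hint and recheck under the lock before actually dequeueing. What the READ_ONCE()/WRITE_ONCE() pairing removes is the plain racy access itself: torn loads and stores, re-fetched values, and compiler reordering of the unlocked reads.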
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 0891ab829b1b..98bc5a4cd5e7 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1702,7 +1702,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                 return peekmsg(sk, msg, len, nonblock, flags);
 
         if (sk_can_busy_loop(sk) &&
-            skb_queue_empty(&sk->sk_receive_queue) &&
+            skb_queue_empty_lockless(&sk->sk_receive_queue) &&
             sk->sk_state == TCP_ESTABLISHED)
                 sk_busy_loop(sk, nonblock);
 
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index c92b405b7646..ba8619524231 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -744,7 +744,7 @@ capi_poll(struct file *file, poll_table *wait)
 
         poll_wait(file, &(cdev->recvwait), wait);
         mask = EPOLLOUT | EPOLLWRNORM;
-        if (!skb_queue_empty(&cdev->recvqueue))
+        if (!skb_queue_empty_lockless(&cdev->recvqueue))
                 mask |= EPOLLIN | EPOLLRDNORM;
         return mask;
 }
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 770dbcbc999e..7544be84ab35 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2219,7 +2219,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
         struct nvme_tcp_queue *queue = hctx->driver_data;
         struct sock *sk = queue->sock->sk;
 
-        if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue))
+        if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
                 sk_busy_loop(sk, true);
         nvme_tcp_try_recv(queue);
         return queue->nr_cqe;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a391147c03d4..64a395c7f689 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1496,6 +1496,19 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
 }
 
 /**
+ * skb_queue_empty_lockless - check if a queue is empty
+ * @list: queue head
+ *
+ * Returns true if the queue is empty, false otherwise.
+ * This variant can be used in lockless contexts.
+ */
+static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
+{
+        return READ_ONCE(list->next) == (const struct sk_buff *) list;
+}
+
+
+/**
  * skb_queue_is_last - check if skb is the last entry in the queue
  * @list: queue head
  * @skb: buffer
@@ -1848,9 +1861,11 @@ static inline void __skb_insert(struct sk_buff *newsk,
                                 struct sk_buff *prev, struct sk_buff *next,
                                 struct sk_buff_head *list)
 {
-        newsk->next = next;
-        newsk->prev = prev;
-        next->prev = prev->next = newsk;
+        /* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
+        WRITE_ONCE(newsk->next, next);
+        WRITE_ONCE(newsk->prev, prev);
+        WRITE_ONCE(next->prev, newsk);
+        WRITE_ONCE(prev->next, newsk);
         list->qlen++;
 }
 
@@ -1861,11 +1876,11 @@ static inline void __skb_queue_splice(const struct sk_buff_head *list,
         struct sk_buff *first = list->next;
         struct sk_buff *last = list->prev;
 
-        first->prev = prev;
-        prev->next = first;
+        WRITE_ONCE(first->prev, prev);
+        WRITE_ONCE(prev->next, first);
 
-        last->next = next;
-        next->prev = last;
+        WRITE_ONCE(last->next, next);
+        WRITE_ONCE(next->prev, last);
 }
 
 /**
@@ -2006,8 +2021,8 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
         next = skb->next;
         prev = skb->prev;
         skb->next = skb->prev = NULL;
-        next->prev = prev;
-        prev->next = next;
+        WRITE_ONCE(next->prev, prev);
+        WRITE_ONCE(prev->next, next);
 }
 
 /**
diff --git a/net/atm/common.c b/net/atm/common.c
index b7528e77997c..0ce530af534d 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -668,7 +668,7 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
                 mask |= EPOLLHUP;
 
         /* readable? */
-        if (!skb_queue_empty(&sk->sk_receive_queue))
+        if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                 mask |= EPOLLIN | EPOLLRDNORM;
 
         /* writable? */
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 94ddf19998c7..5f508c50649d 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -460,7 +460,7 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock,
         if (sk->sk_state == BT_LISTEN)
                 return bt_accept_poll(sk);
 
-        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+        if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                 mask |= EPOLLERR |
                         (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
@@ -470,7 +470,7 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock,
         if (sk->sk_shutdown == SHUTDOWN_MASK)
                 mask |= EPOLLHUP;
 
-        if (!skb_queue_empty(&sk->sk_receive_queue))
+        if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                 mask |= EPOLLIN | EPOLLRDNORM;
 
         if (sk->sk_state == BT_CLOSED)
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 13ea920600ae..ef14da50a981 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -953,7 +953,7 @@ static __poll_t caif_poll(struct file *file,
                 mask |= EPOLLRDHUP;
 
         /* readable? */
-        if (!skb_queue_empty(&sk->sk_receive_queue) ||
+        if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
             (sk->sk_shutdown & RCV_SHUTDOWN))
                 mask |= EPOLLIN | EPOLLRDNORM;
 
diff --git a/net/core/datagram.c b/net/core/datagram.c
index c210fc116103..da3c24ed129c 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -97,7 +97,7 @@ int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
         if (error)
                 goto out_err;
 
-        if (sk->sk_receive_queue.prev != skb)
+        if (READ_ONCE(sk->sk_receive_queue.prev) != skb)
                 goto out;
 
         /* Socket shut down? */
@@ -278,7 +278,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
                         break;
 
                 sk_busy_loop(sk, flags & MSG_DONTWAIT);
-        } while (sk->sk_receive_queue.prev != *last);
+        } while (READ_ONCE(sk->sk_receive_queue.prev) != *last);
 
         error = -EAGAIN;
 
@@ -767,7 +767,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
         mask = 0;
 
         /* exceptional events? */
-        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+        if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                 mask |= EPOLLERR |
                         (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
@@ -777,7 +777,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
                 mask |= EPOLLHUP;
 
         /* readable? */
-        if (!skb_queue_empty(&sk->sk_receive_queue))
+        if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                 mask |= EPOLLIN | EPOLLRDNORM;
 
         /* Connection-based need to check for termination and startup */
diff --git a/net/core/sock.c b/net/core/sock.c
index a515392ba84b..b8e758bcb6ad 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3600,7 +3600,7 @@ bool sk_busy_loop_end(void *p, unsigned long start_time)
 {
         struct sock *sk = p;
 
-        return !skb_queue_empty(&sk->sk_receive_queue) ||
+        return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
                sk_busy_loop_timeout(sk, start_time);
 }
 EXPORT_SYMBOL(sk_busy_loop_end);
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 0ea75286abf4..3349ea81f901 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1205,7 +1205,7 @@ static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table *wai
         struct dn_scp *scp = DN_SK(sk);
         __poll_t mask = datagram_poll(file, sock, wait);
 
-        if (!skb_queue_empty(&scp->other_receive_queue))
+        if (!skb_queue_empty_lockless(&scp->other_receive_queue))
                 mask |= EPOLLRDBAND;
 
         return mask;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 42187a3b82f4..d8876f0e9672 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -584,7 +584,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
         }
         /* This barrier is coupled with smp_wmb() in tcp_reset() */
         smp_rmb();
-        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+        if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                 mask |= EPOLLERR;
 
         return mask;
@@ -1964,7 +1964,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
         if (unlikely(flags & MSG_ERRQUEUE))
                 return inet_recv_error(sk, msg, len, addr_len);
 
-        if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
+        if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) &&
             (sk->sk_state == TCP_ESTABLISHED))
                 sk_busy_loop(sk, nonblock);
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 14bc654b6842..345a3d43f5a6 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1577,7 +1577,7 @@ static int first_packet_length(struct sock *sk)
 
         spin_lock_bh(&rcvq->lock);
         skb = __first_packet_length(sk, rcvq, &total);
-        if (!skb && !skb_queue_empty(sk_queue)) {
+        if (!skb && !skb_queue_empty_lockless(sk_queue)) {
                 spin_lock(&sk_queue->lock);
                 skb_queue_splice_tail_init(sk_queue, rcvq);
                 spin_unlock(&sk_queue->lock);
@@ -1650,7 +1650,7 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
                                 return skb;
                         }
 
-                        if (skb_queue_empty(sk_queue)) {
+                        if (skb_queue_empty_lockless(sk_queue)) {
                                 spin_unlock_bh(&queue->lock);
                                 goto busy_check;
                         }
@@ -1676,7 +1676,7 @@ busy_check:
                                 break;
 
                         sk_busy_loop(sk, flags & MSG_DONTWAIT);
-                } while (!skb_queue_empty(sk_queue));
+                } while (!skb_queue_empty_lockless(sk_queue));
 
                 /* sk_queue is empty, reader_queue may contain peeked packets */
         } while (timeo &&
@@ -2712,7 +2712,7 @@ __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
         __poll_t mask = datagram_poll(file, sock, wait);
         struct sock *sk = sock->sk;
 
-        if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
+        if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
                 mask |= EPOLLIN | EPOLLRDNORM;
 
         /* Check for false positives due to checksum errors */
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index ccdd790e163a..28604414dec1 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -554,11 +554,11 @@ static __poll_t llcp_sock_poll(struct file *file, struct socket *sock,
         if (sk->sk_state == LLCP_LISTEN)
                 return llcp_accept_poll(sk);
 
-        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+        if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                 mask |= EPOLLERR |
                         (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
-        if (!skb_queue_empty(&sk->sk_receive_queue))
+        if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                 mask |= EPOLLIN | EPOLLRDNORM;
 
         if (sk->sk_state == LLCP_CLOSED)
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 96ea9f254ae9..76d499f6af9a 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -338,9 +338,9 @@ static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
 
         if (sk->sk_state == TCP_CLOSE)
                 return EPOLLERR;
-        if (!skb_queue_empty(&sk->sk_receive_queue))
+        if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                 mask |= EPOLLIN | EPOLLRDNORM;
-        if (!skb_queue_empty(&pn->ctrlreq_queue))
+        if (!skb_queue_empty_lockless(&pn->ctrlreq_queue))
                 mask |= EPOLLPRI;
         if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
                 return EPOLLHUP;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 5ca0ec0e823c..ca81e06df165 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -8476,7 +8476,7 @@ __poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
         mask = 0;
 
         /* Is there any exceptional events? */
-        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+        if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                 mask |= EPOLLERR |
                         (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
         if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -8485,7 +8485,7 @@ __poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
                 mask |= EPOLLHUP;
 
         /* Is it readable? Reconsider this code with TCP-style support. */
-        if (!skb_queue_empty(&sk->sk_receive_queue))
+        if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                 mask |= EPOLLIN | EPOLLRDNORM;
 
         /* The association is either gone or not ready. */
@@ -8871,7 +8871,7 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
                 if (sk_can_busy_loop(sk)) {
                         sk_busy_loop(sk, noblock);
 
-                        if (!skb_queue_empty(&sk->sk_receive_queue))
+                        if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                                 continue;
                 }
 
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index f8bbc4aab213..4b92b196cfa6 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -740,7 +740,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
                 /* fall through */
         case TIPC_LISTEN:
         case TIPC_CONNECTING:
-                if (!skb_queue_empty(&sk->sk_receive_queue))
+                if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                         revents |= EPOLLIN | EPOLLRDNORM;
                 break;
         case TIPC_OPEN:
@@ -748,7 +748,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
                         revents |= EPOLLOUT;
                 if (!tipc_sk_type_connectionless(sk))
                         break;
-                if (skb_queue_empty(&sk->sk_receive_queue))
+                if (skb_queue_empty_lockless(&sk->sk_receive_queue))
                         break;
                 revents |= EPOLLIN | EPOLLRDNORM;
                 break;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 67e87db5877f..0d8da809bea2 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2599,7 +2599,7 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
                 mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
 
         /* readable? */
-        if (!skb_queue_empty(&sk->sk_receive_queue))
+        if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                 mask |= EPOLLIN | EPOLLRDNORM;
 
         /* Connection-based need to check for termination and startup */
@@ -2628,7 +2628,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
         mask = 0;
 
         /* exceptional events? */
-        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+        if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                 mask |= EPOLLERR |
                         (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
@@ -2638,7 +2638,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
                 mask |= EPOLLHUP;
 
         /* readable? */
-        if (!skb_queue_empty(&sk->sk_receive_queue))
+        if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                 mask |= EPOLLIN | EPOLLRDNORM;
 
         /* Connection-based need to check for termination and startup */
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 2ab43b2bba31..582a3e4dfce2 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -870,7 +870,7 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
                  * the queue and write as long as the socket isn't shutdown for
                  * sending.
                  */
-                if (!skb_queue_empty(&sk->sk_receive_queue) ||
+                if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
                     (sk->sk_shutdown & RCV_SHUTDOWN)) {
                         mask |= EPOLLIN | EPOLLRDNORM;
                 }
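The poll() and recvmsg() conversions above share one more property: the emptiness test often sits in a loop (sk_busy_loop_end(), the recvmsg() busy-poll paths), so it must genuinely re-read the queue head on every pass rather than let the compiler cache the first load. Below is a minimal userspace sketch of such a busy-poll loop, again with a C11 relaxed atomic standing in for READ_ONCE(); toy_queue, toy_busy_loop_end and producer are invented names loosely modelled on sk_busy_loop_end(), not kernel code.

/* Sketch only: a busy-poll loop that must observe a concurrent enqueue.
 * The atomic load is the userspace stand-in for READ_ONCE().
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
        _Atomic int len;        /* stands in for the list emptiness test */
};

/* analogue of sk_busy_loop_end(): re-reads the queue state on every call */
static bool toy_busy_loop_end(struct toy_queue *q)
{
        return atomic_load_explicit(&q->len, memory_order_relaxed) != 0;
}

static void *producer(void *arg)
{
        struct toy_queue *q = arg;
        volatile unsigned long i;

        for (i = 0; i < 10000000UL; i++)        /* crude delay: simulate work */
                ;
        atomic_fetch_add_explicit(&q->len, 1, memory_order_relaxed);
        return NULL;
}

int main(void)
{
        struct toy_queue q = { .len = 0 };
        pthread_t thr;
        unsigned long spins = 0;

        pthread_create(&thr, NULL, producer, &q);
        while (!toy_busy_loop_end(&q))          /* busy-poll until data shows up */
                spins++;
        pthread_join(thr, NULL);
        printf("saw data after %lu spins\n", spins);
        return 0;
}

Built with something like cc -pthread busy_poll_sketch.c (the file name is arbitrary). If len were a plain non-atomic int, the compiler would be free to hoist the read out of the loop and spin forever; forcing a fresh, untorn load on each iteration is what READ_ONCE() provides and what switching these call sites to skb_queue_empty_lockless() relies on.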