author     Linus Torvalds <torvalds@linux-foundation.org>  2021-03-21 12:25:54 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-03-21 12:25:54 -0700
commit     2c41fab1c60b02626c8153a1806a7a1e5d62aaf1
tree       f13c693ef22d4fd7b181846c0a45f0a4cea9943e /fs
parent     1d4345eb51a185b2ecd11834dbddca79cb922eb5
parent     0031275d119efe16711cd93519b595e6f9b4b330
Merge tag 'io_uring-5.12-2021-03-21' of git://git.kernel.dk/linux-block
Pull io_uring followup fixes from Jens Axboe:
- The SIGSTOP change from Eric, so we properly ignore that for
PF_IO_WORKER threads.
- Disallow sending signals to PF_IO_WORKER threads in general; we're
not interested in having them funnel back to the io_uring owning
task.
- Stable fix from Stefan, ensuring we properly break links for short
send/sendmsg and recv/recvmsg if MSG_WAITALL is set (illustrated by the
userspace sketch after the shortlog below).
- Catch and loop when needing to run task_work before a PF_IO_WORKER
thread goes to sleep (see the standalone loop sketch after the diff).
* tag 'io_uring-5.12-2021-03-21' of git://git.kernel.dk/linux-block:
io_uring: call req_set_fail_links() on short send[msg]()/recv[msg]() with MSG_WAITALL
io-wq: ensure task is running before processing task_work
signal: don't allow STOP on PF_IO_WORKER threads
signal: don't allow sending any signals to PF_IO_WORKER threads
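The MSG_WAITALL change is easiest to see from userspace. Below is a hedged sketch (not part of this merge; the helper name, socket fd and buffers are made up for illustration) using liburing: a recv issued with MSG_WAITALL is linked via IOSQE_IO_LINK to a follow-up send. With the fix, a short or truncated recv fails the request, so the linked send completes with -ECANCELED instead of running on partial data.

#include <liburing.h>
#include <sys/socket.h>
#include <stdio.h>

/* Hypothetical helper: receive exactly inlen bytes, then send a reply,
 * as one linked pair of SQEs on an already-connected socket. */
static int linked_recv_then_send(int fd, void *inbuf, size_t inlen,
				 const void *outbuf, size_t outlen)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int i;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return -1;

	/* First SQE: MSG_WAITALL recv; a short completion now fails it. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_recv(sqe, fd, inbuf, inlen, MSG_WAITALL);
	io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);

	/* Second SQE: only runs if the recv completed in full. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_send(sqe, fd, outbuf, outlen, 0);

	io_uring_submit(&ring);

	for (i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe) < 0)
			break;
		/* On a short recv, the send's res is -ECANCELED. */
		printf("cqe %d: res=%d\n", i, cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}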
Diffstat (limited to 'fs')
-rw-r--r--  fs/io-wq.c    |  8
-rw-r--r--  fs/io_uring.c | 24
2 files changed, 26 insertions, 6 deletions
diff --git a/fs/io-wq.c b/fs/io-wq.c
index e05f996d088f..3dc10bfd8c3b 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -386,13 +386,16 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
 	return NULL;
 }
 
-static void io_flush_signals(void)
+static bool io_flush_signals(void)
 {
 	if (unlikely(test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))) {
+		__set_current_state(TASK_RUNNING);
 		if (current->task_works)
 			task_work_run();
 		clear_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL);
+		return true;
 	}
+	return false;
 }
 
 static void io_assign_current_work(struct io_worker *worker,
@@ -499,7 +502,8 @@ loop:
 		}
 		__io_worker_idle(wqe, worker);
 		raw_spin_unlock_irq(&wqe->lock);
-		io_flush_signals();
+		if (io_flush_signals())
+			continue;
 		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
 		if (try_to_freeze() || ret)
 			continue;
diff --git a/fs/io_uring.c b/fs/io_uring.c
index ea2d3e120555..543551d70327 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4386,6 +4386,7 @@ static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_async_msghdr iomsg, *kmsg;
 	struct socket *sock;
 	unsigned flags;
+	int min_ret = 0;
 	int ret;
 
 	sock = sock_from_file(req->file);
@@ -4406,6 +4407,9 @@ static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 	else if (issue_flags & IO_URING_F_NONBLOCK)
 		flags |= MSG_DONTWAIT;
 
+	if (flags & MSG_WAITALL)
+		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+
 	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
 	if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
 		return io_setup_async_msg(req, kmsg);
@@ -4416,7 +4420,7 @@
 	if (kmsg->free_iov)
 		kfree(kmsg->free_iov);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
-	if (ret < 0)
+	if (ret < min_ret)
 		req_set_fail_links(req);
 	__io_req_complete(req, issue_flags, ret, 0);
 	return 0;
@@ -4429,6 +4433,7 @@ static int io_send(struct io_kiocb *req, unsigned int issue_flags)
 	struct iovec iov;
 	struct socket *sock;
 	unsigned flags;
+	int min_ret = 0;
 	int ret;
 
 	sock = sock_from_file(req->file);
@@ -4450,6 +4455,9 @@ static int io_send(struct io_kiocb *req, unsigned int issue_flags)
 	else if (issue_flags & IO_URING_F_NONBLOCK)
 		flags |= MSG_DONTWAIT;
 
+	if (flags & MSG_WAITALL)
+		min_ret = iov_iter_count(&msg.msg_iter);
+
 	msg.msg_flags = flags;
 	ret = sock_sendmsg(sock, &msg);
 	if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
@@ -4457,7 +4465,7 @@
 	if (ret == -ERESTARTSYS)
 		ret = -EINTR;
 
-	if (ret < 0)
+	if (ret < min_ret)
 		req_set_fail_links(req);
 	__io_req_complete(req, issue_flags, ret, 0);
 	return 0;
@@ -4609,6 +4617,7 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 	struct socket *sock;
 	struct io_buffer *kbuf;
 	unsigned flags;
+	int min_ret = 0;
 	int ret, cflags = 0;
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
@@ -4640,6 +4649,9 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 	else if (force_nonblock)
 		flags |= MSG_DONTWAIT;
 
+	if (flags & MSG_WAITALL)
+		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+
 	ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
 					kmsg->uaddr, flags);
 	if (force_nonblock && ret == -EAGAIN)
@@ -4653,7 +4665,7 @@
 	if (kmsg->free_iov)
 		kfree(kmsg->free_iov);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
-	if (ret < 0)
+	if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
 		req_set_fail_links(req);
 	__io_req_complete(req, issue_flags, ret, cflags);
 	return 0;
@@ -4668,6 +4680,7 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 	struct socket *sock;
 	struct iovec iov;
 	unsigned flags;
+	int min_ret = 0;
 	int ret, cflags = 0;
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
@@ -4699,6 +4712,9 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 	else if (force_nonblock)
 		flags |= MSG_DONTWAIT;
 
+	if (flags & MSG_WAITALL)
+		min_ret = iov_iter_count(&msg.msg_iter);
+
 	ret = sock_recvmsg(sock, &msg, flags);
 	if (force_nonblock && ret == -EAGAIN)
 		return -EAGAIN;
@@ -4707,7 +4723,7 @@
 out_free:
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		cflags = io_put_recv_kbuf(req);
-	if (ret < 0)
+	if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
 		req_set_fail_links(req);
 	__io_req_complete(req, issue_flags, ret, cflags);
 	return 0;
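For the io-wq change above, the subtlety is ordering: task_work_run() can itself queue new work, so when io_flush_signals() reports that it ran something, the worker must go around its idle loop again rather than sleep. A minimal, self-contained sketch of that pattern (plain C with made-up names; not kernel code) is:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the worker's state: queued work and pending callbacks. */
static int  pending_work;
static bool callbacks_pending;

/* Mirrors the role of io_flush_signals(): returns true if any callbacks
 * were run, because running them may have queued new work. */
static bool flush_callbacks(void)
{
	if (!callbacks_pending)
		return false;
	callbacks_pending = false;
	pending_work++;		/* a callback queued one more work item */
	return true;
}

static void worker_loop(void)
{
	int idle_passes = 0;

	while (idle_passes < 2) {
		while (pending_work) {
			pending_work--;
			printf("processed one work item\n");
		}

		/* If callbacks ran, loop again instead of sleeping: they
		 * may have queued work we would otherwise sleep on. */
		if (flush_callbacks())
			continue;

		printf("nothing to do, would sleep here\n");
		idle_passes++;
	}
}

int main(void)
{
	pending_work = 1;
	callbacks_pending = true;
	worker_loop();
	return 0;
}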