From 904fbcb115c85090484dfdffaf7f461d96fe8e53 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 8 May 2020 21:27:24 -0600 Subject: io_uring: remove 'fd is io_uring' from close path The attempt at protecting us from closing the ring itself wasn't really complete, and we actually don't need it. The referencing of requests themselves, and the references they hold on the ring, ensures that the lifetime of the ring is sane. With the check removed, we can also remove the need to have the close operation fget() the file. Reported-by: Al Viro Signed-off-by: Jens Axboe --- fs/io_uring.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 979d9f977409..9fd1257c8404 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -786,7 +786,6 @@ static const struct io_op_def io_op_defs[] = { .needs_fs = 1, }, [IORING_OP_CLOSE] = { - .needs_file = 1, .file_table = 1, }, [IORING_OP_FILES_UPDATE] = { @@ -3399,10 +3398,6 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return -EBADF; req->close.fd = READ_ONCE(sqe->fd); - if (req->file->f_op == &io_uring_fops || - req->close.fd == req->ctx->ring_fd) - return -EBADF; - return 0; } @@ -3434,8 +3429,11 @@ static int io_close(struct io_kiocb *req, bool force_nonblock) req->close.put_file = NULL; ret = __close_fd_get_file(req->close.fd, &req->close.put_file); - if (ret < 0) + if (ret < 0) { + if (ret == -ENOENT) + ret = -EBADF; return ret; + } /* if the file has a flush method, be safe and punt to async */ if (req->close.put_file->f_op->flush && force_nonblock) { -- cgit v1.2.3 From 7d01bd745a8f52ff2883f661235139ab6e7d23e6 Mon Sep 17 00:00:00 2001 From: Xiaoguang Wang Date: Fri, 8 May 2020 21:19:30 +0800 Subject: io_uring: remove obsolete 'state' parameter The "struct io_submit_state *state" parameter is not used, remove it.
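From userspace, the close path changed above is reached through IORING_OP_CLOSE. As a minimal sketch, assuming liburing is available (it is not part of this series), an async close of an ordinary fd looks like this; the ring fd itself is now protected by request/ctx references rather than the removed f_op check:

  /* close_example.c: async close via IORING_OP_CLOSE (liburing assumed) */
  #include <fcntl.h>
  #include <liburing.h>
  #include <stdio.h>

  int main(void)
  {
      struct io_uring ring;
      struct io_uring_sqe *sqe;
      struct io_uring_cqe *cqe;
      int fd;

      if (io_uring_queue_init(8, &ring, 0) < 0)
          return 1;
      fd = open("/dev/null", O_RDONLY);
      if (fd < 0)
          return 1;

      /* queue an async close of 'fd'; the kernel drops the fd for us */
      sqe = io_uring_get_sqe(&ring);
      io_uring_prep_close(sqe, fd);
      io_uring_submit(&ring);

      if (!io_uring_wait_cqe(&ring, &cqe)) {
          printf("close res=%d\n", cqe->res);
          io_uring_cqe_seen(&ring, cqe);
      }
      io_uring_queue_exit(&ring);
      return 0;
  }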
Signed-off-by: Xiaoguang Wang Signed-off-by: Jens Axboe --- fs/io_uring.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 9fd1257c8404..94870d63b16a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -5629,7 +5629,7 @@ static inline void io_queue_link_head(struct io_kiocb *req) } static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_submit_state *state, struct io_kiocb **link) + struct io_kiocb **link) { struct io_ring_ctx *ctx = req->ctx; int ret; @@ -5893,7 +5893,7 @@ fail_req: trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data, true, async); - err = io_submit_sqe(req, sqe, statep, &link); + err = io_submit_sqe(req, sqe, &link); if (err) goto fail_req; } -- cgit v1.2.3 From 8469508951d4a324b2df3b5bad75e99922c3b798 Mon Sep 17 00:00:00 2001 From: Xiaoming Ni Date: Mon, 11 May 2020 19:25:43 +0800 Subject: io_uring: remove duplicate semicolon at the end of line Remove duplicate semicolon at the end of line in io_file_from_index() Signed-off-by: Xiaoming Ni Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 94870d63b16a..d2e37215d05a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -5364,7 +5364,7 @@ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx, struct fixed_file_table *table; table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT]; - return table->files[index & IORING_FILE_TABLE_MASK];; + return table->files[index & IORING_FILE_TABLE_MASK]; } static int io_file_get(struct io_submit_state *state, struct io_kiocb *req, -- cgit v1.2.3 From 0f158b4cf20e7983d5b33878a6aad118cfac4f05 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 14 May 2020 17:18:39 -0600 Subject: io_uring: name sq thread and ref completions We used to have three completions, now we just have two. With the two, let's not allocate them dynamically, just embed then in the ctx and name them appropriately. Signed-off-by: Jens Axboe --- fs/io_uring.c | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index d2e37215d05a..414e940323d4 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -279,8 +279,8 @@ struct io_ring_ctx { const struct cred *creds; - /* 0 is for ctx quiesce/reinit/free, 1 is for sqo_thread started */ - struct completion *completions; + struct completion ref_comp; + struct completion sq_thread_comp; /* if all else fails... */ struct io_kiocb *fallback_req; @@ -883,7 +883,7 @@ static void io_ring_ctx_ref_free(struct percpu_ref *ref) { struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs); - complete(&ctx->completions[0]); + complete(&ctx->ref_comp); } static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) @@ -899,10 +899,6 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) if (!ctx->fallback_req) goto err; - ctx->completions = kmalloc(2 * sizeof(struct completion), GFP_KERNEL); - if (!ctx->completions) - goto err; - /* * Use 5 bits less than the max cq entries, that should give us around * 32 entries per hash list if totally full and uniformly spread. 
@@ -925,8 +921,8 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) ctx->flags = p->flags; init_waitqueue_head(&ctx->cq_wait); INIT_LIST_HEAD(&ctx->cq_overflow_list); - init_completion(&ctx->completions[0]); - init_completion(&ctx->completions[1]); + init_completion(&ctx->ref_comp); + init_completion(&ctx->sq_thread_comp); idr_init(&ctx->io_buffer_idr); idr_init(&ctx->personality_idr); mutex_init(&ctx->uring_lock); @@ -942,7 +938,6 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) err: if (ctx->fallback_req) kmem_cache_free(req_cachep, ctx->fallback_req); - kfree(ctx->completions); kfree(ctx->cancel_hash); kfree(ctx); return NULL; @@ -5933,7 +5928,7 @@ static int io_sq_thread(void *data) unsigned long timeout; int ret = 0; - complete(&ctx->completions[1]); + complete(&ctx->sq_thread_comp); old_fs = get_fs(); set_fs(USER_DS); @@ -6212,7 +6207,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx) static void io_sq_thread_stop(struct io_ring_ctx *ctx) { if (ctx->sqo_thread) { - wait_for_completion(&ctx->completions[1]); + wait_for_completion(&ctx->sq_thread_comp); /* * The park is a bit of a work-around, without it we get * warning spews on shutdown with SQPOLL set and affinity @@ -7241,7 +7236,6 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx) ring_pages(ctx->sq_entries, ctx->cq_entries)); free_uid(ctx->user); put_cred(ctx->creds); - kfree(ctx->completions); kfree(ctx->cancel_hash); kmem_cache_free(req_cachep, ctx->fallback_req); kfree(ctx); @@ -7293,7 +7287,7 @@ static void io_ring_exit_work(struct work_struct *work) if (ctx->rings) io_cqring_overflow_flush(ctx, true); - wait_for_completion(&ctx->completions[0]); + wait_for_completion(&ctx->ref_comp); io_ring_ctx_free(ctx); } @@ -7992,7 +7986,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, * after we've killed the percpu ref. */ mutex_unlock(&ctx->uring_lock); - ret = wait_for_completion_interruptible(&ctx->completions[0]); + ret = wait_for_completion_interruptible(&ctx->ref_comp); mutex_lock(&ctx->uring_lock); if (ret) { percpu_ref_resurrect(&ctx->refs); @@ -8069,7 +8063,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, /* bring the ctx back to life */ percpu_ref_reinit(&ctx->refs); out: - reinit_completion(&ctx->completions[0]); + reinit_completion(&ctx->ref_comp); } return ret; } -- cgit v1.2.3 From 4a38aed2a0a729ccecd84dca5b76d827b9e1294d Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 14 May 2020 17:21:15 -0600 Subject: io_uring: batch reap of dead file registrations We currently embed and queue a work item per fixed_file_ref_node that we update, but if the workload does a lot of these, then the associated kworker-events overhead can become quite noticeable. Since we rarely need to wait on these, batch them at 1 second intervals instead. If we do need to wait for them, we just flush the pending delayed work. 
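The workload that benefits is one doing many fixed-file updates, each of which retires a fixed_file_ref_node. A hedged userspace sketch of such a workload, assuming liburing is available:

  /* files_update_example.c: many fixed-file updates (liburing assumed) */
  #include <fcntl.h>
  #include <liburing.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
      struct io_uring ring;
      int fds[4], i, ret;

      if (io_uring_queue_init(8, &ring, 0) < 0)
          return 1;
      for (i = 0; i < 4; i++)
          fds[i] = open("/dev/null", O_RDONLY);

      /* register an initial fixed file table */
      ret = io_uring_register_files(&ring, fds, 4);
      if (ret) {
          fprintf(stderr, "register_files: %d\n", ret);
          return 1;
      }

      /*
       * Re-point slot 0 repeatedly. Every update retires the previous
       * ref node; with this patch the resulting file puts are reaped
       * in batches instead of one work item per node.
       */
      for (i = 0; i < 128; i++) {
          int newfd = open("/dev/zero", O_RDONLY);

          if (newfd < 0)
              break;
          ret = io_uring_register_files_update(&ring, 0, &newfd, 1);
          close(newfd);   /* the table holds its own reference */
          if (ret < 0)
              break;
      }

      io_uring_queue_exit(&ring);
      return 0;
  }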
Signed-off-by: Jens Axboe --- fs/io_uring.c | 54 ++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 42 insertions(+), 12 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 414e940323d4..99dbd43442f2 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -191,7 +191,7 @@ struct fixed_file_ref_node { struct list_head node; struct list_head file_list; struct fixed_file_data *file_data; - struct work_struct work; + struct llist_node llist; }; struct fixed_file_data { @@ -327,6 +327,9 @@ struct io_ring_ctx { struct list_head inflight_list; } ____cacheline_aligned_in_smp; + struct delayed_work file_put_work; + struct llist_head file_put_llist; + struct work_struct exit_work; }; @@ -879,6 +882,8 @@ struct sock *io_uring_get_socket(struct file *file) } EXPORT_SYMBOL(io_uring_get_socket); +static void io_file_put_work(struct work_struct *work); + static void io_ring_ctx_ref_free(struct percpu_ref *ref) { struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs); @@ -934,6 +939,8 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) init_waitqueue_head(&ctx->inflight_wait); spin_lock_init(&ctx->inflight_lock); INIT_LIST_HEAD(&ctx->inflight_list); + INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work); + init_llist_head(&ctx->file_put_llist); return ctx; err: if (ctx->fallback_req) @@ -6190,6 +6197,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx) percpu_ref_kill(&data->refs); /* wait for all refs nodes to complete */ + flush_delayed_work(&ctx->file_put_work); wait_for_completion(&data->done); __io_sqe_files_unregister(ctx); @@ -6420,18 +6428,13 @@ struct io_file_put { struct file *file; }; -static void io_file_put_work(struct work_struct *work) +static void __io_file_put_work(struct fixed_file_ref_node *ref_node) { - struct fixed_file_ref_node *ref_node; - struct fixed_file_data *file_data; - struct io_ring_ctx *ctx; + struct fixed_file_data *file_data = ref_node->file_data; + struct io_ring_ctx *ctx = file_data->ctx; struct io_file_put *pfile, *tmp; unsigned long flags; - ref_node = container_of(work, struct fixed_file_ref_node, work); - file_data = ref_node->file_data; - ctx = file_data->ctx; - list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) { list_del_init(&pfile->list); io_ring_file_put(ctx, pfile->file); @@ -6447,13 +6450,42 @@ static void io_file_put_work(struct work_struct *work) percpu_ref_put(&file_data->refs); } +static void io_file_put_work(struct work_struct *work) +{ + struct io_ring_ctx *ctx; + struct llist_node *node; + + ctx = container_of(work, struct io_ring_ctx, file_put_work.work); + node = llist_del_all(&ctx->file_put_llist); + + while (node) { + struct fixed_file_ref_node *ref_node; + struct llist_node *next = node->next; + + ref_node = llist_entry(node, struct fixed_file_ref_node, llist); + __io_file_put_work(ref_node); + node = next; + } +} + static void io_file_data_ref_zero(struct percpu_ref *ref) { struct fixed_file_ref_node *ref_node; + struct io_ring_ctx *ctx; + bool first_add; + int delay = HZ; ref_node = container_of(ref, struct fixed_file_ref_node, refs); + ctx = ref_node->file_data->ctx; - queue_work(system_wq, &ref_node->work); + if (percpu_ref_is_dying(&ctx->file_data->refs)) + delay = 0; + + first_add = llist_add(&ref_node->llist, &ctx->file_put_llist); + if (!delay) + mod_delayed_work(system_wq, &ctx->file_put_work, 0); + else if (first_add) + queue_delayed_work(system_wq, &ctx->file_put_work, delay); } static struct fixed_file_ref_node *alloc_fixed_file_ref_node( @@ 
-6472,10 +6504,8 @@ static struct fixed_file_ref_node *alloc_fixed_file_ref_node( } INIT_LIST_HEAD(&ref_node->node); INIT_LIST_HEAD(&ref_node->file_list); - INIT_WORK(&ref_node->work, io_file_put_work); ref_node->file_data = ctx->file_data; return ref_node; - } static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node) -- cgit v1.2.3 From 18bceab101adde8f38de76016bc77f3f25cf22f4 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 15 May 2020 11:56:54 -0600 Subject: io_uring: allow POLL_ADD with double poll_wait() users Some file descriptors use separate waitqueues for their f_ops->poll() handler, most commonly one for read and one for write. The io_uring poll implementation doesn't work with that, as the 2nd poll_wait() call will cause the io_uring poll request to -EINVAL. This affects (at least) tty devices and /dev/random as well. This is a big problem for event loops where some file descriptors work, and others don't. With this fix, io_uring handles multiple waitqueues. Signed-off-by: Jens Axboe --- fs/io_uring.c | 218 +++++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 146 insertions(+), 72 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 99dbd43442f2..982066844c5a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -4099,27 +4099,6 @@ struct io_poll_table { int error; }; -static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, - struct wait_queue_head *head) -{ - if (unlikely(poll->head)) { - pt->error = -EINVAL; - return; - } - - pt->error = 0; - poll->head = head; - add_wait_queue(head, &poll->wait); -} - -static void io_async_queue_proc(struct file *file, struct wait_queue_head *head, - struct poll_table_struct *p) -{ - struct io_poll_table *pt = container_of(p, struct io_poll_table, pt); - - __io_queue_proc(&pt->req->apoll->poll, pt, head); -} - static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, __poll_t mask, task_work_func_t func) { @@ -4171,6 +4150,144 @@ static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll) return false; } +static void io_poll_remove_double(struct io_kiocb *req) +{ + struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io; + + lockdep_assert_held(&req->ctx->completion_lock); + + if (poll && poll->head) { + struct wait_queue_head *head = poll->head; + + spin_lock(&head->lock); + list_del_init(&poll->wait.entry); + if (poll->wait.private) + refcount_dec(&req->refs); + poll->head = NULL; + spin_unlock(&head->lock); + } +} + +static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error) +{ + struct io_ring_ctx *ctx = req->ctx; + + io_poll_remove_double(req); + req->poll.done = true; + io_cqring_fill_event(req, error ? 
error : mangle_poll(mask)); + io_commit_cqring(ctx); +} + +static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt) +{ + struct io_ring_ctx *ctx = req->ctx; + + if (io_poll_rewait(req, &req->poll)) { + spin_unlock_irq(&ctx->completion_lock); + return; + } + + hash_del(&req->hash_node); + io_poll_complete(req, req->result, 0); + req->flags |= REQ_F_COMP_LOCKED; + io_put_req_find_next(req, nxt); + spin_unlock_irq(&ctx->completion_lock); + + io_cqring_ev_posted(ctx); +} + +static void io_poll_task_func(struct callback_head *cb) +{ + struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); + struct io_kiocb *nxt = NULL; + + io_poll_task_handler(req, &nxt); + if (nxt) { + struct io_ring_ctx *ctx = nxt->ctx; + + mutex_lock(&ctx->uring_lock); + __io_queue_sqe(nxt, NULL); + mutex_unlock(&ctx->uring_lock); + } +} + +static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode, + int sync, void *key) +{ + struct io_kiocb *req = wait->private; + struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io; + __poll_t mask = key_to_poll(key); + + /* for instances that support it check for an event match first: */ + if (mask && !(mask & poll->events)) + return 0; + + if (req->poll.head) { + bool done; + + spin_lock(&req->poll.head->lock); + done = list_empty(&req->poll.wait.entry); + if (!done) + list_del_init(&req->poll.wait.entry); + spin_unlock(&req->poll.head->lock); + if (!done) + __io_async_wake(req, poll, mask, io_poll_task_func); + } + refcount_dec(&req->refs); + return 1; +} + +static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events, + wait_queue_func_t wake_func) +{ + poll->head = NULL; + poll->done = false; + poll->canceled = false; + poll->events = events; + INIT_LIST_HEAD(&poll->wait.entry); + init_waitqueue_func_entry(&poll->wait, wake_func); +} + +static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, + struct wait_queue_head *head) +{ + struct io_kiocb *req = pt->req; + + /* + * If poll->head is already set, it's because the file being polled + * uses multiple waitqueues for poll handling (eg one for read, one + * for write). Setup a separate io_poll_iocb if this happens. 
+ */ + if (unlikely(poll->head)) { + /* already have a 2nd entry, fail a third attempt */ + if (req->io) { + pt->error = -EINVAL; + return; + } + poll = kmalloc(sizeof(*poll), GFP_ATOMIC); + if (!poll) { + pt->error = -ENOMEM; + return; + } + io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake); + refcount_inc(&req->refs); + poll->wait.private = req; + req->io = (void *) poll; + } + + pt->error = 0; + poll->head = head; + add_wait_queue(head, &poll->wait); +} + +static void io_async_queue_proc(struct file *file, struct wait_queue_head *head, + struct poll_table_struct *p) +{ + struct io_poll_table *pt = container_of(p, struct io_poll_table, pt); + + __io_queue_proc(&pt->req->apoll->poll, pt, head); +} + static void io_async_task_func(struct callback_head *cb) { struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); @@ -4246,18 +4363,13 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req, bool cancel = false; poll->file = req->file; - poll->head = NULL; - poll->done = poll->canceled = false; - poll->events = mask; + io_init_poll_iocb(poll, mask, wake_func); + poll->wait.private = req; ipt->pt._key = mask; ipt->req = req; ipt->error = -EINVAL; - INIT_LIST_HEAD(&poll->wait.entry); - init_waitqueue_func_entry(&poll->wait, wake_func); - poll->wait.private = req; - mask = vfs_poll(req->file, &ipt->pt) & poll->events; spin_lock_irq(&ctx->completion_lock); @@ -4288,6 +4400,7 @@ static bool io_arm_poll_handler(struct io_kiocb *req) struct async_poll *apoll; struct io_poll_table ipt; __poll_t mask, ret; + bool had_io; if (!req->file || !file_can_poll(req->file)) return false; @@ -4302,6 +4415,7 @@ static bool io_arm_poll_handler(struct io_kiocb *req) req->flags |= REQ_F_POLLED; memcpy(&apoll->work, &req->work, sizeof(req->work)); + had_io = req->io != NULL; get_task_struct(current); req->task = current; @@ -4321,7 +4435,9 @@ static bool io_arm_poll_handler(struct io_kiocb *req) io_async_wake); if (ret) { ipt.error = 0; - apoll->poll.done = true; + /* only remove double add if we did it here */ + if (!had_io) + io_poll_remove_double(req); spin_unlock_irq(&ctx->completion_lock); memcpy(&req->work, &apoll->work, sizeof(req->work)); kfree(apoll); @@ -4354,6 +4470,7 @@ static bool io_poll_remove_one(struct io_kiocb *req) bool do_complete; if (req->opcode == IORING_OP_POLL_ADD) { + io_poll_remove_double(req); do_complete = __io_poll_remove_one(req, &req->poll); } else { apoll = req->apoll; @@ -4455,49 +4572,6 @@ static int io_poll_remove(struct io_kiocb *req) return 0; } -static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error) -{ - struct io_ring_ctx *ctx = req->ctx; - - req->poll.done = true; - io_cqring_fill_event(req, error ? 
error : mangle_poll(mask)); - io_commit_cqring(ctx); -} - -static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt) -{ - struct io_ring_ctx *ctx = req->ctx; - struct io_poll_iocb *poll = &req->poll; - - if (io_poll_rewait(req, poll)) { - spin_unlock_irq(&ctx->completion_lock); - return; - } - - hash_del(&req->hash_node); - io_poll_complete(req, req->result, 0); - req->flags |= REQ_F_COMP_LOCKED; - io_put_req_find_next(req, nxt); - spin_unlock_irq(&ctx->completion_lock); - - io_cqring_ev_posted(ctx); -} - -static void io_poll_task_func(struct callback_head *cb) -{ - struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); - struct io_kiocb *nxt = NULL; - - io_poll_task_handler(req, &nxt); - if (nxt) { - struct io_ring_ctx *ctx = nxt->ctx; - - mutex_lock(&ctx->uring_lock); - __io_queue_sqe(nxt, NULL); - mutex_unlock(&ctx->uring_lock); - } -} - static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, void *key) { -- cgit v1.2.3 From 0d9b5b3af134cddfdc1dd31d41946a0ad389bbf2 Mon Sep 17 00:00:00 2001 From: Stefano Garzarella Date: Fri, 15 May 2020 18:38:04 +0200 Subject: io_uring: add 'cq_flags' field for the CQ ring This patch adds the new 'cq_flags' field that should be written by the application and read by the kernel. This new field is available to the userspace application through 'cq_off.flags'. We are using 4-bytes previously reserved and set to zero. This means that if the application finds this field to zero, then the new functionality is not supported. In the next patch we will introduce the first flag available. Signed-off-by: Stefano Garzarella Signed-off-by: Jens Axboe --- fs/io_uring.c | 10 +++++++++- include/uapi/linux/io_uring.h | 4 +++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 982066844c5a..02250693a406 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -142,7 +142,7 @@ struct io_rings { */ u32 sq_dropped; /* - * Runtime flags + * Runtime SQ flags * * Written by the kernel, shouldn't be modified by the * application. @@ -151,6 +151,13 @@ struct io_rings { * for IORING_SQ_NEED_WAKEUP after updating the sq tail. */ u32 sq_flags; + /* + * Runtime CQ flags + * + * Written by the application, shouldn't be modified by the + * kernel. 
+ */ + u32 cq_flags; /* * Number of completion events lost because the queue was full; * this should be avoided by the application by making sure @@ -7930,6 +7937,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries); p->cq_off.overflow = offsetof(struct io_rings, cq_overflow); p->cq_off.cqes = offsetof(struct io_rings, cqes); + p->cq_off.flags = offsetof(struct io_rings, cq_flags); p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP | IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS | diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index e48d746b8e2a..602bb0ece607 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -204,7 +204,9 @@ struct io_cqring_offsets { __u32 ring_entries; __u32 overflow; __u32 cqes; - __u64 resv[2]; + __u32 flags; + __u32 resv1; + __u64 resv2; }; /* -- cgit v1.2.3 From 7e55a19cf6e70ce08964b46dbbfbdb07fbc995fc Mon Sep 17 00:00:00 2001 From: Stefano Garzarella Date: Fri, 15 May 2020 18:38:05 +0200 Subject: io_uring: add IORING_CQ_EVENTFD_DISABLED to the CQ ring flags This new flag should be set/clear from the application to disable/enable eventfd notifications when a request is completed and queued to the CQ ring. Before this patch, notifications were always sent if an eventfd is registered, so IORING_CQ_EVENTFD_DISABLED is not set during the initialization. It will be up to the application to set the flag after initialization if no notifications are required at the beginning. Signed-off-by: Stefano Garzarella Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 ++ include/uapi/linux/io_uring.h | 7 +++++++ 2 files changed, 9 insertions(+) diff --git a/fs/io_uring.c b/fs/io_uring.c index 02250693a406..f800b0b4498f 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1153,6 +1153,8 @@ static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx) { if (!ctx->cq_ev_fd) return false; + if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED) + return false; if (!ctx->eventfd_async) return true; return io_wq_current_is_worker(); diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 602bb0ece607..8c5775df08b8 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -209,6 +209,13 @@ struct io_cqring_offsets { __u64 resv2; }; +/* + * cq_ring->flags + */ + +/* disable eventfd notifications */ +#define IORING_CQ_EVENTFD_DISABLED (1U << 0) + /* * io_uring_enter(2) flags */ -- cgit v1.2.3 From 6a4d07cde5778174a35ffc445c1d1388479563ee Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 15 May 2020 14:30:38 -0600 Subject: io_uring: file registration list and lock optimization There's no point in using list_del_init() on entries that are going away, and the associated lock is always used in process context so let's not use the IRQ disabling+saving variant of the spinlock. 
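Tying the two CQ-flags patches above together from the application side: a sketch, assuming a liburing recent enough to carry io_uring_cq_eventfd_toggle(), of muting and unmuting eventfd completion notifications via IORING_CQ_EVENTFD_DISABLED:

  /* eventfd_toggle_example.c: mute/unmute eventfd notifications */
  #include <liburing.h>
  #include <stdbool.h>
  #include <stdio.h>
  #include <sys/eventfd.h>

  int main(void)
  {
      struct io_uring ring;
      int efd, ret;

      if (io_uring_queue_init(8, &ring, 0) < 0)
          return 1;
      efd = eventfd(0, 0);
      if (efd < 0)
          return 1;

      /* completions will normally signal this eventfd */
      io_uring_register_eventfd(&ring, efd);

      /* sets IORING_CQ_EVENTFD_DISABLED in the shared cq_flags word */
      ret = io_uring_cq_eventfd_toggle(&ring, false);
      if (ret)
          fprintf(stderr, "cq_flags not supported: %d\n", ret);

      /* ... submit I/O without eventfd wakeups, then re-enable ... */
      io_uring_cq_eventfd_toggle(&ring, true);

      io_uring_queue_exit(&ring);
      return 0;
  }

An application that finds cq_off.flags set to zero should assume the feature is absent, as the cq_flags patch description notes.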
Signed-off-by: Jens Axboe --- fs/io_uring.c | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index f800b0b4498f..c7622a5ece2d 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6264,16 +6264,15 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx) struct fixed_file_data *data = ctx->file_data; struct fixed_file_ref_node *ref_node = NULL; unsigned nr_tables, i; - unsigned long flags; if (!data) return -ENXIO; - spin_lock_irqsave(&data->lock, flags); + spin_lock(&data->lock); if (!list_empty(&data->ref_list)) ref_node = list_first_entry(&data->ref_list, struct fixed_file_ref_node, node); - spin_unlock_irqrestore(&data->lock, flags); + spin_unlock(&data->lock); if (ref_node) percpu_ref_kill(&ref_node->refs); @@ -6516,17 +6515,16 @@ static void __io_file_put_work(struct fixed_file_ref_node *ref_node) struct fixed_file_data *file_data = ref_node->file_data; struct io_ring_ctx *ctx = file_data->ctx; struct io_file_put *pfile, *tmp; - unsigned long flags; list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) { - list_del_init(&pfile->list); + list_del(&pfile->list); io_ring_file_put(ctx, pfile->file); kfree(pfile); } - spin_lock_irqsave(&file_data->lock, flags); - list_del_init(&ref_node->node); - spin_unlock_irqrestore(&file_data->lock, flags); + spin_lock(&file_data->lock); + list_del(&ref_node->node); + spin_unlock(&file_data->lock); percpu_ref_exit(&ref_node->refs); kfree(ref_node); @@ -6606,7 +6604,6 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, int fd, ret = 0; unsigned i; struct fixed_file_ref_node *ref_node; - unsigned long flags; if (ctx->file_data) return -EBUSY; @@ -6714,9 +6711,9 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, } ctx->file_data->cur_refs = &ref_node->refs; - spin_lock_irqsave(&ctx->file_data->lock, flags); + spin_lock(&ctx->file_data->lock); list_add(&ref_node->node, &ctx->file_data->ref_list); - spin_unlock_irqrestore(&ctx->file_data->lock, flags); + spin_unlock(&ctx->file_data->lock); percpu_ref_get(&ctx->file_data->refs); return ret; } @@ -6792,7 +6789,6 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx, __s32 __user *fds; int fd, i, err; __u32 done; - unsigned long flags; bool needs_switch = false; if (check_add_overflow(up->offset, nr_args, &done)) @@ -6857,10 +6853,10 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx, if (needs_switch) { percpu_ref_kill(data->cur_refs); - spin_lock_irqsave(&data->lock, flags); + spin_lock(&data->lock); list_add(&ref_node->node, &data->ref_list); data->cur_refs = &ref_node->refs; - spin_unlock_irqrestore(&data->lock, flags); + spin_unlock(&data->lock); percpu_ref_get(&ctx->file_data->refs); } else destroy_fixed_file_ref_node(ref_node); -- cgit v1.2.3 From 3bfa5bcb26f0b52d7ae8416aa0618fff21aceaaf Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sun, 17 May 2020 13:54:12 -0600 Subject: io_uring: cleanup io_poll_remove_one() logic We only need apoll in the one section, do the juggling with the work restoration there. This removes a special case further down as well. No functional changes in this patch. 
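The poll path being cleaned up here is the same one exercised by IORING_OP_POLL_ADD on files with split read/write waitqueues (ttys, /dev/random), which the double poll_wait() patch earlier in this series enables. A minimal userspace sketch, assuming liburing:

  /* poll_add_example.c: IORING_OP_POLL_ADD on a terminal (liburing assumed) */
  #include <liburing.h>
  #include <poll.h>
  #include <stdio.h>

  int main(void)
  {
      struct io_uring ring;
      struct io_uring_sqe *sqe;
      struct io_uring_cqe *cqe;

      if (io_uring_queue_init(8, &ring, 0) < 0)
          return 1;

      /*
       * ttys use separate read/write waitqueues in their ->poll(),
       * which is exactly the case the double poll_wait() fix handles.
       */
      sqe = io_uring_get_sqe(&ring);
      io_uring_prep_poll_add(sqe, 0 /* stdin */, POLLIN);
      io_uring_submit(&ring);

      /* blocks until stdin becomes readable */
      if (!io_uring_wait_cqe(&ring, &cqe)) {
          printf("poll res=0x%x\n", cqe->res);  /* returned poll mask */
          io_uring_cqe_seen(&ring, cqe);
      }
      io_uring_queue_exit(&ring);
      return 0;
  }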
Signed-off-by: Jens Axboe --- fs/io_uring.c | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index c7622a5ece2d..7b529270d0d2 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -4470,33 +4470,32 @@ static bool __io_poll_remove_one(struct io_kiocb *req, do_complete = true; } spin_unlock(&poll->head->lock); + hash_del(&req->hash_node); return do_complete; } static bool io_poll_remove_one(struct io_kiocb *req) { - struct async_poll *apoll = NULL; bool do_complete; if (req->opcode == IORING_OP_POLL_ADD) { io_poll_remove_double(req); do_complete = __io_poll_remove_one(req, &req->poll); } else { - apoll = req->apoll; + struct async_poll *apoll = req->apoll; + /* non-poll requests have submit ref still */ - do_complete = __io_poll_remove_one(req, &req->apoll->poll); - if (do_complete) + do_complete = __io_poll_remove_one(req, &apoll->poll); + if (do_complete) { io_put_req(req); - } - - hash_del(&req->hash_node); - - if (do_complete && apoll) { - /* - * restore ->work because we need to call io_req_work_drop_env. - */ - memcpy(&req->work, &apoll->work, sizeof(req->work)); - kfree(apoll); + /* + * restore ->work because we will call + * io_req_work_drop_env below when dropping the + * final reference. + */ + memcpy(&req->work, &apoll->work, sizeof(req->work)); + kfree(apoll); + } } if (do_complete) { -- cgit v1.2.3 From 0cdaf760f42eb8e8a714c1cc017423e5da6d4936 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sun, 17 May 2020 14:13:40 +0300 Subject: io_uring: remove req->needs_fixed_files A submission is "async" IIF it's done by SQPOLL thread. Instead of passing @async flag into io_submit_sqes(), deduce it from ctx->flags. Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 7b529270d0d2..2d54d3365925 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -626,7 +626,6 @@ struct io_kiocb { struct io_async_ctx *io; int cflags; - bool needs_fixed_file; u8 opcode; struct io_ring_ctx *ctx; @@ -891,6 +890,11 @@ EXPORT_SYMBOL(io_uring_get_socket); static void io_file_put_work(struct work_struct *work); +static inline bool io_async_submit(struct io_ring_ctx *ctx) +{ + return ctx->flags & IORING_SETUP_SQPOLL; +} + static void io_ring_ctx_ref_free(struct percpu_ref *ref) { struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs); @@ -5484,7 +5488,7 @@ static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req, bool fixed; fixed = (req->flags & REQ_F_FIXED_FILE) != 0; - if (unlikely(!fixed && req->needs_fixed_file)) + if (unlikely(!fixed && io_async_submit(req->ctx))) return -EBADF; return io_file_get(state, req, fd, &req->file, fixed); @@ -5857,7 +5861,7 @@ static inline void io_consume_sqe(struct io_ring_ctx *ctx) static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_submit_state *state, bool async) + struct io_submit_state *state) { unsigned int sqe_flags; int id; @@ -5878,7 +5882,6 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, refcount_set(&req->refs, 2); req->task = NULL; req->result = 0; - req->needs_fixed_file = async; INIT_IO_WORK(&req->work, io_wq_submit_work); if (unlikely(req->opcode >= IORING_OP_LAST)) @@ -5919,7 +5922,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, } static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, - 
struct file *ring_file, int ring_fd, bool async) + struct file *ring_file, int ring_fd) { struct io_submit_state state, *statep = NULL; struct io_kiocb *link = NULL; @@ -5963,7 +5966,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, break; } - err = io_init_req(ctx, req, sqe, statep, async); + err = io_init_req(ctx, req, sqe, statep); io_consume_sqe(ctx); /* will complete beyond this point, count as submitted */ submitted++; @@ -5976,7 +5979,7 @@ fail_req: } trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data, - true, async); + true, io_async_submit(ctx)); err = io_submit_sqe(req, sqe, &link); if (err) goto fail_req; @@ -6115,7 +6118,7 @@ static int io_sq_thread(void *data) } mutex_lock(&ctx->uring_lock); - ret = io_submit_sqes(ctx, to_submit, NULL, -1, true); + ret = io_submit_sqes(ctx, to_submit, NULL, -1); mutex_unlock(&ctx->uring_lock); timeout = jiffies + ctx->sq_thread_idle; } @@ -7623,7 +7626,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, submitted = to_submit; } else if (to_submit) { mutex_lock(&ctx->uring_lock); - submitted = io_submit_sqes(ctx, to_submit, f.file, fd, false); + submitted = io_submit_sqes(ctx, to_submit, f.file, fd); mutex_unlock(&ctx->uring_lock); if (submitted != to_submit) -- cgit v1.2.3 From 9f13c35b33fddb186beab9ef21c555a01e45f4d7 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sun, 17 May 2020 14:13:41 +0300 Subject: io_uring: rename io_file_put() io_file_put() deals with flushing state's file refs, adding "state" to its name makes it a bit clearer. Also, avoid double check of state->file in __io_file_get() in some cases. Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 2d54d3365925..99f39d766cfc 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1999,15 +1999,19 @@ static void io_iopoll_req_issued(struct io_kiocb *req) wake_up(&ctx->sqo_wait); } -static void io_file_put(struct io_submit_state *state) +static void __io_state_file_put(struct io_submit_state *state) { - if (state->file) { - int diff = state->has_refs - state->used_refs; + int diff = state->has_refs - state->used_refs; - if (diff) - fput_many(state->file, diff); - state->file = NULL; - } + if (diff) + fput_many(state->file, diff); + state->file = NULL; +} + +static inline void io_state_file_put(struct io_submit_state *state) +{ + if (state->file) + __io_state_file_put(state); } /* @@ -2026,7 +2030,7 @@ static struct file *__io_file_get(struct io_submit_state *state, int fd) state->ios_left--; return state->file; } - io_file_put(state); + __io_state_file_put(state); } state->file = fget_many(fd, state->ios_left); if (!state->file) @@ -5790,7 +5794,7 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, static void io_submit_state_end(struct io_submit_state *state) { blk_finish_plug(&state->plug); - io_file_put(state); + io_state_file_put(state); if (state->free_reqs) kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs); } -- cgit v1.2.3 From c11368a57be460de889696f6ff8815fbcacf4db2 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sun, 17 May 2020 14:13:42 +0300 Subject: io_uring: don't repeat valid flag list req->flags stores all sqe->flags. After checking that sqe->flags are valid set if IOSQE* flags, no need to double check it, just forward them all. 
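For reference, sqe->flags is what applications set per request; with this change the kernel forwards the whole validated set into req->flags in one assignment. A sketch of setting IOSQE_* flags from userspace, assuming liburing:

  /* sqe_flags_example.c: linking two SQEs with IOSQE_IO_LINK (liburing assumed) */
  #include <fcntl.h>
  #include <liburing.h>

  int main(void)
  {
      struct io_uring ring;
      struct io_uring_sqe *sqe;
      struct io_uring_cqe *cqe;
      char buf[64];
      int fd, i;

      if (io_uring_queue_init(8, &ring, 0) < 0)
          return 1;
      fd = open("/etc/hostname", O_RDONLY);
      if (fd < 0)
          return 1;

      /* read, then close, executed in order because of IOSQE_IO_LINK */
      sqe = io_uring_get_sqe(&ring);
      io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
      io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK); /* copied into req->flags */

      sqe = io_uring_get_sqe(&ring);
      io_uring_prep_close(sqe, fd);

      io_uring_submit(&ring);
      for (i = 0; i < 2; i++) {
          if (io_uring_wait_cqe(&ring, &cqe))
              break;
          io_uring_cqe_seen(&ring, cqe);
      }
      io_uring_queue_exit(&ring);
      return 0;
  }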
Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 99f39d766cfc..c43340396f6d 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -5915,9 +5915,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, } /* same numerical values with corresponding REQ_F_*, safe to copy */ - req->flags |= sqe_flags & (IOSQE_IO_DRAIN | IOSQE_IO_HARDLINK | - IOSQE_ASYNC | IOSQE_FIXED_FILE | - IOSQE_BUFFER_SELECT | IOSQE_IO_LINK); + req->flags |= sqe_flags; if (!io_op_defs[req->opcode].needs_file) return 0; -- cgit v1.2.3 From 9dafdfc2f0a3ae551711098de3d7b621a469f11a Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sun, 17 May 2020 14:18:05 +0300 Subject: splice: export do_tee() export do_tee() for use in io_uring Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/splice.c | 3 +-- include/linux/splice.h | 3 +++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/splice.c b/fs/splice.c index fd0a1e7e5959..a1dd54de24d8 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -1754,8 +1754,7 @@ static int link_pipe(struct pipe_inode_info *ipipe, * The 'flags' used are the SPLICE_F_* variants, currently the only * applicable one is SPLICE_F_NONBLOCK. */ -static long do_tee(struct file *in, struct file *out, size_t len, - unsigned int flags) +long do_tee(struct file *in, struct file *out, size_t len, unsigned int flags) { struct pipe_inode_info *ipipe = get_pipe_info(in); struct pipe_inode_info *opipe = get_pipe_info(out); diff --git a/include/linux/splice.h b/include/linux/splice.h index ebbbfea48aa0..5c47013f708e 100644 --- a/include/linux/splice.h +++ b/include/linux/splice.h @@ -82,6 +82,9 @@ extern long do_splice(struct file *in, loff_t __user *off_in, struct file *out, loff_t __user *off_out, size_t len, unsigned int flags); +extern long do_tee(struct file *in, struct file *out, size_t len, + unsigned int flags); + /* * for dynamic pipe sizing */ -- cgit v1.2.3 From f2a8d5c7a218b9c24befb756c4eb30aa550ce822 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sun, 17 May 2020 14:18:06 +0300 Subject: io_uring: add tee(2) support Add IORING_OP_TEE implementing tee(2) support. Almost identical to splice bits, but without offsets. 
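A userspace sketch of the new opcode, assuming a liburing that provides io_uring_prep_tee(); like tee(2), it duplicates pipe content without consuming it and takes no offsets:

  /* tee_example.c: IORING_OP_TEE duplicating pipe data (liburing assumed) */
  #include <liburing.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
      struct io_uring ring;
      struct io_uring_sqe *sqe;
      struct io_uring_cqe *cqe;
      int p1[2], p2[2];
      char buf[32];

      if (io_uring_queue_init(8, &ring, 0) < 0)
          return 1;
      if (pipe(p1) || pipe(p2))
          return 1;

      write(p1[1], "hello", 5);

      /* duplicate up to 5 bytes from p1 into p2 without consuming p1 */
      sqe = io_uring_get_sqe(&ring);
      io_uring_prep_tee(sqe, p1[0], p2[1], 5, 0);
      io_uring_submit(&ring);

      if (!io_uring_wait_cqe(&ring, &cqe)) {
          printf("tee res=%d\n", cqe->res);
          io_uring_cqe_seen(&ring, cqe);
      }

      /* the data is still readable from both pipes */
      printf("p2: %zd bytes\n", read(p2[0], buf, sizeof(buf)));
      printf("p1: %zd bytes\n", read(p1[0], buf, sizeof(buf)));

      io_uring_queue_exit(&ring);
      return 0;
  }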
Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 62 ++++++++++++++++++++++++++++++++++++++++--- include/uapi/linux/io_uring.h | 1 + 2 files changed, 60 insertions(+), 3 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index c43340396f6d..026350b9c33f 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -853,6 +853,11 @@ static const struct io_op_def io_op_defs[] = { }, [IORING_OP_PROVIDE_BUFFERS] = {}, [IORING_OP_REMOVE_BUFFERS] = {}, + [IORING_OP_TEE] = { + .needs_file = 1, + .hash_reg_file = 1, + .unbound_nonreg_file = 1, + }, }; static void io_wq_submit_work(struct io_wq_work **workptr); @@ -2748,7 +2753,8 @@ out_free: return ret; } -static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static int __io_splice_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) { struct io_splice* sp = &req->splice; unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL; @@ -2758,8 +2764,6 @@ static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; sp->file_in = NULL; - sp->off_in = READ_ONCE(sqe->splice_off_in); - sp->off_out = READ_ONCE(sqe->off); sp->len = READ_ONCE(sqe->len); sp->flags = READ_ONCE(sqe->splice_flags); @@ -2778,6 +2782,46 @@ static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; } +static int io_tee_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off)) + return -EINVAL; + return __io_splice_prep(req, sqe); +} + +static int io_tee(struct io_kiocb *req, bool force_nonblock) +{ + struct io_splice *sp = &req->splice; + struct file *in = sp->file_in; + struct file *out = sp->file_out; + unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED; + long ret = 0; + + if (force_nonblock) + return -EAGAIN; + if (sp->len) + ret = do_tee(in, out, sp->len, flags); + + io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED)); + req->flags &= ~REQ_F_NEED_CLEANUP; + + io_cqring_add_event(req, ret); + if (ret != sp->len) + req_set_fail_links(req); + io_put_req(req); + return 0; +} + +static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + struct io_splice* sp = &req->splice; + + sp->off_in = READ_ONCE(sqe->splice_off_in); + sp->off_out = READ_ONCE(sqe->off); + return __io_splice_prep(req, sqe); +} + static int io_splice(struct io_kiocb *req, bool force_nonblock) { struct io_splice *sp = &req->splice; @@ -5085,6 +5129,9 @@ static int io_req_defer_prep(struct io_kiocb *req, case IORING_OP_REMOVE_BUFFERS: ret = io_remove_buffers_prep(req, sqe); break; + case IORING_OP_TEE: + ret = io_tee_prep(req, sqe); + break; default: printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n", req->opcode); @@ -5158,6 +5205,7 @@ static void io_cleanup_req(struct io_kiocb *req) putname(req->open.filename); break; case IORING_OP_SPLICE: + case IORING_OP_TEE: io_put_file(req, req->splice.file_in, (req->splice.flags & SPLICE_F_FD_IN_FIXED)); break; @@ -5388,6 +5436,14 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, } ret = io_remove_buffers(req, force_nonblock); break; + case IORING_OP_TEE: + if (sqe) { + ret = io_tee_prep(req, sqe); + if (ret < 0) + break; + } + ret = io_tee(req, force_nonblock); + break; default: ret = -EINVAL; break; diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 8c5775df08b8..92c22699a5a7 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -129,6 +129,7 @@ enum { 
IORING_OP_SPLICE, IORING_OP_PROVIDE_BUFFERS, IORING_OP_REMOVE_BUFFERS, + IORING_OP_TEE, /* this goes last, obviously */ IORING_OP_LAST, -- cgit v1.2.3 From 310672552f4aea2ad50704711aa3cdd45f5441e9 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sun, 17 May 2020 17:43:31 -0600 Subject: io_uring: async task poll trigger cleanup If the request is still hashed in io_async_task_func(), then it cannot have been canceled and it's pointless to check. So save that check. Signed-off-by: Jens Axboe --- fs/io_uring.c | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 026350b9c33f..50f079417911 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -4354,7 +4354,7 @@ static void io_async_task_func(struct callback_head *cb) struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); struct async_poll *apoll = req->apoll; struct io_ring_ctx *ctx = req->ctx; - bool canceled; + bool canceled = false; trace_io_uring_task_run(req->ctx, req->opcode, req->user_data); @@ -4363,34 +4363,33 @@ static void io_async_task_func(struct callback_head *cb) return; } - if (hash_hashed(&req->hash_node)) + /* If req is still hashed, it cannot have been canceled. Don't check. */ + if (hash_hashed(&req->hash_node)) { hash_del(&req->hash_node); - - canceled = READ_ONCE(apoll->poll.canceled); - if (canceled) { - io_cqring_fill_event(req, -ECANCELED); - io_commit_cqring(ctx); + } else { + canceled = READ_ONCE(apoll->poll.canceled); + if (canceled) { + io_cqring_fill_event(req, -ECANCELED); + io_commit_cqring(ctx); + } } spin_unlock_irq(&ctx->completion_lock); /* restore ->work in case we need to retry again */ memcpy(&req->work, &apoll->work, sizeof(req->work)); + kfree(apoll); - if (canceled) { - kfree(apoll); + if (!canceled) { + __set_current_state(TASK_RUNNING); + mutex_lock(&ctx->uring_lock); + __io_queue_sqe(req, NULL); + mutex_unlock(&ctx->uring_lock); + } else { io_cqring_ev_posted(ctx); req_set_fail_links(req); io_double_put_req(req); - return; } - - __set_current_state(TASK_RUNNING); - mutex_lock(&ctx->uring_lock); - __io_queue_sqe(req, NULL); - mutex_unlock(&ctx->uring_lock); - - kfree(apoll); } static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync, -- cgit v1.2.3 From 6b668c9b7fc6fc0c313cdaee8b75d17f4d954ab5 Mon Sep 17 00:00:00 2001 From: Xiaoguang Wang Date: Wed, 20 May 2020 15:35:03 +0800 Subject: io_uring: don't submit sqes when ctx->refs is dying When IORING_SETUP_SQPOLL is enabled, io_ring_ctx_wait_and_kill() will wait for sq thread to idle by busy loop: while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait)) cond_resched(); Above loop isn't very CPU friendly, it may introduce a short cpu burst on the current cpu. If ctx->refs is dying, we forbid sq_thread from submitting any further SQEs. Instead they just get discarded when we exit. 
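The sq thread in question only exists for rings created with IORING_SETUP_SQPOLL. For context, a sketch of setting one up, assuming liburing (kernels of this era require CAP_SYS_ADMIN for SQPOLL):

  /* sqpoll_example.c: creating an SQPOLL ring (liburing assumed) */
  #include <liburing.h>
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
      struct io_uring ring;
      struct io_uring_params p;
      int ret;

      memset(&p, 0, sizeof(p));
      p.flags = IORING_SETUP_SQPOLL;  /* kernel-side submission thread */
      p.sq_thread_idle = 1000;        /* ms before the sq thread sleeps */

      ret = io_uring_queue_init_params(8, &ring, &p);
      if (ret) {
          fprintf(stderr, "setup failed: %d\n", ret);
          return 1;
      }

      /* ... submissions are picked up by the sq thread ... */
      io_uring_queue_exit(&ring);     /* tears down the ring and sq thread */
      return 0;
  }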
Signed-off-by: Xiaoguang Wang Signed-off-by: Jens Axboe --- fs/io_uring.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 50f079417911..0b51f21e5432 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6175,7 +6175,8 @@ static int io_sq_thread(void *data) } mutex_lock(&ctx->uring_lock); - ret = io_submit_sqes(ctx, to_submit, NULL, -1); + if (likely(!percpu_ref_is_dying(&ctx->refs))) + ret = io_submit_sqes(ctx, to_submit, NULL, -1); mutex_unlock(&ctx->uring_lock); timeout = jiffies + ctx->sq_thread_idle; } @@ -7465,16 +7466,6 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) percpu_ref_kill(&ctx->refs); mutex_unlock(&ctx->uring_lock); - /* - * Wait for sq thread to idle, if we have one. It won't spin on new - * work after we've killed the ctx ref above. This is important to do - * before we cancel existing commands, as the thread could otherwise - * be queueing new work post that. If that's work we need to cancel, - * it could cause shutdown to hang. - */ - while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait)) - cond_resched(); - io_kill_timeouts(ctx); io_poll_remove_all(ctx); -- cgit v1.2.3 From 4518a3cc273cf82efdd36522fb1f13baad173c70 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Tue, 26 May 2020 20:34:02 +0300 Subject: io_uring: fix flush req->refs underflow In io_uring_cancel_files(), after refcount_sub_and_test() leaves 0 req->refs, it calls io_put_req(), which would also put a ref. Call io_free_req() instead. Cc: stable@vger.kernel.org Fixes: 2ca10259b418 ("io_uring: prune request from overflow list on flush") Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 0b51f21e5432..37422fcdaa7f 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -7534,7 +7534,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx, * all we had, then we're done with this request. */ if (refcount_sub_and_test(2, &cancel_req->refs)) { - io_put_req(cancel_req); + io_free_req(cancel_req); finish_wait(&ctx->inflight_wait, &wait); continue; } -- cgit v1.2.3 From 733f5c95e6fdabd05b8dfc15e04512809c9652c2 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Tue, 26 May 2020 20:34:03 +0300 Subject: io_uring: simplify io_timeout locking Move spin_lock_irq() earlier to have only 1 call site of it in io_timeout(). It makes the flow easier. Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 37422fcdaa7f..4be8f9eb71e4 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -4845,6 +4845,7 @@ static int io_timeout(struct io_kiocb *req) u32 seq = req->sequence; data = &req->io->timeout; + spin_lock_irq(&ctx->completion_lock); /* * sqe->off holds how many events that need to occur for this @@ -4853,7 +4854,6 @@ static int io_timeout(struct io_kiocb *req) */ if (!count) { req->flags |= REQ_F_TIMEOUT_NOSEQ; - spin_lock_irq(&ctx->completion_lock); entry = ctx->timeout_list.prev; goto add; } @@ -4864,7 +4864,6 @@ static int io_timeout(struct io_kiocb *req) * Insertion sort, ensuring the first entry in the list is always * the one we need first. 
*/ - spin_lock_irq(&ctx->completion_lock); list_for_each_prev(entry, &ctx->timeout_list) { struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list); unsigned nxt_seq; -- cgit v1.2.3 From 56080b02ed6e71fbc0add2d05a32ed7361dd736a Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Tue, 26 May 2020 20:34:04 +0300 Subject: io_uring: don't re-read sqe->off in timeout_prep() SQEs are user writable, don't read sqe->off twice in io_timeout_prep() Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 4be8f9eb71e4..f888b20d0a68 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -4803,18 +4803,19 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, { struct io_timeout_data *data; unsigned flags; + u32 off = READ_ONCE(sqe->off); if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; if (sqe->ioprio || sqe->buf_index || sqe->len != 1) return -EINVAL; - if (sqe->off && is_timeout_link) + if (off && is_timeout_link) return -EINVAL; flags = READ_ONCE(sqe->timeout_flags); if (flags & ~IORING_TIMEOUT_ABS) return -EINVAL; - req->timeout.count = READ_ONCE(sqe->off); + req->timeout.count = off; if (!req->io && io_alloc_async_ctx(req)) return -ENOMEM; -- cgit v1.2.3 From 0451894522108d6c72934aff6ef89023743a9ed4 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Tue, 26 May 2020 20:34:05 +0300 Subject: io_uring: separate DRAIN flushing into a cold path io_commit_cqring() assembly doesn't look good with extra code handling drained requests. IOSQE_IO_DRAIN is slow and discouraged to be used in a hot path, so try to minimise its impact by putting it into a helper and doing a fast check. Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index f888b20d0a68..0d98a529a93e 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -982,19 +982,6 @@ static inline bool req_need_defer(struct io_kiocb *req) return false; } -static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx) -{ - struct io_kiocb *req; - - req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list); - if (req && !req_need_defer(req)) { - list_del_init(&req->list); - return req; - } - - return NULL; -} - static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx) { struct io_kiocb *req; @@ -1127,6 +1114,19 @@ static void io_kill_timeouts(struct io_ring_ctx *ctx) spin_unlock_irq(&ctx->completion_lock); } +static void __io_queue_deferred(struct io_ring_ctx *ctx) +{ + do { + struct io_kiocb *req = list_first_entry(&ctx->defer_list, + struct io_kiocb, list); + + if (req_need_defer(req)) + break; + list_del_init(&req->list); + io_queue_async_work(req); + } while (!list_empty(&ctx->defer_list)); +} + static void io_commit_cqring(struct io_ring_ctx *ctx) { struct io_kiocb *req; @@ -1136,8 +1136,8 @@ static void io_commit_cqring(struct io_ring_ctx *ctx) __io_commit_cqring(ctx); - while ((req = io_get_deferred_req(ctx)) != NULL) - io_queue_async_work(req); + if (unlikely(!list_empty(&ctx->defer_list))) + __io_queue_deferred(ctx); } static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx) -- cgit v1.2.3 From 0bf0eefdab52d9f9f3a1eeda32a4fc7afe4e9219 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Tue, 26 May 2020 20:34:06 +0300 Subject: io_uring: get rid of manual punting in io_close io_close() was 
punting async manually to skip grabbing files. Use REQ_F_NO_FILE_TABLE instead, and pass it through the generic path with -EAGAIN. Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 0d98a529a93e..8c7a6e3e7669 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3492,25 +3492,15 @@ static int io_close(struct io_kiocb *req, bool force_nonblock) req->close.put_file = NULL; ret = __close_fd_get_file(req->close.fd, &req->close.put_file); - if (ret < 0) { - if (ret == -ENOENT) - ret = -EBADF; - return ret; - } + if (ret < 0) + return (ret == -ENOENT) ? -EBADF : ret; /* if the file has a flush method, be safe and punt to async */ if (req->close.put_file->f_op->flush && force_nonblock) { - /* submission ref will be dropped, take it for async */ - refcount_inc(&req->refs); - + /* avoid grabbing files - we don't need the files */ + req->flags |= REQ_F_NO_FILE_TABLE | REQ_F_MUST_PUNT; req->work.func = io_close_finish; - /* - * Do manual async queue here to avoid grabbing files - we don't - * need the files, and it'll cause io_close_finish() to close - * the file again and cause a double CQE entry for this request - */ - io_queue_async_work(req); - return 0; + return -EAGAIN; } /* -- cgit v1.2.3 From 1d9e1288039a47dc1189c3c1fed5cf3c215e94b7 Mon Sep 17 00:00:00 2001 From: Bijan Mottahedeh Date: Fri, 22 May 2020 21:31:16 -0700 Subject: io_uring: add io_statx structure Separate statx data from open in io_kiocb. No functional changes. Signed-off-by: Bijan Mottahedeh Signed-off-by: Jens Axboe --- fs/io_uring.c | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 8c7a6e3e7669..2d86bd7b2787 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -425,11 +425,7 @@ struct io_sr_msg { struct io_open { struct file *file; int dfd; - union { - unsigned mask; - }; struct filename *filename; - struct statx __user *buffer; struct open_how how; unsigned long nofile; }; @@ -481,6 +477,15 @@ struct io_provide_buf { __u16 bid; }; +struct io_statx { + struct file *file; + int dfd; + unsigned int mask; + unsigned int flags; + struct filename *filename; + struct statx __user *buffer; +}; + struct io_async_connect { struct sockaddr_storage address; }; @@ -622,6 +627,7 @@ struct io_kiocb { struct io_epoll epoll; struct io_splice splice; struct io_provide_buf pbuf; + struct io_statx statx; }; struct io_async_ctx *io; @@ -3381,19 +3387,19 @@ static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) if (req->flags & REQ_F_NEED_CLEANUP) return 0; - req->open.dfd = READ_ONCE(sqe->fd); - req->open.mask = READ_ONCE(sqe->len); + req->statx.dfd = READ_ONCE(sqe->fd); + req->statx.mask = READ_ONCE(sqe->len); fname = u64_to_user_ptr(READ_ONCE(sqe->addr)); - req->open.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2)); - req->open.how.flags = READ_ONCE(sqe->statx_flags); + req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2)); + req->statx.flags = READ_ONCE(sqe->statx_flags); - if (vfs_stat_set_lookup_flags(&lookup_flags, req->open.how.flags)) + if (vfs_stat_set_lookup_flags(&lookup_flags, req->statx.flags)) return -EINVAL; - req->open.filename = getname_flags(fname, lookup_flags, NULL); - if (IS_ERR(req->open.filename)) { - ret = PTR_ERR(req->open.filename); - req->open.filename = NULL; + req->statx.filename = getname_flags(fname, lookup_flags, NULL); + if (IS_ERR(req->statx.filename)) { + 
ret = PTR_ERR(req->statx.filename); + req->statx.filename = NULL; return ret; } @@ -3403,7 +3409,7 @@ static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) static int io_statx(struct io_kiocb *req, bool force_nonblock) { - struct io_open *ctx = &req->open; + struct io_statx *ctx = &req->statx; unsigned lookup_flags; struct path path; struct kstat stat; @@ -3416,7 +3422,7 @@ static int io_statx(struct io_kiocb *req, bool force_nonblock) return -EAGAIN; } - if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->how.flags)) + if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->flags)) return -EINVAL; retry: @@ -3428,7 +3434,7 @@ retry: if (ret) goto err; - ret = vfs_getattr(&path, &stat, ctx->mask, ctx->how.flags); + ret = vfs_getattr(&path, &stat, ctx->mask, ctx->flags); path_put(&path); if (retry_estale(ret, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; -- cgit v1.2.3 From 0018784fc84f636d473a0d2a65a34f9d01893c0a Mon Sep 17 00:00:00 2001 From: Bijan Mottahedeh Date: Fri, 22 May 2020 21:31:17 -0700 Subject: statx: allow system call to be invoked from io_uring This is a prepatory patch to allow io_uring to invoke statx directly. Signed-off-by: Bijan Mottahedeh Signed-off-by: Jens Axboe --- fs/internal.h | 2 ++ fs/stat.c | 32 +++++++++++++++++++------------- 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/fs/internal.h b/fs/internal.h index aa5d45524e87..88a9793f96a1 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -188,3 +188,5 @@ int sb_init_dio_done_wq(struct super_block *sb); */ unsigned vfs_stat_set_lookup_flags(unsigned *lookup_flags, int flags); int cp_statx(const struct kstat *stat, struct statx __user *buffer); +int do_statx(int dfd, const char __user *filename, unsigned flags, + unsigned int mask, struct statx __user *buffer); diff --git a/fs/stat.c b/fs/stat.c index 030008796479..65cf51fda2aa 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -567,6 +567,24 @@ cp_statx(const struct kstat *stat, struct statx __user *buffer) return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0; } +int do_statx(int dfd, const char __user *filename, unsigned flags, + unsigned int mask, struct statx __user *buffer) +{ + struct kstat stat; + int error; + + if (mask & STATX__RESERVED) + return -EINVAL; + if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE) + return -EINVAL; + + error = vfs_statx(dfd, filename, flags, &stat, mask); + if (error) + return error; + + return cp_statx(&stat, buffer); +} + /** * sys_statx - System call to get enhanced stats * @dfd: Base directory to pathwalk from *or* fd to stat. @@ -583,19 +601,7 @@ SYSCALL_DEFINE5(statx, unsigned int, mask, struct statx __user *, buffer) { - struct kstat stat; - int error; - - if (mask & STATX__RESERVED) - return -EINVAL; - if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE) - return -EINVAL; - - error = vfs_statx(dfd, filename, flags, &stat, mask); - if (error) - return error; - - return cp_statx(&stat, buffer); + return do_statx(dfd, filename, flags, mask, buffer); } #ifdef CONFIG_COMPAT -- cgit v1.2.3 From e62753e4e2926f249d088cc0517be5ed4efec6d6 Mon Sep 17 00:00:00 2001 From: Bijan Mottahedeh Date: Fri, 22 May 2020 21:31:18 -0700 Subject: io_uring: call statx directly Calling statx directly both simplifies the interface and avoids potential incompatibilities between sync and async invokations. 
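With do_statx() exported, IORING_OP_STATX now takes exactly the statx(2) arguments. A userspace sketch, assuming liburing provides io_uring_prep_statx() and a glibc new enough to expose struct statx:

  /* statx_example.c: IORING_OP_STATX (liburing assumed, kernel >= 5.6) */
  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <liburing.h>
  #include <stdio.h>
  #include <sys/stat.h>

  int main(void)
  {
      struct io_uring ring;
      struct io_uring_sqe *sqe;
      struct io_uring_cqe *cqe;
      struct statx stx;

      if (io_uring_queue_init(8, &ring, 0) < 0)
          return 1;

      /* same arguments as statx(2); the kernel now calls do_statx() */
      sqe = io_uring_get_sqe(&ring);
      io_uring_prep_statx(sqe, AT_FDCWD, "/etc/hostname", 0,
                          STATX_SIZE, &stx);
      io_uring_submit(&ring);

      if (!io_uring_wait_cqe(&ring, &cqe)) {
          if (!cqe->res)
              printf("size=%llu\n", (unsigned long long)stx.stx_size);
          io_uring_cqe_seen(&ring, cqe);
      }
      io_uring_queue_exit(&ring);
      return 0;
  }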
Signed-off-by: Bijan Mottahedeh Signed-off-by: Jens Axboe --- fs/io_uring.c | 50 ++++---------------------------------------------- 1 file changed, 4 insertions(+), 46 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 2d86bd7b2787..de6547e68626 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -482,7 +482,7 @@ struct io_statx { int dfd; unsigned int mask; unsigned int flags; - struct filename *filename; + const char __user *filename; struct statx __user *buffer; }; @@ -3376,43 +3376,23 @@ static int io_fadvise(struct io_kiocb *req, bool force_nonblock) static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - const char __user *fname; - unsigned lookup_flags; - int ret; - if (sqe->ioprio || sqe->buf_index) return -EINVAL; if (req->flags & REQ_F_FIXED_FILE) return -EBADF; - if (req->flags & REQ_F_NEED_CLEANUP) - return 0; req->statx.dfd = READ_ONCE(sqe->fd); req->statx.mask = READ_ONCE(sqe->len); - fname = u64_to_user_ptr(READ_ONCE(sqe->addr)); + req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr)); req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2)); req->statx.flags = READ_ONCE(sqe->statx_flags); - if (vfs_stat_set_lookup_flags(&lookup_flags, req->statx.flags)) - return -EINVAL; - - req->statx.filename = getname_flags(fname, lookup_flags, NULL); - if (IS_ERR(req->statx.filename)) { - ret = PTR_ERR(req->statx.filename); - req->statx.filename = NULL; - return ret; - } - - req->flags |= REQ_F_NEED_CLEANUP; return 0; } static int io_statx(struct io_kiocb *req, bool force_nonblock) { struct io_statx *ctx = &req->statx; - unsigned lookup_flags; - struct path path; - struct kstat stat; int ret; if (force_nonblock) { @@ -3422,29 +3402,9 @@ static int io_statx(struct io_kiocb *req, bool force_nonblock) return -EAGAIN; } - if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->flags)) - return -EINVAL; - -retry: - /* filename_lookup() drops it, keep a reference */ - ctx->filename->refcnt++; - - ret = filename_lookup(ctx->dfd, ctx->filename, lookup_flags, &path, - NULL); - if (ret) - goto err; + ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask, + ctx->buffer); - ret = vfs_getattr(&path, &stat, ctx->mask, ctx->flags); - path_put(&path); - if (retry_estale(ret, lookup_flags)) { - lookup_flags |= LOOKUP_REVAL; - goto retry; - } - if (!ret) - ret = cp_statx(&stat, ctx->buffer); -err: - putname(ctx->filename); - req->flags &= ~REQ_F_NEED_CLEANUP; if (ret < 0) req_set_fail_links(req); io_cqring_add_event(req, ret); @@ -5196,8 +5156,6 @@ static void io_cleanup_req(struct io_kiocb *req) break; case IORING_OP_OPENAT: case IORING_OP_OPENAT2: - case IORING_OP_STATX: - putname(req->open.filename); break; case IORING_OP_SPLICE: case IORING_OP_TEE: -- cgit v1.2.3 From 6f88cc176a3358c54bb6c38c8afee3f3a42faf54 Mon Sep 17 00:00:00 2001 From: Bijan Mottahedeh Date: Fri, 22 May 2020 21:31:19 -0700 Subject: statx: hide interfaces no longer used by io_uring The io_uring interfaces have been replaced by do_statx() and are no longer needed. 
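At this point the opcode's prep step, shown in the previous patch, simply copies the statx arguments out of the SQE: fd carries the directory fd, addr the pathname, len the mask, addr2 the statx buffer and statx_flags the flags. Below is a raw-SQE sketch of that mapping from user space, built against the UAPI headers; the helper name and the omitted submission plumbing are the example's own and not part of these patches.

#include <stdint.h>
#include <string.h>
#include <linux/io_uring.h>
#include <linux/stat.h>

/* Fill a raw SQE for IORING_OP_STATX, mirroring what io_statx_prep() reads.
 * ioprio and buf_index must stay zero or the kernel returns -EINVAL. */
static void prep_statx_sqe(struct io_uring_sqe *sqe, int dfd, const char *path,
			   unsigned int flags, unsigned int mask,
			   struct statx *stx)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_STATX;
	sqe->fd = dfd;                           /* READ_ONCE(sqe->fd)          -> statx.dfd      */
	sqe->addr = (uint64_t)(uintptr_t)path;   /* READ_ONCE(sqe->addr)        -> statx.filename */
	sqe->len = mask;                         /* READ_ONCE(sqe->len)         -> statx.mask     */
	sqe->addr2 = (uint64_t)(uintptr_t)stx;   /* READ_ONCE(sqe->addr2)       -> statx.buffer   */
	sqe->statx_flags = flags;                /* READ_ONCE(sqe->statx_flags) -> statx.flags    */
}

liburing's io_uring_prep_statx() wraps exactly this field mapping.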
Signed-off-by: Bijan Mottahedeh Signed-off-by: Jens Axboe --- fs/internal.h | 2 -- fs/stat.c | 5 +++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/fs/internal.h b/fs/internal.h index 88a9793f96a1..cf2043c8ac3d 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -186,7 +186,5 @@ int sb_init_dio_done_wq(struct super_block *sb); /* * fs/stat.c: */ -unsigned vfs_stat_set_lookup_flags(unsigned *lookup_flags, int flags); -int cp_statx(const struct kstat *stat, struct statx __user *buffer); int do_statx(int dfd, const char __user *filename, unsigned flags, unsigned int mask, struct statx __user *buffer); diff --git a/fs/stat.c b/fs/stat.c index 65cf51fda2aa..1b509d87265b 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -152,7 +152,8 @@ int vfs_statx_fd(unsigned int fd, struct kstat *stat, } EXPORT_SYMBOL(vfs_statx_fd); -inline unsigned vfs_stat_set_lookup_flags(unsigned *lookup_flags, int flags) +static inline unsigned vfs_stat_set_lookup_flags(unsigned *lookup_flags, + int flags) { if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH | KSTAT_QUERY_FLAGS)) != 0) @@ -533,7 +534,7 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename, } #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */ -noinline_for_stack int +static noinline_for_stack int cp_statx(const struct kstat *stat, struct statx __user *buffer) { struct statx tmp; -- cgit v1.2.3 From 360428f8c0cd857006a8a3f515946285370489ac Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 30 May 2020 14:54:17 +0300 Subject: io_uring: move timeouts flushing to a helper Separate the flushing of offset timeouts from io_commit_cqring() by moving it into a helper. This is just preparation; it makes the following patches clearer. Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 34 ++++++++++++++-------------------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index de6547e68626..ffd0ec7a5a7b 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -988,23 +988,6 @@ static inline bool req_need_defer(struct io_kiocb *req) return false; } -static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx) -{ - struct io_kiocb *req; - - req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list); - if (req) { - if (req->flags & REQ_F_TIMEOUT_NOSEQ) - return NULL; - if (!__req_need_defer(req)) { - list_del_init(&req->list); - return req; - } - } - - return NULL; -} - static void __io_commit_cqring(struct io_ring_ctx *ctx) { struct io_rings *rings = ctx->rings; @@ -1133,13 +1116,24 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx) } while (!list_empty(&ctx->defer_list)); } -static void io_commit_cqring(struct io_ring_ctx *ctx) +static void io_flush_timeouts(struct io_ring_ctx *ctx) { - struct io_kiocb *req; + while (!list_empty(&ctx->timeout_list)) { + struct io_kiocb *req = list_first_entry(&ctx->timeout_list, + struct io_kiocb, list); - while ((req = io_get_timeout_req(ctx)) != NULL) + if (req->flags & REQ_F_TIMEOUT_NOSEQ) + break; + if (__req_need_defer(req)) + break; + list_del_init(&req->list); io_kill_timeout(req); + } +} +static void io_commit_cqring(struct io_ring_ctx *ctx) +{ + io_flush_timeouts(ctx); __io_commit_cqring(ctx); if (unlikely(!list_empty(&ctx->defer_list))) -- cgit v1.2.3 From bfe68a221905de37e65394a6d58c1e5f3e545d2f Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 30 May 2020 14:54:18 +0300 Subject: io_uring: off timeouts based only on completions Offset timeouts wait not for sqe->off non-timeout CQEs, but rather
sqe->off + number of prior inflight requests. Wait exactly for sqe->off non-timeout completions Reported-by: Jens Axboe Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 65 +++++++++++++---------------------------------------------- 1 file changed, 14 insertions(+), 51 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index ffd0ec7a5a7b..9f11feb8a3ac 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -394,7 +394,8 @@ struct io_timeout { struct file *file; u64 addr; int flags; - u32 count; + u32 off; + u32 target_seq; }; struct io_rw { @@ -1124,8 +1125,10 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx) if (req->flags & REQ_F_TIMEOUT_NOSEQ) break; - if (__req_need_defer(req)) + if (req->timeout.target_seq != ctx->cached_cq_tail + - atomic_read(&ctx->cq_timeouts)) break; + list_del_init(&req->list); io_kill_timeout(req); } @@ -4660,20 +4663,8 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer) * We could be racing with timeout deletion. If the list is empty, * then timeout lookup already found it and will be handling it. */ - if (!list_empty(&req->list)) { - struct io_kiocb *prev; - - /* - * Adjust the reqs sequence before the current one because it - * will consume a slot in the cq_ring and the cq_tail - * pointer will be increased, otherwise other timeout reqs may - * return in advance without waiting for enough wait_nr. - */ - prev = req; - list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list) - prev->sequence++; + if (!list_empty(&req->list)) list_del_init(&req->list); - } io_cqring_fill_event(req, -ETIME); io_commit_cqring(ctx); @@ -4765,7 +4756,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (flags & ~IORING_TIMEOUT_ABS) return -EINVAL; - req->timeout.count = off; + req->timeout.off = off; if (!req->io && io_alloc_async_ctx(req)) return -ENOMEM; @@ -4789,13 +4780,10 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, static int io_timeout(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; - struct io_timeout_data *data; + struct io_timeout_data *data = &req->io->timeout; struct list_head *entry; - unsigned span = 0; - u32 count = req->timeout.count; - u32 seq = req->sequence; + u32 tail, off = req->timeout.off; - data = &req->io->timeout; spin_lock_irq(&ctx->completion_lock); /* @@ -4803,13 +4791,14 @@ static int io_timeout(struct io_kiocb *req) * timeout event to be satisfied. If it isn't set, then this is * a pure timeout request, sequence isn't used. */ - if (!count) { + if (!off) { req->flags |= REQ_F_TIMEOUT_NOSEQ; entry = ctx->timeout_list.prev; goto add; } - req->sequence = seq + count; + tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts); + req->timeout.target_seq = tail + off; /* * Insertion sort, ensuring the first entry in the list is always @@ -4817,39 +4806,13 @@ static int io_timeout(struct io_kiocb *req) */ list_for_each_prev(entry, &ctx->timeout_list) { struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list); - unsigned nxt_seq; - long long tmp, tmp_nxt; - u32 nxt_offset = nxt->timeout.count; if (nxt->flags & REQ_F_TIMEOUT_NOSEQ) continue; - - /* - * Since seq + count can overflow, use type long - * long to store it. - */ - tmp = (long long)seq + count; - nxt_seq = nxt->sequence - nxt_offset; - tmp_nxt = (long long)nxt_seq + nxt_offset; - - /* - * cached_sq_head may overflow, and it will never overflow twice - * once there is some timeout req still be valid. 
- */ - if (seq < nxt_seq) - tmp += UINT_MAX; - - if (tmp > tmp_nxt) + /* nxt.seq is behind @tail, otherwise would've been completed */ + if (off >= nxt->timeout.target_seq - tail) break; - - /* - * Sequence of reqs after the insert one and itself should - * be adjusted because each timeout req consumes a slot. - */ - span++; - nxt->sequence++; } - req->sequence -= span; add: list_add(&req->list, entry); data->timer.function = io_timeout_fn; -- cgit v1.2.3 From 7b53d59859bc932b37895d2d37388e7fa29af7a5 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 30 May 2020 14:19:15 +0300 Subject: io_uring: fix overflowed reqs cancellation Overflowed requests in io_uring_cancel_files() should be shed only of their inflight and overflow refs. All other remaining references are owned by someone else. If refcount_sub_and_test() fails, it will go further and put an extra ref; don't do that. Also, there is no need to do io_wq_cancel_work() for overflowed reqs; they will be let go shortly anyway. Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 9f11feb8a3ac..732ec73ec3c0 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -7449,10 +7449,11 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx, finish_wait(&ctx->inflight_wait, &wait); continue; } + } else { + io_wq_cancel_work(ctx->io_wq, &cancel_req->work); + io_put_req(cancel_req); } - io_wq_cancel_work(ctx->io_wq, &cancel_req->work); - io_put_req(cancel_req); schedule(); finish_wait(&ctx->inflight_wait, &wait); } -- cgit v1.2.3
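A worked example for the offset-timeout rework earlier in this series: when armed, a timeout records target_seq = tail + sqe->off, where tail is cached_cq_tail minus the number of timeout CQEs, and io_flush_timeouts() fires it once the current count equals target_seq; the insertion sort likewise compares distances from the current tail. Everything is plain u32 arithmetic, which is why the old long long / UINT_MAX special-casing could be dropped. The sketch below is stand-alone user-space C that only mimics that arithmetic (it is not kernel code) to show that counter wraparound is harmless:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Non-timeout completion count (cached_cq_tail - cq_timeouts) at arming
	 * time, deliberately chosen right below the u32 wrap point. */
	uint32_t tail = UINT32_MAX - 2;
	uint32_t off = 5;			/* sqe->off */
	uint32_t target_seq = tail + off;	/* wraps around to 2 */

	for (uint32_t done = 0; done <= off; done++) {
		uint32_t cur = tail + done;	/* current completion count, may wrap */
		uint32_t elapsed = cur - tail;	/* completions since arming, exact across wrap */
		int fires = (cur == target_seq);/* the io_flush_timeouts() equality check */

		printf("done=%" PRIu32 " cur=%" PRIu32 " fires=%d\n", done, cur, fires);
		assert(elapsed == done);
		assert(fires == (done == off));
	}
	return 0;
}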
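The cancellation fix above boils down to reference ownership: the canceller owns exactly the inflight and overflow refs of an overflowed request, so it may drop exactly those and must not "help" by putting more, or the request would be freed under its remaining owner. A generic user-space sketch of that rule follows; it is illustrative only and not derived from the io_uring code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refs;
};

/* Drop n references; returns true only when the last one was dropped. */
static bool put_refs(struct obj *o, int n)
{
	return atomic_fetch_sub(&o->refs, n) == n;
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refs, 3);	/* 2 owned by the canceller, 1 owned elsewhere */

	if (put_refs(o, 2))		/* drop only what we own... */
		free(o);		/* ...not reached: another owner remains */
	else
		printf("object still alive, owned elsewhere (refs=%d)\n",
		       atomic_load(&o->refs));

	if (put_refs(o, 1))		/* the remaining owner drops its ref and frees */
		free(o);
	return 0;
}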