author		Pavel Begunkov <asml.silence@gmail.com>	2021-06-15 16:47:56 +0100
committer	Jens Axboe <axboe@kernel.dk>	2021-06-15 15:44:33 -0600
commit		10c669040e9b3538e1732c8d40729636b17ce9dd
tree		fdea8650df3f3cf72e32be4a4dbd3e58cfb6b80c /fs
parent		27f6b318dea2d7ccccc9dca416e59431838c2929
io_uring: switch !DRAIN fast path when possible
->drain_used is a one-way flag: once set it is never cleared, which is
not optimal for users who use DRAIN only rarely. Instead, clear it in
io_drain_req() once all previously deferred requests are gone, so
submission returns to the fast path. Also rename the flag to reflect
the change and make its meaning clearer.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/7f37a240857546a94df6348507edddacab150460.1623772051.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
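
The toggle is easy to see in a standalone form. What follows is a
minimal userspace C sketch of the pattern this patch applies, not
kernel code: a flag gating a slow path is cleared again once the
deferred work is drained, instead of staying set for the lifetime of
the ring. All names here (fake_ctx, drain_req, queue_request) and the
integer defer counter are made up for illustration.

/*
 * Userspace sketch (not kernel code) of the self-clearing slow-path
 * flag: drain_active plays the role of ctx->drain_active, and
 * "deferred" stands in for the length of ctx->defer_list.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_ctx {
	bool drain_active;	/* gates the slow path */
	int deferred;		/* requests queued for deferred execution */
};

/* Slow path: returns true if the request had to be deferred. */
static bool drain_req(struct fake_ctx *ctx, bool req_wants_drain)
{
	if (ctx->deferred == 0 && !req_wants_drain) {
		/*
		 * All previously deferred requests are gone and this one
		 * doesn't ask to drain: switch the fast path back on.
		 */
		ctx->drain_active = false;
		return false;
	}
	ctx->deferred++;	/* queue the request for deferred execution */
	return true;
}

static void queue_request(struct fake_ctx *ctx, bool req_wants_drain)
{
	if (req_wants_drain)
		ctx->drain_active = true;	/* set on submission */
	/* Fast path: most requests never call into drain_req(). */
	if (ctx->drain_active && drain_req(ctx, req_wants_drain))
		return;
	printf("issued request (drain_active=%d)\n", ctx->drain_active);
}

int main(void)
{
	struct fake_ctx ctx = { 0 };

	queue_request(&ctx, true);	/* sets drain_active, gets deferred */
	ctx.deferred = 0;		/* pretend the deferred work completed */
	queue_request(&ctx, false);	/* slow path once, clears the flag */
	queue_request(&ctx, false);	/* back on the fast path */
	return 0;
}

The point of the pattern is the common case: once the flag is clear
again, a submission costs a single branch and never touches the drain
machinery, mirroring how io_queue_sqe() checks ctx->drain_active
before calling io_drain_req() in the diff below.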
Diffstat (limited to 'fs')
-rw-r--r--	fs/io_uring.c	14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6292b8da0a75..25106cf7e57c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -352,7 +352,7 @@ struct io_ring_ctx {
 		unsigned int		eventfd_async: 1;
 		unsigned int		restricted: 1;
 		unsigned int		off_timeout_used: 1;
-		unsigned int		drain_used: 1;
+		unsigned int		drain_active: 1;
 	} ____cacheline_aligned_in_smp;
 
 	/* submission data */
@@ -1346,10 +1346,10 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 
 static void io_commit_cqring(struct io_ring_ctx *ctx)
 {
-	if (unlikely(ctx->off_timeout_used || ctx->drain_used)) {
+	if (unlikely(ctx->off_timeout_used || ctx->drain_active)) {
 		if (ctx->off_timeout_used)
 			io_flush_timeouts(ctx);
-		if (ctx->drain_used)
+		if (ctx->drain_active)
 			io_queue_deferred(ctx);
 	}
 	/* order cqe stores with ring update */
@@ -6004,8 +6004,10 @@ static bool io_drain_req(struct io_kiocb *req)
 
 	/* Still need defer if there is pending req in defer list. */
 	if (likely(list_empty_careful(&ctx->defer_list) &&
-		   !(req->flags & REQ_F_IO_DRAIN)))
+		   !(req->flags & REQ_F_IO_DRAIN))) {
+		ctx->drain_active = false;
 		return false;
+	}
 
 	seq = io_get_sequence(req);
 	/* Still a chance to pass the sequence check */
@@ -6446,7 +6448,7 @@ static void __io_queue_sqe(struct io_kiocb *req)
 
 static inline void io_queue_sqe(struct io_kiocb *req)
 {
-	if (unlikely(req->ctx->drain_used) && io_drain_req(req))
+	if (unlikely(req->ctx->drain_active) && io_drain_req(req))
 		return;
 
 	if (likely(!(req->flags & REQ_F_FORCE_ASYNC))) {
@@ -6572,7 +6574,7 @@ fail_req:
 	}
 
 	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
-		ctx->drain_used = true;
+		ctx->drain_active = true;
 		/*
 		 * Taking sequential execution of a link, draining both sides