author    Pavel Begunkov <asml.silence@gmail.com>  2021-02-10 00:03:23 +0000
committer Jens Axboe <axboe@kernel.dk>  2021-02-10 07:28:43 -0700
commit    e5d1bc0a91f16959aa279aa3ee9fdc246d4bb382 (patch)
tree      da1be612b0d777fa7f9ee84ef880c3a81728f035 /fs
parent    c5eef2b9449ba267f53bfa7cf63d2bc93acbee32 (diff)
io_uring: defer flushing cached reqs
While there are requests in the allocation cache, use them; only once those are exhausted fall back to the memory stashed in comp.free_list. As list manipulation is generally heavy and not cache friendly, flush the stashed requests all at once, or as many as fit, in one go.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
[axboe: return success/failure from io_flush_cached_reqs()]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
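For illustration, the sketch below shows the same batching pattern in plain userspace C: instead of popping one element off the stashed list per allocation, the whole stash is drained into a fixed-size array cache in a single pass. The types and names here (struct req, struct state, flush_cached_reqs) are hypothetical stand-ins, not the kernel's.

/*
 * Minimal userspace sketch of the batching idea in this patch:
 * drain a stashed free list into a fixed-size array cache in one
 * pass, stopping when the array is full.  Illustrative only; these
 * are not the kernel's types.
 */
#include <stdbool.h>
#include <stdio.h>

#define REQ_CACHE_SIZE 8

struct req {
	struct req *next;	/* free-list link */
	int id;
};

struct state {
	struct req *free_list;			/* stashed completed requests */
	struct req *reqs[REQ_CACHE_SIZE];	/* allocation cache */
	unsigned int free_reqs;			/* entries available in reqs[] */
};

/*
 * Move as many requests as fit from free_list into reqs[] in one go;
 * return true if at least one request was moved.
 */
static bool flush_cached_reqs(struct state *st)
{
	bool moved = false;

	while (st->free_list && st->free_reqs < REQ_CACHE_SIZE) {
		struct req *r = st->free_list;

		st->free_list = r->next;
		st->reqs[st->free_reqs++] = r;
		moved = true;
	}
	return moved;
}

int main(void)
{
	static struct req pool[3];
	struct state st = { 0 };

	/* Stash three "completed" requests on the free list. */
	for (int i = 0; i < 3; i++) {
		pool[i].id = i;
		pool[i].next = st.free_list;
		st.free_list = &pool[i];
	}

	if (flush_cached_reqs(&st))
		printf("cached %u requests in one pass\n", st.free_reqs);
	return 0;
}

Amortizing the list walk this way keeps the hot allocation path on the array cache and only touches the list-heavy path when the cache runs dry, which is the design choice the patch below makes in io_alloc_req().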
Diffstat (limited to 'fs')
-rw-r--r--	fs/io_uring.c	28
1 file changed, 19 insertions(+), 9 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index bff5bc4a2b6e..4a28032ba35b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1952,25 +1952,35 @@ static inline void io_req_complete(struct io_kiocb *req, long res)
__io_req_complete(req, 0, res, 0);
}
-static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
+static bool io_flush_cached_reqs(struct io_submit_state *state)
{
- struct io_submit_state *state = &ctx->submit_state;
-
- BUILD_BUG_ON(IO_REQ_ALLOC_BATCH > ARRAY_SIZE(state->reqs));
-
- if (!list_empty(&state->comp.free_list)) {
- struct io_kiocb *req;
+ struct io_kiocb *req = NULL;

+ while (!list_empty(&state->comp.free_list)) {
req = list_first_entry(&state->comp.free_list, struct io_kiocb,
compl.list);
list_del(&req->compl.list);
- return req;
+ state->reqs[state->free_reqs++] = req;
+ if (state->free_reqs == ARRAY_SIZE(state->reqs))
+ break;
}
+ return req != NULL;
+}
+
+static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
+{
+ struct io_submit_state *state = &ctx->submit_state;
+
+ BUILD_BUG_ON(IO_REQ_ALLOC_BATCH > ARRAY_SIZE(state->reqs));
+
if (!state->free_reqs) {
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
int ret;

+ if (io_flush_cached_reqs(state))
+ goto got_req;
+
ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
state->reqs);
@@ -1986,7 +1996,7 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
}
state->free_reqs = ret;
}
-
+got_req:
state->free_reqs--;
return state->reqs[state->free_reqs];
}