From: Pavel Begunkov <asml.silence@gmail.com>
Subject: [PATCH 2/8] io_uring: always pass non-null io_submit_state
Date: Fri, 24 Jan 2020
There is more harm than merit in conditionally passing io_submit_state,
so always pass a non-null pointer. This shouldn't affect performance,
and even if it does, the gap will be closed by the following commits.
Also, in preparation, move plugging out of io_submit_state.
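
For illustration, here is a minimal userspace sketch of the control flow
this patch moves to. All names below (submit_state, state_start,
PLUG_THRESHOLD, and so on) are simplified stand-ins, not the kernel's
definitions in fs/io_uring.c: the state is initialized unconditionally,
so helpers never branch on a NULL pointer, and only the block plug stays
gated on the submission count.

/*
 * Hedged sketch with stand-in types; compiles as plain C99 userspace code.
 */
#include <stdbool.h>
#include <stdio.h>

#define PLUG_THRESHOLD	2		/* stand-in for IO_PLUG_THRESHOLD */

struct submit_state {
	unsigned int free_reqs;		/* stand-in alloc-cache counter */
};

static void state_start(struct submit_state *s) { s->free_reqs = 0; }
static void state_end(struct submit_state *s) { (void)s; }

static void get_req(struct submit_state *s)
{
	/* Always a valid pointer: no "if (!s)" fallback path needed. */
	printf("io_get_req(free_reqs=%u)\n", s->free_reqs);
}

static void submit_sqes(unsigned int nr)
{
	struct submit_state state;
	bool plugged = nr > PLUG_THRESHOLD;

	state_start(&state);			/* unconditional */
	if (plugged)
		printf("blk_start_plug()\n");	/* plugging gated separately */

	for (unsigned int i = 0; i < nr; i++)
		get_req(&state);

	state_end(&state);			/* unconditional */
	if (plugged)
		printf("blk_finish_plug()\n");
}

int main(void)
{
	submit_sqes(1);		/* small batch: state still used, no plug */
	submit_sqes(4);		/* large batch: plug brackets the loop */
	return 0;
}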

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
fs/io_uring.c | 33 ++++++++++++---------------------
1 file changed, 12 insertions(+), 21 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index c7b38e5f72a1..63a14002e395 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -577,8 +577,6 @@ struct io_kiocb {
 #define IO_IOPOLL_BATCH			8
 
 struct io_submit_state {
-	struct blk_plug		plug;
-
 	/*
 	 * io_kiocb alloc cache
 	 */
@@ -1126,11 +1124,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	struct io_kiocb *req;
 
-	if (!state) {
-		req = kmem_cache_alloc(req_cachep, gfp);
-		if (unlikely(!req))
-			goto fallback;
-	} else if (!state->free_reqs) {
+	if (!state->free_reqs) {
 		size_t sz;
 		int ret;
 
@@ -1771,9 +1765,6 @@ static void io_file_put(struct io_submit_state *state)
  */
 static struct file *io_file_get(struct io_submit_state *state, int fd)
 {
-	if (!state)
-		return fget(fd);
-
 	if (state->file) {
 		if (state->fd == fd) {
 			state->used_refs++;
@@ -4757,7 +4748,6 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
  */
 static void io_submit_state_end(struct io_submit_state *state)
 {
-	blk_finish_plug(&state->plug);
 	io_file_put(state);
 	if (state->free_reqs)
 		kmem_cache_free_bulk(req_cachep, state->free_reqs,
@@ -4770,7 +4760,6 @@ static void io_submit_state_end(struct io_submit_state *state)
 static void io_submit_state_start(struct io_submit_state *state,
 				  unsigned int max_ios)
 {
-	blk_start_plug(&state->plug);
 	state->free_reqs = 0;
 	state->file = NULL;
 	state->ios_left = max_ios;
@@ -4836,7 +4825,8 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			  struct file *ring_file, int ring_fd,
 			  struct mm_struct **mm, bool async)
 {
-	struct io_submit_state state, *statep = NULL;
+	struct blk_plug plug;
+	struct io_submit_state state;
 	struct io_kiocb *link = NULL;
 	int i, submitted = 0;
 	bool mm_fault = false;
@@ -4854,10 +4844,9 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 	if (!percpu_ref_tryget_many(&ctx->refs, nr))
 		return -EAGAIN;
 
-	if (nr > IO_PLUG_THRESHOLD) {
-		io_submit_state_start(&state, nr);
-		statep = &state;
-	}
+	io_submit_state_start(&state, nr);
+	if (nr > IO_PLUG_THRESHOLD)
+		blk_start_plug(&plug);
 
 	ctx->ring_fd = ring_fd;
 	ctx->ring_file = ring_file;
@@ -4866,7 +4855,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		const struct io_uring_sqe *sqe;
 		struct io_kiocb *req;
 
-		req = io_get_req(ctx, statep);
+		req = io_get_req(ctx, &state);
 		if (unlikely(!req)) {
 			if (!submitted)
 				submitted = -EAGAIN;
@@ -4899,7 +4888,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		req->needs_fixed_file = async;
 		trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
						true, async);
-		if (!io_submit_sqe(req, sqe, statep, &link))
+		if (!io_submit_sqe(req, sqe, &state, &link))
 			break;
 	}
 
@@ -4907,8 +4896,10 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		percpu_ref_put_many(&ctx->refs, nr - submitted);
 	if (link)
 		io_queue_link_head(link);
-	if (statep)
-		io_submit_state_end(&state);
+
+	io_submit_state_end(&state);
+	if (nr > IO_PLUG_THRESHOLD)
+		blk_finish_plug(&plug);
 
 	/* Commit SQ ring head once we've consumed and submitted all SQEs */
 	io_commit_sqring(ctx);
--
2.24.0