Subject: [PATCH v5 4/4] io_uring: remove ring quiesce for io_uring_register
Ring quiesce is currently only used for 2 opcodes:
IORING_REGISTER_ENABLE_RINGS and IORING_REGISTER_RESTRICTIONS.
IORING_SETUP_R_DISABLED prevents submitting requests, so no requests
can be in flight until IORING_REGISTER_ENABLE_RINGS is called, and
IORING_REGISTER_RESTRICTIONS only works before
IORING_REGISTER_ENABLE_RINGS is called. Hence ring quiesce is not
needed for these opcodes, and therefore not needed anywhere in
io_uring_register.
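
For reference, a minimal userspace sketch (not part of the patch) of the
ordering relied on above: restrictions are registered while the ring is
still disabled, and requests only become possible once
IORING_REGISTER_ENABLE_RINGS runs. It assumes liburing's
io_uring_queue_init_params(), io_uring_register_restrictions() and
io_uring_enable_rings() helpers; the NOP-only restriction is purely
illustrative and error handling is trimmed.

/*
 * Sketch only: set up a disabled ring, register a restriction set,
 * then enable the ring so requests can be submitted.
 */
#include <liburing.h>
#include <string.h>

static int setup_restricted_ring(struct io_uring *ring)
{
	struct io_uring_params p;
	struct io_uring_restriction res;
	int ret;

	memset(&p, 0, sizeof(p));
	/* Ring starts disabled: no requests can be submitted yet. */
	p.flags = IORING_SETUP_R_DISABLED;
	ret = io_uring_queue_init_params(8, ring, &p);
	if (ret)
		return ret;

	/*
	 * IORING_REGISTER_RESTRICTIONS is only accepted while the ring is
	 * still disabled, i.e. before IORING_REGISTER_ENABLE_RINGS.
	 */
	memset(&res, 0, sizeof(res));
	res.opcode = IORING_RESTRICTION_SQE_OP;
	res.sqe_op = IORING_OP_NOP;	/* example: only allow NOP requests */
	ret = io_uring_register_restrictions(ring, &res, 1);
	if (ret)
		return ret;

	/* Only after this can requests be submitted at all. */
	return io_uring_enable_rings(ring);
}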

Signed-off-by: Usama Arif <usama.arif@bytedance.com>
---
 fs/io_uring.c | 69 ---------------------------------------------------
 1 file changed, 69 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5ae51ea12f0f..89e4dd7e8995 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -11022,64 +11022,6 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
 	return ret;
 }
 
-static bool io_register_op_must_quiesce(int op)
-{
-	switch (op) {
-	case IORING_REGISTER_BUFFERS:
-	case IORING_UNREGISTER_BUFFERS:
-	case IORING_REGISTER_FILES:
-	case IORING_UNREGISTER_FILES:
-	case IORING_REGISTER_FILES_UPDATE:
-	case IORING_REGISTER_EVENTFD:
-	case IORING_REGISTER_EVENTFD_ASYNC:
-	case IORING_UNREGISTER_EVENTFD:
-	case IORING_REGISTER_PROBE:
-	case IORING_REGISTER_PERSONALITY:
-	case IORING_UNREGISTER_PERSONALITY:
-	case IORING_REGISTER_FILES2:
-	case IORING_REGISTER_FILES_UPDATE2:
-	case IORING_REGISTER_BUFFERS2:
-	case IORING_REGISTER_BUFFERS_UPDATE:
-	case IORING_REGISTER_IOWQ_AFF:
-	case IORING_UNREGISTER_IOWQ_AFF:
-	case IORING_REGISTER_IOWQ_MAX_WORKERS:
-		return false;
-	default:
-		return true;
-	}
-}
-
-static __cold int io_ctx_quiesce(struct io_ring_ctx *ctx)
-{
-	long ret;
-
-	percpu_ref_kill(&ctx->refs);
-
-	/*
-	 * Drop uring mutex before waiting for references to exit. If another
-	 * thread is currently inside io_uring_enter() it might need to grab the
-	 * uring_lock to make progress. If we hold it here across the drain
-	 * wait, then we can deadlock. It's safe to drop the mutex here, since
-	 * no new references will come in after we've killed the percpu ref.
-	 */
-	mutex_unlock(&ctx->uring_lock);
-	do {
-		ret = wait_for_completion_interruptible_timeout(&ctx->ref_comp, HZ);
-		if (ret) {
-			ret = min(0L, ret);
-			break;
-		}
-
-		ret = io_run_task_work_sig();
-		io_req_caches_free(ctx);
-	} while (ret >= 0);
-	mutex_lock(&ctx->uring_lock);
-
-	if (ret)
-		io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
-	return ret;
-}
-
 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 			       void __user *arg, unsigned nr_args)
 	__releases(ctx->uring_lock)
@@ -11103,12 +11045,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 			return -EACCES;
 	}
 
-	if (io_register_op_must_quiesce(opcode)) {
-		ret = io_ctx_quiesce(ctx);
-		if (ret)
-			return ret;
-	}
-
 	switch (opcode) {
 	case IORING_REGISTER_BUFFERS:
 		ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
@@ -11213,11 +11149,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 		break;
 	}
 
-	if (io_register_op_must_quiesce(opcode)) {
-		/* bring the ctx back to life */
-		percpu_ref_reinit(&ctx->refs);
-		reinit_completion(&ctx->ref_comp);
-	}
 	return ret;
 }

    --
    2.25.1