Subject: [PATCH 5.10 506/717] io_uring: fix racy IOPOLL flush overflow
Date: 28 Dec 2020
From: Pavel Begunkov <asml.silence@gmail.com>

[ Upstream commit 634578f800652035debba3098d8ab0d21af7c7a5 ]

It's not safe to call io_cqring_overflow_flush() for IOPOLL mode without
holding uring_lock, because IOPOLL does its completion synchronisation
differently. Make sure we hold it.
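
For context, the patch takes the lock through a pair of conditional
helpers; they look roughly like this (paraphrased from 5.10-era
fs/io_uring.c, shown here only for reference, not part of this diff):

	static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
	{
		/* take uring_lock only when the call site actually needs it */
		if (needs_lock)
			mutex_lock(&ctx->uring_lock);
	}

	static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
	{
		if (needs_lock)
			mutex_unlock(&ctx->uring_lock);
	}

Passing (ctx->flags & IORING_SETUP_IOPOLL) as needs_lock, as the hunks
below do, takes uring_lock exactly when the ring is in IOPOLL mode.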

As for io_ring_exit_work(), we don't even need the flush there, because
io_ring_ctx_wait_and_kill() already flushes with the force flag set,
dropping all overflowed requests.
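
For reference, the force semantics this relies on sit at the top of
io_cqring_overflow_flush(); a simplified sketch of the 5.10-era check
(fragment, field names abridged from fs/io_uring.c):

	struct io_rings *rings = ctx->rings;

	if (!force) {
		/* nothing queued up in the overflow list, all done */
		if (list_empty_careful(&ctx->cq_overflow_list))
			return true;
		/* CQ ring still full and no force: leave overflow queued */
		if (ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
		    rings->cq_ring_entries)
			return false;
	}
	/* force == true falls through and always drains cq_overflow_list,
	 * dropping any entries the CQ ring has no room for */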

Cc: <stable@vger.kernel.org> # 5.5+
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 fs/io_uring.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 0621f581943cd..b9d3209a5f9de 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8369,8 +8369,6 @@ static void io_ring_exit_work(struct work_struct *work)
 	 * as nobody else will be looking for them.
 	 */
 	do {
-		if (ctx->rings)
-			io_cqring_overflow_flush(ctx, true, NULL, NULL);
 		io_iopoll_try_reap_events(ctx);
 	} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
 	io_ring_ctx_free(ctx);
@@ -8380,6 +8378,8 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 {
 	mutex_lock(&ctx->uring_lock);
 	percpu_ref_kill(&ctx->refs);
+	if (ctx->rings)
+		io_cqring_overflow_flush(ctx, true, NULL, NULL);
 	mutex_unlock(&ctx->uring_lock);

 	io_kill_timeouts(ctx, NULL);
@@ -8389,8 +8389,6 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 		io_wq_cancel_all(ctx->io_wq);

 	/* if we failed setting up the ctx, we might not have any rings */
-	if (ctx->rings)
-		io_cqring_overflow_flush(ctx, true, NULL, NULL);
 	io_iopoll_try_reap_events(ctx);
 	idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);

@@ -8654,7 +8652,9 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 	}

 	io_cancel_defer_files(ctx, task, files);
+	io_ring_submit_lock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
 	io_cqring_overflow_flush(ctx, true, task, files);
+	io_ring_submit_unlock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));

 	while (__io_uring_cancel_task_requests(ctx, task, files)) {
 		io_run_task_work();
@@ -8956,8 +8956,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 	 */
 	ret = 0;
 	if (ctx->flags & IORING_SETUP_SQPOLL) {
+		io_ring_submit_lock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
 		if (!list_empty_careful(&ctx->cq_overflow_list))
 			io_cqring_overflow_flush(ctx, false, NULL, NULL);
+		io_ring_submit_unlock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
 		if (flags & IORING_ENTER_SQ_WAKEUP)
 			wake_up(&ctx->sq_data->wait);
 		if (flags & IORING_ENTER_SQ_WAIT)
--
2.27.0

