Subject: [PATCH 5.14 811/849] io-wq: serialize hash clear with wakeup

From: Jens Axboe <axboe@kernel.dk>

commit d3e3c102d107bb84251455a298cf475f24bab995 upstream.

We need to ensure that we serialize the stalled and hash bits with the
wait_queue wait handler, or we could be racing with someone modifying
the hashed state after we find it busy, but before we then give up and
wait for it to be cleared. This can cause random delays or stalls when
handling buffered writes for many files, where some of these files cause
hash collisions between the worker threads.

Cc: stable@vger.kernel.org
Reported-by: Daniel Black <daniel@mariadb.org>
Fixes: e941894eae31 ("io-wq: make buffered file write hashed work map per-ctx")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 fs/io-wq.c | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)
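
[Note, not part of the upstream commit message: the rule this patch enforces
is the classic condition-variable discipline, i.e. the waiter must test the
predicate under the same lock the waker holds while changing it. Below is a
minimal user-space sketch of that rule using POSIX threads; it is an analogy
with hypothetical names, not the kernel code.]

/*
 * Sketch only: a pthread analogue of the serialization the patch enforces.
 * hash_busy stands in for the bit in wq->hash->map; wait_lock stands in
 * for wq->hash->wait.lock. All names are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_cond = PTHREAD_COND_INITIALIZER;
static bool hash_busy = true;

/* Waker side: as in the patched io_worker_handle_work(), the flag is
 * cleared under the wait lock before the wakeup is issued. */
static void *hash_clearer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&wait_lock);
	hash_busy = false;			/* like clear_bit() under wait.lock */
	pthread_cond_broadcast(&wait_cond);	/* like wake_up() */
	pthread_mutex_unlock(&wait_lock);
	return NULL;
}

/* Waiter side: as in the patched io_wait_on_hash(), the predicate is
 * (re)checked under the wait lock, so the clear cannot slip in between
 * "found it busy" and "went to sleep". */
static void wait_on_hash(void)
{
	pthread_mutex_lock(&wait_lock);
	while (hash_busy)
		pthread_cond_wait(&wait_cond, &wait_lock);
	pthread_mutex_unlock(&wait_lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, hash_clearer, NULL);
	wait_on_hash();
	pthread_join(t, NULL);
	printf("hash cleared, no lost wakeup\n");
	return 0;
}

Build with "cc -pthread" to try it: the waiter either sees hash_busy
already false, or reliably receives the broadcast; never neither.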

--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -401,9 +401,10 @@ static inline unsigned int io_get_work_hash(struct io_wq_work *work)
 	return work->flags >> IO_WQ_HASH_SHIFT;
 }
 
-static void io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
+static bool io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
 {
 	struct io_wq *wq = wqe->wq;
+	bool ret = false;
 
 	spin_lock_irq(&wq->hash->wait.lock);
 	if (list_empty(&wqe->wait.entry)) {
@@ -411,9 +412,11 @@ static void io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
 		if (!test_bit(hash, &wq->hash->map)) {
 			__set_current_state(TASK_RUNNING);
 			list_del_init(&wqe->wait.entry);
+			ret = true;
 		}
 	}
 	spin_unlock_irq(&wq->hash->wait.lock);
+	return ret;
 }
 
 /*
@@ -474,14 +477,21 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
 	}
 
 	if (stall_hash != -1U) {
+		bool unstalled;
+
 		/*
 		 * Set this before dropping the lock to avoid racing with new
 		 * work being added and clearing the stalled bit.
 		 */
 		wqe->flags |= IO_WQE_FLAG_STALLED;
 		raw_spin_unlock(&wqe->lock);
-		io_wait_on_hash(wqe, stall_hash);
+		unstalled = io_wait_on_hash(wqe, stall_hash);
 		raw_spin_lock(&wqe->lock);
+		if (unstalled) {
+			wqe->flags &= ~IO_WQE_FLAG_STALLED;
+			if (wq_has_sleeper(&wqe->wq->hash->wait))
+				wake_up(&wqe->wq->hash->wait);
+		}
 	}
 
 	return NULL;
@@ -562,11 +572,14 @@ get_next:
 				io_wqe_enqueue(wqe, linked);
 
 			if (hash != -1U && !next_hashed) {
+				/* serialize hash clear with wake_up() */
+				spin_lock_irq(&wq->hash->wait.lock);
 				clear_bit(hash, &wq->hash->map);
+				wqe->flags &= ~IO_WQE_FLAG_STALLED;
+				spin_unlock_irq(&wq->hash->wait.lock);
 				if (wq_has_sleeper(&wq->hash->wait))
 					wake_up(&wq->hash->wait);
 				raw_spin_lock_irq(&wqe->lock);
-				wqe->flags &= ~IO_WQE_FLAG_STALLED;
 				/* skip unnecessary unlock-lock wqe->lock */
 				if (!work)
 					goto get_next;

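[Continuing the same hypothetical analogy, the io_get_next_work() hunk adds
the caller-side half: a worker that finds the hash already clear un-stalls
itself and re-issues the wakeup, presumably so other workers that stalled on
the same hash also get a chance to re-check. A sketch of that pattern, again
POSIX threads and hypothetical names, not the kernel code:]

/* Sketch only: the caller-side pattern from the io_get_next_work() hunk. */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t wqe_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_cond = PTHREAD_COND_INITIALIZER;
static bool hash_busy;	/* false: the hash this worker stalled on is free */
static bool stalled;

/* Like the patched io_wait_on_hash(): report whether the hash was already
 * clear when checked under the wait lock. */
static bool hash_already_clear(void)
{
	bool clear;

	pthread_mutex_lock(&wait_lock);
	clear = !hash_busy;		/* like !test_bit() under wait.lock */
	pthread_mutex_unlock(&wait_lock);
	return clear;
}

/* Like the patched io_get_next_work(): mark ourselves stalled before
 * dropping the local lock, then un-stall and propagate the wakeup if the
 * hash turned out to be clear. */
static void handle_stall(void)
{
	bool unstalled;

	pthread_mutex_lock(&wqe_lock);
	stalled = true;			/* IO_WQE_FLAG_STALLED */
	pthread_mutex_unlock(&wqe_lock);

	unstalled = hash_already_clear();

	pthread_mutex_lock(&wqe_lock);
	if (unstalled) {
		stalled = false;
		pthread_cond_broadcast(&wait_cond);	/* like wake_up() */
	}
	pthread_mutex_unlock(&wqe_lock);
}

int main(void)
{
	handle_stall();		/* hash_busy is false, so we un-stall */
	return stalled;
}

The re-broadcast is the subtle part: only one waiter may observe the clear
bit and remove itself from the queue, so without propagating the wakeup the
remaining stalled workers could keep sleeping until the next enqueue.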