Subject: [PATCH 5.4 070/151] nvme-rdma: avoid race between time out and tear down
    From: Chao Leng <lengchao@huawei.com>

    [ Upstream commit 3017013dcc82a4862bd1e140f8b762cfc594008d ]

Currently teardown_lock is used to serialize timeout handling and tear
down. This can go wrong: tear down first cancels all requests, then a
timeout may complete a request a second time, but by that point the
request may already have been freed or restarted.

To avoid this race between timeout and tear down, the tear down path
now first quiesces the queue and then deletes the timer and cancels
the timeout work for the queue. With that ordering in place,
teardown_lock is no longer needed and is removed.

    Signed-off-by: Chao Leng <lengchao@huawei.com>
    Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
    Signed-off-by: Christoph Hellwig <hch@lst.de>
    Signed-off-by: Sasha Levin <sashal@kernel.org>
    ---
    drivers/nvme/host/rdma.c | 12 ++----------
    1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index e957ad0a07f58..cfd437f7750e1 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -110,7 +110,6 @@ struct nvme_rdma_ctrl {
 	struct sockaddr_storage src_addr;
 
 	struct nvme_ctrl	ctrl;
-	struct mutex		teardown_lock;
 	bool			use_inline_data;
 	u32			io_queues[HCTX_MAX_TYPES];
 };
@@ -933,8 +932,8 @@ out_free_io_queues:
 static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
 		bool remove)
 {
-	mutex_lock(&ctrl->teardown_lock);
 	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+	blk_sync_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
 	if (ctrl->ctrl.admin_tagset) {
 		blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
@@ -944,16 +943,15 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
 	if (remove)
 		blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_destroy_admin_queue(ctrl, remove);
-	mutex_unlock(&ctrl->teardown_lock);
 }
 
 static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
 		bool remove)
 {
-	mutex_lock(&ctrl->teardown_lock);
 	if (ctrl->ctrl.queue_count > 1) {
 		nvme_start_freeze(&ctrl->ctrl);
 		nvme_stop_queues(&ctrl->ctrl);
+		nvme_sync_io_queues(&ctrl->ctrl);
 		nvme_rdma_stop_io_queues(ctrl);
 		if (ctrl->ctrl.tagset) {
 			blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
@@ -964,7 +962,6 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
 		nvme_start_queues(&ctrl->ctrl);
 		nvme_rdma_destroy_io_queues(ctrl, remove);
 	}
-	mutex_unlock(&ctrl->teardown_lock);
 }
 
 static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
@@ -1728,16 +1725,12 @@ static void nvme_rdma_complete_timed_out(struct request *rq)
 {
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
 	struct nvme_rdma_queue *queue = req->queue;
-	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
 
-	/* fence other contexts that may complete the command */
-	mutex_lock(&ctrl->teardown_lock);
 	nvme_rdma_stop_queue(queue);
 	if (!blk_mq_request_completed(rq)) {
 		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
 		blk_mq_complete_request(rq);
 	}
-	mutex_unlock(&ctrl->teardown_lock);
 }
 
 static enum blk_eh_timer_return
@@ -2029,7 +2022,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 		return ERR_PTR(-ENOMEM);
 	ctrl->ctrl.opts = opts;
 	INIT_LIST_HEAD(&ctrl->list);
-	mutex_init(&ctrl->teardown_lock);
 
 	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
 		opts->trsvcid =
    --
    2.27.0
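
To make the new ordering concrete, below is a condensed sketch of the
admin-queue tear down path after this patch; the I/O-queue path is
analogous, using nvme_stop_queues()/nvme_sync_io_queues(). The function
name is a placeholder, the remove/unquiesce/destroy tail is trimmed, and
nvme_cancel_request is the busy-iter callback taken from the surrounding
kernel code rather than from the hunks above, so treat this as an
illustration of the lock-free ordering, not a drop-in replacement for
the real function.

/*
 * Condensed sketch (placeholder name): the tear down ordering that the
 * patch establishes for the admin queue.
 */
static void nvme_rdma_teardown_admin_queue_sketch(struct nvme_rdma_ctrl *ctrl)
{
	/* 1. Stop the block layer from dispatching new admin requests. */
	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);

	/*
	 * 2. Delete the request timer and flush any timeout work that is
	 *    already running, so nvme_rdma_complete_timed_out() cannot
	 *    race with the cancellation below.
	 */
	blk_sync_queue(ctrl->ctrl.admin_q);

	/*
	 * 3. Only now stop the RDMA queue and cancel the remaining
	 *    in-flight requests; no timeout handler can complete (and
	 *    thereby free or restart) them behind our back.
	 */
	nvme_rdma_stop_queue(&ctrl->queues[0]);
	if (ctrl->ctrl.admin_tagset)
		blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
					nvme_cancel_request, &ctrl->ctrl);
}

The key design point is that blk_sync_queue()/nvme_sync_io_queues()
take over the role of teardown_lock: instead of serializing the two
paths with a mutex, tear down simply guarantees that the timeout path
has finished before it starts cancelling requests.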

