From: Jason Gunthorpe <jgg@nvidia.com>
Subject: [PATCH 5.9 452/757] RDMA/cma: Combine cma_ndev_work with cma_work

[ Upstream commit 7e85bcda8bfe883f4244672ed79f81b7762a1a7e ]

These are the same thing, except that cma_ndev_work doesn't have a state
transition. Signal no state transition by setting both old_state and
new_state to 0.

In all cases the handler function should not be called once
rdma_destroy_id() has progressed past setting the state.
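
(Editor's sketch, not part of the patch: a minimal, self-contained userspace C
model of the convention described above. The control flow of work_should_run()
follows the checks the combined cma_work_handler performs after this patch;
everything else, including the struct layout, comp_exch(), work_should_run(),
and the trimmed RDMA_CM_* enum, is an illustrative stand-in rather than the
driver's code, and the locking and refcounting are omitted.)

#include <stdbool.h>
#include <stdio.h>

enum rdma_cm_state {		/* 0 is deliberately not a state: it means "none" */
	RDMA_CM_IDLE = 1,
	RDMA_CM_ADDR_QUERY,
	RDMA_CM_ADDR_RESOLVED,
	RDMA_CM_DESTROYING,
	RDMA_CM_DEVICE_REMOVAL,
};

struct id_priv {
	enum rdma_cm_state state;
};

struct cma_work {
	struct id_priv *id;
	enum rdma_cm_state old_state;	/* old_state == new_state == 0: no transition */
	enum rdma_cm_state new_state;
};

/* Simplified stand-in for cma_comp_exch(): swap the state only if it matches. */
static bool comp_exch(struct id_priv *id, enum rdma_cm_state old_state,
		      enum rdma_cm_state new_state)
{
	if (id->state != old_state)
		return false;
	id->state = new_state;
	return true;
}

/* Decide whether the event handler should run for this work item. */
static bool work_should_run(struct cma_work *work)
{
	struct id_priv *id = work->id;

	/* Never deliver events once teardown has started. */
	if (id->state == RDMA_CM_DESTROYING ||
	    id->state == RDMA_CM_DEVICE_REMOVAL)
		return false;

	/* Only attempt a transition when the work item actually carries one. */
	if (work->old_state != 0 || work->new_state != 0)
		return comp_exch(id, work->old_state, work->new_state);

	return true;
}

int main(void)
{
	struct id_priv id = { .state = RDMA_CM_ADDR_QUERY };

	/* Transition-carrying work, e.g. queued by address resolution. */
	struct cma_work resolve = { &id, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_RESOLVED };
	/* Netdev-change style work: no transition, old_state == new_state == 0. */
	struct cma_work ndev = { &id, 0, 0 };

	printf("resolve runs: %d (state now %d)\n", work_should_run(&resolve), id.state);
	printf("ndev runs:    %d (state still %d)\n", work_should_run(&ndev), id.state);
	return 0;
}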

Link: https://lore.kernel.org/r/20200902081122.745412-6-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/infiniband/core/cma.c | 38 +++++++-------------------------------
 1 file changed, 7 insertions(+), 31 deletions(-)

diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 5888311b21198..9c3cb8549ac72 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -363,12 +363,6 @@ struct cma_work {
 	struct rdma_cm_event	event;
 };
 
-struct cma_ndev_work {
-	struct work_struct	work;
-	struct rdma_id_private	*id;
-	struct rdma_cm_event	event;
-};
-
 struct iboe_mcast_work {
 	struct work_struct	work;
 	struct rdma_id_private	*id;
@@ -2647,32 +2641,14 @@ static void cma_work_handler(struct work_struct *_work)
 	struct rdma_id_private *id_priv = work->id;
 
 	mutex_lock(&id_priv->handler_mutex);
-	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
+	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
+	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
 		goto out_unlock;
-
-	if (cma_cm_event_handler(id_priv, &work->event)) {
-		cma_id_put(id_priv);
-		destroy_id_handler_unlock(id_priv);
-		goto out_free;
+	if (work->old_state != 0 || work->new_state != 0) {
+		if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
+			goto out_unlock;
 	}
 
-out_unlock:
-	mutex_unlock(&id_priv->handler_mutex);
-	cma_id_put(id_priv);
-out_free:
-	kfree(work);
-}
-
-static void cma_ndev_work_handler(struct work_struct *_work)
-{
-	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
-	struct rdma_id_private *id_priv = work->id;
-
-	mutex_lock(&id_priv->handler_mutex);
-	if (id_priv->state == RDMA_CM_DESTROYING ||
-	    id_priv->state == RDMA_CM_DEVICE_REMOVAL)
-		goto out_unlock;
-
 	if (cma_cm_event_handler(id_priv, &work->event)) {
 		cma_id_put(id_priv);
 		destroy_id_handler_unlock(id_priv);
@@ -4656,7 +4632,7 @@ EXPORT_SYMBOL(rdma_leave_multicast);
 static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
 {
 	struct rdma_dev_addr *dev_addr;
-	struct cma_ndev_work *work;
+	struct cma_work *work;
 
 	dev_addr = &id_priv->id.route.addr.dev_addr;
 
@@ -4669,7 +4645,7 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
 		if (!work)
 			return -ENOMEM;
 
-		INIT_WORK(&work->work, cma_ndev_work_handler);
+		INIT_WORK(&work->work, cma_work_handler);
 		work->id = id_priv;
 		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
 		cma_id_get(id_priv);
-- 
2.25.1

