Subject: [PATCH 5.13 568/800] RDMA/cma: Protect RMW with qp_mutex
    From: Håkon Bugge <haakon.bugge@oracle.com>

    [ Upstream commit ca0c448d2b9f43e3175835d536853854ef544e22 ]

    The struct rdma_id_private contains three bit-fields: tos_set,
    timeout_set, and min_rnr_timer_set. They are set by accessor functions
    without any synchronization. Since the bit-fields share one storage
    unit, every store to one of them is a read-modify-write of that unit;
    if two or more of the accessors are invoked concurrently from different
    contexts, one update can overwrite another and the result is
    intermittent.
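
    As an illustration only (a minimal userspace sketch, not taken from the
    kernel; the struct and function names are invented for the example),
    the program below reproduces the lost-update pattern described above
    with two threads each setting a different flag:

/* Userspace analogue of the hazard: three flags share one storage unit. */
#include <pthread.h>
#include <stdio.h>

struct id_flags {
	unsigned int tos_set:1;
	unsigned int timeout_set:1;
	unsigned int min_rnr_timer_set:1;
};

static struct id_flags flags;

static void *set_tos(void *arg)
{
	(void)arg;
	flags.tos_set = 1;	/* read unit, OR in bit 0, write unit back */
	return NULL;
}

static void *set_timeout(void *arg)
{
	(void)arg;
	flags.timeout_set = 1;	/* read unit, OR in bit 1, write unit back */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, set_tos, NULL);
	pthread_create(&b, NULL, set_timeout, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* Without a common lock, one of the two stores can be lost. */
	printf("tos_set=%u timeout_set=%u\n", flags.tos_set, flags.timeout_set);
	return 0;
}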

    Fix this by protecting the bit-fields with the qp_mutex in the accessor
    functions.

    The consumer of timeout_set and min_rnr_timer_set is in
    rdma_init_qp_attr(), which is called with qp_mutex held for connected
    QPs. Explicit locking is added for the consumers of tos and tos_set.
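
    Continuing the userspace sketch above (again with invented names, and a
    pthread mutex standing in for qp_mutex), serializing every writer and
    reader of the shared bit-field unit behind one lock makes each update a
    serialized read-modify-write, so no flag can be lost:

/* Same illustration with the fix applied: one lock guards the whole unit. */
#include <pthread.h>

struct id_flags_locked {
	pthread_mutex_t lock;	/* plays the role of qp_mutex here */
	unsigned int tos_set:1;
	unsigned int timeout_set:1;
	unsigned int min_rnr_timer_set:1;
};

static struct id_flags_locked lflags = { .lock = PTHREAD_MUTEX_INITIALIZER };

static void mark_tos_set(void)
{
	pthread_mutex_lock(&lflags.lock);
	lflags.tos_set = 1;
	pthread_mutex_unlock(&lflags.lock);
}

static int tos_is_set(void)
{
	int set;

	pthread_mutex_lock(&lflags.lock);
	set = lflags.tos_set;
	pthread_mutex_unlock(&lflags.lock);
	return set;
}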

    This commit depends on ("RDMA/cma: Remove unnecessary INIT->INIT
    transition"), since the call to rdma_init_qp_attr() from
    cma_init_conn_qp() does not hold the qp_mutex.

    Fixes: 2c1619edef61 ("IB/cma: Define option to set ack timeout and pack tos_set")
    Fixes: 3aeffc46afde ("IB/cma: Introduce rdma_set_min_rnr_timer()")
    Link: https://lore.kernel.org/r/1624369197-24578-3-git-send-email-haakon.bugge@oracle.com
    Signed-off-by: Håkon Bugge <haakon.bugge@oracle.com>
    Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
    Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/infiniband/core/cma.c | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index ab148a696c0c..a5ec61ac11cc 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2472,8 +2472,10 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
 	if (IS_ERR(id))
 		return PTR_ERR(id);
 
+	mutex_lock(&id_priv->qp_mutex);
 	id->tos = id_priv->tos;
 	id->tos_set = id_priv->tos_set;
+	mutex_unlock(&id_priv->qp_mutex);
 	id->afonly = id_priv->afonly;
 	id_priv->cm_id.iw = id;
 
@@ -2534,8 +2536,10 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv,
 	cma_id_get(id_priv);
 	dev_id_priv->internal_id = 1;
 	dev_id_priv->afonly = id_priv->afonly;
+	mutex_lock(&id_priv->qp_mutex);
 	dev_id_priv->tos_set = id_priv->tos_set;
 	dev_id_priv->tos = id_priv->tos;
+	mutex_unlock(&id_priv->qp_mutex);
 
 	ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
 	if (ret)
@@ -2582,8 +2586,10 @@ void rdma_set_service_type(struct rdma_cm_id *id, int tos)
 	struct rdma_id_private *id_priv;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
+	mutex_lock(&id_priv->qp_mutex);
 	id_priv->tos = (u8) tos;
 	id_priv->tos_set = true;
+	mutex_unlock(&id_priv->qp_mutex);
 }
 EXPORT_SYMBOL(rdma_set_service_type);
 
@@ -2610,8 +2616,10 @@ int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
 		return -EINVAL;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
+	mutex_lock(&id_priv->qp_mutex);
 	id_priv->timeout = timeout;
 	id_priv->timeout_set = true;
+	mutex_unlock(&id_priv->qp_mutex);
 
 	return 0;
 }
@@ -2647,8 +2655,10 @@ int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
 		return -EINVAL;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
+	mutex_lock(&id_priv->qp_mutex);
 	id_priv->min_rnr_timer = min_rnr_timer;
 	id_priv->min_rnr_timer_set = true;
+	mutex_unlock(&id_priv->qp_mutex);
 
 	return 0;
 }
@@ -3034,8 +3044,11 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 
 	u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
					rdma_start_port(id_priv->cma_dev->device)];
-	u8 tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
+	u8 tos;
 
+	mutex_lock(&id_priv->qp_mutex);
+	tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
+	mutex_unlock(&id_priv->qp_mutex);
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
 	if (!work)
@@ -3082,8 +3095,10 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 	 * PacketLifeTime = local ACK timeout/2
 	 * as a reasonable approximation for RoCE networks.
 	 */
+	mutex_lock(&id_priv->qp_mutex);
 	route->path_rec->packet_life_time = id_priv->timeout_set ?
 		id_priv->timeout - 1 : CMA_IBOE_PACKET_LIFETIME;
+	mutex_unlock(&id_priv->qp_mutex);
 
 	if (!route->path_rec->mtu) {
 		ret = -EINVAL;
@@ -4107,8 +4122,11 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
 	if (IS_ERR(cm_id))
 		return PTR_ERR(cm_id);
 
+	mutex_lock(&id_priv->qp_mutex);
 	cm_id->tos = id_priv->tos;
 	cm_id->tos_set = id_priv->tos_set;
+	mutex_unlock(&id_priv->qp_mutex);
+
 	id_priv->cm_id.iw = cm_id;
 
 	memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
-- 
2.30.2

