 
From: Jason Gunthorpe <jgg@nvidia.com>
Subject: [PATCH 5.8 379/633] RDMA/mlx5: Make mkeys always owned by the kernel's PD when not enabled

[ Upstream commit 5eb29f0d13a66502b91954597270003c90fb66c5 ]

Any mkey that is not enabled and assigned to userspace should have its PD
set to a kernel-owned PD.

When cache entries are created for the first time, the PDN is set to 0,
which is probably a kernel PD, but be explicit.
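
Concretely (a minimal sketch using the names from the second hunk of the
diff below, not complete code), the cache path now binds every freshly
created cache mkey to the UMR kernel PD instead of relying on the zeroed
default:

	/* In alloc_cache_mr(): a cache mkey starts life free, with no
	 * user access rights (acc = 0), no address (start_addr = 0),
	 * and the kernel's UMR PD.
	 */
	set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
	MLX5_SET(mkc, mkc, free, 1);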

When an MR is registered using the hybrid reg_create flow (UMR xlt &
enable), the disabled mkey would point at the user PD; keep it pointing
at the kernel PD until a UMR enables it and sets the user PD.
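
The reg_create side then reduces to a single PD choice (again a sketch of
the final hunk below): only an mkey that is created already enabled may
point at the user PD.

	/* In reg_create(): a not-yet-enabled mkey (free, waiting for a
	 * UMR) must not reference the user PD.
	 */
	set_mkc_access_pd_addr_fields(mkc, access_flags, virt_addr,
				      populate ? pd : dev->umrc.pd);
	MLX5_SET(mkc, mkc, free, !populate);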

Fixes: 9ec4483a3f0f ("IB/mlx5: Move MRs to a kernel PD when freeing them to the MR cache")
Link: https://lore.kernel.org/r/20200914112653.345244-4-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/infiniband/hw/mlx5/mr.c | 51 +++++++++++++++++----------------
 1 file changed, 26 insertions(+), 25 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 8d975c4d93060..85c9a1ffdbb64 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -50,6 +50,29 @@ enum {
 static void
 create_mkey_callback(int status, struct mlx5_async_work *context);
 
+static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
+					  struct ib_pd *pd)
+{
+	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+
+	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
+	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
+	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
+	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
+	MLX5_SET(mkc, mkc, lr, 1);
+
+	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+		MLX5_SET(mkc, mkc, relaxed_ordering_write,
+			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
+	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+		MLX5_SET(mkc, mkc, relaxed_ordering_read,
+			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
+
+	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+	MLX5_SET64(mkc, mkc, start_addr, start_addr);
+}
+
 static void
 assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
 		    u32 *in)
@@ -152,12 +175,12 @@ static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
 	mr->cache_ent = ent;
 	mr->dev = ent->dev;
 
+	set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
 	MLX5_SET(mkc, mkc, free, 1);
 	MLX5_SET(mkc, mkc, umr_en, 1);
 	MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
 	MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);
 
-	MLX5_SET(mkc, mkc, qpn, 0xffffff);
 	MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
 	MLX5_SET(mkc, mkc, log_page_size, ent->page);
 	return mr;
@@ -774,29 +797,6 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 	return 0;
 }
 
-static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
-					  struct ib_pd *pd)
-{
-	struct mlx5_ib_dev *dev = to_mdev(pd->device);
-
-	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
-	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
-	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
-	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
-	MLX5_SET(mkc, mkc, lr, 1);
-
-	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
-		MLX5_SET(mkc, mkc, relaxed_ordering_write,
-			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
-	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
-		MLX5_SET(mkc, mkc, relaxed_ordering_read,
-			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
-
-	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
-	MLX5_SET(mkc, mkc, qpn, 0xffffff);
-	MLX5_SET64(mkc, mkc, start_addr, start_addr);
-}
-
 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
@@ -1190,7 +1190,8 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
 
 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
-	set_mkc_access_pd_addr_fields(mkc, access_flags, virt_addr, pd);
+	set_mkc_access_pd_addr_fields(mkc, access_flags, virt_addr,
+				      populate ? pd : dev->umrc.pd);
 	MLX5_SET(mkc, mkc, free, !populate);
 	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
 	MLX5_SET(mkc, mkc, umr_en, 1);
-- 
2.25.1

