Subject: [PATCH 5.15 05/72] Revert "RDMA/mlx5: Fix releasing unallocated memory in dereg MR flow"
From: Maor Gottlieb <maorg@nvidia.com>

commit 4163cb3d1980383220ad7043002b930995dcba33 upstream.

This patch is not the full fix and still causes call traces
during mlx5_ib_dereg_mr().

This reverts commit f0ae4afe3d35e67db042c58a52909e06262b740f.

Fixes: f0ae4afe3d35 ("RDMA/mlx5: Fix releasing unallocated memory in dereg MR flow")
Link: https://lore.kernel.org/r/20211222101312.1358616-1-maorg@nvidia.com
Signed-off-by: Maor Gottlieb <maorg@nvidia.com>
Acked-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
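Note (not part of the upstream changelog): with umem restored as a
top-level member of struct mlx5_ib_mr, a NULL umem once again marks a
kernel MR, mlx5_free_priv_descs() regains its internal guard, and
mlx5_ib_dereg_mr() can call it unconditionally instead of gating on
!udata. A condensed sketch of the post-revert paths, pieced together
from the hunks below (error handling and unrelated fields omitted):

	static void
	mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
	{
		/* Only kernel MRs (umem == NULL) own private descriptors. */
		if (!mr->umem && mr->descs) {
			struct ib_device *device = mr->ibmr.device;
			int size = mr->max_descs * mr->desc_size;
			struct mlx5_ib_dev *dev = to_mdev(device);

			dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
					 DMA_TO_DEVICE);
			kfree(mr->descs_alloc);
			mr->descs = NULL;
		}
	}

	/* In mlx5_ib_dereg_mr(), for non-cache MRs: */
	mlx5_free_priv_descs(mr);	/* no-op for user MRs */
	kfree(mr);
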
 drivers/infiniband/hw/mlx5/mlx5_ib.h |  6 +++---
 drivers/infiniband/hw/mlx5/mr.c      | 28 +++++++++++++++-------------
 2 files changed, 18 insertions(+), 16 deletions(-)

--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -641,6 +641,7 @@ struct mlx5_ib_mr {
 
 	/* User MR data */
 	struct mlx5_cache_ent *cache_ent;
+	struct ib_umem *umem;
 
 	/* This is zero'd when the MR is allocated */
 	union {
@@ -652,7 +653,7 @@ struct mlx5_ib_mr {
 			struct list_head list;
 		};
 
-		/* Used only by kernel MRs */
+		/* Used only by kernel MRs (umem == NULL) */
 		struct {
 			void *descs;
 			void *descs_alloc;
@@ -674,9 +675,8 @@ struct mlx5_ib_mr {
 			int data_length;
 		};
 
-		/* Used only by User MRs */
+		/* Used only by User MRs (umem != NULL) */
 		struct {
-			struct ib_umem *umem;
 			unsigned int page_shift;
 			/* Current access_flags */
 			int access_flags;
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1911,18 +1911,19 @@ err:
 	return ret;
 }
 
-static void mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
+static void
+mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
 {
-	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
-	int size = mr->max_descs * mr->desc_size;
-
-	if (!mr->descs)
-		return;
-
-	dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
-			 DMA_TO_DEVICE);
-	kfree(mr->descs_alloc);
-	mr->descs = NULL;
+	if (!mr->umem && mr->descs) {
+		struct ib_device *device = mr->ibmr.device;
+		int size = mr->max_descs * mr->desc_size;
+		struct mlx5_ib_dev *dev = to_mdev(device);
+
+		dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
+				 DMA_TO_DEVICE);
+		kfree(mr->descs_alloc);
+		mr->descs = NULL;
+	}
 }
 
 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
@@ -1998,8 +1999,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr,
 	if (mr->cache_ent) {
 		mlx5_mr_cache_free(dev, mr);
 	} else {
-		if (!udata)
-			mlx5_free_priv_descs(mr);
+		mlx5_free_priv_descs(mr);
 		kfree(mr);
 	}
 	return 0;
@@ -2086,6 +2086,7 @@ static struct mlx5_ib_mr *mlx5_ib_alloc_
 	if (err)
 		goto err_free_in;
 
+	mr->umem = NULL;
 	kfree(in);
 
 	return mr;
@@ -2212,6 +2213,7 @@ static struct ib_mr *__mlx5_ib_alloc_mr(
 	}
 
 	mr->ibmr.device = pd->device;
+	mr->umem = NULL;
 
 	switch (mr_type) {
 	case IB_MR_TYPE_MEM_REG:
