Subject: Re: [PATCH 13/16] nvme-pci: Convert to using dma_map_sg_p2pdma for p2pdma pages
From: John Hubbard
On 4/8/21 10:01 AM, Logan Gunthorpe wrote:
> Convert to using dma_map_sg_p2pdma() for PCI p2pdma pages.
>
> This should be equivalent but allows for heterogeneous scatterlists
> with both P2PDMA and regular pages. However, P2PDMA support will be
> slightly more restricted (only dma-direct and dma-iommu are currently
> supported).
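
Just to restate the win here: a single scatterlist can now mix host and
P2PDMA pages and be mapped with one call. A hypothetical caller (the
host_page/p2p_page names are invented for illustration) would look
roughly like:

        /* sgl[] mixes an ordinary host page with a P2PDMA page; the old
         * code would have had to branch on is_pci_p2pdma_page() here. */
        sg_init_table(sgl, 2);
        sg_set_page(&sgl[0], host_page, PAGE_SIZE, 0);
        sg_set_page(&sgl[1], p2p_page, PAGE_SIZE, 0);
        nr = dma_map_sg_p2pdma_attrs(dev, sgl, 2, DMA_TO_DEVICE, 0);
        if (nr < 0)
                return nr;      /* failure is reported as a negative errno */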
>
> Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
> ---
> drivers/nvme/host/pci.c | 28 ++++++++--------------------
> 1 file changed, 8 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
> index 14f092973792..a1ed07ff38b7 100644
> --- a/drivers/nvme/host/pci.c
> +++ b/drivers/nvme/host/pci.c
> @@ -577,17 +577,6 @@ static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
>
> }
>
> -static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
> -{
> -        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
> -
> -        if (is_pci_p2pdma_page(sg_page(iod->sg)))
> -                pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
> -                                    rq_dma_dir(req));
> -        else
> -                dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
> -}
> -
> static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
> {
>         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
> @@ -600,7 +589,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
>
>         WARN_ON_ONCE(!iod->nents);
>
> -        nvme_unmap_sg(dev, req);
> +        dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));


Nice simplification!


>         if (iod->npages == 0)
>                 dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
>                               iod->first_dma);
> @@ -868,14 +857,13 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
>         if (!iod->nents)
>                 goto out_free_sg;
>
> -        if (is_pci_p2pdma_page(sg_page(iod->sg)))
> -                nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
> -                        iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN);
> -        else
> -                nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
> -                        rq_dma_dir(req), DMA_ATTR_NO_WARN);
> -        if (!nr_mapped)
> +        nr_mapped = dma_map_sg_p2pdma_attrs(dev->dev, iod->sg, iod->nents,
> +                rq_dma_dir(req), DMA_ATTR_NO_WARN);
> +        if (nr_mapped < 0) {
> +                if (nr_mapped != -ENOMEM)
> +                        ret = BLK_STS_TARGET;
>                 goto out_free_sg;
> +        }

But now the "nr_mapped == 0" case no longer takes the early goto to
out_free_sg. Is that OK?
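
To spell out the concern, here is the before/after control flow side by
side (a minimal sketch lifted from the hunk above; whether the new helper
can in fact return 0 is exactly the open question):

        /* Before: a 0 return from dma_map_sg_attrs() meant the mapping
         * failed, and we bailed out early. */
        nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
                        rq_dma_dir(req), DMA_ATTR_NO_WARN);
        if (!nr_mapped)
                goto out_free_sg;

        /* After: only a negative return bails out.  If
         * dma_map_sg_p2pdma_attrs() could ever return 0, we would fall
         * through to the PRP/SGL setup below with nothing mapped. */
        nr_mapped = dma_map_sg_p2pdma_attrs(dev->dev, iod->sg, iod->nents,
                        rq_dma_dir(req), DMA_ATTR_NO_WARN);
        if (nr_mapped < 0) {
                if (nr_mapped != -ENOMEM)
                        ret = BLK_STS_TARGET;
                goto out_free_sg;
        }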

>
>         iod->use_sgl = nvme_pci_use_sgls(dev, req);
>         if (iod->use_sgl)
> @@ -887,7 +875,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
>         return BLK_STS_OK;
>
> out_unmap_sg:
> -        nvme_unmap_sg(dev, req);
> +        dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
> out_free_sg:
>         mempool_free(iod->sg, dev->iod_mempool);
>         return ret;
>

thanks,
--
John Hubbard
NVIDIA
