    From: Shunsuke Mie <mie@igel.co.jp>
    Subject: [RFC PATCH v4 2/2] RDMA/rxe: Add dma-buf support
    Date: Mon, 22 Nov 2021
    Implement an ib_device operation 'reg_user_mr_dmabuf'. Generate a
    rxe_map from the memory space linked to the passed dma-buf.

    Signed-off-by: Shunsuke Mie <mie@igel.co.jp>
    ---
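    As a usage note (kept below the '---' so it stays out of the commit
    message): userspace reaches this verb through rdma-core's
    ibv_reg_dmabuf_mr(). A minimal sketch, assuming "pd" comes from
    ibv_alloc_pd() and "dmabuf_fd" is a dma-buf file descriptor obtained
    from some exporter (e.g. a GPU driver or udmabuf); both names are
    hypothetical:

        #include <stdio.h>
        #include <infiniband/verbs.h>

        static struct ibv_mr *reg_dmabuf(struct ibv_pd *pd, int dmabuf_fd,
                                         size_t len)
        {
                struct ibv_mr *mr;

                /* offset 0, iova 0: register the whole dma-buf and
                 * address it starting at 0.
                 */
                mr = ibv_reg_dmabuf_mr(pd, 0, len, 0, dmabuf_fd,
                                       IBV_ACCESS_LOCAL_WRITE);
                if (!mr)
                        perror("ibv_reg_dmabuf_mr");
                return mr;
        }
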
    drivers/infiniband/sw/rxe/rxe_loc.h   |   2 +
    drivers/infiniband/sw/rxe/rxe_mr.c    | 113 ++++++++++++++++++++++++++
    drivers/infiniband/sw/rxe/rxe_verbs.c |  34 ++++++++
    3 files changed, 149 insertions(+)

    diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
    index 1ca43b859d80..8bc19ea1a376 100644
    --- a/drivers/infiniband/sw/rxe/rxe_loc.h
    +++ b/drivers/infiniband/sw/rxe/rxe_loc.h
    @@ -75,6 +75,8 @@ u8 rxe_get_next_key(u32 last_key);
     void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);
     int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
     		     int access, struct rxe_mr *mr);
    +int rxe_mr_dmabuf_init_user(struct rxe_pd *pd, int fd, u64 start, u64 length,
    +			    u64 iova, int access, struct rxe_mr *mr);
     int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
     int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
     		enum rxe_mr_copy_dir dir);
    diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
    index 53271df10e47..b954e5647f82 100644
    --- a/drivers/infiniband/sw/rxe/rxe_mr.c
    +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
    @@ -4,6 +4,8 @@
      * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
      */

    +#include <linux/dma-buf.h>
    +#include <linux/dma-buf-map.h>
     #include "rxe.h"
     #include "rxe_loc.h"

    @@ -245,6 +247,114 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
     	return err;
     }

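    +/*
    + * Build the MR's page list by vmap()ing the dma-buf into the kernel
    + * address space and slicing the mapping into PAGE_SIZE chunks. This
    + * relies on the exporter providing a CPU-addressable (non-iomem)
    + * mapping.
    + */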
    +static int rxe_map_dmabuf_mr(struct rxe_mr *mr,
    +			     struct ib_umem_dmabuf *umem_dmabuf)
    +{
    +	struct rxe_map_set *set;
    +	struct rxe_phys_buf *buf = NULL;
    +	struct rxe_map **map;
    +	void *vaddr;
    +	int num_buf = 0;
    +	int err;
    +	size_t remain;
    +	struct dma_buf_map dmabuf_map;
    +
    +	err = dma_buf_vmap(umem_dmabuf->dmabuf, &dmabuf_map);
    +	if (err)
    +		goto err_out;
    +
    +	/* An iomem mapping cannot be dereferenced as kernel memory here */
    +	if (dmabuf_map.is_iomem) {
    +		err = -EINVAL;
    +		goto err_vunmap;
    +	}
    +
    +	set = mr->cur_map_set;
    +	set->page_shift = PAGE_SHIFT;
    +	set->page_mask = PAGE_SIZE - 1;
    +
    +	map = set->map;
    +	buf = map[0]->buf;
    +
    +	vaddr = dmabuf_map.vaddr;
    +	remain = umem_dmabuf->dmabuf->size;
    +
    +	for (; remain; vaddr += PAGE_SIZE) {
    +		if (num_buf >= RXE_BUF_PER_MAP) {
    +			map++;
    +			buf = map[0]->buf;
    +			num_buf = 0;
    +		}
    +
    +		buf->addr = (uintptr_t)vaddr;
    +		if (remain >= PAGE_SIZE)
    +			buf->size = PAGE_SIZE;
    +		else
    +			buf->size = remain;
    +		remain -= buf->size;
    +
    +		num_buf++;
    +		buf++;
    +	}
    +
    +	return 0;
    +
    +err_vunmap:
    +	dma_buf_vunmap(umem_dmabuf->dmabuf, &dmabuf_map);
    +err_out:
    +	return err;
    +}
    +
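    +/*
    + * rxe_map_dmabuf_mr() stored the vmap base address as the first
    + * phys_buf's addr, so rebuild the dma_buf_map from it to vunmap.
    + */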
    +static void rxe_unmap_dmabuf_mr(struct rxe_mr *mr)
    +{
    +	struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
    +	struct rxe_map *map = mr->cur_map_set->map[0];
    +	struct dma_buf_map dma_buf_map =
    +		DMA_BUF_MAP_INIT_VADDR((void *)(uintptr_t)map->buf->addr);
    +
    +	dma_buf_vunmap(umem_dmabuf->dmabuf, &dma_buf_map);
    +}
    +
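    +/*
    + * Register a user MR backed by a dma-buf: attach to the dma-buf via
    + * ib_umem_dmabuf_get(), allocate the rxe map set, and populate it
    + * from the vmap'ed buffer.
    + */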
    +int rxe_mr_dmabuf_init_user(struct rxe_pd *pd, int fd, u64 start, u64 length,
    +			    u64 iova, int access, struct rxe_mr *mr)
    +{
    +	struct ib_umem_dmabuf *umem_dmabuf;
    +	struct rxe_map_set *set;
    +	int err;
    +
    +	umem_dmabuf = ib_umem_dmabuf_get(pd->ibpd.device, start, length, fd,
    +					 access, NULL);
    +	if (IS_ERR(umem_dmabuf)) {
    +		err = PTR_ERR(umem_dmabuf);
    +		goto err_out;
    +	}
    +
    +	rxe_mr_init(access, mr);
    +
    +	err = rxe_mr_alloc(mr, ib_umem_num_pages(&umem_dmabuf->umem), 0);
    +	if (err) {
    +		pr_warn("%s: Unable to allocate memory for map\n", __func__);
    +		goto err_release_umem;
    +	}
    +
    +	mr->ibmr.pd = &pd->ibpd;
    +	mr->umem = &umem_dmabuf->umem;
    +	mr->access = access;
    +	mr->state = RXE_MR_STATE_VALID;
    +	mr->type = IB_MR_TYPE_USER;
    +
    +	set = mr->cur_map_set;
    +	set->length = length;
    +	set->iova = iova;
    +	set->va = start;
    +	set->offset = ib_umem_offset(mr->umem);
    +
    +	err = rxe_map_dmabuf_mr(mr, umem_dmabuf);
    +	if (err)
    +		goto err_free_map_set;
    +
    +	return 0;
    +
    +err_free_map_set:
    +	rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
    +err_release_umem:
    +	ib_umem_release(&umem_dmabuf->umem);
    +err_out:
    +	return err;
    +}
    +
     int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
     {
     	int err;
    @@ -703,6 +813,9 @@ void rxe_mr_cleanup(struct rxe_pool_entry *arg)
     {
     	struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);

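    +	/* dma-buf MRs hold a kernel vmap; drop it before releasing the umem */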
    +	if (mr->umem && mr->umem->is_dmabuf)
    +		rxe_unmap_dmabuf_mr(mr);
    +
     	ib_umem_release(mr->umem);

     	if (mr->cur_map_set)
    diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
    index 0aa0d7e52773..dc7d27b3cb90 100644
    --- a/drivers/infiniband/sw/rxe/rxe_verbs.c
    +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
    @@ -940,6 +940,39 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
     	return ERR_PTR(err);
     }

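    +/*
    + * Same flow as rxe_reg_user_mr(), except that the memory is described
    + * by a dma-buf fd instead of a user virtual address range.
    + */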
    +static struct ib_mr *rxe_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
    +					    u64 length, u64 iova, int fd,
    +					    int access, struct ib_udata *udata)
    +{
    +	int err;
    +	struct rxe_dev *rxe = to_rdev(ibpd->device);
    +	struct rxe_pd *pd = to_rpd(ibpd);
    +	struct rxe_mr *mr;
    +
    +	mr = rxe_alloc(&rxe->mr_pool);
    +	if (!mr) {
    +		err = -ENOMEM;
    +		goto err2;
    +	}
    +
    +	rxe_add_index(mr);
    +
    +	rxe_add_ref(pd);
    +
    +	err = rxe_mr_dmabuf_init_user(pd, fd, start, length, iova, access, mr);
    +	if (err)
    +		goto err3;
    +
    +	return &mr->ibmr;
    +
    +err3:
    +	rxe_drop_ref(pd);
    +	rxe_drop_index(mr);
    +	rxe_drop_ref(mr);
    +err2:
    +	return ERR_PTR(err);
    +}
    +
     static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
     				  u32 max_num_sg)
     {
    @@ -1105,6 +1138,7 @@ static const struct ib_device_ops rxe_dev_ops = {
     	.query_qp = rxe_query_qp,
     	.query_srq = rxe_query_srq,
     	.reg_user_mr = rxe_reg_user_mr,
    +	.reg_user_mr_dmabuf = rxe_reg_user_mr_dmabuf,
     	.req_notify_cq = rxe_req_notify_cq,
     	.resize_cq = rxe_resize_cq,

    --
    2.17.1