Subject: [PATCH v11 02/13] vfio: VFIO_IOMMU_CACHE_INVALIDATE
    From: "Liu, Yi L" <yi.l.liu@linux.intel.com>

    When the guest "owns" the stage 1 translation structures, the host
    IOMMU driver has no knowledge of caching structure updates unless
    the guest invalidation requests are trapped and passed down to the
    host.

This patch adds the VFIO_IOMMU_CACHE_INVALIDATE ioctl, which aims
at propagating guest stage 1 IOMMU cache invalidations to the host.
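
Below is an illustrative userspace snippet (not part of this patch)
showing how a VMM might forward a trapped guest invalidation through
the new ioctl. It assumes a container fd already configured for
nested translation; the helper name and the PASID-granularity payload
are made up for the example:

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/iommu.h>
  #include <linux/vfio.h>

  /* Hypothetical helper: invalidate guest IOTLB entries for one PASID. */
  static int forward_guest_cache_inv(int container, __u64 pasid)
  {
  	struct vfio_iommu_type1_cache_invalidate cache_inv;

  	memset(&cache_inv, 0, sizeof(cache_inv));
  	cache_inv.argsz = sizeof(cache_inv);
  	cache_inv.flags = 0;	/* no flags defined yet; must be zero */

  	cache_inv.info.argsz = sizeof(cache_inv.info);
  	cache_inv.info.version = IOMMU_CACHE_INVALIDATE_INFO_VERSION_1;
  	cache_inv.info.cache = IOMMU_CACHE_INV_TYPE_IOTLB;
  	cache_inv.info.granularity = IOMMU_INV_GRANU_PASID;
  	cache_inv.info.granu.pasid_info.flags = IOMMU_INV_PASID_FLAGS_PASID;
  	cache_inv.info.granu.pasid_info.pasid = pasid;

  	return ioctl(container, VFIO_IOMMU_CACHE_INVALIDATE, &cache_inv);
  }

Note that the handler below only copies argsz and flags; the
iommu_cache_invalidate_info payload that follows them in the struct is
decoded by iommu_uapi_cache_invalidate(), which reads its own argsz and
version from user memory.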

    Signed-off-by: Liu, Yi L <yi.l.liu@linux.intel.com>
    Signed-off-by: Eric Auger <eric.auger@redhat.com>

    ---
    v10 -> v11:
- renamed ustruct to cache_inv

    v8 -> v9:
    - change the ioctl ID

    v6 -> v7:
    - Use iommu_capsule struct
- renamed vfio_iommu_for_each_dev to vfio_iommu_lookup_dev
  due to a checkpatch error related to the for_each_dev suffix

    v2 -> v3:
    - introduce vfio_iommu_for_each_dev back in this patch

    v1 -> v2:
    - s/TLB/CACHE
    - remove vfio_iommu_task usage
    - commit message rewording
    ---
 drivers/vfio/vfio_iommu_type1.c | 58 +++++++++++++++++++++++++++++++++
 include/uapi/linux/vfio.h       | 13 ++++++++
 2 files changed, 71 insertions(+)

    diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
    index 87ddd9e882dc..966909f542f1 100644
    --- a/drivers/vfio/vfio_iommu_type1.c
    +++ b/drivers/vfio/vfio_iommu_type1.c
@@ -143,6 +143,34 @@ struct vfio_regions {
 #define DIRTY_BITMAP_PAGES_MAX	((u64)INT_MAX)
 #define DIRTY_BITMAP_SIZE_MAX	DIRTY_BITMAP_BYTES(DIRTY_BITMAP_PAGES_MAX)
 
+struct domain_capsule {
+	struct iommu_domain *domain;
+	void *data;
+};
+
+/* iommu->lock must be held */
+static int
+vfio_iommu_lookup_dev(struct vfio_iommu *iommu,
+		      int (*fn)(struct device *dev, void *data),
+		      unsigned long arg)
+{
+	struct domain_capsule dc = {.data = &arg};
+	struct vfio_domain *d;
+	struct vfio_group *g;
+	int ret = 0;
+
+	list_for_each_entry(d, &iommu->domain_list, next) {
+		dc.domain = d->domain;
+		list_for_each_entry(g, &d->group_list, next) {
+			ret = iommu_group_for_each_dev(g->iommu_group,
+						       &dc, fn);
+			if (ret)
+				break;
+		}
+	}
+	return ret;
+}
+
 static int put_pfn(unsigned long pfn, int prot);
 
 static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
@@ -2621,6 +2649,13 @@ vfio_attach_pasid_table(struct vfio_iommu *iommu, unsigned long arg)
 	mutex_unlock(&iommu->lock);
 	return ret;
 }
+static int vfio_cache_inv_fn(struct device *dev, void *data)
+{
+	struct domain_capsule *dc = (struct domain_capsule *)data;
+	unsigned long arg = *(unsigned long *)dc->data;
+
+	return iommu_uapi_cache_invalidate(dc->domain, dev, (void __user *)arg);
+}
 
 static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
 					   struct vfio_info_cap *caps)
@@ -2810,6 +2845,27 @@ static int vfio_iommu_type1_set_pasid_table(struct vfio_iommu *iommu,
 	return ret;
 }
 
+static int vfio_iommu_type1_cache_invalidate(struct vfio_iommu *iommu,
+					     unsigned long arg)
+{
+	struct vfio_iommu_type1_cache_invalidate cache_inv;
+	unsigned long minsz;
+	int ret;
+
+	minsz = offsetofend(struct vfio_iommu_type1_cache_invalidate, flags);
+
+	if (copy_from_user(&cache_inv, (void __user *)arg, minsz))
+		return -EFAULT;
+
+	if (cache_inv.argsz < minsz || cache_inv.flags)
+		return -EINVAL;
+
+	mutex_lock(&iommu->lock);
+	ret = vfio_iommu_lookup_dev(iommu, vfio_cache_inv_fn, arg + minsz);
+	mutex_unlock(&iommu->lock);
+	return ret;
+}
+
 static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
 					unsigned long arg)
 {
@@ -2932,6 +2988,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
 		return vfio_iommu_type1_dirty_pages(iommu, arg);
 	case VFIO_IOMMU_SET_PASID_TABLE:
 		return vfio_iommu_type1_set_pasid_table(iommu, arg);
+	case VFIO_IOMMU_CACHE_INVALIDATE:
+		return vfio_iommu_type1_cache_invalidate(iommu, arg);
 	default:
 		return -ENOTTY;
 	}
    diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
    index 78ce3ce6c331..0e6d94cc2ba4 100644
    --- a/include/uapi/linux/vfio.h
    +++ b/include/uapi/linux/vfio.h
    @@ -1199,6 +1199,19 @@ struct vfio_iommu_type1_set_pasid_table {

 #define VFIO_IOMMU_SET_PASID_TABLE	_IO(VFIO_TYPE, VFIO_BASE + 22)
 
+/**
+ * VFIO_IOMMU_CACHE_INVALIDATE - _IOWR(VFIO_TYPE, VFIO_BASE + 23,
+ *			struct vfio_iommu_type1_cache_invalidate)
+ *
+ * Propagate guest IOMMU cache invalidation to the host.
+ */
+struct vfio_iommu_type1_cache_invalidate {
+	__u32	argsz;
+	__u32	flags;
+	struct iommu_cache_invalidate_info info;
+};
+#define VFIO_IOMMU_CACHE_INVALIDATE	_IO(VFIO_TYPE, VFIO_BASE + 23)
+
 /* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
 
 /*
    --
    2.21.3