Subject: [PATCH v5] vfio/type1: Adopt fast IOTLB flush interface when unmapping IOVAs
VFIO IOMMU type1 currently unmaps IOVA pages synchronously, which requires
an IOTLB flush for every unmapping. This results in large IOTLB flushing
overhead when handling pass-through devices with a large number of mapped
IOVAs. This can be avoided by using the new IOTLB flushing interface.
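
For reference, the deferred-flush pattern this patch adopts looks roughly
like the sketch below. It uses the existing iommu_unmap_fast(),
iommu_tlb_range_add(), and iommu_tlb_sync() interfaces; the helper name
and the simplified loop are illustrative only, not the code added by this
patch (which also has to unpin pages after each sync, see the diff below):

    /* Sketch: unmap a range, flushing the IOTLB once at the end. */
    static size_t unmap_range_deferred(struct iommu_domain *d,
                                       dma_addr_t iova, size_t size)
    {
            size_t unmapped, total = 0;

            while (total < size) {
                    /* Unmap without flushing the IOTLB per range */
                    unmapped = iommu_unmap_fast(d, iova + total,
                                                size - total);
                    if (!unmapped)
                            break;
                    /* Queue the unmapped range for a batched flush */
                    iommu_tlb_range_add(d, iova + total, unmapped);
                    total += unmapped;
            }
            /* Flush all queued ranges with a single IOTLB sync */
            iommu_tlb_sync(d);
            return total;
    }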

Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Joerg Roedel <joro@8bytes.org>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---

Changes from v4 (https://lkml.org/lkml/2018/1/31/153)
* Change the return type from ssize_t back to size_t since we are no
longer changing the IOMMU API. Also update the error handling logic
accordingly.
* In unmap_unpin_fast(), also sync when failing to allocate an entry.
* Some code restructuring and variable renaming.

drivers/vfio/vfio_iommu_type1.c | 128 ++++++++++++++++++++++++++++++++++++----
1 file changed, 117 insertions(+), 11 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index e30e29a..6041530 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -102,6 +102,13 @@ struct vfio_pfn {
atomic_t ref_count;
};

+struct vfio_regions {
+ struct list_head list;
+ dma_addr_t iova;
+ phys_addr_t phys;
+ size_t len;
+};
+
#define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu) \
(!list_empty(&iommu->domain_list))

@@ -648,11 +655,102 @@ static int vfio_iommu_type1_unpin_pages(void *iommu_data,
return i > npage ? npage : (i > 0 ? i : -EINVAL);
}

+static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
+ struct list_head *regions)
+{
+ long unlocked = 0;
+ struct vfio_regions *entry, *next;
+
+ iommu_tlb_sync(domain->domain);
+
+ list_for_each_entry_safe(entry, next, regions, list) {
+ unlocked += vfio_unpin_pages_remote(dma,
+ entry->iova,
+ entry->phys >> PAGE_SHIFT,
+ entry->len >> PAGE_SHIFT,
+ false);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+
+ cond_resched();
+
+ return unlocked;
+}
+
+/*
+ * Generally, VFIO needs to unpin remote pages after each IOTLB flush.
+ * Therefore, when using the IOTLB flush sync interface, VFIO needs to keep track
+ * of these regions (currently using a list).
+ *
+ * This value specifies the maximum number of regions for each IOTLB flush sync.
+ */
+#define VFIO_IOMMU_TLB_SYNC_MAX 512
+
+static size_t unmap_unpin_fast(struct vfio_domain *domain,
+ struct vfio_dma *dma, dma_addr_t *iova,
+ size_t len, phys_addr_t phys, long *unlocked,
+ struct list_head *unmapped_list,
+ int *unmapped_cnt)
+{
+ size_t unmapped = 0;
+ struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+
+ if (entry) {
+ unmapped = iommu_unmap_fast(domain->domain, *iova, len);
+
+ if (!unmapped) {
+ kfree(entry);
+ } else {
+ iommu_tlb_range_add(domain->domain, *iova, unmapped);
+ entry->iova = *iova;
+ entry->phys = phys;
+ entry->len = unmapped;
+ list_add_tail(&entry->list, unmapped_list);
+
+ *iova += unmapped;
+ (*unmapped_cnt)++;
+ }
+ }
+
+ /*
+ * Sync if the number of fast-unmap regions hits the limit
+ * or in case of errors.
+ */
+ if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) {
+ *unlocked += vfio_sync_unpin(dma, domain,
+ unmapped_list);
+ *unmapped_cnt = 0;
+ }
+
+ return unmapped;
+}
+
+static size_t unmap_unpin_slow(struct vfio_domain *domain,
+ struct vfio_dma *dma, dma_addr_t *iova,
+ size_t len, phys_addr_t phys,
+ long *unlocked)
+{
+ size_t unmapped = iommu_unmap(domain->domain, *iova, len);
+
+ if (unmapped) {
+ *unlocked += vfio_unpin_pages_remote(dma, *iova,
+ phys >> PAGE_SHIFT,
+ unmapped >> PAGE_SHIFT,
+ false);
+ *iova += unmapped;
+ cond_resched();
+ }
+ return unmapped;
+}
+
static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
bool do_accounting)
{
dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
struct vfio_domain *domain, *d;
+ struct list_head unmapped_region_list;
+ int unmapped_region_cnt = 0;
long unlocked = 0;

if (!dma->size)
@@ -661,6 +759,8 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
return 0;

+ INIT_LIST_HEAD(&unmapped_region_list);
+
/*
* We use the IOMMU to track the physical addresses, otherwise we'd
* need a much more complicated tracking system. Unfortunately that
@@ -698,20 +798,26 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
break;
}

- unmapped = iommu_unmap(domain->domain, iova, len);
- if (WARN_ON(!unmapped))
- break;
-
- unlocked += vfio_unpin_pages_remote(dma, iova,
- phys >> PAGE_SHIFT,
- unmapped >> PAGE_SHIFT,
- false);
- iova += unmapped;
-
- cond_resched();
+ /*
+ * First, try to use fast unmap/unpin. In case of failure,
+ * fall back to the slow unmap/unpin path.
+ */
+ unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
+ &unlocked, &unmapped_region_list,
+ &unmapped_region_cnt);
+ if (!unmapped) {
+ unmapped = unmap_unpin_slow(domain, dma, &iova, len,
+ phys, &unlocked);
+ if (WARN_ON(!unmapped))
+ break;
+ }
}

dma->iommu_mapped = false;
+
+ if (unmapped_region_cnt)
+ unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list);
+
if (do_accounting) {
vfio_lock_acct(dma->task, -unlocked, NULL);
return 0;
--
1.8.3.1