From:    Niklas Schnelle <schnelle@linux.ibm.com>
Subject: [PATCH v2 7/7] iommu/s390: flush queued IOVAs on RPCIT out of resource indication
Date:    2022-11-16
When RPCIT indicates that the underlying hypervisor has run out of
resources, this often means that its IOVA space is exhausted and that
IOVAs need to be freed before new ones can be created. Trigger a flush
of the IOVA queue in this case so that the queued IOVAs are freed and
the new mapping is established as part of the resulting global flush.
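As a rough illustration of that recovery flow, the toy userspace model
below exhausts a small resource pool, hits -ENOMEM on the next mapping,
flushes the queued entries back to the pool, and picks up the pending
mapping during the flush. The names (POOL_SIZE, refresh_mapping,
flush_queue, fq_pending) are invented for this sketch and only mimic
the behaviour of RPCIT and iommu_dma_flush_fq(); they are not kernel
APIs.

/*
 * Toy model of the out-of-resource recovery path described above.
 * All identifiers here are hypothetical stand-ins for this sketch.
 */
#include <stdio.h>
#include <errno.h>

#define POOL_SIZE 4		/* hypervisor-side IOVA resources */

static int pool_used;		/* resources currently held */
static int fq_pending;		/* IOVAs freed by the guest but still queued */

/* Stand-in for RPCIT: fails with -ENOMEM once the pool is exhausted. */
static int refresh_mapping(void)
{
	if (pool_used >= POOL_SIZE)
		return -ENOMEM;
	pool_used++;
	return 0;
}

/*
 * Stand-in for iommu_dma_flush_fq(): the global flush returns the queued
 * IOVAs to the pool and establishes the pending mapping along the way.
 */
static void flush_queue(void)
{
	pool_used -= fq_pending;
	fq_pending = 0;
	pool_used++;		/* new mapping established during the flush */
}

int main(void)
{
	int i;

	/* Exhaust the pool, then queue two IOVAs for lazy freeing. */
	for (i = 0; i < POOL_SIZE; i++)
		refresh_mapping();
	fq_pending = 2;

	/* The next mapping fails until the queue is flushed. */
	if (refresh_mapping() == -ENOMEM) {
		printf("out of resources, flushing IOVA queue\n");
		flush_queue();
	}
	printf("resources in use after recovery: %d/%d\n", pool_used, POOL_SIZE);
	return 0;
}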

Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
---
 drivers/iommu/dma-iommu.c  | 14 +++++++++-----
 drivers/iommu/dma-iommu.h  |  1 +
 drivers/iommu/s390-iommu.c |  7 +++++--
 3 files changed, 15 insertions(+), 7 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 3801cdf11aa8..54e7f63fd0d9 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -188,19 +188,23 @@ static void fq_flush_single(struct iommu_dma_cookie *cookie)
 	spin_unlock_irqrestore(&fq->lock, flags);
 }
 
-static void fq_flush_timeout(struct timer_list *t)
+void iommu_dma_flush_fq(struct iommu_dma_cookie *cookie)
 {
-	struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
-
-	atomic_set(&cookie->fq_timer_on, 0);
 	fq_flush_iotlb(cookie);
-
 	if (cookie->fq_domain->type == IOMMU_DOMAIN_DMA_FQ)
 		fq_flush_percpu(cookie);
 	else
 		fq_flush_single(cookie);
 }
 
+static void fq_flush_timeout(struct timer_list *t)
+{
+	struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
+
+	atomic_set(&cookie->fq_timer_on, 0);
+	iommu_dma_flush_fq(cookie);
+}
+
 static void queue_iova(struct iommu_dma_cookie *cookie,
 		unsigned long pfn, unsigned long pages,
 		struct list_head *freelist)
diff --git a/drivers/iommu/dma-iommu.h b/drivers/iommu/dma-iommu.h
index 942790009292..cac06030aa26 100644
--- a/drivers/iommu/dma-iommu.h
+++ b/drivers/iommu/dma-iommu.h
@@ -13,6 +13,7 @@ int iommu_get_dma_cookie(struct iommu_domain *domain);
 void iommu_put_dma_cookie(struct iommu_domain *domain);
 
 int iommu_dma_init_fq(struct iommu_domain *domain);
+void iommu_dma_flush_fq(struct iommu_dma_cookie *cookie);
 
 void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
 
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 087bb2acff30..9c2782c4043e 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -538,14 +538,17 @@ static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
 {
 	struct s390_domain *s390_domain = to_s390_domain(domain);
 	struct zpci_dev *zdev;
+	int rc;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
 		if (!zdev->tlb_refresh)
 			continue;
 		atomic64_inc(&s390_domain->ctrs.sync_map_rpcits);
-		zpci_refresh_trans((u64)zdev->fh << 32,
-				   iova, size);
+		rc = zpci_refresh_trans((u64)zdev->fh << 32,
+					iova, size);
+		if (rc == -ENOMEM)
+			iommu_dma_flush_fq(domain->iova_cookie);
 	}
 	rcu_read_unlock();
 }
--
2.34.1