Subject: [PATCH] iommu/iova: Add support for IOVA max alignment tuning
IOVAs are aligned to the smallest PAGE_SIZE order in which the requested
IOVA can fit, but this does not suit all use-cases. It can cause IOVA
space fragmentation in some multimedia and 8K video use-cases that need
larger buffers to be allocated and mapped.

When such an allocation pattern is combined with the current alignment
scheme, the IOVA space can be exhausted quickly on 32-bit devices.

In order to get better IOVA space utilization and reduce fragmentation,
a new kernel command line parameter is introduced to make the alignment
limit configurable by the user during boot.

Signed-off-by: Georgi Djakov <quic_c_gdjako@quicinc.com>
---
 Documentation/admin-guide/kernel-parameters.txt |  8 ++++++++
 drivers/iommu/iova.c                            | 26 ++++++++++++++++++++++++-
 2 files changed, 33 insertions(+), 1 deletion(-)
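
A quick illustration of the intended effect (reviewer note, not part of the
patch; it assumes a 4K PAGE_SIZE and an IOVA granule equal to PAGE_SIZE):
a 4MB allocation is 1024 pages, so today align_mask is shifted by
fls_long(1023) = 10 and the IOVA is aligned to a 4MB boundary. Booting with
e.g. iommu.max_align_shift=9 caps that shift at 9, so the same allocation
is only aligned to a 2MB boundary:

    size          = 1024 pages (4MB)
    default align = 1 << fls_long(1023) = 1024 pages (4MB)
    capped align  = 1 << min(10, 9)     =  512 pages (2MB)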

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index ad94a2aa9819..630246dc691f 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2056,6 +2056,14 @@
 			forcing Dual Address Cycle for PCI cards supporting
 			greater than 32-bit addressing.
 
+	iommu.max_align_shift=
+			[ARM64, X86] Limit the alignment of IOVAs to a maximum
+			PAGE_SIZE order. Larger IOVAs will be aligned to this
+			specified order. The order is expressed as a power of
+			two multiplied by the PAGE_SIZE.
+			Format: { "4" | "5" | "6" | "7" | "8" | "9" }
+			Default: 9
+
 	iommu.strict=	[ARM64, X86] Configure TLB invalidation behaviour
 			Format: { "0" | "1" }
 			0 - Lazy mode.
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 9e8bc802ac05..5a8c86871735 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -15,6 +15,9 @@
 /* The anchor node sits above the top of the usable address space */
 #define IOVA_ANCHOR	~0UL
 
+#define IOMMU_DEFAULT_IOVA_MAX_ALIGN_SHIFT	9
+static unsigned long iommu_max_align_shift __read_mostly = IOMMU_DEFAULT_IOVA_MAX_ALIGN_SHIFT;
+
 static bool iova_rcache_insert(struct iova_domain *iovad,
 			       unsigned long pfn,
 			       unsigned long size);
@@ -27,6 +30,27 @@ static void free_iova_rcaches(struct iova_domain *iovad);
 static void fq_destroy_all_entries(struct iova_domain *iovad);
 static void fq_flush_timeout(struct timer_list *t);
 
+static unsigned long limit_align_shift(struct iova_domain *iovad, unsigned long shift)
+{
+	unsigned long max_align_shift;
+
+	max_align_shift = iommu_max_align_shift + PAGE_SHIFT - iova_shift(iovad);
+	return min_t(unsigned long, max_align_shift, shift);
+}
+
+static int __init iommu_set_def_max_align_shift(char *str)
+{
+	unsigned long max_align_shift;
+
+	int ret = kstrtoul(str, 10, &max_align_shift);
+
+	if (!ret)
+		iommu_max_align_shift = max_align_shift;
+
+	return 0;
+}
+early_param("iommu.max_align_shift", iommu_set_def_max_align_shift);
+
 static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
 {
 	struct iova_domain *iovad;
@@ -242,7 +266,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;
 
 	if (size_aligned)
-		align_mask <<= fls_long(size - 1);
+		align_mask <<= limit_align_shift(iovad, fls_long(size - 1));
 
 	/* Walk the tree backwards */
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
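
For anyone who wants to play with the numbers, below is a minimal userspace
sketch of the capping arithmetic. It is not kernel code: it assumes a 4K
PAGE_SIZE, an IOVA granule equal to PAGE_SIZE (so the cap is simply
iommu.max_align_shift) and the default cap of 9; fls_long() is open-coded
here rather than the kernel helper.

/*
 * Userspace sketch of the alignment capping done by limit_align_shift().
 * Sizes are expressed in IOVA pages, as in __alloc_and_insert_iova_range().
 */
#include <stdio.h>

#define MAX_ALIGN_SHIFT	9UL	/* iommu.max_align_shift=9 (the default) */

/* open-coded stand-in for the kernel's fls_long(): 1-based MSB position */
static unsigned long fls_long(unsigned long x)
{
	unsigned long r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long sizes[] = { 16, 512, 1024, 8192 };	/* sizes in pages */
	unsigned long i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long shift = fls_long(sizes[i] - 1);
		unsigned long capped = shift < MAX_ALIGN_SHIFT ? shift : MAX_ALIGN_SHIFT;

		printf("%5lu pages: default align %5lu pages, capped align %5lu pages\n",
		       sizes[i], 1UL << shift, 1UL << capped);
	}
	return 0;
}

With these inputs, only the allocations larger than 2^9 pages (2MB with 4K
pages) see a reduced alignment; smaller allocations keep their natural
order-size alignment.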