From: Robin Murphy <>
Subject: [PATCH 2/2] iommu/iova: Manage the depot list size
Date: Mon, 14 Aug 2023 18:53:34 +0100
Automatically scaling the depot up to suit the peak capacity of a workload is all well and good, but it would be nice to have a way to scale it back down again if the workload changes. To that end, add automatic reclaim that will gradually free unused magazines if the depot size remains above a reasonable threshold for long enough.
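The reclaim below is an instance of the common self-rescheduling delayed-work pattern: each pass frees at most one surplus magazine and only re-arms itself while the depot is still above the threshold, so an idle depot drains gradually rather than being flushed in one go. A minimal standalone sketch of that shape, with purely illustrative names (the real code in the diff operates on the rcache depot under its lock):

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/jiffies.h>

/* Hypothetical stand-in for the rcache depot, for illustration only. */
struct toy_depot {
	spinlock_t lock;
	unsigned int size;	/* number of stashed entries */
	struct delayed_work trim;
};

#define TOY_TRIM_DELAY	msecs_to_jiffies(100)	/* assumed interval */

static void toy_trim_func(struct work_struct *work)
{
	struct toy_depot *d = container_of(work, typeof(*d), trim.work);
	bool oversized;

	spin_lock(&d->lock);
	oversized = d->size > num_online_cpus();
	if (oversized)
		d->size--;	/* pop and free one surplus entry here */
	spin_unlock(&d->lock);

	/* Re-arm only while still shrinking, so the work dies out on idle. */
	if (oversized)
		schedule_delayed_work(&d->trim, TOY_TRIM_DELAY);
}

With the 100ms delay, at most one magazine is reclaimed per interval, so a depot that has grown N magazines beyond num_online_cpus() decays back in roughly N * 100ms; the work is also re-kicked whenever a full magazine is pushed to the depot, as the hunks below show.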
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
---
 drivers/iommu/iova.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index d2de6fb0e9f4..76a7d694708e 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -11,6 +11,7 @@
 #include <linux/smp.h>
 #include <linux/bitops.h>
 #include <linux/cpu.h>
+#include <linux/workqueue.h>
 
 /* The anchor node sits above the top of the usable address space */
 #define IOVA_ANCHOR	~0UL
@@ -626,6 +627,8 @@ EXPORT_SYMBOL_GPL(reserve_iova);
  */
 #define IOVA_MAG_SIZE 127
 
+#define IOVA_DEPOT_DELAY msecs_to_jiffies(100)
+
 struct iova_magazine {
 	/*
	 * Only full magazines are inserted into the depot, so we can avoid
@@ -646,8 +649,11 @@ struct iova_cpu_rcache {
 
 struct iova_rcache {
 	spinlock_t lock;
+	unsigned int depot_size;
 	struct iova_magazine *depot;
 	struct iova_cpu_rcache __percpu *cpu_rcaches;
+	struct iova_domain *iovad;
+	struct delayed_work work;
 };
 
 static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
@@ -728,6 +734,7 @@ static struct iova_magazine *iova_depot_pop(struct iova_rcache *rcache)
 
 	rcache->depot = mag->next;
 	mag->size = IOVA_MAG_SIZE;
+	rcache->depot_size--;
 	return mag;
 }
 
@@ -735,6 +742,24 @@ static void iova_depot_push(struct iova_rcache *rcache, struct iova_magazine *mag)
 {
 	mag->next = rcache->depot;
 	rcache->depot = mag;
+	rcache->depot_size++;
+}
+
+static void iova_depot_work_func(struct work_struct *work)
+{
+	struct iova_rcache *rcache = container_of(work, typeof(*rcache), work.work);
+	struct iova_magazine *mag = NULL;
+
+	spin_lock(&rcache->lock);
+	if (rcache->depot_size > num_online_cpus())
+		mag = iova_depot_pop(rcache);
+	spin_unlock(&rcache->lock);
+
+	if (mag) {
+		iova_magazine_free_pfns(mag, rcache->iovad);
+		iova_magazine_free(mag);
+		schedule_delayed_work(&rcache->work, IOVA_DEPOT_DELAY);
+	}
 }
 
 int iova_domain_init_rcaches(struct iova_domain *iovad)
@@ -754,6 +779,8 @@ int iova_domain_init_rcaches(struct iova_domain *iovad)
 
 		rcache = &iovad->rcaches[i];
 		spin_lock_init(&rcache->lock);
+		rcache->iovad = iovad;
+		INIT_DELAYED_WORK(&rcache->work, iova_depot_work_func);
 		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache),
 						     cache_line_size());
 		if (!rcache->cpu_rcaches) {
@@ -814,6 +841,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
 			spin_lock(&rcache->lock);
 			iova_depot_push(rcache, cpu_rcache->loaded);
 			spin_unlock(&rcache->lock);
+			schedule_delayed_work(&rcache->work, IOVA_DEPOT_DELAY);
 
 			cpu_rcache->loaded = new_mag;
 			can_insert = true;
@@ -915,6 +943,7 @@ static void free_iova_rcaches(struct iova_domain *iovad)
 			iova_magazine_free(cpu_rcache->prev);
 		}
 		free_percpu(rcache->cpu_rcaches);
+		cancel_delayed_work_sync(&rcache->work);
 		while ((mag = iova_depot_pop(rcache)))
 			iova_magazine_free(mag);
 	}
-- 
2.39.2.101.g768bb238c484.dirty