From: Rob Clark <robdclark@chromium.org>
Date: Mon, 5 Apr 2021
Subject: [PATCH 8/8] drm/msm: Support evicting GEM objects to swap

Now that tracking is wired up for potentially evictable GEM objects,
hook up the shrinker and the remaining GEM bits so that the backing
pages of inactive objects can be unpinned and made available to be
swapped out.
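
For reviewers, the resulting reclaim order can be summarized by the
following condensed sketch of the scan callback (illustrative only,
not part of the patch; tracepoints are omitted, and the
inactive_dontneed list used by the existing purge pass comes from the
earlier patches in this series):

static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	unsigned long freed;

	/* First pass: drop the pages of purgeable (MADV_DONTNEED) objects. */
	freed = scan(priv, sc->nr_to_scan, &priv->inactive_dontneed, purge);

	/*
	 * Second pass: if that was not enough and swap is usable, unpin the
	 * backing pages of inactive MADV_WILLNEED objects so the core MM can
	 * swap them out.
	 */
	if (can_swap() && freed < sc->nr_to_scan)
		freed += scan(priv, sc->nr_to_scan - freed,
				&priv->inactive_willneed, evict);

	return (freed > 0) ? freed : SHRINK_STOP;
}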

Signed-off-by: Rob Clark <robdclark@chromium.org>
---
drivers/gpu/drm/msm/msm_gem.c | 23 ++++++++++++++++
drivers/gpu/drm/msm/msm_gem_shrinker.c | 37 +++++++++++++++++++++++++-
drivers/gpu/drm/msm/msm_gpu_trace.h | 13 +++++++++
3 files changed, 72 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 163a1d30b5c9..2b731cf42294 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -759,6 +759,29 @@ void msm_gem_purge(struct drm_gem_object *obj)
 			0, (loff_t)-1);
 }
 
+/**
+ * Unpin the backing pages and make them available to be swapped out.
+ */
+void msm_gem_evict(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	GEM_WARN_ON(!msm_gem_is_locked(obj));
+	GEM_WARN_ON(is_unevictable(msm_obj));
+	GEM_WARN_ON(!msm_obj->evictable);
+	GEM_WARN_ON(msm_obj->active_count);
+
+	/* Get rid of any iommu mapping(s): */
+	put_iova_spaces(obj, false);
+
+	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
+
+	put_pages(obj);
+
+	update_inactive(msm_obj);
+}
+
 void msm_gem_vunmap(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 38bf919f8508..52828028b9d4 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -9,12 +9,26 @@
 #include "msm_gpu.h"
 #include "msm_gpu_trace.h"
 
+bool enable_swap = true;
+MODULE_PARM_DESC(enable_swap, "Enable swappable GEM buffers");
+module_param(enable_swap, bool, 0600);
+
+static bool can_swap(void)
+{
+	return enable_swap && get_nr_swap_pages() > 0;
+}
+
 static unsigned long
 msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct msm_drm_private *priv =
 		container_of(shrinker, struct msm_drm_private, shrinker);
-	return priv->shrinkable_count;
+	unsigned count = priv->shrinkable_count;
+
+	if (can_swap())
+		count += priv->evictable_count;
+
+	return count;
 }
 
 static bool
@@ -32,6 +46,17 @@ purge(struct msm_gem_object *msm_obj)
 	return true;
 }
 
+static bool
+evict(struct msm_gem_object *msm_obj)
+{
+	if (is_unevictable(msm_obj))
+		return false;
+
+	msm_gem_evict(&msm_obj->base);
+
+	return true;
+}
+
 static unsigned long
 scan(struct msm_drm_private *priv, unsigned nr_to_scan, struct list_head *list,
 		bool (*shrink)(struct msm_gem_object *msm_obj))
@@ -104,6 +129,16 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	if (freed > 0)
 		trace_msm_gem_purge(freed << PAGE_SHIFT);
 
+	if (can_swap() && freed < sc->nr_to_scan) {
+		int evicted = scan(priv, sc->nr_to_scan - freed,
+				&priv->inactive_willneed, evict);
+
+		if (evicted > 0)
+			trace_msm_gem_evict(evicted << PAGE_SHIFT);
+
+		freed += evicted;
+	}
+
 	return (freed > 0) ? freed : SHRINK_STOP;
 }
 
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
index 03e0c2536b94..ca0b08d7875b 100644
--- a/drivers/gpu/drm/msm/msm_gpu_trace.h
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -128,6 +128,19 @@ TRACE_EVENT(msm_gem_purge,
 );
 
 
+TRACE_EVENT(msm_gem_evict,
+		TP_PROTO(u32 bytes),
+		TP_ARGS(bytes),
+		TP_STRUCT__entry(
+				__field(u32, bytes)
+				),
+		TP_fast_assign(
+				__entry->bytes = bytes;
+		),
+		TP_printk("Evicting %u bytes", __entry->bytes)
+);
+
+
 TRACE_EVENT(msm_gem_purge_vmaps,
 		TP_PROTO(u32 unmapped),
 		TP_ARGS(unmapped),
--
2.30.2