From: Yang Shi <shy828301@gmail.com>
Subject: [PATCH 6/9] mm: vmscan: use per memcg nr_deferred of shrinker
Date: Wed, 2 Dec 2020
Use the per-memcg nr_deferred for memcg-aware shrinkers.  The shrinker's own
nr_deferred is still used in the following cases (a minimal userspace sketch
of this selection rule follows the list):

1. Non-memcg-aware shrinkers
2. !CONFIG_MEMCG
3. memcg is disabled by boot parameter
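
Below is a minimal userspace sketch (not part of the patch) of that selection
rule. The struct names, the config_memcg / memcg_disabled_by_boot stand-ins
and uses_per_memcg_deferred() are invented for illustration only; they merely
model when the per-memcg nr_deferred is picked over the shrinker-global one:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel-side conditions; values are illustrative. */
static bool config_memcg = true;		/* CONFIG_MEMCG=y */
static bool memcg_disabled_by_boot = false;	/* cgroup_disable=memory not set */

struct model_shrinker {
	bool memcg_aware;	/* SHRINKER_MEMCG_AWARE set */
};

struct model_shrink_control {
	bool has_memcg;		/* sc->memcg != NULL in the real code */
};

static bool uses_per_memcg_deferred(const struct model_shrinker *shrinker,
				    const struct model_shrink_control *sc)
{
	if (!config_memcg)		/* case 2: !CONFIG_MEMCG */
		return false;
	if (memcg_disabled_by_boot)	/* case 3: memcg disabled by boot parameter */
		return false;
	if (!shrinker->memcg_aware)	/* case 1: non-memcg-aware shrinker */
		return false;
	return sc->has_memcg;		/* otherwise defer the work per memcg */
}

int main(void)
{
	struct model_shrinker aware = { .memcg_aware = true };
	struct model_shrinker plain = { .memcg_aware = false };
	struct model_shrink_control sc = { .has_memcg = true };

	printf("memcg aware shrinker uses %s nr_deferred\n",
	       uses_per_memcg_deferred(&aware, &sc) ? "per-memcg" : "shrinker");
	printf("non memcg aware shrinker uses %s nr_deferred\n",
	       uses_per_memcg_deferred(&plain, &sc) ? "per-memcg" : "shrinker");
	return 0;
}

Built with any C compiler, this prints "per-memcg" for the memcg-aware
shrinker and "shrinker" for the other one, matching the cases listed above.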

Signed-off-by: Yang Shi <shy828301@gmail.com>
---
 mm/vmscan.c | 88 +++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 82 insertions(+), 6 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index cba0bc8d4661..d569fdcaba79 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -203,6 +203,12 @@ static DECLARE_RWSEM(shrinker_rwsem);
 static DEFINE_IDR(shrinker_idr);
 static int shrinker_nr_max;

+static inline bool is_deferred_memcg_aware(struct shrinker *shrinker)
+{
+	return (shrinker->flags & SHRINKER_MEMCG_AWARE) &&
+		!mem_cgroup_disabled();
+}
+
 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
 {
 	int id, ret = -ENOMEM;
@@ -271,7 +277,58 @@ static bool writeback_throttling_sane(struct scan_control *sc)
 #endif
 	return false;
 }
+
+static inline long count_nr_deferred(struct shrinker *shrinker,
+				     struct shrink_control *sc)
+{
+	bool per_memcg_deferred = is_deferred_memcg_aware(shrinker) && sc->memcg;
+	struct memcg_shrinker_deferred *deferred;
+	struct mem_cgroup *memcg = sc->memcg;
+	int nid = sc->nid;
+	int id = shrinker->id;
+	long nr;
+
+	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+		nid = 0;
+
+	if (per_memcg_deferred) {
+		deferred = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_deferred,
+						     true);
+		nr = atomic_long_xchg(&deferred->nr_deferred[id], 0);
+	} else
+		nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
+
+	return nr;
+}
+
+static inline long set_nr_deferred(long nr, struct shrinker *shrinker,
+				   struct shrink_control *sc)
+{
+	bool per_memcg_deferred = is_deferred_memcg_aware(shrinker) && sc->memcg;
+	struct memcg_shrinker_deferred *deferred;
+	struct mem_cgroup *memcg = sc->memcg;
+	int nid = sc->nid;
+	int id = shrinker->id;
+	long new_nr;
+
+	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+		nid = 0;
+
+	if (per_memcg_deferred) {
+		deferred = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_deferred,
+						     true);
+		new_nr = atomic_long_add_return(nr, &deferred->nr_deferred[id]);
+	} else
+		new_nr = atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
+
+	return new_nr;
+}
 #else
+static inline bool is_deferred_memcg_aware(struct shrinker *shrinker)
+{
+	return false;
+}
+
 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
 {
 	return 0;
@@ -290,6 +347,29 @@ static bool writeback_throttling_sane(struct scan_control *sc)
 {
 	return true;
 }
+
+static inline long count_nr_deferred(struct shrinker *shrinker,
+				     struct shrink_control *sc)
+{
+	int nid = sc->nid;
+
+	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+		nid = 0;
+
+	return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
+}
+
+static inline long set_nr_deferred(long nr, struct shrinker *shrinker,
+				   struct shrink_control *sc)
+{
+	int nid = sc->nid;
+
+	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+		nid = 0;
+
+	return atomic_long_add_return(nr,
+				      &shrinker->nr_deferred[nid]);
+}
 #endif

 /*
@@ -429,13 +509,10 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	long freeable;
 	long nr;
 	long new_nr;
-	int nid = shrinkctl->nid;
 	long batch_size = shrinker->batch ? shrinker->batch
 					  : SHRINK_BATCH;
 	long scanned = 0, next_deferred;

-	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
-		nid = 0;

 	freeable = shrinker->count_objects(shrinker, shrinkctl);
 	if (freeable == 0 || freeable == SHRINK_EMPTY)
@@ -446,7 +523,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	 * and zero it so that other concurrent shrinker invocations
 	 * don't also do this scanning work.
 	 */
-	nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
+	nr = count_nr_deferred(shrinker, shrinkctl);

 	total_scan = nr;
 	if (shrinker->seeks) {
@@ -539,8 +616,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	 * move the unused scan count back into the shrinker in a
 	 * manner that handles concurrent updates.
 	 */
-	new_nr = atomic_long_add_return(next_deferred,
-					&shrinker->nr_deferred[nid]);
+	new_nr = set_nr_deferred(next_deferred, shrinker, shrinkctl);

 	trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
 	return freed;
--
2.26.2