    From: Muchun Song <songmuchun@bytedance.com>
    Subject: [PATCH v3 75/76] mm: list_lru: rename list_lru_per_memcg to list_lru_memcg
    Date: 2021-09-14

    Until now, the name list_lru_memcg was taken; the previous patch
    freed it up. So rename list_lru_per_memcg to list_lru_memcg, which
    is more concise.
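    For illustration, the lookup path after this rename reads roughly as
    follows (a sketch mirroring list_lru_from_memcg_idx() in the diff
    below; the non-memcg-aware fallback is an assumption, not part of
    this patch):

    	static inline struct list_lru_one *
    	list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
    	{
    		if (list_lru_memcg_aware(lru) && idx >= 0) {
    			/* one struct list_lru_memcg per cgroup, kept in an xarray */
    			struct list_lru_memcg *mlru = xa_load(&lru->xa, idx);

    			/* per-node list selected by node id within that entry */
    			return mlru ? &mlru->nodes[nid] : NULL;
    		}
    		/* assumed fallback for lrus that are not memcg aware */
    		return &lru->node[nid].lru;
    	}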

    Signed-off-by: Muchun Song <songmuchun@bytedance.com>
    ---
    include/linux/list_lru.h | 2 +-
    mm/list_lru.c | 20 ++++++++++----------
    2 files changed, 11 insertions(+), 11 deletions(-)

    diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
    index c423be3cf2d3..d654c8e3d262 100644
    --- a/include/linux/list_lru.h
    +++ b/include/linux/list_lru.h
    @@ -32,7 +32,7 @@ struct list_lru_one {
     	long nr_items;
     };
     
    -struct list_lru_per_memcg {
    +struct list_lru_memcg {
     	struct rcu_head rcu;
     	/* array of per cgroup per node lists, indexed by node id */
     	struct list_lru_one nodes[];
    diff --git a/mm/list_lru.c b/mm/list_lru.c
    index 1202519aeb31..371097ee2485 100644
    --- a/mm/list_lru.c
    +++ b/mm/list_lru.c
    @@ -52,7 +52,7 @@ static inline struct list_lru_one *
     list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
     {
     	if (list_lru_memcg_aware(lru) && idx >= 0) {
    -		struct list_lru_per_memcg *mlru = xa_load(&lru->xa, idx);
    +		struct list_lru_memcg *mlru = xa_load(&lru->xa, idx);
     
     		return mlru ? &mlru->nodes[nid] : NULL;
     	}
    @@ -306,7 +306,7 @@ unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
     
     #ifdef CONFIG_MEMCG_KMEM
     	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
    -		struct list_lru_per_memcg *mlru;
    +		struct list_lru_memcg *mlru;
     		unsigned long index;
     
     		xa_for_each(&lru->xa, index, mlru) {
    @@ -335,10 +335,10 @@ static void init_one_lru(struct list_lru_one *l)
     }
     
     #ifdef CONFIG_MEMCG_KMEM
    -static struct list_lru_per_memcg *memcg_list_lru_alloc(gfp_t gfp)
    +static struct list_lru_memcg *memcg_list_lru_alloc(gfp_t gfp)
     {
     	int nid;
    -	struct list_lru_per_memcg *lru;
    +	struct list_lru_memcg *lru;
     
     	lru = kmalloc(struct_size(lru, nodes, nr_node_ids), gfp);
     	if (!lru)
    @@ -352,7 +352,7 @@ static struct list_lru_per_memcg *memcg_list_lru_alloc(gfp_t gfp)
     
     static void memcg_list_lru_free(struct list_lru *lru, int src_idx)
     {
    -	struct list_lru_per_memcg *mlru = xa_erase_irq(&lru->xa, src_idx);
    +	struct list_lru_memcg *mlru = xa_erase_irq(&lru->xa, src_idx);
     
     	/*
     	 * The __list_lru_walk_one() can walk the list of this node.
    @@ -374,7 +374,7 @@ static void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
     static void memcg_destroy_list_lru(struct list_lru *lru)
     {
     	XA_STATE(xas, &lru->xa, 0);
    -	struct list_lru_per_memcg *mlru;
    +	struct list_lru_memcg *mlru;
     
     	if (!list_lru_memcg_aware(lru))
     		return;
    @@ -477,7 +477,7 @@ int list_lru_memcg_alloc(struct list_lru *lru, struct mem_cgroup *memcg, gfp_t g
     	int i;
     
     	struct list_lru_memcg_table {
    -		struct list_lru_per_memcg *mlru;
    +		struct list_lru_memcg *mlru;
     		struct mem_cgroup *memcg;
     	} *table;
     
    @@ -488,7 +488,7 @@ int list_lru_memcg_alloc(struct list_lru *lru, struct mem_cgroup *memcg, gfp_t g
     		return 0;
     
     	/*
    -	 * The allocated list_lru_per_memcg array is not accounted directly.
    +	 * The allocated list_lru_memcg array is not accounted directly.
     	 * Moreover, it should not come from DMA buffer and is not readily
     	 * reclaimable. So those GFP bits should be masked off.
     	 */
    @@ -500,7 +500,7 @@ int list_lru_memcg_alloc(struct list_lru *lru, struct mem_cgroup *memcg, gfp_t g
     	/*
     	 * Because the list_lru can be reparented to the parent cgroup's
     	 * list_lru, we should make sure that this cgroup and all its
    -	 * ancestors have allocated list_lru_per_memcg.
    +	 * ancestors have allocated list_lru_memcg.
     	 */
     	for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) {
     		if (memcg_list_lru_skip_alloc(lru, memcg))
    @@ -519,7 +519,7 @@ int list_lru_memcg_alloc(struct list_lru *lru, struct mem_cgroup *memcg, gfp_t g
     	xas_lock_irqsave(&xas, flags);
     	while (i--) {
     		int index = memcg_cache_id(table[i].memcg);
    -		struct list_lru_per_memcg *mlru = table[i].mlru;
    +		struct list_lru_memcg *mlru = table[i].mlru;
     
     		xas_set(&xas, index);
     retry:
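
    A note on the hunk at -488 above: the comment there explains that the
    list_lru_memcg array is not accounted directly and must not come from
    DMA or reclaimable memory. A minimal sketch of what that masking looks
    like (the exact mask line is inferred from the comment and is not
    shown in this diff):

    	/* Inferred from the comment above: strip the accounting, DMA and
    	 * reclaimable bits before allocating the list_lru_memcg entries. */
    	gfp &= ~(__GFP_ACCOUNT | __GFP_DMA | __GFP_RECLAIMABLE);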
    --
    2.11.0