Subject: [PATCH v13 2/6] mm/vmstat: Use vmstat_dirty to track CPU-specific vmstat discrepancies
From: Aaron Tomlin <atomlin@atomlin.com>

This patch uses the previously introduced CPU-specific variable, vmstat_dirty,
to indicate whether a vmstat differential (or imbalance) is present for a given
CPU, so that vmstat processing can be initiated at the appropriate time. The
hope is that this approach is cheaper than need_update(). The idea is based on
Marcelo's patch [1].

[1]: https://lore.kernel.org/lkml/20220204173554.763888172@fedora.localdomain/
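
For context, the vmstat_mark_dirty()/vmstat_clear_dirty()/is_vmstat_dirty()
helpers used below are introduced by patch 1/6 of this series; the following is
only a minimal sketch of the assumed per-CPU flag and helper definitions (the
names come from this patch, the bodies are an assumption, not the authoritative
implementation):

/* Assumed per-CPU flag from patch 1/6: true while this CPU holds unfolded diffs. */
static DEFINE_PER_CPU_ALIGNED(bool, vmstat_dirty);

/* Mark this CPU as having pending vmstat differentials. */
static inline void vmstat_mark_dirty(void)
{
	this_cpu_write(vmstat_dirty, true);
}

/* Clear the flag once the differentials have been folded. */
static inline void vmstat_clear_dirty(void)
{
	this_cpu_write(vmstat_dirty, false);
}

/* Check whether this CPU has pending vmstat differentials. */
static inline bool is_vmstat_dirty(void)
{
	return this_cpu_read(vmstat_dirty);
}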

Signed-off-by: Aaron Tomlin <atomlin@atomlin.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
---
mm/vmstat.c | 48 ++++++++++++++----------------------------------
1 file changed, 14 insertions(+), 34 deletions(-)

Index: linux-2.6/mm/vmstat.c
===================================================================
--- linux-2.6.orig/mm/vmstat.c
+++ linux-2.6/mm/vmstat.c
@@ -381,6 +381,7 @@ void __mod_zone_page_state(struct zone *
x = 0;
}
__this_cpu_write(*p, x);
+ vmstat_mark_dirty();

preempt_enable_nested();
}
@@ -417,6 +418,7 @@ void __mod_node_page_state(struct pglist
x = 0;
}
__this_cpu_write(*p, x);
+ vmstat_mark_dirty();

preempt_enable_nested();
}
@@ -577,6 +579,9 @@ static inline void mod_zone_state(struct
s8 __percpu *p = pcp->vm_stat_diff + item;
long o, n, t, z;

+ /* cmpxchg and vmstat_mark_dirty should happen on the same CPU */
+ preempt_disable();
+
do {
z = 0; /* overflow to zone counters */

@@ -606,6 +611,8 @@ static inline void mod_zone_state(struct

if (z)
zone_page_state_add(z, zone, item);
+ vmstat_mark_dirty();
+ preempt_enable();
}

void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
@@ -645,6 +652,8 @@ static inline void mod_node_state(struct
delta >>= PAGE_SHIFT;
}

+ /* cmpxchg and vmstat_mark_dirty should happen on the same CPU */
+ preempt_disable();
do {
z = 0; /* overflow to node counters */

@@ -674,6 +683,8 @@ static inline void mod_node_state(struct

if (z)
node_page_state_add(z, pgdat, item);
+ vmstat_mark_dirty();
+ preempt_enable();
}

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
@@ -828,6 +839,14 @@ static int refresh_cpu_vm_stats(bool do_
int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
int changes = 0;

+ /*
+ * Clear vmstat_dirty before clearing the percpu vmstats.
+ * If interrupts are enabled, it is possible that an interrupt
+ * or another task modifies a percpu vmstat, which will
+ * set vmstat_dirty to true.
+ */
+ vmstat_clear_dirty();
+
for_each_populated_zone(zone) {
struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
#ifdef CONFIG_NUMA
@@ -1957,35 +1976,6 @@ static void vmstat_update(struct work_st
}

/*
- * Check if the diffs for a certain cpu indicate that
- * an update is needed.
- */
-static bool need_update(int cpu)
-{
- pg_data_t *last_pgdat = NULL;
- struct zone *zone;
-
- for_each_populated_zone(zone) {
- struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
- struct per_cpu_nodestat *n;
-
- /*
- * The fast way of checking if there are any vmstat diffs.
- */
- if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff)))
- return true;
-
- if (last_pgdat == zone->zone_pgdat)
- continue;
- last_pgdat = zone->zone_pgdat;
- n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
- if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff)))
- return true;
- }
- return false;
-}
-
-/*
* Switch off vmstat processing and then fold all the remaining differentials
* until the diffs stay at zero. The function is used by NOHZ and can only be
* invoked when tick processing is not active.
@@ -1995,10 +1985,7 @@ void quiet_vmstat(void)
if (system_state != SYSTEM_RUNNING)
return;

- if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
- return;
-
- if (!need_update(smp_processor_id()))
+ if (!is_vmstat_dirty())
return;

/*
@@ -2029,7 +2016,7 @@ static void vmstat_shepherd(struct work_
for_each_online_cpu(cpu) {
struct delayed_work *dw = &per_cpu(vmstat_work, cpu);

- if (!delayed_work_pending(dw) && need_update(cpu))
+ if (!delayed_work_pending(dw) && per_cpu(vmstat_dirty, cpu))
queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);

cond_resched();
