From: Michel Lespinasse <michel@lespinasse.org>
Subject: [PATCH v2 30/35] mm: create new include/linux/vm_event.h header file
Date: 2022-01-28
    Split off the definitions needed to update event counters from vmstat.h
    into a new vm_event.h header file.

    The rationale is to allow header files included from mm.h to update
    event counters. vmstat.h cannot be included from such header files,
    because it refers to page_pgdat(), which is only defined further down
    in mm.h, and thus triggers compile errors. vm_event.h does not refer
    to page_pgdat() and so avoids the problem.
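
    As an illustration (a hypothetical header, not part of this series),
    a header that mm.h pulls in can now bump an event counter without
    dragging in all of vmstat.h:

        /* example.h - hypothetical header included from mm.h */
        #include <linux/vm_event.h>

        static inline void example_note_fault(void)
        {
        	/* compiles here; vm_event.h never touches page_pgdat() */
        	count_vm_event(PGFAULT);
        }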

    Signed-off-by: Michel Lespinasse <michel@lespinasse.org>
    ---
    include/linux/vm_event.h | 105 +++++++++++++++++++++++++++++++++++++++
    include/linux/vmstat.h   |  95 +----------------------------------
    2 files changed, 106 insertions(+), 94 deletions(-)
    create mode 100644 include/linux/vm_event.h

    diff --git a/include/linux/vm_event.h b/include/linux/vm_event.h
    new file mode 100644
    index 000000000000..b3ae108a3841
    --- /dev/null
    +++ b/include/linux/vm_event.h
    @@ -0,0 +1,105 @@
    +/* SPDX-License-Identifier: GPL-2.0 */
    +#ifndef _LINUX_VM_EVENT_H
    +#define _LINUX_VM_EVENT_H
    +
    +#include <linux/types.h>
    +#include <linux/percpu.h>
    +#include <linux/mmzone.h>
    +#include <linux/vm_event_item.h>
    +#include <linux/atomic.h>
    +
    +#ifdef CONFIG_VM_EVENT_COUNTERS
    +/*
    + * Light weight per cpu counter implementation.
    + *
    + * Counters should only be incremented and no critical kernel component
    + * should rely on the counter values.
    + *
    + * Counters are handled completely inline. On many platforms the code
    + * generated will simply be the increment of a global address.
    + */
    +
    +struct vm_event_state {
    +	unsigned long event[NR_VM_EVENT_ITEMS];
    +};
    +
    +DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
    +
    +/*
    + * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
    + * local_irq_disable overhead.
    + */
    +static inline void __count_vm_event(enum vm_event_item item)
    +{
    +	raw_cpu_inc(vm_event_states.event[item]);
    +}
    +
    +static inline void count_vm_event(enum vm_event_item item)
    +{
    +	this_cpu_inc(vm_event_states.event[item]);
    +}
    +
    +static inline void __count_vm_events(enum vm_event_item item, long delta)
    +{
    +	raw_cpu_add(vm_event_states.event[item], delta);
    +}
    +
    +static inline void count_vm_events(enum vm_event_item item, long delta)
    +{
    +	this_cpu_add(vm_event_states.event[item], delta);
    +}
    +
    +extern void all_vm_events(unsigned long *);
    +
    +extern void vm_events_fold_cpu(int cpu);
    +
    +#else
    +
    +/* Disable counters */
    +static inline void count_vm_event(enum vm_event_item item)
    +{
    +}
    +static inline void count_vm_events(enum vm_event_item item, long delta)
    +{
    +}
    +static inline void __count_vm_event(enum vm_event_item item)
    +{
    +}
    +static inline void __count_vm_events(enum vm_event_item item, long delta)
    +{
    +}
    +static inline void all_vm_events(unsigned long *ret)
    +{
    +}
    +static inline void vm_events_fold_cpu(int cpu)
    +{
    +}
    +
    +#endif /* CONFIG_VM_EVENT_COUNTERS */
    +
    +#ifdef CONFIG_NUMA_BALANCING
    +#define count_vm_numa_event(x) count_vm_event(x)
    +#define count_vm_numa_events(x, y) count_vm_events(x, y)
    +#else
    +#define count_vm_numa_event(x) do {} while (0)
    +#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
    +#endif /* CONFIG_NUMA_BALANCING */
    +
    +#ifdef CONFIG_DEBUG_TLBFLUSH
    +#define count_vm_tlb_event(x) count_vm_event(x)
    +#define count_vm_tlb_events(x, y) count_vm_events(x, y)
    +#else
    +#define count_vm_tlb_event(x) do {} while (0)
    +#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
    +#endif
    +
    +#ifdef CONFIG_DEBUG_VM_VMACACHE
    +#define count_vm_vmacache_event(x) count_vm_event(x)
    +#else
    +#define count_vm_vmacache_event(x) do {} while (0)
    +#endif
    +
    +#define __count_zid_vm_events(item, zid, delta) \
    +	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
    +
    +#endif /* _LINUX_VM_EVENT_H */
    diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
    index bfe38869498d..7c3c892ce89a 100644
    --- a/include/linux/vmstat.h
    +++ b/include/linux/vmstat.h
    @@ -6,6 +6,7 @@
    #include <linux/percpu.h>
    #include <linux/mmzone.h>
    #include <linux/vm_event_item.h>
    +#include <linux/vm_event.h>
    #include <linux/atomic.h>
    #include <linux/static_key.h>
    #include <linux/mmdebug.h>
    @@ -40,100 +41,6 @@ enum writeback_stat_item {
    	NR_VM_WRITEBACK_STAT_ITEMS,
    };

    -#ifdef CONFIG_VM_EVENT_COUNTERS
    -/*
    - * Light weight per cpu counter implementation.
    - *
    - * Counters should only be incremented and no critical kernel component
    - * should rely on the counter values.
    - *
    - * Counters are handled completely inline. On many platforms the code
    - * generated will simply be the increment of a global address.
    - */
    -
    -struct vm_event_state {
    -	unsigned long event[NR_VM_EVENT_ITEMS];
    -};
    -
    -DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
    -
    -/*
    - * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
    - * local_irq_disable overhead.
    - */
    -static inline void __count_vm_event(enum vm_event_item item)
    -{
    -	raw_cpu_inc(vm_event_states.event[item]);
    -}
    -
    -static inline void count_vm_event(enum vm_event_item item)
    -{
    -	this_cpu_inc(vm_event_states.event[item]);
    -}
    -
    -static inline void __count_vm_events(enum vm_event_item item, long delta)
    -{
    -	raw_cpu_add(vm_event_states.event[item], delta);
    -}
    -
    -static inline void count_vm_events(enum vm_event_item item, long delta)
    -{
    -	this_cpu_add(vm_event_states.event[item], delta);
    -}
    -
    -extern void all_vm_events(unsigned long *);
    -
    -extern void vm_events_fold_cpu(int cpu);
    -
    -#else
    -
    -/* Disable counters */
    -static inline void count_vm_event(enum vm_event_item item)
    -{
    -}
    -static inline void count_vm_events(enum vm_event_item item, long delta)
    -{
    -}
    -static inline void __count_vm_event(enum vm_event_item item)
    -{
    -}
    -static inline void __count_vm_events(enum vm_event_item item, long delta)
    -{
    -}
    -static inline void all_vm_events(unsigned long *ret)
    -{
    -}
    -static inline void vm_events_fold_cpu(int cpu)
    -{
    -}
    -
    -#endif /* CONFIG_VM_EVENT_COUNTERS */
    -
    -#ifdef CONFIG_NUMA_BALANCING
    -#define count_vm_numa_event(x) count_vm_event(x)
    -#define count_vm_numa_events(x, y) count_vm_events(x, y)
    -#else
    -#define count_vm_numa_event(x) do {} while (0)
    -#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
    -#endif /* CONFIG_NUMA_BALANCING */
    -
    -#ifdef CONFIG_DEBUG_TLBFLUSH
    -#define count_vm_tlb_event(x) count_vm_event(x)
    -#define count_vm_tlb_events(x, y) count_vm_events(x, y)
    -#else
    -#define count_vm_tlb_event(x) do {} while (0)
    -#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
    -#endif
    -
    -#ifdef CONFIG_DEBUG_VM_VMACACHE
    -#define count_vm_vmacache_event(x) count_vm_event(x)
    -#else
    -#define count_vm_vmacache_event(x) do {} while (0)
    -#endif
    -
    -#define __count_zid_vm_events(item, zid, delta) \
    -	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
    -
    /*
    * Zone and node-based page accounting with per cpu differentials.
    */
    --
    2.20.1
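
    For context, a minimal sketch of how callers use the relocated API,
    assuming this patch is applied. The example_* functions are
    hypothetical; the event items, zone_idx(), and the counter helpers are
    the existing kernel definitions moved by this patch:

        /* sketch: typical callers of the relocated counter API */
        #include <linux/vm_event.h>

        static void example_account_alloc(struct zone *zone, long nr_pages)
        {
        	/* per-zone item: resolves to PGALLOC_<zone> via the zone index */
        	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_pages);
        }

        static void example_account_free(long nr_pages)
        {
        	/* this_cpu variant; the __ versions use racy raw_cpu ops */
        	count_vm_events(PGFREE, nr_pages);
        }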