From: Anthony Yznaga <anthony.yznaga@oracle.com>
Subject: [RFC 27/43] x86/mm/numa: add numa_isolate_memblocks()
Date: 6 May 2020
Provide a way for a caller external to the NUMA code to ensure that
memblocks on the memblock reserved list do not cross node boundaries
and have a node ID assigned to them. This will be used by PKRAM to
ensure that the initialization of page structs for preserved pages can
be deferred and multithreaded efficiently.

Signed-off-by: Anthony Yznaga <anthony.yznaga@oracle.com>
---
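[Note: for context, a minimal sketch of how a caller outside the NUMA
code, such as the PKRAM init path, might use the new hook. The function
pkram_reserve_ranges() is a hypothetical placeholder for whatever code
adds the preserved ranges to memblock.reserved during early boot; it is
not part of this series.]

	/*
	 * Sketch only: assumes the preserved page ranges have already
	 * been memblock_reserve()d. pkram_reserve_ranges() is a
	 * hypothetical placeholder, not a function from this patch.
	 */
	void __init pkram_init(void)
	{
		/* reserve the preserved page ranges in memblock */
		pkram_reserve_ranges();

		/*
		 * Split reserved memblock regions along node boundaries
		 * and assign node IDs, so deferred per-node struct page
		 * initialization covers the preserved pages.
		 */
		numa_isolate_memblocks();
	}
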
 arch/x86/include/asm/numa.h |  4 ++++
 arch/x86/mm/numa.c          | 32 ++++++++++++++++++++------------
 2 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index bbfde3d2662f..f9e05f4eb1c6 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -40,6 +40,7 @@ static inline void set_apicid_to_node(int apicid, s16 node)
 }
 
 extern int numa_cpu_node(int cpu);
+extern void __init numa_isolate_memblocks(void);
 
 #else	/* CONFIG_NUMA */
 static inline void set_apicid_to_node(int apicid, s16 node)
@@ -50,6 +51,9 @@ static inline int numa_cpu_node(int cpu)
 {
 	return NUMA_NO_NODE;
 }
+static inline void numa_isolate_memblocks(void)
+{
+}
 #endif	/* CONFIG_NUMA */
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 59ba008504dc..df0065e24ea5 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -475,6 +475,25 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
 	return true;
 }
 
+void __init numa_isolate_memblocks(void)
+{
+	int i;
+
+	/*
+	 * Iterate over all memory known to the x86 architecture,
+	 * and use those ranges to set the nid in memblock.reserved.
+	 * This will split up the memblock regions along node
+	 * boundaries and will set the node IDs as well.
+	 */
+	for (i = 0; i < numa_meminfo.nr_blks; i++) {
+		struct numa_memblk *mb = numa_meminfo.blk + i;
+		int ret;
+
+		ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
+		WARN_ON_ONCE(ret);
+	}
+}
+
 /*
  * Mark all currently memblock-reserved physical memory (which covers the
  * kernel's own memory ranges) as hot-unswappable.
@@ -493,19 +512,8 @@ static void __init numa_clear_kernel_node_hotplug(void)
 	 * used by the kernel, but those regions are not split up
 	 * along node boundaries yet, and don't necessarily have their
 	 * node ID set yet either.
-	 *
-	 * So iterate over all memory known to the x86 architecture,
-	 * and use those ranges to set the nid in memblock.reserved.
-	 * This will split up the memblock regions along node
-	 * boundaries and will set the node IDs as well.
 	 */
-	for (i = 0; i < numa_meminfo.nr_blks; i++) {
-		struct numa_memblk *mb = numa_meminfo.blk + i;
-		int ret;
-
-		ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
-		WARN_ON_ONCE(ret);
-	}
+	numa_isolate_memblocks();
 
 	/*
 	 * Now go over all reserved memblock regions, to construct a
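[Note: to make the splitting behavior concrete, a hypothetical worked
example; the node layout and addresses below are made up for
illustration, not taken from this patch. With node 0 covering
[0x00000000, 0x80000000) and node 1 covering [0x80000000, 0x100000000),
a reserved region straddling the boundary is split and tagged:]

	/*
	 * memblock.reserved before numa_isolate_memblocks():
	 *   [0x7ff00000, 0x80100000)  nid unset
	 * after:
	 *   [0x7ff00000, 0x80000000)  nid = 0
	 *   [0x80000000, 0x80100000)  nid = 1
	 */
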
--
2.13.3