Subject: [ 05/19] module: do percpu allocation after uniqueness check. No, really!
    3.10-stable review patch.  If anyone has any objections, please let me know.

    ------------------

    From: Rusty Russell <rusty@rustcorp.com.au>

    commit 8d8022e8aba85192e937f1f0f7450e256d66ae5c upstream.

    v3.8-rc1-5-g1fb9341 was supposed to stop parallel kvm loads exhausting
    percpu memory on large machines:

    Now we have a new state MODULE_STATE_UNFORMED, we can insert the
    module into the list (and thus guarantee its uniqueness) before we
    allocate the per-cpu region.

    In my defence, it didn't actually say the patch did this. Just that
    we "can".

    This patch actually *does* it.
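
The idea in miniature: take the lock, check that nobody else has already
claimed the name, insert yourself into the list, and only then pay for the
expensive allocation. Below is a standalone userspace sketch of that
ordering, not kernel code: the names are made up, pthreads stands in for
module_mutex, a plain malloc() stands in for the percpu region, and a
duplicate simply bails where the real code parks a second loader on
MODULE_STATE_UNFORMED until the winner finishes.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-in for the module list, protected like module_mutex. */
struct entry {
	struct entry *next;
	char name[64];
	void *percpu;			/* expensive resource, allocated late */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *entries;

/* Uniqueness check and insertion, done *before* any big allocation. */
static struct entry *reserve_name(const char *name)
{
	struct entry *e;

	pthread_mutex_lock(&list_lock);
	for (e = entries; e; e = e->next) {
		if (strcmp(e->name, name) == 0) {
			pthread_mutex_unlock(&list_lock);
			return NULL;	/* lost the race: think -EEXIST */
		}
	}
	e = calloc(1, sizeof(*e));
	strncpy(e->name, name, sizeof(e->name) - 1);
	e->next = entries;
	entries = e;
	pthread_mutex_unlock(&list_lock);
	return e;
}

static void *load(void *name)
{
	struct entry *e = reserve_name(name);

	if (!e)
		return NULL;		/* duplicate: never touched the allocator */
	e->percpu = malloc(1 << 20);	/* only the unique winner pays */
	printf("%s: loaded\n", (const char *)name);
	return NULL;
}

int main(void)
{
	pthread_t t[16];
	int i;

	/* 16 parallel loads of the same name: exactly one allocation. */
	for (i = 0; i < 16; i++)
		pthread_create(&t[i], NULL, load, (void *)"kvm");
	for (i = 0; i < 16; i++)
		pthread_join(t[i], NULL);
	return 0;
}

(Compile with "cc -pthread"; freeing the list is left to the OS, as befits
a sketch.)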

    Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
    Tested-by: Jim Hull <jim.hull@hp.com>
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

    ---
    kernel/module.c | 34 ++++++++++++++++++----------------
    1 file changed, 18 insertions(+), 16 deletions(-)

--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2927,7 +2927,6 @@ static struct module *layout_and_allocat
 {
 	/* Module within temporary copy. */
 	struct module *mod;
-	Elf_Shdr *pcpusec;
 	int err;
 
 	mod = setup_load_info(info, flags);
@@ -2942,17 +2941,10 @@ static struct module *layout_and_allocat
 	err = module_frob_arch_sections(info->hdr, info->sechdrs,
 					info->secstrings, mod);
 	if (err < 0)
-		goto out;
+		return ERR_PTR(err);
 
-	pcpusec = &info->sechdrs[info->index.pcpu];
-	if (pcpusec->sh_size) {
-		/* We have a special allocation for this section. */
-		err = percpu_modalloc(mod,
-				      pcpusec->sh_size, pcpusec->sh_addralign);
-		if (err)
-			goto out;
-		pcpusec->sh_flags &= ~(unsigned long)SHF_ALLOC;
-	}
+	/* We will do a special allocation for per-cpu sections later. */
+	info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
 
 	/* Determine total sizes, and put offsets in sh_entsize.  For now
 	   this is done generically; there doesn't appear to be any
@@ -2963,17 +2955,22 @@ static struct module *layout_and_allocat
 	/* Allocate and move to the final place */
 	err = move_module(mod, info);
 	if (err)
-		goto free_percpu;
+		return ERR_PTR(err);
 
 	/* Module has been copied to its final place now: return it. */
 	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
 	kmemleak_load_module(mod, info);
 	return mod;
+}
 
-free_percpu:
-	percpu_modfree(mod);
-out:
-	return ERR_PTR(err);
+static int alloc_module_percpu(struct module *mod, struct load_info *info)
+{
+	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
+	if (!pcpusec->sh_size)
+		return 0;
+
+	/* We have a special allocation for this section. */
+	return percpu_modalloc(mod, pcpusec->sh_size, pcpusec->sh_addralign);
 }
 
 /* mod is no longer valid after this! */
@@ -3237,6 +3234,11 @@ static int load_module(struct load_info
 	}
 #endif
 
+	/* To avoid stressing percpu allocator, do this once we're unique. */
+	err = alloc_module_percpu(mod, info);
+	if (err)
+		goto unlink_mod;
+
 	/* Now module is in final location, initialize linked lists, etc. */
 	err = module_unload_init(mod);
 	if (err)
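
For the curious: abridged, and as I read the 3.10 tree (everything between
the named calls elided), the resulting ordering in load_module() is

	/* Reserve our place in the list: unique from here on. */
	err = add_unformed_module(mod);
	...
	/* To avoid stressing percpu allocator, do this once we're unique. */
	err = alloc_module_percpu(mod, info);
	if (err)
		goto unlink_mod;

so only the one loader that wins the list insertion ever calls
percpu_modalloc(). A failure there unwinds through the existing unlink_mod
label, which takes the module back out of the list; the percpu region needs
no new cleanup of its own, since module_deallocate() on the normal error
path already calls percpu_modfree().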


