Subject: [PATCH v3 2/2] mm: make lru_add_drain_all() selective

This change makes lru_add_drain_all() selectively interrupt only the
cpus that have per-cpu LRU pagevec pages to drain.

This is important in nohz mode, where a call to mlockall(), for
example, would otherwise interrupt every core unnecessarily.
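
For illustration, a minimal userspace sketch of the case above (a
sketch only; it assumes a kernel booted with nohz_full= and the call
issued from a housekeeping cpu). mlockall() is the standard libc call
whose syscall path ends up invoking lru_add_drain_all():

/* Sketch: on a nohz_full system, this single call would previously
 * have queued drain work on every cpu; with this patch only cpus
 * that actually have per-cpu LRU pagevec pages get interrupted.
 */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
		perror("mlockall");
		return 1;
	}
	printf("address space locked\n");
	return 0;
}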

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
---
v3: split commit into two, one for workqueue and one for mm, though both
should probably be taken through -mm.

 mm/swap.c | 37 ++++++++++++++++++++++++++++++++++++-
 1 file changed, 36 insertions(+), 1 deletion(-)

diff --git a/mm/swap.c b/mm/swap.c
index 4a1d0d2..d4a862b 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -405,6 +405,11 @@ static void activate_page_drain(int cpu)
 	pagevec_lru_move_fn(pvec, __activate_page, NULL);
 }
 
+static bool need_activate_page_drain(int cpu)
+{
+	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
+}
+
 void activate_page(struct page *page)
 {
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -422,6 +427,11 @@ static inline void activate_page_drain(int cpu)
 {
 }
 
+static bool need_activate_page_drain(int cpu)
+{
+	return false;
+}
+
 void activate_page(struct page *page)
 {
 	struct zone *zone = page_zone(page);
@@ -683,7 +693,32 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
  */
 int lru_add_drain_all(void)
 {
-	return schedule_on_each_cpu(lru_add_drain_per_cpu);
+	cpumask_var_t mask;
+	int cpu, rc;
+
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+		return -ENOMEM;
+	cpumask_clear(mask);
+
+	/*
+	 * Figure out which cpus need flushing. It's OK if we race
+	 * with changes to the per-cpu lru pvecs, since it's no worse
+	 * than if we flushed all cpus, since a cpu could still end
+	 * up putting pages back on its pvec before we returned.
+	 * And this avoids interrupting other cpus unnecessarily.
+	 */
+	for_each_online_cpu(cpu) {
+		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
+		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
+		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
+		    need_activate_page_drain(cpu))
+			cpumask_set_cpu(cpu, mask);
+	}
+
+	rc = schedule_on_cpu_mask(lru_add_drain_per_cpu, mask);
+
+	free_cpumask_var(mask);
+	return rc;
 }
 
 /*
--
1.8.3.1
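
For reference, a rough sketch of what the schedule_on_cpu_mask()
helper introduced by patch 1/2 might look like, modeled on the
existing schedule_on_each_cpu(). This is an assumption for
illustration only; the actual helper is defined in the companion
workqueue patch:

/* Assumed shape of the patch 1/2 helper (would live in
 * kernel/workqueue.c): queue the work item on each cpu in the mask,
 * then wait for all of it to finish.
 */
int schedule_on_cpu_mask(work_func_t func, const struct cpumask *mask)
{
	int cpu;
	struct work_struct __percpu *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	for_each_cpu(cpu, mask) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		schedule_work_on(cpu, work);
	}

	for_each_cpu(cpu, mask)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();

	free_percpu(works);
	return 0;
}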

