From: Vlastimil Babka <vbabka@suse.cz>
Subject: Re: [patch v3 3/6] mm, compaction: add per-zone migration pfn cache for async compaction
On 05/07/2014 04:22 AM, David Rientjes wrote:
> Each zone has a cached migration scanner pfn for memory compaction so that
> subsequent calls to memory compaction can start where the previous call left
> off.
>
> Currently, the compaction migration scanner updates the per-zone cached pfn
> only when pageblocks were not skipped for async compaction. This creates a
> dependency on calling sync compaction to prevent subsequent calls to async
> compaction from scanning an enormous number of non-MOVABLE pageblocks each time
> it is called. On large machines, this can be very expensive.
>
> This patch adds a per-zone cached migration scanner pfn only for async
> compaction. It is updated every time a pageblock has been scanned in its
> entirety and no pages from it were successfully isolated. The cached
> migration scanner pfn for sync compaction is updated only when called for sync
> compaction.
>
> Signed-off-by: David Rientjes <rientjes@google.com>
> ---
> v3: do not update pageblock skip metadata when a pageblock is skipped only
> because it is unsuitable for async compaction, per Vlastimil.

Great.

Acked-by: Vlastimil Babka <vbabka@suse.cz>
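
For readers following the thread, here is a minimal standalone sketch of the
two-slot restart cache the patch introduces. The names toy_zone and
toy_update_cached_pfn are invented for illustration only; this is not the
kernel code, just the shape of the idea (async advances slot 0, sync advances
both):

/*
 * Toy model of compact_cached_migrate_pfn[2]: slot 0 is the async
 * scanner's restart pfn, slot 1 is the sync scanner's.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_zone {
	unsigned long start_pfn;
	unsigned long end_pfn;
	/* [0] = async, [1] = sync */
	unsigned long cached_migrate_pfn[2];
};

/* Called when a whole pageblock was scanned and nothing was isolated. */
static void toy_update_cached_pfn(struct toy_zone *zone, bool sync,
				  unsigned long pfn)
{
	if (pfn > zone->cached_migrate_pfn[0])
		zone->cached_migrate_pfn[0] = pfn;
	if (sync && pfn > zone->cached_migrate_pfn[1])
		zone->cached_migrate_pfn[1] = pfn;
}

int main(void)
{
	struct toy_zone zone = {
		.start_pfn = 0x1000,
		.end_pfn = 0x9000,
		.cached_migrate_pfn = { 0x1000, 0x1000 },
	};

	/* An async pass scans two pageblocks without isolating anything... */
	toy_update_cached_pfn(&zone, false, 0x1200);
	toy_update_cached_pfn(&zone, false, 0x1400);

	/* ...so the next async pass resumes further in, while sync does not. */
	printf("async restarts at %#lx, sync restarts at %#lx\n",
	       zone.cached_migrate_pfn[0], zone.cached_migrate_pfn[1]);
	return 0;
}

The point is simply that async compaction can record its own progress without
pushing forward the point where a later sync pass will restart.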


> include/linux/mmzone.h | 5 ++--
> mm/compaction.c | 66 ++++++++++++++++++++++++++++++--------------------
> 2 files changed, 43 insertions(+), 28 deletions(-)
>
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -360,9 +360,10 @@ struct zone {
> /* Set to true when the PG_migrate_skip bits should be cleared */
> bool compact_blockskip_flush;
>
> - /* pfns where compaction scanners should start */
> + /* pfn where compaction free scanner should start */
> unsigned long compact_cached_free_pfn;
> - unsigned long compact_cached_migrate_pfn;
> + /* pfn where async and sync compaction migration scanner should start */
> + unsigned long compact_cached_migrate_pfn[2];
> #endif
> #ifdef CONFIG_MEMORY_HOTPLUG
> /* see spanned/present_pages for more description */
> diff --git a/mm/compaction.c b/mm/compaction.c
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -89,7 +89,8 @@ static void __reset_isolation_suitable(struct zone *zone)
> unsigned long end_pfn = zone_end_pfn(zone);
> unsigned long pfn;
>
> - zone->compact_cached_migrate_pfn = start_pfn;
> + zone->compact_cached_migrate_pfn[0] = start_pfn;
> + zone->compact_cached_migrate_pfn[1] = start_pfn;
> zone->compact_cached_free_pfn = end_pfn;
> zone->compact_blockskip_flush = false;
>
> @@ -131,9 +132,10 @@ void reset_isolation_suitable(pg_data_t *pgdat)
> */
> static void update_pageblock_skip(struct compact_control *cc,
> struct page *page, unsigned long nr_isolated,
> - bool migrate_scanner)
> + bool set_unsuitable, bool migrate_scanner)
> {
> struct zone *zone = cc->zone;
> + unsigned long pfn;
>
> if (cc->ignore_skip_hint)
> return;
> @@ -141,20 +143,31 @@ static void update_pageblock_skip(struct compact_control *cc,
> if (!page)
> return;
>
> - if (!nr_isolated) {
> - unsigned long pfn = page_to_pfn(page);
> + if (nr_isolated)
> + return;
> +
> + /*
> + * Only skip pageblocks when all forms of compaction will be known to
> + * fail in the near future.
> + */
> + if (set_unsuitable)
> set_pageblock_skip(page);
>
> - /* Update where compaction should restart */
> - if (migrate_scanner) {
> - if (!cc->finished_update_migrate &&
> - pfn > zone->compact_cached_migrate_pfn)
> - zone->compact_cached_migrate_pfn = pfn;
> - } else {
> - if (!cc->finished_update_free &&
> - pfn < zone->compact_cached_free_pfn)
> - zone->compact_cached_free_pfn = pfn;
> - }
> + pfn = page_to_pfn(page);
> +
> + /* Update where async and sync compaction should restart */
> + if (migrate_scanner) {
> + if (cc->finished_update_migrate)
> + return;
> + if (pfn > zone->compact_cached_migrate_pfn[0])
> + zone->compact_cached_migrate_pfn[0] = pfn;
> + if (cc->sync && pfn > zone->compact_cached_migrate_pfn[1])
> + zone->compact_cached_migrate_pfn[1] = pfn;
> + } else {
> + if (cc->finished_update_free)
> + return;
> + if (pfn < zone->compact_cached_free_pfn)
> + zone->compact_cached_free_pfn = pfn;
> }
> }
> #else
> @@ -166,7 +179,7 @@ static inline bool isolation_suitable(struct compact_control *cc,
>
> static void update_pageblock_skip(struct compact_control *cc,
> struct page *page, unsigned long nr_isolated,
> - bool migrate_scanner)
> + bool set_unsuitable, bool migrate_scanner)
> {
> }
> #endif /* CONFIG_COMPACTION */
> @@ -329,7 +342,8 @@ isolate_fail:
>
> /* Update the pageblock-skip if the whole pageblock was scanned */
> if (blockpfn == end_pfn)
> - update_pageblock_skip(cc, valid_page, total_isolated, false);
> + update_pageblock_skip(cc, valid_page, total_isolated, true,
> + false);
>
> count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
> if (total_isolated)
> @@ -464,7 +478,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
> unsigned long flags;
> bool locked = false;
> struct page *page = NULL, *valid_page = NULL;
> - bool skipped_async_unsuitable = false;
> + bool set_unsuitable = true;
> const isolate_mode_t mode = (!cc->sync ? ISOLATE_ASYNC_MIGRATE : 0) |
> (unevictable ? ISOLATE_UNEVICTABLE : 0);
>
> @@ -541,8 +555,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
> */
> mt = get_pageblock_migratetype(page);
> if (!cc->sync && !migrate_async_suitable(mt)) {
> - cc->finished_update_migrate = true;
> - skipped_async_unsuitable = true;
> + set_unsuitable = false;
> goto next_pageblock;
> }
> }
> @@ -646,11 +659,10 @@ next_pageblock:
> /*
> * Update the pageblock-skip information and cached scanner pfn,
> * if the whole pageblock was scanned without isolating any page.
> - * This is not done when pageblock was skipped due to being unsuitable
> - * for async compaction, so that eventual sync compaction can try.
> */
> - if (low_pfn == end_pfn && !skipped_async_unsuitable)
> - update_pageblock_skip(cc, valid_page, nr_isolated, true);
> + if (low_pfn == end_pfn)
> + update_pageblock_skip(cc, valid_page, nr_isolated,
> + set_unsuitable, true);
>
> trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
>
> @@ -877,7 +889,8 @@ static int compact_finished(struct zone *zone,
> /* Compaction run completes if the migrate and free scanner meet */
> if (cc->free_pfn <= cc->migrate_pfn) {
> /* Let the next compaction start anew. */
> - zone->compact_cached_migrate_pfn = zone->zone_start_pfn;
> + zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
> + zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
> zone->compact_cached_free_pfn = zone_end_pfn(zone);
>
> /*
> @@ -1002,7 +1015,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
> * information on where the scanners should start but check that it
> * is initialised by ensuring the values are within zone boundaries.
> */
> - cc->migrate_pfn = zone->compact_cached_migrate_pfn;
> + cc->migrate_pfn = zone->compact_cached_migrate_pfn[cc->sync];
> cc->free_pfn = zone->compact_cached_free_pfn;
> if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
> cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
> @@ -1010,7 +1023,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
> }
> if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
> cc->migrate_pfn = start_pfn;
> - zone->compact_cached_migrate_pfn = cc->migrate_pfn;
> + zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
> + zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
> }
>
> trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);
>


