Date: 26 Sep 2012
From: David Rientjes <rientjes@google.com>
Subject: [patch] mm, thp: fix mlock statistics
NR_MLOCK is only accounted in single page units: there is no logic to
handle transparent hugepages. This patch adjusts the statistic by the
appropriate number of pages, using hpage_nr_pages(), so that the
correct amount of memory is reflected.
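
For reference (not part of this patch), hpage_nr_pages() comes from
include/linux/huge_mm.h and reads roughly as follows: it returns
HPAGE_PMD_NR base pages (512, i.e. 2MB, with 4kB pages on x86_64) for a
transparent hugepage and 1 for a normal page.

static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}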

Currently, each mlocked transparent hugepage bumps NR_MLOCK by a single
4kB base page instead of the full 2MB hugepage, so mlocking a 4GB
mapping barely moves the counter:

$ grep Mlocked /proc/meminfo
Mlocked: 19636 kB

#include <sys/mman.h>

#define MAP_SIZE (4UL << 30)	/* 4GB; UL avoids overflowing a 32-bit int */

void *ptr = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
mlock(ptr, MAP_SIZE);

$ grep Mlocked /proc/meminfo
Mlocked: 29844 kB

munlock(ptr, MAP_SIZE);

$ grep Mlocked /proc/meminfo
Mlocked: 19636 kB

And with this patch, where the delta now reflects the full 4GB:

$ grep Mlock /proc/meminfo
Mlocked: 19636 kB

mlock(ptr, MAP_SIZE);

$ grep Mlock /proc/meminfo
Mlocked: 4213664 kB

munlock(ptr, MAP_SIZE);

$ grep Mlock /proc/meminfo
Mlocked: 19636 kB
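
For convenience, here is a self-contained version of the test above (a
sketch, not part of the patch; the mlocked_kb() helper is just for
illustration). It assumes THP is enabled and a sufficient
RLIMIT_MEMLOCK, e.g. run as root:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#define MAP_SIZE (4UL << 30)	/* 4GB */

/* Parse the Mlocked: field out of /proc/meminfo, in kB. */
static long mlocked_kb(void)
{
	char line[128];
	long kb = -1;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "Mlocked: %ld kB", &kb) == 1)
			break;
	fclose(f);
	return kb;
}

int main(void)
{
	void *ptr = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (ptr == MAP_FAILED)
		return EXIT_FAILURE;
	printf("before mlock:  %ld kB\n", mlocked_kb());
	if (mlock(ptr, MAP_SIZE))
		return EXIT_FAILURE;
	printf("after mlock:   %ld kB\n", mlocked_kb());
	munlock(ptr, MAP_SIZE);
	printf("after munlock: %ld kB\n", mlocked_kb());
	return EXIT_SUCCESS;
}

The deltas between the printed values should match the /proc/meminfo
numbers above, modulo whatever else the system has mlocked.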

Reported-by: Hugh Dickins <hughd@google.com>
Signed-off-by: David Rientjes <rientjes@google.com>
---
 mm/internal.h   | 3 ++-
 mm/mlock.c      | 6 ++++--
 mm/page_alloc.c | 2 +-
 3 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -180,7 +180,8 @@ static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
 		return 0;
 
 	if (!TestSetPageMlocked(page)) {
-		inc_zone_page_state(page, NR_MLOCK);
+		mod_zone_page_state(page_zone(page), NR_MLOCK,
+				    hpage_nr_pages(page));
 		count_vm_event(UNEVICTABLE_PGMLOCKED);
 	}
 	return 1;
diff --git a/mm/mlock.c b/mm/mlock.c
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -81,7 +81,8 @@ void mlock_vma_page(struct page *page)
 	BUG_ON(!PageLocked(page));
 
 	if (!TestSetPageMlocked(page)) {
-		inc_zone_page_state(page, NR_MLOCK);
+		mod_zone_page_state(page_zone(page), NR_MLOCK,
+				    hpage_nr_pages(page));
 		count_vm_event(UNEVICTABLE_PGMLOCKED);
 		if (!isolate_lru_page(page))
 			putback_lru_page(page);
@@ -108,7 +109,8 @@ void munlock_vma_page(struct page *page)
 	BUG_ON(!PageLocked(page));
 
 	if (TestClearPageMlocked(page)) {
-		dec_zone_page_state(page, NR_MLOCK);
+		mod_zone_page_state(page_zone(page), NR_MLOCK,
+				    -hpage_nr_pages(page));
 		if (!isolate_lru_page(page)) {
 			int ret = SWAP_AGAIN;
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -604,7 +604,7 @@ out:
  */
 static inline void free_page_mlock(struct page *page)
 {
-	__dec_zone_page_state(page, NR_MLOCK);
+	__mod_zone_page_state(page_zone(page), NR_MLOCK, -hpage_nr_pages(page));
 	__count_vm_event(UNEVICTABLE_MLOCKFREED);
 }

