Subject: [RFC PATCH v1 10/13] mm: add LRU batch lock API's
Add the LRU batch locking APIs themselves.  This adds the final piece
of infrastructure necessary for locking batches on an LRU list.

The APIs lock a specific page on the LRU list: for a non-sentinel page,
only the appropriate LRU batch lock is taken; for a sentinel page, the
node's/memcg's lru_lock is taken in addition.

These interfaces are designed for performance: when used in a loop, they
avoid needlessly dropping and then reacquiring the same lock(s) from one
iteration to the next.  They're difficult to use, but they'll do for a
prototype.
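
As an illustration, here is a minimal sketch of how a caller might drive
these helpers while walking a list of pages.  The surrounding function,
its name, and the page list are hypothetical; only lru_batch_lock(),
lru_batch_unlock(), and their cursor arguments come from this patch, and
the actual call sites added later in the series may differ:

	/* Hypothetical caller; the work done under the lock is a placeholder. */
	static void walk_pages_on_lru(struct list_head *pages)
	{
		spinlock_t *locked_lru_batch = NULL;
		struct pglist_data *locked_pgdat = NULL;
		unsigned long flags;
		struct page *page;

		list_for_each_entry(page, pages, lru) {
			/*
			 * Drop only what the next page cannot reuse:
			 * everything if it maps to a different batch lock,
			 * just lru_lock if it is a non-sentinel page in the
			 * same batch.
			 */
			if (locked_lru_batch)
				lru_batch_unlock(page, &locked_lru_batch,
						 &locked_pgdat, &flags);
			lru_batch_lock(page, &locked_lru_batch, &locked_pgdat,
				       &flags);

			/* ... operate on page's LRU state here ... */
		}

		/* NULL means "no next page": release whatever is still held. */
		if (locked_lru_batch)
			lru_batch_unlock(NULL, &locked_lru_batch, &locked_pgdat,
					 &flags);
	}

Passing the upcoming page to lru_batch_unlock() is what lets the caller
keep the batch spinlock across consecutive pages in the same batch, which
is where the win over a single lru_lock comes from.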

Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
---
include/linux/mm_inline.h | 58 +++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 58 insertions(+)

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 1f1657c75b1b..11d9fcf93f2b 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -210,6 +210,64 @@ static __always_inline void lru_unlock_all(struct pglist_data *pgdat,
 	local_irq_enable();
 }
 
+static __always_inline spinlock_t *page_lru_batch_lock(struct page *page)
+{
+	return &page_pgdat(page)->lru_batch_locks[page->lru_batch].lock;
+}
+
+/**
+ * lru_batch_lock - lock an LRU list batch
+ */
+static __always_inline void lru_batch_lock(struct page *page,
+					   spinlock_t **locked_lru_batch,
+					   struct pglist_data **locked_pgdat,
+					   unsigned long *flags)
+{
+	spinlock_t *lru_batch = page_lru_batch_lock(page);
+	struct pglist_data *pgdat = page_pgdat(page);
+
+	VM_BUG_ON(*locked_pgdat && !page->lru_sentinel);
+
+	if (lru_batch != *locked_lru_batch) {
+		VM_BUG_ON(*locked_pgdat);
+		VM_BUG_ON(*locked_lru_batch);
+		spin_lock_irqsave(lru_batch, *flags);
+		*locked_lru_batch = lru_batch;
+		if (page->lru_sentinel) {
+			spin_lock(&pgdat->lru_lock);
+			*locked_pgdat = pgdat;
+		}
+	} else if (!*locked_pgdat && page->lru_sentinel) {
+		spin_lock(&pgdat->lru_lock);
+		*locked_pgdat = pgdat;
+	}
+}
+
+/**
+ * lru_batch_unlock - unlock an LRU list batch
+ */
+static __always_inline void lru_batch_unlock(struct page *page,
+					     spinlock_t **locked_lru_batch,
+					     struct pglist_data **locked_pgdat,
+					     unsigned long *flags)
+{
+	spinlock_t *lru_batch = (page) ? page_lru_batch_lock(page) : NULL;
+
+	VM_BUG_ON(!*locked_lru_batch);
+
+	if (lru_batch != *locked_lru_batch) {
+		if (*locked_pgdat) {
+			spin_unlock(&(*locked_pgdat)->lru_lock);
+			*locked_pgdat = NULL;
+		}
+		spin_unlock_irqrestore(*locked_lru_batch, *flags);
+		*locked_lru_batch = NULL;
+	} else if (*locked_pgdat && !page->lru_sentinel) {
+		spin_unlock(&(*locked_pgdat)->lru_lock);
+		*locked_pgdat = NULL;
+	}
+}
+
 /**
  * page_lru_base_type - which LRU list type should a page be on?
  * @page: the page to test
--
2.16.1