Subject: Re: [PATCH 33/36] autonuma: powerpc port

On Wed, 2012-08-22 at 16:59 +0200, Andrea Arcangeli wrote:
> From: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
>
> * PMD flaging is not required in powerpc since large pages
> are tracked in ptes.
> * Yet to be tested with large pages
> * This is an initial patch that partially works
> * knuma_scand and numa hinting page faults works
> * Page migration is yet to be observed/verified
>
> Signed-off-by: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
> Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>

I don't like this.

> ---
> arch/powerpc/include/asm/pgtable.h | 48 ++++++++++++++++++++++++++++-
> arch/powerpc/include/asm/pte-hash64-64k.h | 4 ++-
> arch/powerpc/mm/numa.c | 3 +-
> mm/autonuma.c | 2 +-
> 4 files changed, 53 insertions(+), 4 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
> index 2e0e411..5f03079 100644
> --- a/arch/powerpc/include/asm/pgtable.h
> +++ b/arch/powerpc/include/asm/pgtable.h
> @@ -33,10 +33,56 @@ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
> static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
> static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
> static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
> -static inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; }
> +static inline int pte_present(pte_t pte) { return pte_val(pte) &
> + (_PAGE_PRESENT|_PAGE_NUMA_PTE); }

Is this absolutely necessary (testing two bits)? It somewhat changes
the semantics of "pte_present", which I don't really like.
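
To make the concern concrete, here is a purely illustrative sketch (the
pte_present_strict() helper is hypothetical, not something in the patch)
of how the old single-bit semantics could be kept available next to the
widened test, so callers that must not treat a NUMA-hinting pte as
present still have an accessor with the original meaning:

static inline int pte_present_strict(pte_t pte)
{
	/* old semantics: the page really is mapped */
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline int pte_present(pte_t pte)
{
	/* widened: also true while the pte is temporarily unmapped for a
	 * NUMA hinting fault (_PAGE_NUMA_PTE set, _PAGE_PRESENT clear) */
	return pte_val(pte) & (_PAGE_PRESENT | _PAGE_NUMA_PTE);
}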

> static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
> static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
>
> +#ifdef CONFIG_AUTONUMA
> +static inline int pte_numa(pte_t pte)
> +{
> + return (pte_val(pte) &
> + (_PAGE_NUMA_PTE|_PAGE_PRESENT)) == _PAGE_NUMA_PTE;
> +}
> +
> +#endif

Why the #ifdef around pte_numa() and not around any of the other new helpers?

> +static inline pte_t pte_mknonnuma(pte_t pte)
> +{
> + pte_val(pte) &= ~_PAGE_NUMA_PTE;
> + pte_val(pte) |= (_PAGE_PRESENT|_PAGE_ACCESSED);
> +
> + return pte;
> +}
> +
> +static inline pte_t pte_mknuma(pte_t pte)
> +{
> + pte_val(pte) |= _PAGE_NUMA_PTE;
> + pte_val(pte) &= ~_PAGE_PRESENT;
> + return pte;
> +}
> +
> +static inline int pmd_numa(pmd_t pmd)
> +{
> + /* PMD tracking not implemented */
> + return 0;
> +}
> +
> +static inline pmd_t pmd_mknonnuma(pmd_t pmd)
> +{
> + BUG();
> + return pmd;
> +}
> +
> +static inline pmd_t pmd_mknuma(pmd_t pmd)
> +{
> + BUG();
> + return pmd;
> +}
> +
> +/* No pmd flags on powerpc */
> +#define set_pmd_at(mm, addr, pmdp, pmd) do { } while (0)
> +
> /* Conversion functions: convert a page and protection to a page entry,
> * and a page entry and page directory to the page they refer to.
> *
> diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
> index 59247e8..f7e1468 100644
> --- a/arch/powerpc/include/asm/pte-hash64-64k.h
> +++ b/arch/powerpc/include/asm/pte-hash64-64k.h
> @@ -7,6 +7,8 @@
> #define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */
> #define _PAGE_4K_PFN 0x20000000 /* PFN is for a single 4k page */
>
> +#define _PAGE_NUMA_PTE 0x40000000 /* Adjust PTE_RPN_SHIFT below */
> +
> /* For 64K page, we don't have a separate _PAGE_HASHPTE bit. Instead,
> * we set that to be the whole sub-bits mask. The C code will only
> * test this, so a multi-bit mask will work. For combo pages, this
> @@ -36,7 +38,7 @@
> * That gives us a max RPN of 34 bits, which means a max of 50 bits
> * of addressable physical space, or 46 bits for the special 4k PFNs.
> */
> -#define PTE_RPN_SHIFT (30)
> +#define PTE_RPN_SHIFT (31)

I'm concerned. We are already running short on RPN bits. We can't spare
more. If you absolutely need a PTE bit, we'll need to explore ways to
free some, but just reducing the RPN isn't an option.

Think of what happens if _PAGE_4K_PFN is set...

Also, you conveniently avoided all the other pte-*.h variants, which
means you broke the build for everything except ppc64 with 64K pages.
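
For reference, the bit budget implied by the comment being modified,
assuming a 64-bit pte with the RPN occupying the bits from
PTE_RPN_SHIFT upward:

	PTE_RPN_SHIFT = 30:  64 - 30 = 34 RPN bits
	                     34 + 16 = 50 bits of physical space (64K pages)
	                     34 + 12 = 46 bits with _PAGE_4K_PFN
	PTE_RPN_SHIFT = 31:  64 - 31 = 33 RPN bits
	                     33 + 16 = 49 bits of physical space (64K pages)
	                     33 + 12 = 45 bits with _PAGE_4K_PFN

So the change quietly gives up one bit of addressable physical memory
in both cases.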

> #ifndef __ASSEMBLY__
>
> diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
> index 39b1597..80af41e 100644
> --- a/arch/powerpc/mm/numa.c
> +++ b/arch/powerpc/mm/numa.c
> @@ -22,6 +22,7 @@
> #include <linux/pfn.h>
> #include <linux/cpuset.h>
> #include <linux/node.h>
> +#include <linux/page_autonuma.h>
> #include <asm/sparsemem.h>
> #include <asm/prom.h>
> #include <asm/smp.h>
> @@ -1045,7 +1046,7 @@ void __init do_init_bootmem(void)
> * all reserved areas marked.
> */
> NODE_DATA(nid) = careful_zallocation(nid,
> - sizeof(struct pglist_data),
> + autonuma_pglist_data_size(),
> SMP_CACHE_BYTES, end_pfn);
>
> dbg("node %d\n", nid);
> diff --git a/mm/autonuma.c b/mm/autonuma.c
> index ada6c57..a4da3f3 100644
> --- a/mm/autonuma.c
> +++ b/mm/autonuma.c
> @@ -25,7 +25,7 @@ unsigned long autonuma_flags __read_mostly =
> #ifdef CONFIG_AUTONUMA_DEFAULT_ENABLED
> |(1<<AUTONUMA_ENABLED_FLAG)
> #endif
> - |(1<<AUTONUMA_SCAN_PMD_FLAG);
> + |(0<<AUTONUMA_SCAN_PMD_FLAG);

That changes the default across all architectures; is that OK as far as
Andrea is concerned?
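
If the intent is only to disable PMD scanning where an architecture
cannot support it, one way to keep the generic default intact would be
an arch-overridable define, roughly along these lines (the
AUTONUMA_SCAN_PMD_DEFAULT macro is hypothetical, not part of the patch):

/* Hypothetical sketch: architectures without PMD tracking could
 * override this in their headers instead of changing the generic
 * default for everybody. */
#ifndef AUTONUMA_SCAN_PMD_DEFAULT
#define AUTONUMA_SCAN_PMD_DEFAULT 1
#endif

unsigned long autonuma_flags __read_mostly =
#ifdef CONFIG_AUTONUMA_DEFAULT_ENABLED
	(1 << AUTONUMA_ENABLED_FLAG) |
#endif
	(AUTONUMA_SCAN_PMD_DEFAULT << AUTONUMA_SCAN_PMD_FLAG);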

Cheers,
Ben.

> static DEFINE_MUTEX(knumad_mm_mutex);
>


