Subject: Re: linux-next: fix ups for clashes between akpm and powerpc trees
Hi all,

On Wed, 3 Jun 2020 20:26:55 +1000 Stephen Rothwell <sfr@canb.auug.org.au> wrote:
>
> Some things turned up in the powerpc tree today that required some changes
> to patches in the akpm tree, plus the following fixup patch, provided
> (mostly) by Michael. I have applied this as a single patch today, but
> parts of it should probably be folded into other patches.
>
> From: Stephen Rothwell <sfr@canb.auug.org.au>
> Date: Wed, 3 Jun 2020 20:03:49 +1000
> Subject: [PATCH] powerpc fixes for changes clashing with akpm tree changes
>
> Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
> ---

I applied this again today. It is slightly different from yesterday's version and now looks like this:

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 25c3cb8272c0..a6799723cd98 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1008,6 +1008,12 @@ extern struct page *p4d_page(p4d_t p4d);
#define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS)
#define p4d_page_vaddr(p4d) __va(p4d_val(p4d) & ~P4D_MASKED_BITS)

+static inline unsigned long pgd_index(unsigned long address)
+{
+ return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
+}
+#define pgd_index pgd_index
+
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index c188a6f64bcd..1927e1b653f2 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -205,10 +205,6 @@ static inline void pmd_clear(pmd_t *pmdp)
*pmdp = __pmd(0);
}

-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
/* to find an entry in a page-table-directory */
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
@@ -241,7 +237,7 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p
pte_basic_t old = pte_val(*p);
pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
int num, i;
- pmd_t *pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
+ pmd_t *pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);

if (!huge)
num = PAGE_SIZE / SZ_4K;
@@ -341,6 +337,10 @@ static inline int pte_young(pte_t pte)
pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

+#define pte_offset_kernel(dir, addr) \
+ (pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
+ pte_index(addr))
+
/*
* Encode and decode a swap entry.
* Note that the bits we use in a PTE for representing a swap entry
diff --git a/arch/powerpc/mm/kasan/8xx.c b/arch/powerpc/mm/kasan/8xx.c
index db4ef44af22f..569d98a41881 100644
--- a/arch/powerpc/mm/kasan/8xx.c
+++ b/arch/powerpc/mm/kasan/8xx.c
@@ -10,7 +10,7 @@
static int __init
kasan_init_shadow_8M(unsigned long k_start, unsigned long k_end, void *block)
{
- pmd_t *pmd = pmd_ptr_k(k_start);
+ pmd_t *pmd = pmd_off_k(k_start);
unsigned long k_cur, k_next;

for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd += 2, block += SZ_8M) {
@@ -59,7 +59,7 @@ int __init kasan_init_region(void *start, size_t size)
return ret;

for (; k_cur < k_end; k_cur += PAGE_SIZE) {
- pmd_t *pmd = pmd_ptr_k(k_cur);
+ pmd_t *pmd = pmd_off_k(k_cur);
void *va = block + k_cur - k_start;
pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);

diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c
index 4bc491a4a1fd..a32b4640b9de 100644
--- a/arch/powerpc/mm/kasan/book3s_32.c
+++ b/arch/powerpc/mm/kasan/book3s_32.c
@@ -46,7 +46,7 @@ int __init kasan_init_region(void *start, size_t size)
kasan_update_early_region(k_start, k_cur, __pte(0));

for (; k_cur < k_end; k_cur += PAGE_SIZE) {
- pmd_t *pmd = pmd_ptr_k(k_cur);
+ pmd_t *pmd = pmd_off_k(k_cur);
void *va = block + k_cur - k_start;
pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);

diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 286441bbbe49..92e8929cbe3e 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -74,7 +74,7 @@ static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
pgprot_t prot, int psize, bool new)
{
- pmd_t *pmdp = pmd_ptr_k(va);
+ pmd_t *pmdp = pmd_off_k(va);
pte_t *ptep;

if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 45a0556089e8..1136257c3a99 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -264,7 +264,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
#if defined(CONFIG_PPC_8xx)
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
- pmd_t *pmd = pmd_ptr(mm, addr);
+ pmd_t *pmd = pmd_off(mm, addr);
pte_basic_t val;
pte_basic_t *entry = &ptep->pte;
int num = is_hugepd(*((hugepd_t *)pmd)) ? 1 : SZ_512K / SZ_4K;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index e2d054c9575e..6eb4eab79385 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -40,7 +40,7 @@ notrace void __init early_ioremap_init(void)
{
unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
pte_t *ptep = (pte_t *)early_fixmap_pagetable;
- pmd_t *pmdp = pmd_ptr_k(addr);
+ pmd_t *pmdp = pmd_off_k(addr);

for (; (s32)(FIXADDR_TOP - addr) > 0;
addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
--
2.26.2
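
For anyone wondering about the pmd_ptr{,_k}() -> pmd_off{,_k}() renames above:
they follow the generic page table walkers that the akpm tree now provides.
Roughly, as a sketch from memory (assuming the definitions that went into
include/linux/pgtable.h in that series; check the actual tree for the
authoritative versions), they look like:

/* Walk to the pmd covering @va in @mm's page tables
 * (sketch of the assumed generic helper). */
static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
}

/* Same walk, but in the kernel page tables (init_mm). */
static inline pmd_t *pmd_off_k(unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
}

i.e. a full pgd -> p4d -> pud -> pmd walk, which is also why the open-coded
walk in pte_update() above gains the extra p4d_offset() step.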
--
Cheers,
Stephen Rothwell