From: Thomas Gleixner <tglx@linutronix.de>
Subject: Re: [PATCH] highmem: fix highmem for xtensa
Date: Fri, 13 Nov 2020

Max,

On Fri, Nov 13 2020 at 08:34, Max Filippov wrote:
> On Fri, Nov 13, 2020 at 6:36 AM Thomas Gleixner <tglx@linutronix.de> wrote:
>> On Fri, Nov 13 2020 at 05:50, Max Filippov wrote:
>> > On Fri, Nov 13, 2020 at 5:40 AM Thomas Gleixner <tglx@linutronix.de> wrote:
>> >> What's wrong with just doing the obvious and making the fixmap defines
>> >> the other way round?
>> >
>> > It becomes really awkward when we get to support high memory with an
>> > aliasing data cache: we must think about the actual virtual addresses
>> > assigned to pages, and it feels much simpler when it's done this way.
>>
>> Feelings are not really a technical argument. Is there any functional
>> difference which matters?
>
> arch_kmap_local_map_idx must produce an index, based on type and
> pfn, that will be translated to a virtual address with the same color this
> page would have had if it were in low memory. With a positive (bottom-up)
> fixmap the formula is: (type * (number of cache colors)) + (color of the pfn).
> With a negative (top-down) fixmap there must be additional +1 and -1 terms in it.

I agree that the top-down mechanics are not really intuitive, but that
does not justify the ifdeffery in the generic code.
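
As a minimal sketch of the colour constraint Max describes above (a
userspace toy model; DCACHE_N_COLORS, KM_MAX_IDX and FIXADDR_START are
made-up illustrative values, not the real xtensa configuration):

/* Userspace toy model of the bottom-up index calculation; all
 * constants here are made up for illustration and are not the real
 * xtensa values.
 */
#include <stdio.h>

#define PAGE_SHIFT      12
#define DCACHE_N_COLORS 4
#define KM_MAX_IDX      16
#define FIXADDR_START   0xcff00000UL    /* colour 0 by construction */

int main(void)
{
        int type = 2, cpu = 0;
        unsigned long pfn = 0x12345;    /* colour 1 */
        unsigned long color = pfn % DCACHE_N_COLORS;

        /* bottom-up: (type * number of colours) + colour of the pfn */
        unsigned long idx = (type + KM_MAX_IDX * cpu) * DCACHE_N_COLORS + color;
        unsigned long vaddr = FIXADDR_START + (idx << PAGE_SHIFT);

        printf("pfn colour %lu, vaddr colour %lu\n",
               color, (vaddr >> PAGE_SHIFT) % DCACHE_N_COLORS);
        return 0;
}

With a colour-aligned FIXADDR_START the low bits of the index are the
pfn colour, so the resulting virtual address ends up with the same
colour as the page.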

xtensa can just use the generic fix_to_virt()/virt_to_fix() mechanics. All
it needs is to adjust the mapping defines and to change the color offset
to

NR_COLORS - 1 - color

which is not an unreasonable ask. As a side effect, all highmem-inflicted
systems which do not have the cache aliasing problem can use the
generic code as is. See the untested patch below.
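
The same toy model with the generic top-down translation from
asm-generic/fixmap.h (vaddr = FIXADDR_TOP - (idx << PAGE_SHIFT)) and the
reversed colour offset; the constants are again illustrative only, with
FIXADDR_TOP derived the same way as in the patch below:

/* The same toy model, but mapping through the generic top-down fixmap
 * translation with the colour offset reversed.  Constants are again
 * made up; FIXADDR_TOP mirrors the layout used in the patch below.
 */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define DCACHE_N_COLORS 4
#define KM_MAX_IDX      16
#define NR_CPUS         1
#define NR_SLOTS        (KM_MAX_IDX * NR_CPUS * DCACHE_N_COLORS)
#define FIXADDR_START   0xcff00000UL    /* colour 0, as if PMD aligned */
#define FIXADDR_TOP     (FIXADDR_START + NR_SLOTS * PAGE_SIZE - PAGE_SIZE)

int main(void)
{
        int type = 2, cpu = 0;
        unsigned long pfn = 0x12345;    /* colour 1 */
        unsigned long color = pfn % DCACHE_N_COLORS;

        /* top-down: reverse the colour offset so the virtual address
         * colour still tracks the pfn colour
         */
        unsigned long idx = (type + KM_MAX_IDX * cpu) * DCACHE_N_COLORS
                + DCACHE_N_COLORS - 1 - color;
        unsigned long vaddr = FIXADDR_TOP - (idx << PAGE_SHIFT);

        printf("pfn colour %lu, vaddr colour %lu\n",
               color, (vaddr >> PAGE_SHIFT) % DCACHE_N_COLORS);
        return 0;
}

Dropping the '- 1' shifts the resulting colour by one page, which is
the extra +1/-1 Max refers to above.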

It builds for some configs, but smp_lx200_defconfig (which has the cache
aliasing) fails to build even without this patch (highmem.o at least
builds).

Toolchain is the one from https://mirrors.edge.kernel.org/pub/tools/crosstool/

Thanks,

tglx
---
Subject: xtensa/mm/highmem: Make generic kmap_atomic() work correctly
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 13 Nov 2020 21:25:12 +0100

The conversion to the generic kmap_atomic() implementation missed the fact
that xtensa's fixmap works bottom up while all other implementations work
top down. There is no real reason why xtensa needs to work that way.

Cure it by:

- Using the generic fix_to_virt()/virt_to_fix() functions which work top
down
- Adjusting the mapping defines
- Using the generic index calculation for the non-cache-aliasing case
- Reversing the cache colour offset so the effective index is correct

While at it, remove the outdated and misleading comment above the fixmap
enum which originates from the initial copy&pasta of this code from i386.

Reported-by: Max Filippov <jcmvbkbc@gmail.com>
Fixes: 629ed3f7dad2 ("xtensa/mm/highmem: Switch to generic kmap atomic")
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/xtensa/include/asm/fixmap.h  |   55 ++++----------------------------------
 arch/xtensa/include/asm/highmem.h |   15 ++++++----
 arch/xtensa/mm/highmem.c          |   18 +++++++-----
 arch/xtensa/mm/init.c             |    4 +-
 arch/xtensa/mm/mmu.c              |    3 +-
 5 files changed, 31 insertions(+), 64 deletions(-)

--- a/arch/xtensa/include/asm/fixmap.h
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -17,63 +17,22 @@
#include <linux/threads.h>
#include <linux/pgtable.h>
#include <asm/kmap_size.h>
-#endif

-/*
- * Here we define all the compile-time 'special' virtual
- * addresses. The point is to have a constant address at
- * compile time, but to set the physical address only
- * in the boot process. We allocate these special addresses
- * from the start of the consistent memory region upwards.
- * Also this lets us do fail-safe vmalloc(), we
- * can guarantee that these special addresses and
- * vmalloc()-ed addresses never overlap.
- *
- * these 'compile-time allocated' memory buffers are
- * fixed-size 4k pages. (or larger if used with an increment
- * higher than 1) use fixmap_set(idx,phys) to associate
- * physical memory with fixmap indices.
- */
+/* The map slots for temporary mappings via kmap_atomic/local(). */
enum fixed_addresses {
-#ifdef CONFIG_HIGHMEM
- /* reserved pte's for temporary kernel mappings */
FIX_KMAP_BEGIN,
FIX_KMAP_END = FIX_KMAP_BEGIN +
(KM_MAX_IDX * NR_CPUS * DCACHE_N_COLORS) - 1,
-#endif
__end_of_fixed_addresses
};

-#define FIXADDR_TOP (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE)
+#define FIXADDR_END (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
-
-#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
+/* Enforce that FIXADDR_START is PMD aligned to handle cache aliasing */
+#define FIXADDR_START ((FIXADDR_END - FIXADDR_SIZE) & PMD_MASK)
+#define FIXADDR_TOP (FIXADDR_START + FIXADDR_SIZE - PAGE_SIZE)

-#ifndef __ASSEMBLY__
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without translation, we catch the bug with a NULL-deference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static __always_inline unsigned long fix_to_virt(const unsigned int idx)
-{
- /* Check if this memory layout is broken because fixmap overlaps page
- * table.
- */
- BUILD_BUG_ON(FIXADDR_START <
- TLBTEMP_BASE_1 + TLBTEMP_SIZE);
- BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
- return __fix_to_virt(idx);
-}
-
-static inline unsigned long virt_to_fix(const unsigned long vaddr)
-{
- BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
- return __virt_to_fix(vaddr);
-}
-
-#endif
+#include <asm-generic/fixmap.h>

+#endif /* CONFIG_HIGHMEM */
#endif
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
@@ -12,6 +12,7 @@
#ifndef _XTENSA_HIGHMEM_H
#define _XTENSA_HIGHMEM_H

+#ifdef CONFIG_HIGHMEM
#include <linux/wait.h>
#include <linux/pgtable.h>
#include <asm/cacheflush.h>
@@ -58,6 +59,13 @@ static inline wait_queue_head_t *get_pkm
{
return pkmap_map_wait_arr + color;
}
+
+enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn);
+#define arch_kmap_local_map_idx kmap_local_map_idx
+
+enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr);
+#define arch_kmap_local_unmap_idx kmap_local_unmap_idx
+
#endif

extern pte_t *pkmap_page_table;
@@ -67,15 +75,10 @@ static inline void flush_cache_kmaps(voi
flush_cache_all();
}

-enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn);
-#define arch_kmap_local_map_idx kmap_local_map_idx
-
-enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr);
-#define arch_kmap_local_unmap_idx kmap_local_unmap_idx
-
#define arch_kmap_local_post_unmap(vaddr) \
local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)

void kmap_init(void);

+#endif /* CONFIG_HIGHMEM */
#endif
--- a/arch/xtensa/mm/highmem.c
+++ b/arch/xtensa/mm/highmem.c
@@ -23,16 +23,16 @@ static void __init kmap_waitqueues_init(
for (i = 0; i < ARRAY_SIZE(pkmap_map_wait_arr); ++i)
init_waitqueue_head(pkmap_map_wait_arr + i);
}
-#else
-static inline void kmap_waitqueues_init(void)
-{
-}
-#endif

static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
{
- return (type + KM_MAX_IDX * smp_processor_id()) * DCACHE_N_COLORS +
- color;
+ int idx = (type + KM_MAX_IDX * smp_processor_id()) * DCACHE_N_COLORS;
+
+ /*
+ * The fixmap operates top down, so the color offset needs to be
+ * reversed as well.
+ */
+ return idx + DCACHE_N_COLORS - 1 - color;
}

enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn)
@@ -45,6 +45,10 @@ enum fixed_addresses kmap_local_unmap_id
return kmap_idx(type, DCACHE_ALIAS(addr));
}

+#else
+static inline void kmap_waitqueues_init(void) { }
+#endif
+
void __init kmap_init(void)
{
/* Check if this memory layout is broken because PKMAP overlaps
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -147,8 +147,8 @@ void __init mem_init(void)
#ifdef CONFIG_HIGHMEM
PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
(LAST_PKMAP*PAGE_SIZE) >> 10,
- FIXADDR_START, FIXADDR_TOP,
- (FIXADDR_TOP - FIXADDR_START) >> 10,
+ FIXADDR_START, FIXADDR_END,
+ (FIXADDR_END - FIXADDR_START) >> 10,
#endif
PAGE_OFFSET, PAGE_OFFSET +
(max_low_pfn - min_low_pfn) * PAGE_SIZE,
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -52,7 +52,8 @@ static void * __init init_pmd(unsigned l

static void __init fixedrange_init(void)
{
- init_pmd(__fix_to_virt(0), __end_of_fixed_addresses);
+ BUILD_BUG_ON(FIXADDR_START < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
+ init_pmd(FIXADDR_START, __end_of_fixed_addresses);
}
#endif





