    Subject: [PATCH v27 29/31] mm: Move arch_calc_vm_prot_bits() to arch/x86/include/asm/mman.h
    To prepare for the introduction of PROT_SHADOW_STACK and to be consistent
    with other architectures, move arch_vm_get_page_prot() and
    arch_calc_vm_prot_bits() to arch/x86/include/asm/mman.h.

    Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
    Reviewed-by: Kees Cook <keescook@chromium.org>
    Reviewed-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
    ---
    arch/x86/include/asm/mman.h | 30 ++++++++++++++++++++++++++++++
    arch/x86/include/uapi/asm/mman.h | 28 +++-------------------------
    2 files changed, 33 insertions(+), 25 deletions(-)
    create mode 100644 arch/x86/include/asm/mman.h
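
    Not part of the patch: a minimal, self-contained sketch of what the
    arch_calc_vm_prot_bits() macro below does with the 4-bit protection key.
    The DEMO_VM_PKEY_BIT* constants are made-up stand-ins for the kernel's
    VM_PKEY_BIT0..3 flags, which are not reproduced here.

        #include <stdio.h>

        /* Made-up stand-ins for VM_PKEY_BIT0..3; the real values live in
         * the x86 kernel headers. */
        #define DEMO_VM_PKEY_BIT0 (1UL << 0)
        #define DEMO_VM_PKEY_BIT1 (1UL << 1)
        #define DEMO_VM_PKEY_BIT2 (1UL << 2)
        #define DEMO_VM_PKEY_BIT3 (1UL << 3)

        /* Same shape as arch_calc_vm_prot_bits(): each bit of the 4-bit
         * pkey selects one VM_PKEY_BIT* flag for the VMA. */
        static unsigned long demo_calc_vm_prot_bits(unsigned long key)
        {
                return (key & 0x1 ? DEMO_VM_PKEY_BIT0 : 0) |
                       (key & 0x2 ? DEMO_VM_PKEY_BIT1 : 0) |
                       (key & 0x4 ? DEMO_VM_PKEY_BIT2 : 0) |
                       (key & 0x8 ? DEMO_VM_PKEY_BIT3 : 0);
        }

        int main(void)
        {
                for (unsigned long key = 0; key < 16; key++)
                        printf("pkey %2lu -> vm_flags pkey bits 0x%lx\n",
                               key, demo_calc_vm_prot_bits(key));
                return 0;
        }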

    diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
    new file mode 100644
    index 000000000000..629f6c81263a
    --- /dev/null
    +++ b/arch/x86/include/asm/mman.h
    @@ -0,0 +1,30 @@
    +/* SPDX-License-Identifier: GPL-2.0 */
    +#ifndef _ASM_X86_MMAN_H
    +#define _ASM_X86_MMAN_H
    +
    +#include <linux/mm.h>
    +#include <uapi/asm/mman.h>
    +
    +#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
    +/*
    + * Take the 4 protection key bits out of the vma->vm_flags
    + * value and turn them in to the bits that we can put in
    + * to a pte.
    + *
    + * Only override these if Protection Keys are available
    + * (which is only on 64-bit).
    + */
    +#define arch_vm_get_page_prot(vm_flags) __pgprot( \
    + ((vm_flags) & VM_PKEY_BIT0 ? _PAGE_PKEY_BIT0 : 0) | \
    + ((vm_flags) & VM_PKEY_BIT1 ? _PAGE_PKEY_BIT1 : 0) | \
    + ((vm_flags) & VM_PKEY_BIT2 ? _PAGE_PKEY_BIT2 : 0) | \
    + ((vm_flags) & VM_PKEY_BIT3 ? _PAGE_PKEY_BIT3 : 0))
    +
    +#define arch_calc_vm_prot_bits(prot, key) ( \
    + ((key) & 0x1 ? VM_PKEY_BIT0 : 0) | \
    + ((key) & 0x2 ? VM_PKEY_BIT1 : 0) | \
    + ((key) & 0x4 ? VM_PKEY_BIT2 : 0) | \
    + ((key) & 0x8 ? VM_PKEY_BIT3 : 0))
    +#endif
    +
    +#endif /* _ASM_X86_MMAN_H */
    diff --git a/arch/x86/include/uapi/asm/mman.h b/arch/x86/include/uapi/asm/mman.h
    index d4a8d0424bfb..f28fa4acaeaf 100644
    --- a/arch/x86/include/uapi/asm/mman.h
    +++ b/arch/x86/include/uapi/asm/mman.h
    @@ -1,31 +1,9 @@
    /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
    -#ifndef _ASM_X86_MMAN_H
    -#define _ASM_X86_MMAN_H
    +#ifndef _UAPI_ASM_X86_MMAN_H
    +#define _UAPI_ASM_X86_MMAN_H

    #define MAP_32BIT 0x40 /* only give out 32bit addresses */

    -#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
    -/*
    - * Take the 4 protection key bits out of the vma->vm_flags
    - * value and turn them in to the bits that we can put in
    - * to a pte.
    - *
    - * Only override these if Protection Keys are available
    - * (which is only on 64-bit).
    - */
    -#define arch_vm_get_page_prot(vm_flags) __pgprot( \
    - ((vm_flags) & VM_PKEY_BIT0 ? _PAGE_PKEY_BIT0 : 0) | \
    - ((vm_flags) & VM_PKEY_BIT1 ? _PAGE_PKEY_BIT1 : 0) | \
    - ((vm_flags) & VM_PKEY_BIT2 ? _PAGE_PKEY_BIT2 : 0) | \
    - ((vm_flags) & VM_PKEY_BIT3 ? _PAGE_PKEY_BIT3 : 0))
    -
    -#define arch_calc_vm_prot_bits(prot, key) ( \
    - ((key) & 0x1 ? VM_PKEY_BIT0 : 0) | \
    - ((key) & 0x2 ? VM_PKEY_BIT1 : 0) | \
    - ((key) & 0x4 ? VM_PKEY_BIT2 : 0) | \
    - ((key) & 0x8 ? VM_PKEY_BIT3 : 0))
    -#endif
    -
    #include <asm-generic/mman.h>

    -#endif /* _ASM_X86_MMAN_H */
    +#endif /* _UAPI_ASM_X86_MMAN_H */
    --
    2.21.0
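
    Not part of the patch either: a hedged user-space sketch of the call path
    that eventually exercises arch_calc_vm_prot_bits(). pkey_alloc(),
    pkey_mprotect() and pkey_free() are the glibc wrappers for the
    protection-keys syscalls; the program only succeeds on hardware and
    kernels with pkeys support, and error handling is kept minimal.

        #define _GNU_SOURCE
        #include <stdio.h>
        #include <sys/mman.h>

        int main(void)
        {
                size_t len = 4096;
                void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (p == MAP_FAILED) {
                        perror("mmap");
                        return 1;
                }

                int pkey = pkey_alloc(0, 0);    /* fails without pkeys support */
                if (pkey < 0) {
                        perror("pkey_alloc");
                        return 1;
                }

                /*
                 * On the kernel side, (prot, pkey) is translated into VMA
                 * flags by calc_vm_prot_bits(), which uses the
                 * arch_calc_vm_prot_bits() macro moved by this patch to set
                 * the VM_PKEY_BIT* bits.
                 */
                if (pkey_mprotect(p, len, PROT_READ, pkey) < 0) {
                        perror("pkey_mprotect");
                        return 1;
                }

                printf("remapped %p read-only with pkey %d\n", p, pkey);
                pkey_free(pkey);
                munmap(p, len);
                return 0;
        }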