Date: 2020-05-26
From: Kees Cook
Subject: Re: [PATCH v1] x86: Pin cr4 FSGSBASE
On Mon, May 25, 2020 at 10:28:48PM -0700, Andi Kleen wrote:
> From: Andi Kleen <ak@linux.intel.com>
>
> Since there seem to be kernel modules floating around that set
> FSGSBASE incorrectly, prevent this in the CR4 pinning. Currently
> CR4 pinning just checks that certain bits stay set; this change
> also checks that the FSGSBASE bit stays clear and, if it is found
> set, clears it again.
>
> Note this patch will need to be undone when the full FSGSBASE
> patches are merged. But it's a reasonable solution for v5.2+
> stable at least. Sadly, the older kernels don't have the necessary
> infrastructure for this (although a simpler version of this
> could be added there too).
>
> Cc: stable@vger.kernel.org # v5.2+
> Signed-off-by: Andi Kleen <ak@linux.intel.com>
> ---
> arch/x86/kernel/cpu/common.c | 5 +++++
> 1 file changed, 5 insertions(+)
>
> diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
> index bed0cb83fe24..1f5b7871ae9a 100644
> --- a/arch/x86/kernel/cpu/common.c
> +++ b/arch/x86/kernel/cpu/common.c
> @@ -385,6 +385,11 @@ void native_write_cr4(unsigned long val)
>  		/* Warn after we've set the missing bits. */
>  		WARN_ONCE(bits_missing, "CR4 bits went missing: %lx!?\n",
>  			  bits_missing);
> +		if (val & X86_CR4_FSGSBASE) {
> +			WARN_ONCE(1, "CR4 unexpectedly set FSGSBASE!?\n");
> +			val &= ~X86_CR4_FSGSBASE;
> +			goto set_register;
> +		}
>  	}
>  }
>  EXPORT_SYMBOL(native_write_cr4);
> --
> 2.25.4
>
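
As a quick sanity check, the intended behavior above can be modeled in
userspace (an illustrative sketch only, not kernel code: the CR4 bit
positions are from the SDM, and shadow_cr4/model_write_cr4 are made-up
names standing in for the real register write):

#include <stdio.h>

/* CR4 bit positions per the Intel SDM; userspace model only. */
#define X86_CR4_UMIP		(1UL << 11)
#define X86_CR4_FSGSBASE	(1UL << 16)
#define X86_CR4_SMEP		(1UL << 20)
#define X86_CR4_SMAP		(1UL << 21)

static unsigned long cr4_pinned_bits =
	X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP;
static unsigned long shadow_cr4;	/* stands in for the CR4 register */

static void model_write_cr4(unsigned long val)
{
set_register:
	shadow_cr4 = val;		/* stands in for: mov %0,%%cr4 */

	if ((val & cr4_pinned_bits) != cr4_pinned_bits) {
		val |= ~val & cr4_pinned_bits;	/* re-set missing pinned bits */
		goto set_register;		/* ... and write again */
	}
	if (val & X86_CR4_FSGSBASE) {
		val &= ~X86_CR4_FSGSBASE;	/* force FSGSBASE back off */
		goto set_register;
	}
}

int main(void)
{
	/* A buggy module tries to drop SMEP and turn on FSGSBASE. */
	model_write_cr4((cr4_pinned_bits & ~X86_CR4_SMEP) | X86_CR4_FSGSBASE);

	/* Ends up with exactly SMEP|SMAP|UMIP set, FSGSBASE cleared. */
	printf("CR4 ends up as %#lx\n", shadow_cr4);
	return 0;
}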

And if this is going to be more permanent, we can separate the mask
(untested):


diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index bed0cb83fe24..ead64f7420a5 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -347,6 +347,8 @@ static __always_inline void setup_umip(struct cpuinfo_x86 *c)
 	cr4_clear_bits(X86_CR4_UMIP);
 }
 
+static const unsigned long cr4_pinned_mask =
+	X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP | X86_CR4_FSGSBASE;
 static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
 static unsigned long cr4_pinned_bits __ro_after_init;
 
@@ -371,20 +373,20 @@ EXPORT_SYMBOL(native_write_cr0);

 void native_write_cr4(unsigned long val)
 {
-	unsigned long bits_missing = 0;
+	unsigned long bits_changed = 0;
 
 set_register:
 	asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));
 
 	if (static_branch_likely(&cr_pinning)) {
-		if (unlikely((val & cr4_pinned_bits) != cr4_pinned_bits)) {
-			bits_missing = ~val & cr4_pinned_bits;
-			val |= bits_missing;
+		if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
+			bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
+			val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
 			goto set_register;
 		}
 		/* Warn after we've set the missing bits. */
-		WARN_ONCE(bits_missing, "CR4 bits went missing: %lx!?\n",
-			  bits_missing);
+		WARN_ONCE(bits_changed, "pinned CR4 bits changed: %lx!?\n",
+			  bits_changed);
 	}
 }
 EXPORT_SYMBOL(native_write_cr4);
@@ -396,7 +398,7 @@ void cr4_init(void)
 	if (boot_cpu_has(X86_FEATURE_PCID))
 		cr4 |= X86_CR4_PCIDE;
 	if (static_branch_likely(&cr_pinning))
-		cr4 |= cr4_pinned_bits;
+		cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;
 
 	__write_cr4(cr4);
 
@@ -411,10 +413,7 @@ void cr4_init(void)
  */
 static void __init setup_cr_pinning(void)
 {
-	unsigned long mask;
-
-	mask = (X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP);
-	cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & mask;
+	cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
 	static_key_enable(&cr_pinning.key);
 }
 
--
Kees Cook
