From:	David Howells <dhowells@redhat.com>
Subject: [PATCH 02/38] Disintegrate asm/system.h for X86 [ver #3]
    Disintegrate asm/system.h for X86.

    Signed-off-by: David Howells <dhowells@redhat.com>
    cc: x86@kernel.org
    ---

    arch/x86/ia32/ia32_aout.c | 1
    arch/x86/include/asm/apic.h | 1
    arch/x86/include/asm/auxvec.h | 7
    arch/x86/include/asm/barrier.h | 116 +++++++
    arch/x86/include/asm/bug.h | 4
    arch/x86/include/asm/cacheflush.h | 1
    arch/x86/include/asm/elf.h | 1
    arch/x86/include/asm/exec.h | 1
    arch/x86/include/asm/futex.h | 1
    arch/x86/include/asm/i387.h | 1
    arch/x86/include/asm/local.h | 1
    arch/x86/include/asm/mc146818rtc.h | 1
    arch/x86/include/asm/processor.h | 31 ++
    arch/x86/include/asm/segment.h | 58 +++
    arch/x86/include/asm/special_insns.h | 199 +++++++++++
    arch/x86/include/asm/stackprotector.h | 1
    arch/x86/include/asm/switch_to.h | 129 +++++++
    arch/x86/include/asm/system.h | 527 ------------------------------
    arch/x86/include/asm/tlbflush.h | 2
    arch/x86/include/asm/virtext.h | 1
    arch/x86/kernel/acpi/cstate.c | 1
    arch/x86/kernel/apm_32.c | 1
    arch/x86/kernel/cpu/mcheck/p5.c | 1
    arch/x86/kernel/cpu/mcheck/therm_throt.c | 1
    arch/x86/kernel/cpu/mcheck/winchip.c | 1
    arch/x86/kernel/cpu/mtrr/generic.c | 1
    arch/x86/kernel/cpuid.c | 1
    arch/x86/kernel/i8259.c | 1
    arch/x86/kernel/irqinit.c | 1
    arch/x86/kernel/kgdb.c | 1
    arch/x86/kernel/ldt.c | 1
    arch/x86/kernel/machine_kexec_32.c | 1
    arch/x86/kernel/mca_32.c | 1
    arch/x86/kernel/module.c | 1
    arch/x86/kernel/msr.c | 1
    arch/x86/kernel/paravirt.c | 1
    arch/x86/kernel/pci-calgary_64.c | 1
    arch/x86/kernel/process.c | 1
    arch/x86/kernel/process_32.c | 2
    arch/x86/kernel/process_64.c | 2
    arch/x86/kernel/ptrace.c | 1
    arch/x86/kernel/setup.c | 1
    arch/x86/kernel/tce_64.c | 1
    arch/x86/kernel/tls.c | 1
    arch/x86/kernel/traps.c | 1
    arch/x86/mm/init.c | 1
    arch/x86/mm/init_32.c | 1
    arch/x86/mm/init_64.c | 1
    arch/x86/mm/pgtable_32.c | 1
    arch/x86/power/hibernate_32.c | 1
    50 files changed, 555 insertions(+), 561 deletions(-)
    create mode 100644 arch/x86/include/asm/barrier.h
    create mode 100644 arch/x86/include/asm/exec.h
    create mode 100644 arch/x86/include/asm/special_insns.h
    create mode 100644 arch/x86/include/asm/switch_to.h

    diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
    index 39e4909..18b2561 100644
    --- a/arch/x86/ia32/ia32_aout.c
    +++ b/arch/x86/ia32/ia32_aout.c
    @@ -26,7 +26,6 @@
    #include <linux/init.h>
    #include <linux/jiffies.h>

    -#include <asm/system.h>
    #include <asm/uaccess.h>
    #include <asm/pgalloc.h>
    #include <asm/cacheflush.h>
    diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
    index 3ab9bdd..108246d 100644
    --- a/arch/x86/include/asm/apic.h
    +++ b/arch/x86/include/asm/apic.h
    @@ -11,7 +11,6 @@
    #include <linux/atomic.h>
    #include <asm/fixmap.h>
    #include <asm/mpspec.h>
    -#include <asm/system.h>
    #include <asm/msr.h>

    #define ARCH_APICTIMER_STOPS_ON_C3 1
    diff --git a/arch/x86/include/asm/auxvec.h b/arch/x86/include/asm/auxvec.h
    index 1316b4c..77203ac 100644
    --- a/arch/x86/include/asm/auxvec.h
    +++ b/arch/x86/include/asm/auxvec.h
    @@ -9,4 +9,11 @@
    #endif
    #define AT_SYSINFO_EHDR 33

    +/* entries in ARCH_DLINFO: */
    +#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
    +# define AT_VECTOR_SIZE_ARCH 2
    +#else /* else it's non-compat x86-64 */
    +# define AT_VECTOR_SIZE_ARCH 1
    +#endif
    +
    #endif /* _ASM_X86_AUXVEC_H */
    diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
    new file mode 100644
    index 0000000..c6cd358
    --- /dev/null
    +++ b/arch/x86/include/asm/barrier.h
    @@ -0,0 +1,116 @@
    +#ifndef _ASM_X86_BARRIER_H
    +#define _ASM_X86_BARRIER_H
    +
    +#include <asm/alternative.h>
    +#include <asm/nops.h>
    +
    +/*
    + * Force strict CPU ordering.
    + * And yes, this is required on UP too when we're talking
    + * to devices.
    + */
    +
    +#ifdef CONFIG_X86_32
    +/*
    + * Some non-Intel clones support out of order store. wmb() ceases to be a
    + * nop for these.
    + */
    +#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
    +#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
    +#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
    +#else
    +#define mb() asm volatile("mfence":::"memory")
    +#define rmb() asm volatile("lfence":::"memory")
    +#define wmb() asm volatile("sfence" ::: "memory")
    +#endif
    +
    +/**
+ * read_barrier_depends - Flush all pending reads that subsequent reads
    + * depend on.
    + *
    + * No data-dependent reads from memory-like regions are ever reordered
    + * over this barrier. All reads preceding this primitive are guaranteed
    + * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data returned by
    + * any of the preceding reads. This primitive is much lighter weight than
    + * rmb() on most CPUs, and is never heavier weight than is
    + * rmb().
    + *
    + * These ordering constraints are respected by both the local CPU
    + * and the compiler.
    + *
    + * Ordering is not guaranteed by anything other than these primitives,
    + * not even by data dependencies. See the documentation for
    + * memory_barrier() for examples and URLs to more information.
    + *
    + * For example, the following code would force ordering (the initial
    + * value of "a" is zero, "b" is one, and "p" is "&a"):
    + *
+ * <programlisting>
+ *    CPU 0                          CPU 1
+ *
+ *    b = 2;
+ *    memory_barrier();
+ *    p = &b;                        q = p;
+ *                                   read_barrier_depends();
+ *                                   d = *q;
+ * </programlisting>
    + *
    + * because the read of "*q" depends on the read of "p" and these
    + * two reads are separated by a read_barrier_depends(). However,
    + * the following code, with the same initial values for "a" and "b":
    + *
+ * <programlisting>
+ *    CPU 0                          CPU 1
+ *
+ *    a = 2;
+ *    memory_barrier();
+ *    b = 3;                         y = b;
+ *                                   read_barrier_depends();
+ *                                   x = a;
+ * </programlisting>
    + *
    + * does not enforce ordering, since there is no data dependency between
    + * the read of "a" and the read of "b". Therefore, on some CPUs, such
    + * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
    + * in cases like this where there are no data dependencies.
    + **/
    +
    +#define read_barrier_depends() do { } while (0)
    +
    +#ifdef CONFIG_SMP
    +#define smp_mb() mb()
    +#ifdef CONFIG_X86_PPRO_FENCE
    +# define smp_rmb() rmb()
    +#else
    +# define smp_rmb() barrier()
    +#endif
    +#ifdef CONFIG_X86_OOSTORE
    +# define smp_wmb() wmb()
    +#else
    +# define smp_wmb() barrier()
    +#endif
    +#define smp_read_barrier_depends() read_barrier_depends()
    +#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
    +#else
    +#define smp_mb() barrier()
    +#define smp_rmb() barrier()
    +#define smp_wmb() barrier()
    +#define smp_read_barrier_depends() do { } while (0)
    +#define set_mb(var, value) do { var = value; barrier(); } while (0)
    +#endif
    +
    +/*
    + * Stop RDTSC speculation. This is needed when you need to use RDTSC
    + * (or get_cycles or vread that possibly accesses the TSC) in a defined
    + * code region.
    + *
    + * (Could use an alternative three way for this if there was one.)
    + */
    +static __always_inline void rdtsc_barrier(void)
    +{
    + alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
    + alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
    +}
    +
    +#endif /* _ASM_X86_BARRIER_H */
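
    As an illustration only (not a hunk of this patch), here is a minimal
    sketch of the publish/consume pattern the smp_wmb()/smp_rmb() macros
    collected in the new <asm/barrier.h> are used for; the function and
    variable names are hypothetical:

        #include <asm/barrier.h>

        static int payload;
        static int ready;

        static void publish(int v)          /* hypothetical producer */
        {
                payload = v;
                smp_wmb();                  /* order the payload store before "ready" */
                ready = 1;
        }

        static int consume(void)            /* hypothetical consumer */
        {
                if (!ready)
                        return -1;
                smp_rmb();                  /* pairs with the producer's smp_wmb() */
                return payload;             /* sees the value written before "ready" */
        }

    On CONFIG_SMP these expand to the fence/alternative forms above; on UP
    builds they collapse to plain compiler barriers.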
    diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
    index f654d1b..11e1152 100644
    --- a/arch/x86/include/asm/bug.h
    +++ b/arch/x86/include/asm/bug.h
    @@ -36,4 +36,8 @@ do { \
    #endif /* !CONFIG_BUG */

    #include <asm-generic/bug.h>
    +
    +
    +extern void show_regs_common(void);
    +
    #endif /* _ASM_X86_BUG_H */
    diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
    index 4e12668..9863ee3 100644
    --- a/arch/x86/include/asm/cacheflush.h
    +++ b/arch/x86/include/asm/cacheflush.h
    @@ -3,6 +3,7 @@

    /* Caches aren't brain-dead on the intel. */
    #include <asm-generic/cacheflush.h>
    +#include <asm/special_insns.h>

    #ifdef CONFIG_X86_PAT
    /*
    diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
    index 5f962df..f27f79a 100644
    --- a/arch/x86/include/asm/elf.h
    +++ b/arch/x86/include/asm/elf.h
    @@ -84,7 +84,6 @@ extern unsigned int vdso_enabled;
    (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))

    #include <asm/processor.h>
    -#include <asm/system.h>

    #ifdef CONFIG_X86_32
    #include <asm/desc.h>
    diff --git a/arch/x86/include/asm/exec.h b/arch/x86/include/asm/exec.h
    new file mode 100644
    index 0000000..54c2e1d
    --- /dev/null
    +++ b/arch/x86/include/asm/exec.h
    @@ -0,0 +1 @@
    +/* define arch_align_stack() here */
    diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
    index d09bb03..71ecbcb 100644
    --- a/arch/x86/include/asm/futex.h
    +++ b/arch/x86/include/asm/futex.h
    @@ -9,7 +9,6 @@
    #include <asm/asm.h>
    #include <asm/errno.h>
    #include <asm/processor.h>
    -#include <asm/system.h>

    #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
    asm volatile("1:\t" insn "\n" \
    diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
    index 2479049..82374df 100644
    --- a/arch/x86/include/asm/i387.h
    +++ b/arch/x86/include/asm/i387.h
    @@ -24,6 +24,7 @@
    #include <asm/user.h>
    #include <asm/uaccess.h>
    #include <asm/xsave.h>
    +#include <asm/special_insns.h>

    extern unsigned int sig_xstate_size;
    extern void fpu_init(void);
    diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
    index 9cdae5d..c8bed0d 100644
    --- a/arch/x86/include/asm/local.h
    +++ b/arch/x86/include/asm/local.h
    @@ -3,7 +3,6 @@

    #include <linux/percpu.h>

    -#include <asm/system.h>
    #include <linux/atomic.h>
    #include <asm/asm.h>

    diff --git a/arch/x86/include/asm/mc146818rtc.h b/arch/x86/include/asm/mc146818rtc.h
    index 0e8e85b..d354fb7 100644
    --- a/arch/x86/include/asm/mc146818rtc.h
    +++ b/arch/x86/include/asm/mc146818rtc.h
    @@ -5,7 +5,6 @@
    #define _ASM_X86_MC146818RTC_H

    #include <asm/io.h>
    -#include <asm/system.h>
    #include <asm/processor.h>
    #include <linux/mc146818rtc.h>

    diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
    index 58545c9..8322a35 100644
    --- a/arch/x86/include/asm/processor.h
    +++ b/arch/x86/include/asm/processor.h
    @@ -14,13 +14,13 @@ struct mm_struct;
    #include <asm/sigcontext.h>
    #include <asm/current.h>
    #include <asm/cpufeature.h>
    -#include <asm/system.h>
    #include <asm/page.h>
    #include <asm/pgtable_types.h>
    #include <asm/percpu.h>
    #include <asm/msr.h>
    #include <asm/desc_defs.h>
    #include <asm/nops.h>
    +#include <asm/special_insns.h>

    #include <linux/personality.h>
    #include <linux/cpumask.h>
    @@ -29,6 +29,15 @@ struct mm_struct;
    #include <linux/math64.h>
    #include <linux/init.h>
    #include <linux/err.h>
    +#include <linux/irqflags.h>
    +
    +/*
    + * We handle most unaligned accesses in hardware. On the other hand
    + * unaligned DMA can be quite expensive on some Nehalem processors.
    + *
    + * Based on this we disable the IP header alignment in network drivers.
    + */
    +#define NET_IP_ALIGN 0

    #define HBP_NUM 4
    /*
    @@ -1021,4 +1030,24 @@ extern bool cpu_has_amd_erratum(const int *);
    #define cpu_has_amd_erratum(x) (false)
    #endif /* CONFIG_CPU_SUP_AMD */

    +#ifdef CONFIG_X86_32
    +/*
    + * disable hlt during certain critical i/o operations
    + */
    +#define HAVE_DISABLE_HLT
    +#endif
    +
    +void disable_hlt(void);
    +void enable_hlt(void);
    +
    +void cpu_idle_wait(void);
    +
    +extern unsigned long arch_align_stack(unsigned long sp);
    +extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
    +
    +void default_idle(void);
    +bool set_pm_idle_to_default(void);
    +
    +void stop_this_cpu(void *dummy);
    +
    #endif /* _ASM_X86_PROCESSOR_H */
    diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
    index 5e64171..1654662 100644
    --- a/arch/x86/include/asm/segment.h
    +++ b/arch/x86/include/asm/segment.h
    @@ -212,7 +212,61 @@
    #ifdef __KERNEL__
    #ifndef __ASSEMBLY__
    extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10];
    -#endif
    -#endif
    +
    +/*
    + * Load a segment. Fall back on loading the zero
    + * segment if something goes wrong..
    + */
    +#define loadsegment(seg, value) \
    +do { \
    + unsigned short __val = (value); \
    + \
    + asm volatile(" \n" \
    + "1: movl %k0,%%" #seg " \n" \
    + \
    + ".section .fixup,\"ax\" \n" \
    + "2: xorl %k0,%k0 \n" \
    + " jmp 1b \n" \
    + ".previous \n" \
    + \
    + _ASM_EXTABLE(1b, 2b) \
    + \
    + : "+r" (__val) : : "memory"); \
    +} while (0)
    +
    +/*
    + * Save a segment register away
    + */
    +#define savesegment(seg, value) \
    + asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
    +
    +/*
    + * x86_32 user gs accessors.
    + */
    +#ifdef CONFIG_X86_32
    +#ifdef CONFIG_X86_32_LAZY_GS
    +#define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;})
    +#define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v))
    +#define task_user_gs(tsk) ((tsk)->thread.gs)
    +#define lazy_save_gs(v) savesegment(gs, (v))
    +#define lazy_load_gs(v) loadsegment(gs, (v))
    +#else /* X86_32_LAZY_GS */
    +#define get_user_gs(regs) (u16)((regs)->gs)
    +#define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0)
    +#define task_user_gs(tsk) (task_pt_regs(tsk)->gs)
    +#define lazy_save_gs(v) do { } while (0)
    +#define lazy_load_gs(v) do { } while (0)
    +#endif /* X86_32_LAZY_GS */
    +#endif /* X86_32 */
    +
    +static inline unsigned long get_limit(unsigned long segment)
    +{
    + unsigned long __limit;
    + asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
    + return __limit + 1;
    +}
    +
    +#endif /* !__ASSEMBLY__ */
    +#endif /* __KERNEL__ */

    #endif /* _ASM_X86_SEGMENT_H */
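
    As an illustration only (not a hunk of this patch), a minimal sketch of
    the loadsegment()/savesegment() helpers that now live in <asm/segment.h>;
    the helper below is hypothetical:

        #include <asm/segment.h>

        static void with_temporary_fs(unsigned short new_fs)    /* hypothetical */
        {
                unsigned short old_fs;

                savesegment(fs, old_fs);    /* stash the current %fs selector */
                loadsegment(fs, new_fs);    /* a faulting load falls back to the null selector */
                /* ... access memory through %fs here ... */
                loadsegment(fs, old_fs);    /* restore */
        }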
    diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
    new file mode 100644
    index 0000000..41fc93a
    --- /dev/null
    +++ b/arch/x86/include/asm/special_insns.h
    @@ -0,0 +1,199 @@
    +#ifndef _ASM_X86_SPECIAL_INSNS_H
    +#define _ASM_X86_SPECIAL_INSNS_H
    +
    +
    +#ifdef __KERNEL__
    +
    +static inline void native_clts(void)
    +{
    + asm volatile("clts");
    +}
    +
    +/*
    + * Volatile isn't enough to prevent the compiler from reordering the
    + * read/write functions for the control registers and messing everything up.
    + * A memory clobber would solve the problem, but would prevent reordering of
    + * all loads stores around it, which can hurt performance. Solution is to
    + * use a variable and mimic reads and writes to it to enforce serialization
    + */
    +static unsigned long __force_order;
    +
    +static inline unsigned long native_read_cr0(void)
    +{
    + unsigned long val;
    + asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
    + return val;
    +}
    +
    +static inline void native_write_cr0(unsigned long val)
    +{
    + asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
    +}
    +
    +static inline unsigned long native_read_cr2(void)
    +{
    + unsigned long val;
    + asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
    + return val;
    +}
    +
    +static inline void native_write_cr2(unsigned long val)
    +{
    + asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
    +}
    +
    +static inline unsigned long native_read_cr3(void)
    +{
    + unsigned long val;
    + asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
    + return val;
    +}
    +
    +static inline void native_write_cr3(unsigned long val)
    +{
    + asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
    +}
    +
    +static inline unsigned long native_read_cr4(void)
    +{
    + unsigned long val;
    + asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
    + return val;
    +}
    +
    +static inline unsigned long native_read_cr4_safe(void)
    +{
    + unsigned long val;
    + /* This could fault if %cr4 does not exist. In x86_64, a cr4 always
    + * exists, so it will never fail. */
    +#ifdef CONFIG_X86_32
    + asm volatile("1: mov %%cr4, %0\n"
    + "2:\n"
    + _ASM_EXTABLE(1b, 2b)
    + : "=r" (val), "=m" (__force_order) : "0" (0));
    +#else
    + val = native_read_cr4();
    +#endif
    + return val;
    +}
    +
    +static inline void native_write_cr4(unsigned long val)
    +{
    + asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
    +}
    +
    +#ifdef CONFIG_X86_64
    +static inline unsigned long native_read_cr8(void)
    +{
    + unsigned long cr8;
    + asm volatile("movq %%cr8,%0" : "=r" (cr8));
    + return cr8;
    +}
    +
    +static inline void native_write_cr8(unsigned long val)
    +{
    + asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
    +}
    +#endif
    +
    +static inline void native_wbinvd(void)
    +{
    + asm volatile("wbinvd": : :"memory");
    +}
    +
    +extern void native_load_gs_index(unsigned);
    +
    +#ifdef CONFIG_PARAVIRT
    +#include <asm/paravirt.h>
    +#else
    +
    +static inline unsigned long read_cr0(void)
    +{
    + return native_read_cr0();
    +}
    +
    +static inline void write_cr0(unsigned long x)
    +{
    + native_write_cr0(x);
    +}
    +
    +static inline unsigned long read_cr2(void)
    +{
    + return native_read_cr2();
    +}
    +
    +static inline void write_cr2(unsigned long x)
    +{
    + native_write_cr2(x);
    +}
    +
    +static inline unsigned long read_cr3(void)
    +{
    + return native_read_cr3();
    +}
    +
    +static inline void write_cr3(unsigned long x)
    +{
    + native_write_cr3(x);
    +}
    +
    +static inline unsigned long read_cr4(void)
    +{
    + return native_read_cr4();
    +}
    +
    +static inline unsigned long read_cr4_safe(void)
    +{
    + return native_read_cr4_safe();
    +}
    +
    +static inline void write_cr4(unsigned long x)
    +{
    + native_write_cr4(x);
    +}
    +
    +static inline void wbinvd(void)
    +{
    + native_wbinvd();
    +}
    +
    +#ifdef CONFIG_X86_64
    +
    +static inline unsigned long read_cr8(void)
    +{
    + return native_read_cr8();
    +}
    +
    +static inline void write_cr8(unsigned long x)
    +{
    + native_write_cr8(x);
    +}
    +
    +static inline void load_gs_index(unsigned selector)
    +{
    + native_load_gs_index(selector);
    +}
    +
    +#endif
    +
    +/* Clear the 'TS' bit */
    +static inline void clts(void)
    +{
    + native_clts();
    +}
    +
    +#endif/* CONFIG_PARAVIRT */
    +
    +#define stts() write_cr0(read_cr0() | X86_CR0_TS)
    +
    +static inline void clflush(volatile void *__p)
    +{
    + asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
    +}
    +
    +#define nop() asm volatile ("nop")
    +
    +
    +#endif /* __KERNEL__ */
    +
    +#endif /* _ASM_X86_SPECIAL_INSNS_H */
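
    As an illustration only (not a hunk of this patch), the usual
    read-modify-write pattern on a control register using the accessors now
    collected in <asm/special_insns.h>; the helper name is hypothetical and
    X86_CR4_PGE comes from <asm/processor-flags.h>:

        #include <asm/processor-flags.h>    /* X86_CR4_PGE */
        #include <asm/special_insns.h>

        static void enable_global_pages(void)       /* hypothetical helper */
        {
                unsigned long cr4 = read_cr4();

                if (!(cr4 & X86_CR4_PGE))
                        write_cr4(cr4 | X86_CR4_PGE);
        }

    Under CONFIG_PARAVIRT the same read_cr4()/write_cr4() calls are provided
    by <asm/paravirt.h> instead, as the #ifdef above shows.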
    diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
    index 1575177..b5d9533 100644
    --- a/arch/x86/include/asm/stackprotector.h
    +++ b/arch/x86/include/asm/stackprotector.h
    @@ -38,7 +38,6 @@
    #include <asm/tsc.h>
    #include <asm/processor.h>
    #include <asm/percpu.h>
    -#include <asm/system.h>
    #include <asm/desc.h>
    #include <linux/random.h>

    diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
    new file mode 100644
    index 0000000..4ec45b3
    --- /dev/null
    +++ b/arch/x86/include/asm/switch_to.h
    @@ -0,0 +1,129 @@
    +#ifndef _ASM_X86_SWITCH_TO_H
    +#define _ASM_X86_SWITCH_TO_H
    +
    +struct task_struct; /* one of the stranger aspects of C forward declarations */
    +struct task_struct *__switch_to(struct task_struct *prev,
    + struct task_struct *next);
    +struct tss_struct;
    +void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
    + struct tss_struct *tss);
    +
    +#ifdef CONFIG_X86_32
    +
    +#ifdef CONFIG_CC_STACKPROTECTOR
    +#define __switch_canary \
    + "movl %P[task_canary](%[next]), %%ebx\n\t" \
    + "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
    +#define __switch_canary_oparam \
    + , [stack_canary] "=m" (stack_canary.canary)
    +#define __switch_canary_iparam \
    + , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
    +#else /* CC_STACKPROTECTOR */
    +#define __switch_canary
    +#define __switch_canary_oparam
    +#define __switch_canary_iparam
    +#endif /* CC_STACKPROTECTOR */
    +
    +/*
    + * Saving eflags is important. It switches not only IOPL between tasks,
    + * it also protects other tasks from NT leaking through sysenter etc.
    + */
    +#define switch_to(prev, next, last) \
    +do { \
    + /* \
    + * Context-switching clobbers all registers, so we clobber \
    + * them explicitly, via unused output variables. \
    + * (EAX and EBP is not listed because EBP is saved/restored \
    + * explicitly for wchan access and EAX is the return value of \
    + * __switch_to()) \
    + */ \
    + unsigned long ebx, ecx, edx, esi, edi; \
    + \
    + asm volatile("pushfl\n\t" /* save flags */ \
    + "pushl %%ebp\n\t" /* save EBP */ \
    + "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \
    + "movl %[next_sp],%%esp\n\t" /* restore ESP */ \
    + "movl $1f,%[prev_ip]\n\t" /* save EIP */ \
    + "pushl %[next_ip]\n\t" /* restore EIP */ \
    + __switch_canary \
    + "jmp __switch_to\n" /* regparm call */ \
    + "1:\t" \
    + "popl %%ebp\n\t" /* restore EBP */ \
    + "popfl\n" /* restore flags */ \
    + \
    + /* output parameters */ \
    + : [prev_sp] "=m" (prev->thread.sp), \
    + [prev_ip] "=m" (prev->thread.ip), \
    + "=a" (last), \
    + \
    + /* clobbered output registers: */ \
    + "=b" (ebx), "=c" (ecx), "=d" (edx), \
    + "=S" (esi), "=D" (edi) \
    + \
    + __switch_canary_oparam \
    + \
    + /* input parameters: */ \
    + : [next_sp] "m" (next->thread.sp), \
    + [next_ip] "m" (next->thread.ip), \
    + \
    + /* regparm parameters for __switch_to(): */ \
    + [prev] "a" (prev), \
    + [next] "d" (next) \
    + \
    + __switch_canary_iparam \
    + \
    + : /* reloaded segment registers */ \
    + "memory"); \
    +} while (0)
    +
    +#else /* CONFIG_X86_32 */
    +
    +/* frame pointer must be last for get_wchan */
    +#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
    +#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
    +
    +#define __EXTRA_CLOBBER \
    + , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
    + "r12", "r13", "r14", "r15"
    +
    +#ifdef CONFIG_CC_STACKPROTECTOR
    +#define __switch_canary \
    + "movq %P[task_canary](%%rsi),%%r8\n\t" \
    + "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
    +#define __switch_canary_oparam \
    + , [gs_canary] "=m" (irq_stack_union.stack_canary)
    +#define __switch_canary_iparam \
    + , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
    +#else /* CC_STACKPROTECTOR */
    +#define __switch_canary
    +#define __switch_canary_oparam
    +#define __switch_canary_iparam
    +#endif /* CC_STACKPROTECTOR */
    +
    +/* Save restore flags to clear handle leaking NT */
    +#define switch_to(prev, next, last) \
    + asm volatile(SAVE_CONTEXT \
    + "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
    + "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
    + "call __switch_to\n\t" \
    + "movq "__percpu_arg([current_task])",%%rsi\n\t" \
    + __switch_canary \
    + "movq %P[thread_info](%%rsi),%%r8\n\t" \
    + "movq %%rax,%%rdi\n\t" \
    + "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
    + "jnz ret_from_fork\n\t" \
    + RESTORE_CONTEXT \
    + : "=a" (last) \
    + __switch_canary_oparam \
    + : [next] "S" (next), [prev] "D" (prev), \
    + [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
    + [ti_flags] "i" (offsetof(struct thread_info, flags)), \
    + [_tif_fork] "i" (_TIF_FORK), \
    + [thread_info] "i" (offsetof(struct task_struct, stack)), \
    + [current_task] "m" (current_task) \
    + __switch_canary_iparam \
    + : "memory", "cc" __EXTRA_CLOBBER)
    +
    +#endif /* CONFIG_X86_32 */
    +
    +#endif /* _ASM_X86_SWITCH_TO_H */
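
    For orientation only: the intended caller of switch_to() is the
    scheduler's context_switch(), which (heavily trimmed, roughly) does:

        prepare_task_switch(rq, prev, next);
        ...
        switch_to(prev, next, prev);        /* the asm above runs here */
        barrier();
        finish_task_switch(this_rq(), prev);

    The third ("last") argument is written with __switch_to()'s return value,
    i.e. the previously running task, via the "=a" output above.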
    diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
    index 2d2f01c..0d84f9e 100644
    --- a/arch/x86/include/asm/system.h
    +++ b/arch/x86/include/asm/system.h
    @@ -1,523 +1,6 @@
    -#ifndef _ASM_X86_SYSTEM_H
    -#define _ASM_X86_SYSTEM_H
    -
    -#include <asm/asm.h>
    -#include <asm/segment.h>
    -#include <asm/cpufeature.h>
    +/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
    +#include <asm/barrier.h>
    #include <asm/cmpxchg.h>
    -#include <asm/nops.h>
    -
    -#include <linux/kernel.h>
    -#include <linux/irqflags.h>
    -
    -/* entries in ARCH_DLINFO: */
    -#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
    -# define AT_VECTOR_SIZE_ARCH 2
    -#else /* else it's non-compat x86-64 */
    -# define AT_VECTOR_SIZE_ARCH 1
    -#endif
    -
    -struct task_struct; /* one of the stranger aspects of C forward declarations */
    -struct task_struct *__switch_to(struct task_struct *prev,
    - struct task_struct *next);
    -struct tss_struct;
    -void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
    - struct tss_struct *tss);
    -extern void show_regs_common(void);
    -
    -#ifdef CONFIG_X86_32
    -
    -#ifdef CONFIG_CC_STACKPROTECTOR
    -#define __switch_canary \
    - "movl %P[task_canary](%[next]), %%ebx\n\t" \
    - "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
    -#define __switch_canary_oparam \
    - , [stack_canary] "=m" (stack_canary.canary)
    -#define __switch_canary_iparam \
    - , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
    -#else /* CC_STACKPROTECTOR */
    -#define __switch_canary
    -#define __switch_canary_oparam
    -#define __switch_canary_iparam
    -#endif /* CC_STACKPROTECTOR */
    -
    -/*
    - * Saving eflags is important. It switches not only IOPL between tasks,
    - * it also protects other tasks from NT leaking through sysenter etc.
    - */
    -#define switch_to(prev, next, last) \
    -do { \
    - /* \
    - * Context-switching clobbers all registers, so we clobber \
    - * them explicitly, via unused output variables. \
    - * (EAX and EBP is not listed because EBP is saved/restored \
    - * explicitly for wchan access and EAX is the return value of \
    - * __switch_to()) \
    - */ \
    - unsigned long ebx, ecx, edx, esi, edi; \
    - \
    - asm volatile("pushfl\n\t" /* save flags */ \
    - "pushl %%ebp\n\t" /* save EBP */ \
    - "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \
    - "movl %[next_sp],%%esp\n\t" /* restore ESP */ \
    - "movl $1f,%[prev_ip]\n\t" /* save EIP */ \
    - "pushl %[next_ip]\n\t" /* restore EIP */ \
    - __switch_canary \
    - "jmp __switch_to\n" /* regparm call */ \
    - "1:\t" \
    - "popl %%ebp\n\t" /* restore EBP */ \
    - "popfl\n" /* restore flags */ \
    - \
    - /* output parameters */ \
    - : [prev_sp] "=m" (prev->thread.sp), \
    - [prev_ip] "=m" (prev->thread.ip), \
    - "=a" (last), \
    - \
    - /* clobbered output registers: */ \
    - "=b" (ebx), "=c" (ecx), "=d" (edx), \
    - "=S" (esi), "=D" (edi) \
    - \
    - __switch_canary_oparam \
    - \
    - /* input parameters: */ \
    - : [next_sp] "m" (next->thread.sp), \
    - [next_ip] "m" (next->thread.ip), \
    - \
    - /* regparm parameters for __switch_to(): */ \
    - [prev] "a" (prev), \
    - [next] "d" (next) \
    - \
    - __switch_canary_iparam \
    - \
    - : /* reloaded segment registers */ \
    - "memory"); \
    -} while (0)
    -
    -/*
    - * disable hlt during certain critical i/o operations
    - */
    -#define HAVE_DISABLE_HLT
    -#else
    -
    -/* frame pointer must be last for get_wchan */
    -#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
    -#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
    -
    -#define __EXTRA_CLOBBER \
    - , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
    - "r12", "r13", "r14", "r15"
    -
    -#ifdef CONFIG_CC_STACKPROTECTOR
    -#define __switch_canary \
    - "movq %P[task_canary](%%rsi),%%r8\n\t" \
    - "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
    -#define __switch_canary_oparam \
    - , [gs_canary] "=m" (irq_stack_union.stack_canary)
    -#define __switch_canary_iparam \
    - , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
    -#else /* CC_STACKPROTECTOR */
    -#define __switch_canary
    -#define __switch_canary_oparam
    -#define __switch_canary_iparam
    -#endif /* CC_STACKPROTECTOR */
    -
    -/* Save restore flags to clear handle leaking NT */
    -#define switch_to(prev, next, last) \
    - asm volatile(SAVE_CONTEXT \
    - "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
    - "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
    - "call __switch_to\n\t" \
    - "movq "__percpu_arg([current_task])",%%rsi\n\t" \
    - __switch_canary \
    - "movq %P[thread_info](%%rsi),%%r8\n\t" \
    - "movq %%rax,%%rdi\n\t" \
    - "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
    - "jnz ret_from_fork\n\t" \
    - RESTORE_CONTEXT \
    - : "=a" (last) \
    - __switch_canary_oparam \
    - : [next] "S" (next), [prev] "D" (prev), \
    - [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
    - [ti_flags] "i" (offsetof(struct thread_info, flags)), \
    - [_tif_fork] "i" (_TIF_FORK), \
    - [thread_info] "i" (offsetof(struct task_struct, stack)), \
    - [current_task] "m" (current_task) \
    - __switch_canary_iparam \
    - : "memory", "cc" __EXTRA_CLOBBER)
    -#endif
    -
    -#ifdef __KERNEL__
    -
    -extern void native_load_gs_index(unsigned);
    -
    -/*
    - * Load a segment. Fall back on loading the zero
    - * segment if something goes wrong..
    - */
    -#define loadsegment(seg, value) \
    -do { \
    - unsigned short __val = (value); \
    - \
    - asm volatile(" \n" \
    - "1: movl %k0,%%" #seg " \n" \
    - \
    - ".section .fixup,\"ax\" \n" \
    - "2: xorl %k0,%k0 \n" \
    - " jmp 1b \n" \
    - ".previous \n" \
    - \
    - _ASM_EXTABLE(1b, 2b) \
    - \
    - : "+r" (__val) : : "memory"); \
    -} while (0)
    -
    -/*
    - * Save a segment register away
    - */
    -#define savesegment(seg, value) \
    - asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
    -
    -/*
    - * x86_32 user gs accessors.
    - */
    -#ifdef CONFIG_X86_32
    -#ifdef CONFIG_X86_32_LAZY_GS
    -#define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;})
    -#define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v))
    -#define task_user_gs(tsk) ((tsk)->thread.gs)
    -#define lazy_save_gs(v) savesegment(gs, (v))
    -#define lazy_load_gs(v) loadsegment(gs, (v))
    -#else /* X86_32_LAZY_GS */
    -#define get_user_gs(regs) (u16)((regs)->gs)
    -#define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0)
    -#define task_user_gs(tsk) (task_pt_regs(tsk)->gs)
    -#define lazy_save_gs(v) do { } while (0)
    -#define lazy_load_gs(v) do { } while (0)
    -#endif /* X86_32_LAZY_GS */
    -#endif /* X86_32 */
    -
    -static inline unsigned long get_limit(unsigned long segment)
    -{
    - unsigned long __limit;
    - asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
    - return __limit + 1;
    -}
    -
    -static inline void native_clts(void)
    -{
    - asm volatile("clts");
    -}
    -
    -/*
    - * Volatile isn't enough to prevent the compiler from reordering the
    - * read/write functions for the control registers and messing everything up.
    - * A memory clobber would solve the problem, but would prevent reordering of
    - * all loads stores around it, which can hurt performance. Solution is to
    - * use a variable and mimic reads and writes to it to enforce serialization
    - */
    -static unsigned long __force_order;
    -
    -static inline unsigned long native_read_cr0(void)
    -{
    - unsigned long val;
    - asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
    - return val;
    -}
    -
    -static inline void native_write_cr0(unsigned long val)
    -{
    - asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
    -}
    -
    -static inline unsigned long native_read_cr2(void)
    -{
    - unsigned long val;
    - asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
    - return val;
    -}
    -
    -static inline void native_write_cr2(unsigned long val)
    -{
    - asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
    -}
    -
    -static inline unsigned long native_read_cr3(void)
    -{
    - unsigned long val;
    - asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
    - return val;
    -}
    -
    -static inline void native_write_cr3(unsigned long val)
    -{
    - asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
    -}
    -
    -static inline unsigned long native_read_cr4(void)
    -{
    - unsigned long val;
    - asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
    - return val;
    -}
    -
    -static inline unsigned long native_read_cr4_safe(void)
    -{
    - unsigned long val;
    - /* This could fault if %cr4 does not exist. In x86_64, a cr4 always
    - * exists, so it will never fail. */
    -#ifdef CONFIG_X86_32
    - asm volatile("1: mov %%cr4, %0\n"
    - "2:\n"
    - _ASM_EXTABLE(1b, 2b)
    - : "=r" (val), "=m" (__force_order) : "0" (0));
    -#else
    - val = native_read_cr4();
    -#endif
    - return val;
    -}
    -
    -static inline void native_write_cr4(unsigned long val)
    -{
    - asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
    -}
    -
    -#ifdef CONFIG_X86_64
    -static inline unsigned long native_read_cr8(void)
    -{
    - unsigned long cr8;
    - asm volatile("movq %%cr8,%0" : "=r" (cr8));
    - return cr8;
    -}
    -
    -static inline void native_write_cr8(unsigned long val)
    -{
    - asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
    -}
    -#endif
    -
    -static inline void native_wbinvd(void)
    -{
    - asm volatile("wbinvd": : :"memory");
    -}
    -
    -#ifdef CONFIG_PARAVIRT
    -#include <asm/paravirt.h>
    -#else
    -
    -static inline unsigned long read_cr0(void)
    -{
    - return native_read_cr0();
    -}
    -
    -static inline void write_cr0(unsigned long x)
    -{
    - native_write_cr0(x);
    -}
    -
    -static inline unsigned long read_cr2(void)
    -{
    - return native_read_cr2();
    -}
    -
    -static inline void write_cr2(unsigned long x)
    -{
    - native_write_cr2(x);
    -}
    -
    -static inline unsigned long read_cr3(void)
    -{
    - return native_read_cr3();
    -}
    -
    -static inline void write_cr3(unsigned long x)
    -{
    - native_write_cr3(x);
    -}
    -
    -static inline unsigned long read_cr4(void)
    -{
    - return native_read_cr4();
    -}
    -
    -static inline unsigned long read_cr4_safe(void)
    -{
    - return native_read_cr4_safe();
    -}
    -
    -static inline void write_cr4(unsigned long x)
    -{
    - native_write_cr4(x);
    -}
    -
    -static inline void wbinvd(void)
    -{
    - native_wbinvd();
    -}
    -
    -#ifdef CONFIG_X86_64
    -
    -static inline unsigned long read_cr8(void)
    -{
    - return native_read_cr8();
    -}
    -
    -static inline void write_cr8(unsigned long x)
    -{
    - native_write_cr8(x);
    -}
    -
    -static inline void load_gs_index(unsigned selector)
    -{
    - native_load_gs_index(selector);
    -}
    -
    -#endif
    -
    -/* Clear the 'TS' bit */
    -static inline void clts(void)
    -{
    - native_clts();
    -}
    -
    -#endif/* CONFIG_PARAVIRT */
    -
    -#define stts() write_cr0(read_cr0() | X86_CR0_TS)
    -
    -#endif /* __KERNEL__ */
    -
    -static inline void clflush(volatile void *__p)
    -{
    - asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
    -}
    -
    -#define nop() asm volatile ("nop")
    -
    -void disable_hlt(void);
    -void enable_hlt(void);
    -
    -void cpu_idle_wait(void);
    -
    -extern unsigned long arch_align_stack(unsigned long sp);
    -extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
    -
    -void default_idle(void);
    -bool set_pm_idle_to_default(void);
    -
    -void stop_this_cpu(void *dummy);
    -
    -/*
    - * Force strict CPU ordering.
    - * And yes, this is required on UP too when we're talking
    - * to devices.
    - */
    -#ifdef CONFIG_X86_32
    -/*
    - * Some non-Intel clones support out of order store. wmb() ceases to be a
    - * nop for these.
    - */
    -#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
    -#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
    -#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
    -#else
    -#define mb() asm volatile("mfence":::"memory")
    -#define rmb() asm volatile("lfence":::"memory")
    -#define wmb() asm volatile("sfence" ::: "memory")
    -#endif
    -
    -/**
    - * read_barrier_depends - Flush all pending reads that subsequents reads
    - * depend on.
    - *
    - * No data-dependent reads from memory-like regions are ever reordered
    - * over this barrier. All reads preceding this primitive are guaranteed
    - * to access memory (but not necessarily other CPUs' caches) before any
    - * reads following this primitive that depend on the data return by
    - * any of the preceding reads. This primitive is much lighter weight than
    - * rmb() on most CPUs, and is never heavier weight than is
    - * rmb().
    - *
    - * These ordering constraints are respected by both the local CPU
    - * and the compiler.
    - *
    - * Ordering is not guaranteed by anything other than these primitives,
    - * not even by data dependencies. See the documentation for
    - * memory_barrier() for examples and URLs to more information.
    - *
    - * For example, the following code would force ordering (the initial
    - * value of "a" is zero, "b" is one, and "p" is "&a"):
    - *
- * <programlisting>
- *    CPU 0                          CPU 1
- *
- *    b = 2;
- *    memory_barrier();
- *    p = &b;                        q = p;
- *                                   read_barrier_depends();
- *                                   d = *q;
- * </programlisting>
    - *
    - * because the read of "*q" depends on the read of "p" and these
    - * two reads are separated by a read_barrier_depends(). However,
    - * the following code, with the same initial values for "a" and "b":
    - *
- * <programlisting>
- *    CPU 0                          CPU 1
- *
- *    a = 2;
- *    memory_barrier();
- *    b = 3;                         y = b;
- *                                   read_barrier_depends();
- *                                   x = a;
- * </programlisting>
    - *
    - * does not enforce ordering, since there is no data dependency between
    - * the read of "a" and the read of "b". Therefore, on some CPUs, such
    - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
    - * in cases like this where there are no data dependencies.
    - **/
    -
    -#define read_barrier_depends() do { } while (0)
    -
    -#ifdef CONFIG_SMP
    -#define smp_mb() mb()
    -#ifdef CONFIG_X86_PPRO_FENCE
    -# define smp_rmb() rmb()
    -#else
    -# define smp_rmb() barrier()
    -#endif
    -#ifdef CONFIG_X86_OOSTORE
    -# define smp_wmb() wmb()
    -#else
    -# define smp_wmb() barrier()
    -#endif
    -#define smp_read_barrier_depends() read_barrier_depends()
    -#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
    -#else
    -#define smp_mb() barrier()
    -#define smp_rmb() barrier()
    -#define smp_wmb() barrier()
    -#define smp_read_barrier_depends() do { } while (0)
    -#define set_mb(var, value) do { var = value; barrier(); } while (0)
    -#endif
    -
    -/*
    - * Stop RDTSC speculation. This is needed when you need to use RDTSC
    - * (or get_cycles or vread that possibly accesses the TSC) in a defined
    - * code region.
    - *
    - * (Could use an alternative three way for this if there was one.)
    - */
    -static __always_inline void rdtsc_barrier(void)
    -{
    - alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
    - alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
    -}
    -
    -/*
    - * We handle most unaligned accesses in hardware. On the other hand
    - * unaligned DMA can be quite expensive on some Nehalem processors.
    - *
    - * Based on this we disable the IP header alignment in network drivers.
    - */
    -#define NET_IP_ALIGN 0
    -#endif /* _ASM_X86_SYSTEM_H */
    +#include <asm/exec.h>
    +#include <asm/special_insns.h>
    +#include <asm/switch_to.h>
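
    The upshot for other code, sketched for illustration: anything that
    previously got these facilities from <asm/system.h> now includes only the
    piece it actually uses, e.g.:

        /* before */
        #include <asm/system.h>             /* barriers, xchg/cmpxchg, switch_to, cr accessors, ... */

        /* after */
        #include <asm/barrier.h>            /* mb(), rmb(), wmb(), smp_*() */
        #include <asm/cmpxchg.h>            /* xchg(), cmpxchg() */
        #include <asm/special_insns.h>      /* read_cr*(), write_cr*(), clts(), wbinvd(), clflush() */
        #include <asm/switch_to.h>          /* switch_to() */

    Most files need just one of these, which is what the remaining hunks
    below do.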
    diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
    index 169be89..c0e108e 100644
    --- a/arch/x86/include/asm/tlbflush.h
    +++ b/arch/x86/include/asm/tlbflush.h
    @@ -5,7 +5,7 @@
    #include <linux/sched.h>

    #include <asm/processor.h>
    -#include <asm/system.h>
    +#include <asm/special_insns.h>

    #ifdef CONFIG_PARAVIRT
    #include <asm/paravirt.h>
    diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
    index e0f9aa1..5da71c2 100644
    --- a/arch/x86/include/asm/virtext.h
    +++ b/arch/x86/include/asm/virtext.h
    @@ -16,7 +16,6 @@
    #define _ASM_X86_VIRTEX_H

    #include <asm/processor.h>
    -#include <asm/system.h>

    #include <asm/vmx.h>
    #include <asm/svm.h>
    diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
    index f50e7fb..d2b7f27 100644
    --- a/arch/x86/kernel/acpi/cstate.c
    +++ b/arch/x86/kernel/acpi/cstate.c
    @@ -14,6 +14,7 @@
    #include <acpi/processor.h>
    #include <asm/acpi.h>
    #include <asm/mwait.h>
    +#include <asm/special_insns.h>

    /*
    * Initialize bm_flags based on the CPU cache properties
    diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
    index f76623c..56585a5 100644
    --- a/arch/x86/kernel/apm_32.c
    +++ b/arch/x86/kernel/apm_32.c
    @@ -231,7 +231,6 @@
    #include <linux/syscore_ops.h>
    #include <linux/i8253.h>

    -#include <asm/system.h>
    #include <asm/uaccess.h>
    #include <asm/desc.h>
    #include <asm/olpc.h>
    diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
    index 5c0e653..2d5454c 100644
    --- a/arch/x86/kernel/cpu/mcheck/p5.c
    +++ b/arch/x86/kernel/cpu/mcheck/p5.c
    @@ -9,7 +9,6 @@
    #include <linux/smp.h>

    #include <asm/processor.h>
    -#include <asm/system.h>
    #include <asm/mce.h>
    #include <asm/msr.h>

    diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
    index 67bb17a..47a1870 100644
    --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
    +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
    @@ -25,7 +25,6 @@
    #include <linux/cpu.h>

    #include <asm/processor.h>
    -#include <asm/system.h>
    #include <asm/apic.h>
    #include <asm/idle.h>
    #include <asm/mce.h>
    diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
    index 54060f5..2d7998f 100644
    --- a/arch/x86/kernel/cpu/mcheck/winchip.c
    +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
    @@ -8,7 +8,6 @@
    #include <linux/init.h>

    #include <asm/processor.h>
    -#include <asm/system.h>
    #include <asm/mce.h>
    #include <asm/msr.h>

    diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
    index 97b2635..75772ae 100644
    --- a/arch/x86/kernel/cpu/mtrr/generic.c
    +++ b/arch/x86/kernel/cpu/mtrr/generic.c
    @@ -12,7 +12,6 @@
    #include <asm/processor-flags.h>
    #include <asm/cpufeature.h>
    #include <asm/tlbflush.h>
    -#include <asm/system.h>
    #include <asm/mtrr.h>
    #include <asm/msr.h>
    #include <asm/pat.h>
    diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
    index a524353..39472dd 100644
    --- a/arch/x86/kernel/cpuid.c
    +++ b/arch/x86/kernel/cpuid.c
    @@ -43,7 +43,6 @@

    #include <asm/processor.h>
    #include <asm/msr.h>
    -#include <asm/system.h>

    static struct class *cpuid_class;

    diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
    index 6104852..36d1853 100644
    --- a/arch/x86/kernel/i8259.c
    +++ b/arch/x86/kernel/i8259.c
    @@ -15,7 +15,6 @@
    #include <linux/delay.h>

    #include <linux/atomic.h>
    -#include <asm/system.h>
    #include <asm/timer.h>
    #include <asm/hw_irq.h>
    #include <asm/pgtable.h>
    diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
    index 313fb5c..99b85b4 100644
    --- a/arch/x86/kernel/irqinit.c
    +++ b/arch/x86/kernel/irqinit.c
    @@ -16,7 +16,6 @@
    #include <linux/delay.h>

    #include <linux/atomic.h>
    -#include <asm/system.h>
    #include <asm/timer.h>
    #include <asm/hw_irq.h>
    #include <asm/pgtable.h>
    diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
    index faba577..4425a12 100644
    --- a/arch/x86/kernel/kgdb.c
    +++ b/arch/x86/kernel/kgdb.c
    @@ -46,7 +46,6 @@

    #include <asm/debugreg.h>
    #include <asm/apicdef.h>
    -#include <asm/system.h>
    #include <asm/apic.h>
    #include <asm/nmi.h>

    diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
    index ea69726..ebc9873 100644
    --- a/arch/x86/kernel/ldt.c
    +++ b/arch/x86/kernel/ldt.c
    @@ -15,7 +15,6 @@
    #include <linux/vmalloc.h>
    #include <linux/uaccess.h>

    -#include <asm/system.h>
    #include <asm/ldt.h>
    #include <asm/desc.h>
    #include <asm/mmu_context.h>
    diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
    index a3fa43b..5b19e4d 100644
    --- a/arch/x86/kernel/machine_kexec_32.c
    +++ b/arch/x86/kernel/machine_kexec_32.c
    @@ -23,7 +23,6 @@
    #include <asm/apic.h>
    #include <asm/cpufeature.h>
    #include <asm/desc.h>
    -#include <asm/system.h>
    #include <asm/cacheflush.h>
    #include <asm/debugreg.h>

    diff --git a/arch/x86/kernel/mca_32.c b/arch/x86/kernel/mca_32.c
    index 177183c..7eb1e2b 100644
    --- a/arch/x86/kernel/mca_32.c
    +++ b/arch/x86/kernel/mca_32.c
    @@ -43,7 +43,6 @@
    #include <linux/mca.h>
    #include <linux/kprobes.h>
    #include <linux/slab.h>
    -#include <asm/system.h>
    #include <asm/io.h>
    #include <linux/proc_fs.h>
    #include <linux/mman.h>
    diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
    index 925179f..f21fd94 100644
    --- a/arch/x86/kernel/module.c
    +++ b/arch/x86/kernel/module.c
    @@ -26,7 +26,6 @@
    #include <linux/gfp.h>
    #include <linux/jump_label.h>

    -#include <asm/system.h>
    #include <asm/page.h>
    #include <asm/pgtable.h>

    diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
    index 9635676..eb11369 100644
    --- a/arch/x86/kernel/msr.c
    +++ b/arch/x86/kernel/msr.c
    @@ -40,7 +40,6 @@

    #include <asm/processor.h>
    #include <asm/msr.h>
    -#include <asm/system.h>

    static struct class *msr_class;

    diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
    index d90272e..4ae36d9 100644
    --- a/arch/x86/kernel/paravirt.c
    +++ b/arch/x86/kernel/paravirt.c
    @@ -37,6 +37,7 @@
    #include <asm/apic.h>
    #include <asm/tlbflush.h>
    #include <asm/timer.h>
    +#include <asm/special_insns.h>

    /* nop stub */
    void _paravirt_nop(void)
    diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
    index 726494b..6ac5782 100644
    --- a/arch/x86/kernel/pci-calgary_64.c
    +++ b/arch/x86/kernel/pci-calgary_64.c
    @@ -42,7 +42,6 @@
    #include <asm/calgary.h>
    #include <asm/tce.h>
    #include <asm/pci-direct.h>
    -#include <asm/system.h>
    #include <asm/dma.h>
    #include <asm/rio.h>
    #include <asm/bios_ebda.h>
    diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
    index 15763af..e01ed8b 100644
    --- a/arch/x86/kernel/process.c
    +++ b/arch/x86/kernel/process.c
    @@ -15,7 +15,6 @@
    #include <trace/events/power.h>
    #include <linux/hw_breakpoint.h>
    #include <asm/cpu.h>
    -#include <asm/system.h>
    #include <asm/apic.h>
    #include <asm/syscalls.h>
    #include <asm/idle.h>
    diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
    index c08d1ff..c8a5244 100644
    --- a/arch/x86/kernel/process_32.c
    +++ b/arch/x86/kernel/process_32.c
    @@ -41,7 +41,6 @@
    #include <linux/cpuidle.h>

    #include <asm/pgtable.h>
    -#include <asm/system.h>
    #include <asm/ldt.h>
    #include <asm/processor.h>
    #include <asm/i387.h>
    @@ -58,6 +57,7 @@
    #include <asm/syscalls.h>
    #include <asm/debugreg.h>
    #include <asm/nmi.h>
    +#include <asm/switch_to.h>

    asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

    diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
    index cfa5c90..2466f98 100644
    --- a/arch/x86/kernel/process_64.c
    +++ b/arch/x86/kernel/process_64.c
    @@ -40,7 +40,6 @@
    #include <linux/cpuidle.h>

    #include <asm/pgtable.h>
    -#include <asm/system.h>
    #include <asm/processor.h>
    #include <asm/i387.h>
    #include <asm/mmu_context.h>
    @@ -52,6 +51,7 @@
    #include <asm/syscalls.h>
    #include <asm/debugreg.h>
    #include <asm/nmi.h>
    +#include <asm/switch_to.h>

    asmlinkage extern void ret_from_fork(void);

    diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
    index 5026738..e7d573b 100644
    --- a/arch/x86/kernel/ptrace.c
    +++ b/arch/x86/kernel/ptrace.c
    @@ -24,7 +24,6 @@

    #include <asm/uaccess.h>
    #include <asm/pgtable.h>
    -#include <asm/system.h>
    #include <asm/processor.h>
    #include <asm/i387.h>
    #include <asm/debugreg.h>
    diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
    index d7d5099..b7f71ad 100644
    --- a/arch/x86/kernel/setup.c
    +++ b/arch/x86/kernel/setup.c
    @@ -90,7 +90,6 @@
    #include <asm/processor.h>
    #include <asm/bugs.h>

    -#include <asm/system.h>
    #include <asm/vsyscall.h>
    #include <asm/cpu.h>
    #include <asm/desc.h>
    diff --git a/arch/x86/kernel/tce_64.c b/arch/x86/kernel/tce_64.c
    index 9e540fe..ab40954 100644
    --- a/arch/x86/kernel/tce_64.c
    +++ b/arch/x86/kernel/tce_64.c
    @@ -34,6 +34,7 @@
    #include <asm/tce.h>
    #include <asm/calgary.h>
    #include <asm/proto.h>
    +#include <asm/cacheflush.h>

    /* flush a tce at 'tceaddr' to main memory */
    static inline void flush_tce(void* tceaddr)
    diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
    index 6bb7b85..73920e4 100644
    --- a/arch/x86/kernel/tls.c
    +++ b/arch/x86/kernel/tls.c
    @@ -6,7 +6,6 @@

    #include <asm/uaccess.h>
    #include <asm/desc.h>
    -#include <asm/system.h>
    #include <asm/ldt.h>
    #include <asm/processor.h>
    #include <asm/proto.h>
    diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
    index 4bbe04d..e88ed30 100644
    --- a/arch/x86/kernel/traps.c
    +++ b/arch/x86/kernel/traps.c
    @@ -50,7 +50,6 @@
    #include <asm/processor.h>
    #include <asm/debugreg.h>
    #include <linux/atomic.h>
    -#include <asm/system.h>
    #include <asm/traps.h>
    #include <asm/desc.h>
    #include <asm/i387.h>
    diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
    index 6cabf65..4f0cec7 100644
    --- a/arch/x86/mm/init.c
    +++ b/arch/x86/mm/init.c
    @@ -12,7 +12,6 @@
    #include <asm/page_types.h>
    #include <asm/sections.h>
    #include <asm/setup.h>
    -#include <asm/system.h>
    #include <asm/tlbflush.h>
    #include <asm/tlb.h>
    #include <asm/proto.h>
    diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
    index 8663f6c..575d86f 100644
    --- a/arch/x86/mm/init_32.c
    +++ b/arch/x86/mm/init_32.c
    @@ -35,7 +35,6 @@
    #include <asm/asm.h>
    #include <asm/bios_ebda.h>
    #include <asm/processor.h>
    -#include <asm/system.h>
    #include <asm/uaccess.h>
    #include <asm/pgtable.h>
    #include <asm/dma.h>
    diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
    index 436a030..fc18be0 100644
    --- a/arch/x86/mm/init_64.c
    +++ b/arch/x86/mm/init_64.c
    @@ -35,7 +35,6 @@

    #include <asm/processor.h>
    #include <asm/bios_ebda.h>
    -#include <asm/system.h>
    #include <asm/uaccess.h>
    #include <asm/pgtable.h>
    #include <asm/pgalloc.h>
    diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
    index cac7184..a69bcb8 100644
    --- a/arch/x86/mm/pgtable_32.c
    +++ b/arch/x86/mm/pgtable_32.c
    @@ -10,7 +10,6 @@
    #include <linux/spinlock.h>
    #include <linux/module.h>

    -#include <asm/system.h>
    #include <asm/pgtable.h>
    #include <asm/pgalloc.h>
    #include <asm/fixmap.h>
    diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
    index 3769079..74202c1 100644
    --- a/arch/x86/power/hibernate_32.c
    +++ b/arch/x86/power/hibernate_32.c
    @@ -10,7 +10,6 @@
    #include <linux/suspend.h>
    #include <linux/bootmem.h>

    -#include <asm/system.h>
    #include <asm/page.h>
    #include <asm/pgtable.h>
    #include <asm/mmzone.h>

