/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>
#include <asm/unwind_hints.h>

#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/mem_encrypt.h>

/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define HBP_NUM 4
/*
 * Default implementation of the helper that returns the current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

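	/*
	 * Added note: the "1:" local label sits immediately after the mov,
	 * so %0 receives the address of the next instruction, i.e. an
	 * address inside the caller's text.
	 */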
	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}

/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		__alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];

/*
 *  CPU type and hardware bug flags. Kept separately for each CPU.
 *  Members of this structure are referenced in head_32.S, so think twice
 *  before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_stepping;
#ifdef CONFIG_X86_64
	/* Number of 4K pages in DTLB/ITLB combined: */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	__u8			cu_id;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS + NBUGINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* In KB - valid for CPUs which support this call: */
	unsigned int		x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	/* Cache QoS architectural values: */
	int			x86_cache_max_rmid;	/* max index */
	int			x86_cache_occ_scale;	/* scale to bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* cpuid returned max cores value: */
	u16			 x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Logical processor id: */
	u16			logical_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
	u32			microcode;
	/* Address space bits used by the cache internally */
	u8			x86_cache_bits;
	unsigned		initialized : 1;
} __randomize_layout;

struct cpuid_regs {
	u32 eax, ebx, ecx, edx;
};

enum cpuid_regs_idx {
	CPUID_EAX = 0,
	CPUID_EBX,
	CPUID_ECX,
	CPUID_EDX,
};

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct x86_hw_tss	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS + NBUGINTS];
extern __u32			cpu_caps_set[NCAPINTS + NBUGINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

static inline unsigned long long l1tf_pfn_limit(void)
{
	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
}
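
/*
 * Worked example (added note): with x86_cache_bits == 46 and PAGE_SHIFT
 * == 12, the limit is BIT_ULL(46 - 1 - 12) = 2^33 page frames, i.e. the
 * L1TF checks treat physical addresses from 2^45 bytes (32 TiB) upward
 * as out of range for untrusted mappings.
 */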

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
	return 1;
}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

#define native_cpuid_reg(reg)					\
static inline unsigned int native_cpuid_##reg(unsigned int op)	\
{								\
	unsigned int eax = op, ebx, ecx = 0, edx;		\
								\
	native_cpuid(&eax, &ebx, &ecx, &edx);			\
								\
	return reg;						\
}

/*
 * Native CPUID functions returning a single datum.
 */
native_cpuid_reg(eax)
native_cpuid_reg(ebx)
native_cpuid_reg(ecx)
native_cpuid_reg(edx)
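
/*
 * Illustrative use of the generated helpers (added example, not part of
 * the original header): leaf 0x80000000 reports the highest extended
 * CPUID leaf in EAX.
 */
#if 0	/* example only, not compiled */
static inline bool cpu_has_extended_leaves(void)
{
	unsigned int eax = native_cpuid_eax(0x80000000);

	/* A sane answer has 0x8000 in the upper half; anything else is garbage. */
	return (eax & 0xffff0000) == 0x80000000 && eax > 0x80000000;
}
#endif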

/*
 * Friendlier CR3 helpers.
 */
static inline unsigned long read_cr3_pa(void)
{
	return __read_cr3() & CR3_ADDR_MASK;
}

static inline unsigned long native_read_cr3_pa(void)
{
	return __native_read_cr3() & CR3_ADDR_MASK;
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__sme_pa(pgdir));
}
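
/*
 * Added note: CR3_ADDR_MASK strips the non-address bits of CR3 (the PCID
 * or PWT/PCD bits in the low 12 bits plus the bit-63 no-flush hint),
 * leaving just the physical base of the top-level page table.  __sme_pa()
 * ORs the SME encryption mask into the address when memory encryption is
 * active.
 */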

/*
 * Note that while the legacy 'TSS' name comes from 'Task State Segment',
 * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
 * unrelated to the task-switch mechanism:
 */
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;

	/*
	 * We don't use ring 1, so ss1 is a convenient scratch space in
	 * the same cacheline as sp0.  We use ss1 to cache the value in
	 * MSR_IA32_SYSENTER_CS.  When we context switch
	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
	 * written matches ss1, and, if it's not, then we wrmsr the new
	 * value and update ss1.
	 *
	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
	 * that we set it to zero in vm86 tasks to avoid corrupting the
	 * stack if we were to go through the sysenter path from vm86
	 * mode.
	 */
	unsigned short		ss1;	/* MSR_IA32_SYSENTER_CS */

	unsigned short		__ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;

	/*
	 * We store cpu_current_top_of_stack in sp1 so it's always accessible.
	 * Linux does not use ring 1, so sp1 is not otherwise needed.
	 */
	u64			sp1;

	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed));
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		(offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss))
#define INVALID_IO_BITMAP_OFFSET	0x8000

struct entry_stack {
	char	stack[PAGE_SIZE];
};

struct entry_stack_page {
	struct entry_stack stack;
} __aligned(PAGE_SIZE);

struct tss_struct {
	/*
	 * The fixed hardware portion.  This must not cross a page boundary
	 * at risk of violating the SDM's advice and potentially triggering
	 * errata.
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];
} __aligned(PAGE_SIZE);

DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);

/*
 * sizeof(unsigned long) coming from an extra "long" at the end
 * of the iobitmap.
 *
 * The -1 is because the segment base+limit should point to the
 * address of the last valid byte.
 */
#define __KERNEL_TSS_LIMIT	\
	(IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1)
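
/*
 * Worked numbers for the constants above (added note, assuming 64-bit
 * longs): IO_BITMAP_BYTES = 65536 / 8 = 8192, IO_BITMAP_LONGS =
 * 8192 / 8 = 1024, so io_bitmap[] occupies 8200 bytes including the
 * extra terminating long.  __KERNEL_TSS_LIMIT is therefore
 * IO_BITMAP_OFFSET + 8192 + 8 - 1: the offset of the last valid byte of
 * the bitmap area, measured from the start of the hardware TSS.
 */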

#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#else
/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
#endif

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40.  Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};
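
/*
 * The layout above can be sanity-checked at build time (added sketch,
 * not part of the original header): gs_base[] is 40 bytes, so the canary
 * lands exactly at %gs:40 where GCC expects it.
 */
#if 0	/* example only, not compiled */
static inline void irq_stack_union_layout_check(void)
{
	/* BUILD_BUG_ON() comes from <linux/build_bug.h>. */
	BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
}
#endif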

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure the stack canary segment base is cache-line aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
/*
 * per-CPU IRQ handling stacks
 */
struct irq_stack {
	u32                     stack[THREAD_SIZE/sizeof(u32)];
} __aligned(THREAD_SIZE);

DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
#endif	/* X86_64 */

extern unsigned int fpu_kernel_xstate_size;
extern unsigned int fpu_user_xstate_size;

struct perf_event;

typedef struct {
	unsigned long		seg;
} mm_segment_t;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
#ifdef CONFIG_X86_32
	unsigned long		sp0;
#endif
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif

#ifdef CONFIG_X86_64
	unsigned long		fsbase;
	unsigned long		gsbase;
#else
	/*
	 * XXX: this could presumably be unsigned short.  Alternatively,
	 * 32-bit kernels could be taught to use fsindex instead.
	 */
	unsigned long fs;
	unsigned long gs;
#endif

	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long           debugreg6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long           ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
#ifdef CONFIG_VM86
	/* Virtual 86 mode info */
	struct vm86		*vm86;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;

	mm_segment_t		addr_limit;

	unsigned int		sig_on_uaccess_err:1;
	unsigned int		uaccess_err:1;	/* uaccess failed */

	/* Floating point and extended processor state */
	struct fpu		fpu;
	/*
	 * WARNING: 'fpu' is dynamically-sized.  It *MUST* be at
	 * the end.
	 */
};

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}
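
/*
 * Added note: X86_EFLAGS_IOPL is the two-bit I/O privilege level field
 * (bits 12-13 of EFLAGS); the sequence above clears it and ORs in the
 * requested value.  On 64-bit kernels the IOPL bits are updated directly
 * in the saved pt_regs flags instead, so this helper is a no-op there.
 */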

static inline void
native_load_sp0(unsigned long sp0)
{
	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

static inline unsigned long current_top_of_stack(void)
{
	/*
	 *  We can't read directly from tss.sp0: sp0 on x86_32 is special in
	 *  and around vm86 mode and sp0 on x86_64 is special because of the
	 *  entry trampoline.
	 */
	return this_cpu_read_stable(cpu_current_top_of_stack);
}

static inline bool on_thread_stack(void)
{
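	/*
	 * Added note: the subtraction below is unsigned, so a stack pointer
	 * above the top of the stack, or more than THREAD_SIZE below it,
	 * wraps to a huge value and fails the comparison.
	 */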
	return (unsigned long)(current_top_of_stack() -
			       current_stack_pointer) < THREAD_SIZE;
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid

static inline void load_sp0(unsigned long sp0)
{
	native_load_sp0(sp0);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function.
 * Clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
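
/*
 * Illustrative use of cpuid_count() (added example, not part of the
 * original header): Intel's deterministic cache parameters leaf (0x4) is
 * walked by bumping the sub-leaf in ECX until the cache type field in
 * EAX[4:0] reads 0.
 */
#if 0	/* example only, not compiled */
static inline unsigned int count_cache_leaves(void)
{
	unsigned int eax, ebx, ecx, edx, index;

	for (index = 0; index < 64; index++) {
		cpuid_count(4, index, &eax, &ebx, &ecx, &edx);
		if ((eax & 0x1f) == 0)	/* type 0 == no more caches */
			break;
	}

	return index;
}
#endif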

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}
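
/*
 * Illustrative use (added example, not part of the original header):
 * leaf 0 returns the 12-byte vendor string in EBX, EDX, ECX (in that
 * order) and the highest standard leaf in EAX.
 */
#if 0	/* example only, not compiled */
static inline void read_cpu_vendor(char vendor[13])
{
	unsigned int eax, regs[3];

	/* Note the EBX, EDX, ECX ordering of the string chunks. */
	cpuid(0, &eax, &regs[0], &regs[2], &regs[1]);
	memcpy(vendor, regs, 12);	/* memcpy() from <linux/string.h> */
	vendor[12] = '\0';
}
#endif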

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static __always_inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static __always_inline void cpu_relax(void)
{
	rep_nop();
}

/*
 * This function forces the icache and prefetched instruction stream to
 * catch up with reality in two very specific cases:
 *
 *  a) Text was modified using one virtual address and is about to be executed
 *     from the same physical page at a different virtual address.
 *
 *  b) Text was modified on a different CPU, may subsequently be
 *     executed on this CPU, and you want to make sure the new version
 *     gets executed.  This generally means you're calling this in a IPI.
 *
 * If you're calling this for a different reason, you're probably doing
 * it wrong.
 */
static inline void sync_core(void)
{
	/*
	 * There are quite a few ways to do this.  IRET-to-self is nice
	 * because it works on every CPU, at any CPL (so it's compatible
	 * with paravirtualization), and it never exits to a hypervisor.
	 * The only down sides are that it's a bit slow (it seems to be
	 * a bit more than 2x slower than the fastest options) and that
	 * it unmasks NMIs.  The "push %cs" is needed because, in
	 * paravirtual environments, __KERNEL_CS may not be a valid CS
	 * value when we do IRET directly.
	 *
	 * In case NMI unmasking or performance ever becomes a problem,
	 * the next best option appears to be MOV-to-CR2 and an
	 * unconditional jump.  That sequence also works on all CPUs,
	 * but it will fault at CPL3 (i.e. Xen PV).
	 *
	 * CPUID is the conventional way, but it's nasty: it doesn't
	 * exist on some 486-like CPUs, and it usually exits to a
	 * hypervisor.
	 *
	 * Like all of Linux's memory ordering operations, this is a
	 * compiler barrier as well.
	 */
#ifdef CONFIG_X86_32
	asm volatile (
		"pushfl\n\t"
		"pushl %%cs\n\t"
		"pushl $1f\n\t"
		"iret\n\t"
		"1:"
		: ASM_CALL_CONSTRAINT : : "memory");
#else
	unsigned int tmp;

	asm volatile (
		UNWIND_HINT_SAVE
		"mov %%ss, %0\n\t"
		"pushq %q0\n\t"
		"pushq %%rsp\n\t"
		"addq $8, (%%rsp)\n\t"
		"pushfq\n\t"
		"mov %%cs, %0\n\t"
		"pushq %q0\n\t"
		"pushq $1f\n\t"
		"iretq\n\t"
		UNWIND_HINT_RESTORE
		"1:"
		: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
#endif
}
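
/*
 * Typical use (added sketch): after cross-modifying kernel text, the
 * patching code runs sync_core() on every CPU from an IPI so that no CPU
 * keeps executing a stale, prefetched copy of the old instructions.
 */
#if 0	/* example only, not compiled */
static void do_sync_core(void *info)
{
	sync_core();
}

static inline void sync_cores_everywhere(void)
{
	/* on_each_cpu() is declared in <linux/smp.h>. */
	on_each_cpu(do_sync_core, NULL, 1);
}
#endif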

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void amd_e400_c1e_apic_setup(void);

extern unsigned long		boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);
void early_trap_pf_init(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 %P1"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
			  X86_FEATURE_XMM,
			  "m" (*(const char *)x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchw %P1",
			  X86_FEATURE_3DNOWPREFETCH,
			  "m" (*(const char *)x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}

#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
			   TOP_OF_KERNEL_STACK_PADDING)

#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))

#define task_pt_regs(task) \
({									\
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
	((struct pt_regs *)__ptr) - 1;					\
})
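
/*
 * Added note on the computation above: task_stack_page() returns the base
 * of the task's kernel stack, the usable stack top sits at
 * base + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING, and the user-mode
 * pt_regs are the last item below that top, so subtracting one struct
 * pt_regs yields the saved user registers.  KSTK_EIP() below simply reads
 * the saved ->ip field out of that structure.
 */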

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define IA32_PAGE_OFFSET	PAGE_OFFSET
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_LOW		TASK_SIZE
#define TASK_SIZE_MAX		TASK_SIZE
#define DEFAULT_MAP_WINDOW	TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {							  \
	.sp0			= TOP_OF_INIT_STACK,			  \
	.sysenter_cs		= __KERNEL_CS,				  \
	.io_bitmap_ptr		= NULL,					  \
	.addr_limit		= KERNEL_DS,				  \
}

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size.  This is the first address outside the user range.
 * There are a few constraints that determine this:
 *
 * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
 * address, then that syscall will enter the kernel with a
 * non-canonical return address, and SYSRET will explode dangerously.
 * We avoid this particular problem by preventing anything executable
 * from being mapped at the maximum canonical address.
 *
 * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
 * CPUs malfunction if they execute code from the highest canonical page.
 * They'll speculate right off the end of the canonical space, and
 * bad things happen.  This is worked around in the same way as the
 * Intel problem.
 *
 * With page table isolation enabled, we map the LDT in ... [stay tuned]
 */
#define TASK_SIZE_MAX	((1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE)

#define DEFAULT_MAP_WINDOW	((1UL << 47) - PAGE_SIZE)
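
/*
 * Worked numbers (added note, assuming 4-level paging where
 * __VIRTUAL_MASK_SHIFT == 47 and a 4096-byte page): TASK_SIZE_MAX =
 * 2^47 - 4096 = 0x00007ffffffff000, and DEFAULT_MAP_WINDOW is the same
 * value, so by default user mappings stay below the 47-bit boundary even
 * if 5-level paging raises __VIRTUAL_MASK_SHIFT to 56.
 */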

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE_LOW		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : DEFAULT_MAP_WINDOW)
#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE_LOW
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  {						\
	.addr_limit		= KERNEL_DS,			\
}

extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define __TASK_UNMAPPED_BASE(task_size)	(PAGE_ALIGN(task_size / 3))
#define TASK_UNMAPPED_BASE		__TASK_UNMAPPED_BASE(TASK_SIZE_LOW)

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

DECLARE_PER_CPU(u64, msr_misc_features_shadow);

/* Register/unregister a process' MPX related resource */
#define MPX_ENABLE_MANAGEMENT()	mpx_enable_management()
#define MPX_DISABLE_MANAGEMENT()	mpx_disable_management()

#ifdef CONFIG_X86_INTEL_MPX
extern int mpx_enable_management(void);
extern int mpx_disable_management(void);
#else
static inline int mpx_enable_management(void)
{
	return -EINVAL;
}
static inline int mpx_disable_management(void)
{
	return -EINVAL;
}
#endif /* CONFIG_X86_INTEL_MPX */

#ifdef CONFIG_CPU_SUP_AMD
extern u16 amd_get_nb_id(int cpu);
extern u32 amd_get_nodes_per_socket(void);
#else
static inline u16 amd_get_nb_id(int cpu)		{ return 0; }
static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
#endif

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
	uint32_t base, eax, signature[3];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		if (!memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}

	return 0;
}
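
/*
 * Illustrative use (added example, not part of the original header):
 * guests probe for a specific hypervisor by signature; the KVM guest
 * code, for instance, looks for "KVMKVMKVM\0\0\0" this way.
 */
#if 0	/* example only, not compiled */
static inline bool running_on_kvm(void)
{
	return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0) != 0;
}
#endif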

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
extern void free_kernel_image_pages(void *begin, void *end);

void default_idle(void);
#ifdef	CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
void microcode_check(void);

enum l1tf_mitigations {
	L1TF_MITIGATION_OFF,
	L1TF_MITIGATION_FLUSH_NOWARN,
	L1TF_MITIGATION_FLUSH,
	L1TF_MITIGATION_FLUSH_NOSMT,
	L1TF_MITIGATION_FULL,
	L1TF_MITIGATION_FULL_FORCE
};

extern enum l1tf_mitigations l1tf_mitigation;

enum mds_mitigations {
	MDS_MITIGATION_OFF,
	MDS_MITIGATION_FULL,
	MDS_MITIGATION_VMWERV,
};

enum taa_mitigations {
	TAA_MITIGATION_OFF,
	TAA_MITIGATION_UCODE_NEEDED,
	TAA_MITIGATION_VERW,
	TAA_MITIGATION_TSX_DISABLED,
};

#endif /* _ASM_X86_PROCESSOR_H */
