/*
 *  arch/arm/include/asm/uaccess.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#include <asm/extable.h>

/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel cannot inadvertently
 * perform such accesses (e.g., via list poison values) which could then
 * be exploited for privilege escalation.
 */
static inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	unsigned int old_domain = get_domain();

	/* Set the current domain access to permit user accesses */
	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

	return old_domain;
#else
	return 0;
#endif
}

static inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/* Restore the user access mask */
	set_domain(flags);
#endif
}
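
/*
 * Illustrative sketch (not part of the original header): every accessor
 * below brackets its user access with this save/access/restore pairing,
 * so that user mappings are only reachable for the duration of the access
 * itself.  "uptr" and "val" here are hypothetical:
 *
 *	unsigned int __ua_flags = uaccess_save_and_enable();
 *	val = *uptr;		// the guarded user access (in reality a
 *				// fixup-protected asm load, see below)
 *	uaccess_restore(__ua_flags);
 */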

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);
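
/*
 * Illustration: the default cases of the size switches below expand to a
 * call to one of these undefined externs, so an unsupported access size
 * becomes a link-time failure rather than a silently wrong access.  Since
 * the switch is on a compile-time sizeof, supported sizes never emit the
 * call and link cleanly.
 */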

/*
 * Note that KERNEL_DS is conceptually 0x1,0000,0000: one past the top of
 * the 32-bit address space, so every address passes the range check.  The
 * value truncates to 0 in a 32-bit word.
 */
#define KERNEL_DS	0x00000000
#define get_ds()	(KERNEL_DS)

#ifdef CONFIG_MMU

#define USER_DS		TASK_SIZE
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	dsb(nsh);
	isb();

	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}

#define segment_eq(a, b)	((a) == (b))

/* We use 33-bit arithmetic here... */
#define __range_ok(addr, size) ({ \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr);	\
	__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
		: "=&r" (flag), "=&r" (roksum) \
		: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
		: "cc"); \
	flag; })
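
/*
 * Roughly equivalent C for the assembly above (a sketch; the real check is
 * kept as one carry-chained instruction sequence so the compiler cannot
 * reorder or split it):
 *
 *	unsigned long long limit =
 *		current_thread_info()->addr_limit ?: 0x100000000ULL;
 *	unsigned long long sum = (unsigned long long)(addr) + (size);
 *
 *	flag = (sum <= limit) ? 0 : 1;	// nonzero means the range is bad
 *
 * The 33rd bit (the carry out of the 32-bit add) is what catches an
 * addr + size wrap-around.
 */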

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
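
/*
 * For example, on 32-bit ARM __inttype(char) and __inttype(int) are both
 * unsigned long, while __inttype(long long) is unsigned long long, so a
 * register variable declared with __inttype(x) is always wide enough to
 * receive a get_user() result for x.
 */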

/*
 * Sanitise a uaccess pointer such that it becomes NULL if addr+size
 * is above the current addr_limit.
 */
#define uaccess_mask_range_ptr(ptr, size)			\
	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
						    size_t size)
{
	void __user *safe_ptr = (void __user *)ptr;
	unsigned long tmp;

	asm volatile(
	"	sub	%1, %3, #1\n"
	"	subs	%1, %1, %0\n"
	"	addhs	%1, %1, #1\n"
	"	subhss	%1, %1, %2\n"
	"	movlo	%0, #0\n"
	: "+r" (safe_ptr), "=&r" (tmp)
	: "r" (size), "r" (current_thread_info()->addr_limit)
	: "cc");

	csdb();
	return safe_ptr;
}
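
/*
 * Roughly equivalent C for the assembly above (a sketch):
 *
 *	unsigned long a = (unsigned long)ptr;
 *	unsigned long limit = current_thread_info()->addr_limit;
 *
 *	if (a >= limit || limit - a < size)
 *		safe_ptr = NULL;
 *
 * It is written in assembly so the comparison and the NULLing of the
 * pointer stay in one dependency chain, with csdb() acting as the barrier
 * that keeps speculation from running ahead of the masking.
 */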

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);

#define __GUP_CLOBBER_1	"lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2	"ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4	"lr", "cc"
#define __GUP_CLOBBER_32t_8 "lr", "cc"
#define __GUP_CLOBBER_8	"lr", "cc"

#define __get_user_x(__r2, __p, __e, __l, __s)				\
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)

/* narrowing a double-word get into a single 32-bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s)			\
	__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * Store the result into the least significant word of a 64-bit target
 * variable.  This differs only in the big-endian case, where the LSW of
 * the 64-bit __r2 register pair is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s)		        \
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_64t_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif


#define __get_user_check(x, p)						\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register const typeof(*(p)) __user *__p asm("r0") = (p);\
		register __inttype(x) __r2 asm("r2");			\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		unsigned int __ua_flags = uaccess_save_and_enable();	\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 1); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 1);	\
			break;						\
		case 2:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 2); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 2);	\
			break;						\
		case 4:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 4);	\
			break;						\
		case 8:							\
			if (sizeof((x)) < 8)				\
				__get_user_x_32t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 8);	\
			break;						\
		default: __e = __get_user_bad(); break;			\
		}							\
		uaccess_restore(__ua_flags);				\
		x = (typeof(*(p))) __r2;				\
		__e;							\
	})

#define get_user(x, p)							\
	({								\
		might_fault();						\
		__get_user_check(x, p);					\
	 })
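
/*
 * Usage sketch (a hypothetical caller, not part of this header):
 *
 *	int __user *uptr;	// pointer received from userspace
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *
 * get_user() returns 0 on success; on a fault the destination is zeroed
 * and the call returns -EFAULT.
 */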

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_check(__pu_val, __ptr, __err, __s)			\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register typeof(__pu_val) __r2 asm("r2") = __pu_val;	\
		register const void __user *__p asm("r0") = __ptr;	\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		__asm__ __volatile__ (					\
			__asmeq("%0", "r0") __asmeq("%2", "r2")		\
			__asmeq("%3", "r1")				\
			"bl	__put_user_" #__s			\
			: "=&r" (__e)					\
			: "0" (__p), "r" (__r2), "r" (__l)		\
			: "ip", "lr", "cc");				\
		__err = __e;						\
	})

#else /* CONFIG_MMU */

/*
 * uClinux has only one address space, so it has simplified address limits.
 */
#define USER_DS			KERNEL_DS

#define segment_eq(a, b)		(1)
#define __addr_ok(addr)		((void)(addr), 1)
#define __range_ok(addr, size)	((void)(addr), 0)
#define get_fs()		(KERNEL_DS)

static inline void set_fs(mm_segment_t fs)
{
}

#define get_user(x, p)	__get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */

#define access_ok(type, addr, size)	(__range_ok(addr, size) == 0)

#define user_addr_max() \
	(uaccess_kernel() ? ~0UL : get_fs())

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1, it is not worth fixing the
 * non-verifying accessors, because we would need to add address space
 * verification there anyway.  Force these to use the standard get_user()
 * version instead.
 */
#define __get_user(x, ptr) get_user(x, ptr)
#else

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (i.e., they don't return a value as such).
 */
#define __get_user(x, ptr)						\
({									\
	long __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	unsigned long __gu_val;						\
	unsigned int __ua_flags;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__ua_flags = uaccess_save_and_enable();				\
	switch (sizeof(*(ptr))) {					\
	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err);	break;	\
	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err);	break;	\
	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err);	break;	\
	default: (__gu_val) = __get_user_bad();				\
	}								\
	uaccess_restore(__ua_flags);					\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)

#define __get_user_asm(x, addr, err, instr)			\
	__asm__ __volatile__(					\
	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")

#define __get_user_asm_byte(x, addr, err)			\
	__get_user_asm(x, addr, err, ldrb)

#if __LINUX_ARM_ARCH__ >= 6

#define __get_user_asm_half(x, addr, err)			\
	__get_user_asm(x, addr, err, ldrh)

#else

#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = __b1 | (__b2 << 8);				\
})
#else
#define __get_user_asm_half(x, __gu_addr, err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = (__b1 << 8) | __b2;				\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __get_user_asm_word(x, addr, err)			\
	__get_user_asm(x, addr, err, ldr)
#endif


#define __put_user_switch(x, ptr, __err, __fn)				\
	do {								\
		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);	\
		__typeof__(*(ptr)) __pu_val = (x);			\
		unsigned int __ua_flags;				\
		might_fault();						\
		__ua_flags = uaccess_save_and_enable();			\
		switch (sizeof(*(ptr))) {				\
		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;	\
		case 2:	__fn(__pu_val, __pu_ptr, __err, 2); break;	\
		case 4:	__fn(__pu_val, __pu_ptr, __err, 4); break;	\
		case 8:	__fn(__pu_val, __pu_ptr, __err, 8); break;	\
		default: __err = __put_user_bad(); break;		\
		}							\
		uaccess_restore(__ua_flags);				\
	} while (0)

#define put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_check);	\
	__pu_err;							\
})
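
/*
 * Usage sketch (a hypothetical caller): writing one value back out.
 *
 *	int __user *uptr;	// pointer received from userspace
 *
 *	if (put_user(42, uptr))
 *		return -EFAULT;
 */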

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1.1, all accessors need to include
 * verification of the address space.
 */
#define __put_user(x, ptr) put_user(x, ptr)

#else
#define __put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck);	\
	__pu_err;							\
})

#define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
	do {								\
		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
		__put_user_nocheck_##__size(x, __pu_addr, __err);	\
	} while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword

#define __put_user_asm(x, __pu_addr, err, instr)		\
	__asm__ __volatile__(					\
	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")

#define __put_user_asm_byte(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, strb)

#if __LINUX_ARM_ARCH__ >= 6

#define __put_user_asm_half(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, strh)

#else

#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err)			\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp, __pu_addr, err);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);	\
})
#else
#define __put_user_asm_half(x, __pu_addr, err)			\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp >> 8, __pu_addr, err);	\
	__put_user_asm_byte(__temp, __pu_addr + 1, err);	\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __put_user_asm_word(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, str)

#ifndef __ARMEB__
#define	__reg_oper0	"%R2"
#define	__reg_oper1	"%Q2"
#else
#define	__reg_oper0	"%Q2"
#define	__reg_oper1	"%R2"
#endif

#define __put_user_asm_dword(x, __pu_addr, err)			\
	__asm__ __volatile__(					\
 ARM(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1], #4\n"	) \
 ARM(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1]\n"	) \
 THUMB(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1]\n"	) \
 THUMB(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1, #4]\n"	) \
	"3:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"4:	mov	%0, %3\n"				\
	"	b	3b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 4b\n"				\
	"	.long	2b, 4b\n"				\
	"	.popsection"					\
	: "+r" (err), "+r" (__pu_addr)				\
	: "r" (x), "i" (-EFAULT)				\
	: "cc")

#endif /* !CONFIG_CPU_SPECTRE */

#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int __ua_flags;

	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_from_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}

extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
	unsigned int __ua_flags;
	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
#else
	return arm_copy_to_user(to, from, n);
#endif
}
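
/*
 * Usage sketch (hypothetical): bulk transfers normally go through the
 * generic copy_from_user()/copy_to_user() wrappers in <linux/uaccess.h>,
 * which perform the access_ok() check and then call the raw_* helpers
 * above.  A nonzero return is the number of bytes left uncopied.
 *
 *	struct req kreq;	// hypothetical structure
 *
 *	if (copy_from_user(&kreq, ubuf, sizeof(kreq)))
 *		return -EFAULT;
 */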

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_clear_user(addr, n);
	uaccess_restore(__ua_flags);
	return n;
}

#else
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
#endif
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
	return n;
}

/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */
