/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *	    (this is a no-op on ia64).
 *	(2) Clear the relevant portions of the page-table
 *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
 *	(4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *	    tlb_start_vma(tlb, vma);
 *	      for each page-table-entry PTE that needs to be removed do {
 *		tlb_remove_tlb_entry(tlb, pte, address);
 *		if (pte refers to a normal page) {
 *		  tlb_remove_page(tlb, page);
 *		}
 *	      }
 *	    tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
 */
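/*
 * For reference, a hedged sketch of how generic mm code typically drives the
 * hooks above (the exact call chain lives in mm/memory.c and mm/mmap.c and
 * varies by kernel version; unmap_region() is one such caller):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	unmap_vmas(&tlb, vma, start, end);		// invokes tlb_remove_tlb_entry()/tlb_remove_page()
 *	free_pgtables(&tlb, vma, floor, ceiling);	// frees page tables via p*_free_tlb()
 *	tlb_finish_mmu(&tlb, start, end);		// flush TLBs, release gathered pages
 */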
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define	IA64_GATHER_BUNDLE	8

struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;		/* # of pages gathered so far */
	unsigned int		max;		/* capacity of the pages[] array */
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		start, end;
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		**pages;	/* pages gathered for deferred freeing */
	struct page		*local[IA64_GATHER_BUNDLE];	/* on-stack fallback array */
};

struct ia64_tr_entry {
	u64 ifa;
	u64 itir;
	u64 pte;
	u64 rr;
}; /* record of a translation register (TR) entry */

extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);

extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];

/*
 * Region register (RR) field accessor macros.
 */
#define RR_TO_VE(val)   (((val) >> 0) & 0x0000000000000001)
#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK	0x0000000000000001L
#define RR_VE_SHIFT	0
#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK	0x00000000000000fcL
#define RR_PS_SHIFT	2
#define RR_RID_MASK	0x00000000ffffff00L
#define RR_TO_RID(val) 	((val >> 8) & 0xffffff)
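/*
 * Illustrative example of decoding a region register value with the macros
 * above (values chosen arbitrarily): for ve = 1, ps = 16 (64KB pages) and
 * rid = 0x1234,
 *
 *	u64 rr = RR_VE(1) | RR_PS(16) | ((u64) 0x1234 << 8);
 *
 *	RR_TO_VE(rr);	// 1      -- VHPT walker enable bit
 *	RR_TO_PS(rr);	// 16     -- log2 of the preferred page size
 *	RR_TO_RID(rr);	// 0x1234 -- region ID
 */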

static inline void
ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb->need_flush = 0;

	if (tlb->fullmm) {
		/*
		 * Tearing down the entire address space.  This happens both as a result
		 * of exit() and execve().  The latter case necessitates the call to
		 * flush_tlb_mm() here.
		 */
		flush_tlb_mm(tlb->mm);
	} else if (unlikely (end - start >= 1024*1024*1024*1024UL
			     || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
	{
		/*
		 * If we flush more than a tera-byte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
		 * vma pointer.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}
}

static inline void
ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	unsigned long i;
	unsigned int nr;

	/* lastly, release the freed pages */
	nr = tlb->nr;

	tlb->nr = 0;
	tlb->start_addr = ~0UL;
	for (i = 0; i < nr; ++i)
		free_page_and_swap_cache(tlb->pages[i]);
}

/*
 * Flush the TLB for address range START to END and, if not in fast mode, release the
 * freed pages that were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;
	ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
	ia64_tlb_flush_mmu_free(tlb);
}

/*
 * Try to replace the small on-stack gather array with a full page of page
 * pointers so that more pages can be batched per TLB flush.  If the
 * allocation fails, we simply keep using the on-stack array.
 */
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(void *);
	}
}

static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	/* a full-mm flush is signalled by start == 0 && end == ~0UL */
	tlb->fullmm = !(start | (end+1));
	tlb->start = start;
	tlb->end = end;
	tlb->start_addr = ~0UL;
}

/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
			unsigned long start, unsigned long end, bool force)
{
	if (force)
		tlb->need_flush = 1;
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;

	if (!tlb->nr && tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);

	tlb->pages[tlb->nr++] = page;
	VM_WARN_ON(tlb->nr > tlb->max);
	if (tlb->nr == tlb->max)
		return true;
	return false;
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_free(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}
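/*
 * Note (descriptive, not enforced here): the range tracking above assumes
 * PTEs are removed in ascending address order, as the generic unmap code
 * walks page tables; start_addr latches the first address seen since the
 * last flush and end_addr tracks one page past the most recent one.
 */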

static inline void
tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
		    unsigned long size)
{
	if (tlb->start_addr > address)
		tlb->start_addr = address;
	if (tlb->end_addr < address + size)
		tlb->end_addr = address + size;
}

#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)

#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

#define pte_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pte_free_tlb(tlb, ptep, address);		\
} while (0)

#define pmd_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pmd_free_tlb(tlb, ptep, address);		\
} while (0)

#define pud_free_tlb(tlb, pudp, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pud_free_tlb(tlb, pudp, address);		\
} while (0)

#endif /* _ASM_IA64_TLB_H */
