/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
 * Semi-RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable
 * walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means; this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage, and
 * this allocation is deep inside the MM code and can thus easily fail on
 * memory pressure. To guarantee progress we fall back to single table
 * freeing; see the implementation of tlb_remove_table_one().
 */
struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#endif
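
/*
 * Illustrative sketch, not part of this interface: an architecture that
 * selects CONFIG_HAVE_RCU_TABLE_FREE queues its page-table pages with
 * tlb_remove_table() rather than freeing them directly, and supplies a
 * __tlb_remove_table() callback that performs the real free once the RCU
 * grace period (or, in the allocation-failure fallback, an IPI round) has
 * passed. Assuming a hypothetical architecture whose page tables are plain
 * page-sized kernel pages addressed by the pte pointer, the hooks might
 * look like:
 *
 *	static inline void __tlb_remove_table(void *table)
 *	{
 *		free_page((unsigned long)table);
 *	}
 *
 *	#define __pte_free_tlb(tlb, ptep, address)	\
 *		tlb_remove_table((tlb), (ptep))
 *
 * The void * cookie is opaque to the generic code; only the paired
 * __tlb_remove_table() needs to understand it.
 */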
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce the risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch-specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif
	unsigned long		start;
	unsigned long		end;
	/* we are in the middle of an operation to clear
	 * a full mm and can make some optimizations */
	unsigned int		fullmm : 1,
	/* we have performed an operation which
	 * requires a complete flush of the tlb */
				need_flush_all : 1;

	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];
	unsigned int		batch_count;
	int			page_size;
};

#define HAVE_GENERIC_MMU_GATHER

void arch_tlb_gather_mmu(struct mmu_gather *tlb,
	struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
			 unsigned long start, unsigned long end, bool force);
void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
			 unsigned long size);
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	if (!tlb->end)
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
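
/*
 * Illustrative sketch, not part of this header: the expected calling
 * pattern, loosely following the unmap path in mm/memory.c. The walker
 * (for_each_pte below) is hypothetical shorthand for the real page-table
 * walk; tlb_gather_mmu()/tlb_finish_mmu() are the mm/memory.c wrappers
 * around the arch_* entry points declared above:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	tlb_start_vma(&tlb, vma);
 *	for_each_pte(ptep, addr, start, end) {
 *		pte_t pte = ptep_get_and_clear(mm, addr, ptep);
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);	// grow flush range
 *		if (pte_present(pte))
 *			tlb_remove_page(&tlb, pte_page(pte));	// may flush
 *	}
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb, start, end);
 *
 * tlb_remove_page() batches pages and triggers tlb_flush_mmu() when a batch
 * fills up, so a page is never freed before the TLB entries that mapped it
 * have been invalidated.
 */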
#ifndef tlb_remove_check_page_size_change
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
	/*
	 * We don't care about page size change, just update
	 * mmu_gather page size here so that debug checks
	 * don't throw false warnings.
	 */
#ifdef CONFIG_DEBUG_VM
	tlb->page_size = page_size;
#endif
}
#endif

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush. When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
#define tlb_start_vma(tlb, vma) do { } while (0)
#endif

#define __tlb_end_vma(tlb, vma)					\
	do {							\
		if (!tlb->fullmm)				\
			tlb_flush_mmu_tlbonly(tlb);		\
	} while (0)

#ifndef tlb_end_vma
#define tlb_end_vma	__tlb_end_vma
#endif

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate. This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	     \
	do {							     \
		__tlb_adjust_range(tlb, address, huge_page_size(h)); \
		__tlb_remove_tlb_entry(tlb, ptep, address);	     \
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion.
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now, w.r.t. the page table cache, mark the range_size as PAGE_SIZE.
 */

#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)

#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef __ARCH_HAS_5LEVEL_HACK
#define p4d_free_tlb(tlb, p4dp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__p4d_free_tlb(tlb, p4dp, address);		\
	} while (0)
#endif
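
/*
 * Illustrative sketch, not part of this header: freeing a page-table page
 * itself also goes through the gather, so the directory page is only handed
 * back after the TLB flush (and, with CONFIG_HAVE_RCU_TABLE_FREE, after any
 * concurrent lockless walker is done with it). Simplified from
 * free_pte_range() in mm/memory.c:
 *
 *	static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 *				   unsigned long addr)
 *	{
 *		pgtable_t token = pmd_pgtable(*pmd);
 *		pmd_clear(pmd);
 *		pte_free_tlb(tlb, token, addr);
 *	}
 */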
#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */