/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011
 *  -Refactored get_new_mmu_context( ) to only handle live-mm.
 *   retiring-mm handled in other hooks
 *
 * Vineetg: March 25th, 2008: Bug #92690
 *  -Major rewrite of Core ASID allocation routine get_new_mmu_context
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_MMU_CONTEXT_H
#define _ASM_ARC_MMU_CONTEXT_H

#include <asm/arcregs.h>
#include <asm/tlb.h>
#include <linux/sched/mm.h>

#include <asm-generic/mm_hooks.h>

/*		ARC700 ASID Management
 *
 * ARC MMU provides 8-bit ASID (0..255) to TAG TLB entries, allowing entries
 * with same vaddr (different tasks) to co-exist. This provides for
 * "Fast Context Switch" i.e. no TLB flush on ctxt-switch
 *
 * Linux assigns each task a unique ASID. A simple round-robin allocation
 * of H/w ASID is done using software tracker @asid_cpu.
 * When it reaches max 255, the allocation cycle starts afresh by flushing
 * the entire TLB and wrapping ASID back to zero.
 *
 * A new allocation cycle, post rollover, could potentially reassign an ASID
 * to a different task. Thus the rule is to refresh the ASID in a new cycle.
 * The 32 bit @asid_cpu (and mm->context.asid) have the MMU PID in the
 * lower 8 bits; the upper 24 bits serve as a cycle/generation indicator,
 * and natural 32 bit unsigned math automagically increments the
 * generation when the lower 8 bits roll over.
 */

#define MM_CTXT_ASID_MASK	0x000000ff /* MMU PID reg :8 bit PID */
#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)

#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
#define MM_CTXT_NO_ASID		0UL
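
/*
 * Worked example of the cycle/generation math (illustrative values, not
 * from a real run): suppose @asid_cpu holds 0x000101ff, i.e. generation
 * 0x101, hw PID 0xff. The next allocation pre-increments it to 0x00010200:
 * the MM_CTXT_ASID_MASK bits rolled over to 0 (triggering a full TLB
 * flush) while plain 32 bit unsigned math has already bumped the
 * generation bits to 0x102. Any mm still holding a 0x000101xx ASID now
 * differs from @asid_cpu in the MM_CTXT_CYCLE_MASK bits and is forced
 * onto a fresh ASID at its next switch-in.
 */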

#define asid_mm(mm, cpu)	mm->context.asid[cpu]
#define hw_pid(mm, cpu)		(asid_mm(mm, cpu) & MM_CTXT_ASID_MASK)

DECLARE_PER_CPU(unsigned int, asid_cache);
#define asid_cpu(cpu)		per_cpu(asid_cache, cpu)

/*
 * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
 * Also set the MMU PID register to existing/updated ASID
 */
static inline void get_new_mmu_context(struct mm_struct *mm)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * Move to new ASID if it was not from current alloc-cycle/generation.
	 * This is done by ensuring that the generation bits in both mm->ASID
	 * and cpu's ASID counter are exactly same.
	 *
	 * Note: Callers needing new ASID unconditionally, independent of
	 * 	 generation, e.g. local_flush_tlb_mm() for forking parent,
	 * 	 first need to destroy the context, setting it to invalid
	 * 	 value.
	 */
	if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK))
		goto set_hw;

	/* move to new ASID and handle rollover */
	if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {

		local_flush_tlb_all();

		/*
		 * Above check for rollover of 8 bit ASID in 32 bit container.
		 * If the container itself wrapped around, set it to a non zero
		 * "generation" to distinguish from no context
		 */
		if (!asid_cpu(cpu))
			asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE;
	}

	/* Assign new ASID to tsk */
	asid_mm(mm, cpu) = asid_cpu(cpu);

set_hw:
	write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE);

	local_irq_restore(flags);
}
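
/*
 * A minimal userspace model of the allocator above, useful for convincing
 * yourself the rollover handling is right. This is a sketch, not kernel
 * code: a single CPU is assumed, the TLB flush is reduced to a counter,
 * and the names alloc_asid/cache/tlb_flushes are invented for
 * illustration.
 *
 *	static unsigned int cache = MM_CTXT_FIRST_CYCLE;
 *	static unsigned int tlb_flushes;
 *
 *	static unsigned int alloc_asid(unsigned int *mm_asid)
 *	{
 *		if ((*mm_asid ^ cache) & MM_CTXT_CYCLE_MASK) {
 *			// stale or never allocated: take the next slot
 *			if (!(++cache & MM_CTXT_ASID_MASK)) {
 *				tlb_flushes++;		// 8 bit rollover
 *				if (!cache)		// 32 bit wrap
 *					cache = MM_CTXT_FIRST_CYCLE;
 *			}
 *			*mm_asid = cache;
 *		}
 *		return *mm_asid & MM_CTXT_ASID_MASK;
 *	}
 *
 * Feeding it 256 fresh mms (each starting at MM_CTXT_NO_ASID) hands out
 * hw PIDs 1..255, then on the 256th call bumps tlb_flushes and wraps the
 * hw PID to 0 in the next generation (cache == 0x200).
 */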

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for_each_possible_cpu(i)
		asid_mm(mm, i) = MM_CTXT_NO_ASID;

	return 0;
}

static inline void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;

	/* Needed to elide CONFIG_DEBUG_PREEMPT warning */
	local_irq_save(flags);
	asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID;
	local_irq_restore(flags);
}

/*
 * Prepare the MMU for task: setup PID reg with allocated ASID
 * If task doesn't have an ASID (never alloc or stolen), get a new ASID
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	const int cpu = smp_processor_id();

	/*
	 * Note that the mm_cpumask is "aggregating" only, we don't clear it
	 * for the switched-out task, unlike some other arches.
	 * It is used to enlist CPUs for sending TLB flush IPIs; not sending
	 * an IPI to a CPU where the task once ran could cause stale TLB
	 * entry re-use, especially for a multi-threaded task.
	 * e.g. T1 runs on C1, migrates to C3. T2 running on C2 munmaps.
	 *      For a non-aggregating mm_cpumask, the IPI is not sent to C1,
	 *      and if T1 were to re-migrate to C1, it could access the
	 *      unmapped region via any existing stale TLB entries.
	 */
	cpumask_set_cpu(cpu, mm_cpumask(next));

#ifndef CONFIG_SMP
	/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
#endif

	get_new_mmu_context(next);
}
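
/*
 * A toy model of the "aggregating" mm_cpumask policy above (a sketch
 * only; the 4-CPU count, the toy_* names and the printf stand-in for the
 * IPI are all invented for illustration):
 *
 *	#include <stdio.h>
 *
 *	#define NR_TOY_CPUS 4
 *	static unsigned long toy_mask;	// bit n set == mm ever ran on CPU n
 *
 *	static void toy_switch_in(int cpu) { toy_mask |= 1UL << cpu; }
 *	// deliberately no toy_switch_out() clearing the bit
 *
 *	static void toy_munmap_flush(void)
 *	{
 *		for (int cpu = 0; cpu < NR_TOY_CPUS; cpu++)
 *			if (toy_mask & (1UL << cpu))
 *				printf("flush IPI -> C%d\n", cpu);
 *	}
 *
 * With T1 on C1 then C3 and T2 on C2, toy_mask ends up 0b1110, so the
 * munmap flush reaches C1 even though nothing currently runs there;
 * clearing bits at switch-out would have skipped C1 and left its stale
 * entries live.
 */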

/*
 * Called at the time of execve() to get a new ASID
 * Note the subtlety here: get_new_mmu_context() behaves differently here
 * vs. in switch_mm(). Here it always assigns a new ASID, because the mm
 * has an unallocated "initial" value, while in the latter it moves to a
 * new ASID only if the existing one is not from the current alloc-cycle.
 */
#define activate_mm(prev, next)		switch_mm(prev, next, NULL)

/*
 * It seemed that deactivate_mm() would be a reasonable place to do the
 * book-keeping for a retiring mm. However destroy_context() still needs
 * to do it, because between mm_release() => deactivate_mm() and
 * mmput() => .. => __mmdrop() => destroy_context()
 * there is a good chance that the task gets sched-out/in, making its
 * ASID valid again (this teased me for a whole day).
 */
#define deactivate_mm(tsk, mm)   do { } while (0)
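
/*
 * Illustrative interleaving behind the comment above (the exact
 * preemption point is timing dependent; this is a sketch, not a trace):
 *
 *	mm_release()
 *	  deactivate_mm(tsk, mm)	// zapping the ASID here is futile...
 *	<task sched-out, later sched-in: switch_mm() calls
 *	 get_new_mmu_context(mm)>	// ...the ASID is live again
 *	mmput(mm)
 *	  => .. => __mmdrop(mm)
 *	    destroy_context(mm)		// so the book-keeping lives here
 */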

#define enter_lazy_tlb(mm, tsk)

#endif /* _ASM_ARC_MMU_CONTEXT_H */
