/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H

#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/smp.h>

#ifdef CONFIG_ARC_HAS_LLSC

/*
 * cmpxchg core, LLOCK/SCOND based: atomically replace *ptr with @new
 * iff it currently equals @expected; returns the value actually read.
 */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long prev;

	/*
	 * Explicit full memory barrier needed before/after as
	 * LLOCK/SCOND themselves don't provide any such semantics
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	brne    %0, %2, 2f	\n"	/* mismatch: bail, no store */
	"	scond   %3, [%1]	\n"
	"	bnz     1b		\n"	/* scond failed: retry */
	"2:				\n"
	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
	  "ir"(expected),
	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
	: "cc", "memory"); /* so that gcc knows memory is being written here */

	smp_mb();

	return prev;
}

#elif !defined(CONFIG_ARC_PLAT_EZNPS)

/*
 * cmpxchg core, !LLSC fallback: emulated with a read-compare-write under
 * the global atomic_ops_lock (irq-save spinlock shared with atomic_*()).
 */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long flags;
	int prev;
	volatile unsigned long *p = ptr;

	/*
	 * spin lock/unlock provide the needed smp_mb() before/after
	 */
	atomic_ops_lock(flags);
	prev = *p;
	if (prev == expected)
		*p = new;
	atomic_ops_unlock(flags);
	return prev;
}

#else /* CONFIG_ARC_PLAT_EZNPS */

/*
 * cmpxchg core, EZchip NPS platform: uses a custom atomic
 * exchange-if-equal instruction (emitted via .word as it is not known
 * to the assembler). Expected value is passed in the CTOP_AUX_GPA1
 * aux register; the value read back from memory is returned in @new.
 */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	/*
	 * Explicit full memory barrier needed before/after
	 */
	smp_mb();

	write_aux_reg(CTOP_AUX_GPA1, expected);

	__asm__ __volatile__(
	"	mov r2, %0\n"
	"	mov r3, %1\n"
	"	.word %2\n"
	"	mov %0, r2"
	: "+r"(new)
	: "r"(ptr), "i"(CTOP_INST_EXC_DI_R2_R2_R3)
	: "r2", "r3", "memory");

	smp_mb();

	return new;
}

#endif /* CONFIG_ARC_HAS_LLSC */

/* Typed front-end: casts operands to unsigned long for __cmpxchg() */
#define cmpxchg(ptr, o, n) ({				\
	(typeof(*(ptr)))__cmpxchg((ptr),		\
				  (unsigned long)(o),	\
				  (unsigned long)(n));	\
})

/*
 * atomic_cmpxchg is same as cmpxchg
 *   LLSC: only different in data-type, semantics are exactly same
 *  !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee
 *         semantics, and this lock also happens to be used by atomic_*()
 */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))

#ifndef CONFIG_ARC_PLAT_EZNPS

/*
 * xchg (reg with memory) based on "Native atomic" EX insn
 * Only 4-byte operands are supported; any other size diverts to the
 * (deliberately unresolved) __xchg_bad_pointer() for a link-time error.
 */
static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		smp_mb();

		__asm__ __volatile__(
		"	ex  %0, [%1]	\n"
		: "+r"(val)
		: "r"(ptr)
		: "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}

#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
						 sizeof(*(ptr))))

/*
 * xchg() maps directly to ARC EX instruction which guarantees atomicity.
 * However in !LLSC config, it also needs to be use @atomic_ops_lock spinlock
 * due to a subtle reason:
 *  - For !LLSC, cmpxchg() needs to use that lock (see above) and there is lot
 *    of kernel code which calls xchg()/cmpxchg() on same data (see llist.h)
 *    Hence xchg() needs to follow same locking rules.
 *
 * Technically the lock is also needed for UP (boils down to irq save/restore)
 * but we can cheat a bit since cmpxchg() atomic_ops_lock() would cause irqs to
 * be disabled thus can't possibly be interrupted/preempted/clobbered by xchg()
 * Other way around, xchg is one instruction anyways, so can't be interrupted
 * as such
 */

#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)

#define xchg(ptr, with)			\
({					\
	unsigned long flags;		\
	typeof(*(ptr)) old_val;		\
					\
	atomic_ops_lock(flags);		\
	old_val = _xchg(ptr, with);	\
	atomic_ops_unlock(flags);	\
	old_val;			\
})

#else

#define xchg(ptr, with)  _xchg(ptr, with)

#endif

#else /* CONFIG_ARC_PLAT_EZNPS */

/*
 * xchg core for EZchip NPS: custom atomic exchange instruction emitted
 * via .word (see __cmpxchg above); 4-byte operands only, other sizes
 * divert to __xchg_bad_pointer() for a link-time error.
 */
static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		/*
		 * Explicit full memory barrier needed before/after
		 */
		smp_mb();

		__asm__ __volatile__(
		"	mov r2, %0\n"
		"	mov r3, %1\n"
		"	.word %2\n"
		"	mov %0, r2\n"
		: "+r"(val)
		: "r"(ptr), "i"(CTOP_INST_XEX_DI_R2_R2_R3)
		: "r2", "r3", "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}

/* Typed front-end: casts operand to unsigned long for __xchg() */
#define xchg(ptr, with) ({				\
	(typeof(*(ptr)))__xchg((unsigned long)(with),	\
			       (ptr),			\
			       sizeof(*(ptr)));		\
})

#endif /* CONFIG_ARC_PLAT_EZNPS */

/*
 * "atomic" variant of xchg()
 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
 * Since xchg() doesn't always do that, it would seem that following definition
 * is incorrect. But here's the rationale:
 *  SMP : Even xchg() takes the atomic_ops_lock, so OK.
 *  LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC
 *        is natively "SMP safe", no serialization required).
 *  UP  : other atomics disable IRQ, so no way a different ctxt atomic_xchg()
 *        could clobber them. atomic_xchg() itself would be 1 insn, so it
 *        can't be clobbered by others. Thus no serialization required when
 *        atomic_xchg is involved.
 */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#endif
Name | Type | Size | Permission | Actions |
---|---|---|---|---|
Kbuild | File | 681 B | 0644 |
|
arcregs.h | File | 8.59 KB | 0644 |
|
asm-offsets.h | File | 311 B | 0644 |
|
atomic.h | File | 15.14 KB | 0644 |
|
barrier.h | File | 1.75 KB | 0644 |
|
bitops.h | File | 9.81 KB | 0644 |
|
bug.h | File | 938 B | 0644 |
|
cache.h | File | 3.77 KB | 0644 |
|
cacheflush.h | File | 3.88 KB | 0644 |
|
checksum.h | File | 2.45 KB | 0644 |
|
cmpxchg.h | File | 5.4 KB | 0644 |
|
current.h | File | 695 B | 0644 |
|
delay.h | File | 1.99 KB | 0644 |
|
disasm.h | File | 3.87 KB | 0644 |
|
dma-mapping.h | File | 734 B | 0644 |
|
dma.h | File | 459 B | 0644 |
|
dwarf.h | File | 892 B | 0644 |
|
elf.h | File | 2.15 KB | 0644 |
|
entry-arcv2.h | File | 4.85 KB | 0644 |
|
entry-compact.h | File | 9.29 KB | 0644 |
|
entry.h | File | 6.73 KB | 0644 |
|
exec.h | File | 410 B | 0644 |
|
fb.h | File | 411 B | 0644 |
|
futex.h | File | 3.67 KB | 0644 |
|
highmem.h | File | 1.46 KB | 0644 |
|
hugepage.h | File | 2.41 KB | 0644 |
|
io.h | File | 6.42 KB | 0644 |
|
irq.h | File | 825 B | 0644 |
|
irqflags-arcv2.h | File | 3.45 KB | 0644 |
|
irqflags-compact.h | File | 4.25 KB | 0644 |
|
irqflags.h | File | 509 B | 0644 |
|
kdebug.h | File | 400 B | 0644 |
|
kgdb.h | File | 1.35 KB | 0644 |
|
kmap_types.h | File | 489 B | 0644 |
|
kprobes.h | File | 1.37 KB | 0644 |
|
linkage.h | File | 1.42 KB | 0644 |
|
mach_desc.h | File | 2.06 KB | 0644 |
|
mmu.h | File | 2.44 KB | 0644 |
|
mmu_context.h | File | 5.67 KB | 0644 |
|
mmzone.h | File | 989 B | 0644 |
|
module.h | File | 661 B | 0644 |
|
page.h | File | 2.99 KB | 0644 |
|
pci.h | File | 705 B | 0644 |
|
perf_event.h | File | 6.86 KB | 0644 |
|
pgalloc.h | File | 3.79 KB | 0644 |
|
pgtable.h | File | 14.2 KB | 0644 |
|
processor.h | File | 4.69 KB | 0644 |
|
ptrace.h | File | 3.87 KB | 0644 |
|
sections.h | File | 407 B | 0644 |
|
segment.h | File | 612 B | 0644 |
|
serial.h | File | 644 B | 0644 |
|
setup.h | File | 1.18 KB | 0644 |
|
shmparam.h | File | 442 B | 0644 |
|
smp.h | File | 4.25 KB | 0644 |
|
spinlock.h | File | 8.79 KB | 0644 |
|
spinlock_types.h | File | 1.03 KB | 0644 |
|
stacktrace.h | File | 1.29 KB | 0644 |
|
string.h | File | 1.15 KB | 0644 |
|
switch_to.h | File | 1.17 KB | 0644 |
|
syscall.h | File | 1.57 KB | 0644 |
|
syscalls.h | File | 653 B | 0644 |
|
thread_info.h | File | 3.39 KB | 0644 |
|
timex.h | File | 508 B | 0644 |
|
tlb-mmu1.h | File | 3.48 KB | 0644 |
|
tlb.h | File | 1.23 KB | 0644 |
|
tlbflush.h | File | 1.76 KB | 0644 |
|
uaccess.h | File | 18.45 KB | 0644 |
|
unaligned.h | File | 771 B | 0644 |
|
unwind.h | File | 3.51 KB | 0644 |
|