404

[ Avaa Bypassed ]




Upload:

Command:

botdev@3.17.156.168: ~ $
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <asm/barrier.h>
#include <arch/chip.h>

#ifndef __ASSEMBLY__

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	/* Exchange-and-add via the out-of-line helper; old value discarded. */
	_atomic_xchg_add(&v->counter, i);
}

/*
 * Generate the bitwise atomic ops:
 *
 *   atomic_{and,or,xor}(i, v)       - apply the op, no return value
 *   atomic_fetch_{and,or,xor}(i, v) - apply the op and return the prior
 *                                     value, issuing smp_mb() first
 *
 * The out-of-line _atomic_fetch_* helpers take the counter address as a
 * volatile unsigned long pointer, hence the cast of &v->counter.
 */
#define ATOMIC_OPS(op)							\
unsigned long _atomic_fetch_##op(volatile unsigned long *p, unsigned long mask); \
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	_atomic_fetch_##op((unsigned long *)&v->counter, i);		\
}									\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	smp_mb();							\
	return _atomic_fetch_##op((unsigned long *)&v->counter, i);	\
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

/**
 * atomic_fetch_add - add integer and return previous value
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the value @v held beforehand.
 * Issues smp_mb() before the underlying exchange-add.
 */
static inline int atomic_fetch_add(int i, atomic_t *v)
{
	smp_mb();
	return _atomic_xchg_add(&v->counter, i);
}

/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int old;

	smp_mb();  /* barrier for proper semantics */
	old = _atomic_xchg_add(&v->counter, i);
	return old + i;
}

/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int prev;

	smp_mb();  /* barrier for proper semantics */
	prev = _atomic_xchg_add_unless(&v->counter, a, u);
	return prev;
}

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
	_atomic_xchg(&v->counter, n);
}

/* NOTE(review): relies on the xchg-based atomic_set() above being strong
 * enough to provide release semantics — confirm against the helper. */
#define atomic_set_release(v, i)	atomic_set((v), (i))

/* A 64bit atomic type: on this 32-bit platform the counter must only be
 * accessed through the _atomic64_* helpers, never with plain loads/stores
 * (see atomic64_read() below). */

typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline long long atomic64_read(const atomic64_t *v)
{
	/*
	 * A plain pair of 32-bit loads could tear, so read both halves
	 * via an atomic add of zero.  Dropping the const qualifier is
	 * safe: the atomic support routines leave memory untouched when
	 * the value is unmodified.
	 */
	long long *counter = (long long *)&v->counter;

	return _atomic64_xchg_add(counter, 0);
}

/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(long long i, atomic64_t *v)
{
	/* Exchange-and-add via the out-of-line helper; old value discarded. */
	_atomic64_xchg_add(&v->counter, i);
}

/*
 * Generate the 64-bit bitwise atomic ops:
 *
 *   atomic64_{and,or,xor}(i, v)       - apply the op, no return value
 *   atomic64_fetch_{and,or,xor}(i, v) - apply the op and return the prior
 *                                       value, issuing smp_mb() first
 */
#define ATOMIC64_OPS(op)					\
long long _atomic64_fetch_##op(long long *v, long long n);	\
static inline void atomic64_##op(long long i, atomic64_t *v)	\
{								\
	_atomic64_fetch_##op(&v->counter, i);			\
}								\
static inline long long atomic64_fetch_##op(long long i, atomic64_t *v)	\
{								\
	smp_mb();						\
	return _atomic64_fetch_##op(&v->counter, i);		\
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

/**
 * atomic64_fetch_add - add integer and return previous value
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v and returns the value @v held beforehand.
 * Issues smp_mb() before the underlying exchange-add.
 */
static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
{
	smp_mb();
	return _atomic64_xchg_add(&v->counter, i);
}

/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long old;

	smp_mb();  /* barrier for proper semantics */
	old = _atomic64_xchg_add(&v->counter, i);
	return old + i;
}

/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline long long atomic64_add_unless(atomic64_t *v, long long a,
					long long u)
{
	long long old;

	smp_mb();  /* barrier for proper semantics */
	old = _atomic64_xchg_add_unless(&v->counter, a, u);
	return old != u;
}

/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, long long n)
{
	_atomic64_xchg(&v->counter, n);
}

/*
 * Remaining 64-bit operations, built from the primitives above.
 * Subtraction is implemented as addition of the negated value.
 */
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_fetch_sub(i, v)	atomic64_fetch_add(-(i), (v))
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* !__ASSEMBLY__ */

/*
 * Internal definitions only beyond this point.
 */

/*
 * Number of atomic locks in atomic_locks[]. Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)

#ifndef __ASSEMBLY__
extern int atomic_locks[];
#endif

/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock.  Note that we
 * mention the register number in a comment in "lib/atomic_asm.S" to help
 * assembly coders from using this register by mistake, so if it
 * is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20

#ifndef __ASSEMBLY__
/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Return a pointer to the lock for the given address. */
int *__atomic_hashed_lock(volatile void *v);

/* Private helper routines in lib/atomic_asm_32.S */
struct __get_user {
	unsigned long val;	/* value produced by the atomic routine */
	int err;		/* NOTE(review): presumably 0 on success,
				 * fault/error code otherwise — confirm
				 * against lib/atomic_asm_32.S */
};
/*
 * 32-bit helpers.  @lock is the hashed lock guarding @p, as returned by
 * __atomic_hashed_lock() above; @o/@n are the old/new values.
 */
extern struct __get_user __atomic32_cmpxchg(volatile int *p,
					  int *lock, int o, int n);
extern struct __get_user __atomic32_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_xchg_add_unless(volatile int *p,
						  int *lock, int o, int n);
extern struct __get_user __atomic32_fetch_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_and(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_xor(volatile int *p, int *lock, int n);
/* 64-bit helpers; these return the value directly rather than a
 * struct __get_user. */
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
					long long o, long long n);
extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
					long long n);
extern long long __atomic64_xchg_add_unless(volatile long long *p,
					int *lock, long long o, long long n);
extern long long __atomic64_fetch_and(volatile long long *p, int *lock, long long n);
extern long long __atomic64_fetch_or(volatile long long *p, int *lock, long long n);
extern long long __atomic64_fetch_xor(volatile long long *p, int *lock, long long n);

/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */

Filemanager

Name Type Size Permission Actions
Kbuild File 439 B 0644
asm-offsets.h File 35 B 0644
atomic.h File 5.14 KB 0644
atomic_32.h File 8.91 KB 0644
atomic_64.h File 5.51 KB 0644
backtrace.h File 3.98 KB 0644
barrier.h File 2.89 KB 0644
bitops.h File 2.37 KB 0644
bitops_32.h File 4.01 KB 0644
bitops_64.h File 2.67 KB 0644
cache.h File 2.6 KB 0644
cacheflush.h File 4.94 KB 0644
checksum.h File 1.23 KB 0644
cmpxchg.h File 3.47 KB 0644
compat.h File 7.38 KB 0644
current.h File 947 B 0644
delay.h File 1.1 KB 0644
device.h File 978 B 0644
div64.h File 319 B 0644
dma-mapping.h File 1.82 KB 0644
dma.h File 762 B 0644
elf.h File 5.42 KB 0644
fixmap.h File 2.73 KB 0644
ftrace.h File 1.08 KB 0644
futex.h File 4.23 KB 0644
hardirq.h File 1.28 KB 0644
hardwall.h File 1.08 KB 0644
highmem.h File 2.08 KB 0644
homecache.h File 4.21 KB 0644
hugetlb.h File 3.04 KB 0644
hv_driver.h File 1.92 KB 0644
ide.h File 758 B 0644
insn.h File 1.79 KB 0644
io.h File 12.43 KB 0644
irq.h File 3.11 KB 0644
irq_work.h File 283 B 0644
irqflags.h File 10.55 KB 0644
jump_label.h File 1.47 KB 0644
kdebug.h File 769 B 0644
kexec.h File 2.23 KB 0644
kgdb.h File 1.99 KB 0644
kmap_types.h File 1.02 KB 0644
kprobes.h File 2.19 KB 0644
linkage.h File 1.46 KB 0644
mmu.h File 965 B 0644
mmu_context.h File 4.5 KB 0644
mmzone.h File 2.07 KB 0644
module.h File 1.12 KB 0644
page.h File 10.32 KB 0644
pci.h File 6.58 KB 0644
percpu.h File 1.79 KB 0644
perf_event.h File 766 B 0644
pgalloc.h File 4.76 KB 0644
pgtable.h File 15.66 KB 0644
pgtable_32.h File 4.08 KB 0644
pgtable_64.h File 5.11 KB 0644
pmc.h File 2.15 KB 0644
processor.h File 10.62 KB 0644
ptrace.h File 2.96 KB 0644
sections.h File 1.37 KB 0644
setup.h File 1.63 KB 0644
sigframe.h File 956 B 0644
signal.h File 1.07 KB 0644
smp.h File 3.98 KB 0644
spinlock.h File 741 B 0644
spinlock_32.h File 2.9 KB 0644
spinlock_64.h File 3.88 KB 0644
spinlock_types.h File 1.58 KB 0644
stack.h File 2.59 KB 0644
string.h File 1.19 KB 0644
switch_to.h File 2.75 KB 0644
syscall.h File 2.88 KB 0644
syscalls.h File 2.35 KB 0644
thread_info.h File 5.54 KB 0644
tile-desc.h File 650 B 0644
tile-desc_32.h File 12.54 KB 0644
tile-desc_64.h File 10.83 KB 0644
timex.h File 1.71 KB 0644
tlb.h File 878 B 0644
tlbflush.h File 3.99 KB 0644
topology.h File 1.52 KB 0644
traps.h File 2.44 KB 0644
uaccess.h File 12.77 KB 0644
unaligned.h File 1.56 KB 0644
unistd.h File 777 B 0644
user.h File 717 B 0644
vdso.h File 1.84 KB 0644
vga.h File 1.05 KB 0644
word-at-a-time.h File 1.07 KB 0644