/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
 * O(1) scheduler patch
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 *
 * The address must be (at least) "long" aligned.
 * Note that there are drivers (e.g., eepro100) which use these operations to
 * operate on hw-defined data structures, so we can't easily change these
 * operations to force a bigger alignment.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
static __inline__ void
set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
}
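
/*
 * Illustrative note (not part of the original header): each routine here
 * splits @nr into a 32-bit word index (nr >> 5) and a bit position within
 * that word (nr & 31); the atomic variants then retry a cmpxchg until the
 * update lands without interference from a concurrent writer.  For a
 * hypothetical bitmap "unsigned long map[2]":
 *
 *	set_bit(40, map);	(word 1, since 40 >> 5 == 1; bit 8, 40 & 31)
 *	set_bit(3, map);	(word 0, bit 3)
 *
 * If another CPU changes the same word between the load of *m and the
 * cmpxchg_acq(), the compare fails and the loop re-reads and retries, so
 * no update is lost.
 */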

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__set_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
}
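
/*
 * Hedged usage sketch (illustrative only): when clear_bit() is used to
 * release a flag that other CPUs test, the caller must supply the
 * ordering that clear_bit() itself omits.  Assuming a hypothetical
 * BUSY_BIT in a hypothetical "flags" word:
 *
 *	... update the data the flag protects ...
 *	smp_mb__before_atomic();	(order those stores before the clear)
 *	clear_bit(BUSY_BIT, &flags);
 *
 * clear_bit_unlock() below packages the same idea by doing the cmpxchg
 * with release semantics instead of an explicit barrier.
 */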

/**
 * clear_bit_unlock - Clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and may not be reordered.  It does
 * contain a memory barrier suitable for unlock type operations.
 */
static __inline__ void
clear_bit_unlock (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_rel(m, old, new) != old);
}

/**
 * __clear_bit_unlock - Non-atomically clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Similarly to clear_bit_unlock, the implementation uses a store
 * with release semantics. See also arch_spin_unlock().
 */
static __inline__ void
__clear_bit_unlock(int nr, void *addr)
{
	__u32 * const m = (__u32 *) addr + (nr >> 5);
	__u32 const new = *m & ~(1 << (nr & 31));

	ia64_st4_rel_nta(m, new);
}
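
/*
 * Illustrative note (not part of the original header): ia64_st4_rel_nta()
 * emits a single st4.rel.nta instruction, i.e. a 4-byte store with
 * release ordering and a non-temporal cache hint, so the cleared word
 * becomes visible only after all preceding accesses.  Because this is a
 * plain load/modify/store rather than a cmpxchg loop, the caller must
 * ensure nothing else writes the same word concurrently.
 */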

/**
 * __clear_bit - Clears a bit in memory (non-atomic version)
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__change_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.  
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit() on ia64.
 */
#define test_and_set_bit_lock test_and_set_bit
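
/*
 * Hedged usage sketch (illustrative, not part of this header): the
 * _lock/_unlock pair can build a simple bit spinlock over a hypothetical
 * word "lockword":
 *
 *	while (test_and_set_bit_lock(0, &lockword))
 *		cpu_relax();			(bit was already set: spin)
 *	... critical section ...
 *	clear_bit_unlock(0, &lockword);		(release store orders the above)
 */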

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.  
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_set_bit (int nr, volatile void *addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p |= m;
	return oldbitset;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.  
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & ~mask) != 0;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.  
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_clear_bit(int nr, volatile void * addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p &= ~m;
	return oldbitset;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.  
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}

/**
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 */
static __inline__ int
__test_and_change_bit (int nr, void *addr)
{
	__u32 old, bit = (1 << (nr & 31));
	__u32 *m = (__u32 *) addr + (nr >> 5);

	old = *m;
	*m = old ^ bit;
	return (old & bit) != 0;
}

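/**
 * test_bit - Determine whether a bit is set
 * @nr: bit to test
 * @addr: Address to start counting from
 *
 * Returns 1 if bit @nr in the bitmap at @addr is set, 0 otherwise.  The
 * access is a single volatile load; it is not an atomic read-modify-write
 * and implies no memory barrier.
 */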
static __inline__ int
test_bit (int nr, const volatile void *addr)
{
	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
}

/**
 * ffz - find the first zero bit in a long word
 * @x: The long word to find the bit in
 *
 * Returns the bit-number (0..63) of the first (least significant) zero bit.
 * Undefined if no zero exists, so code should check against ~0UL first...
 */
static inline unsigned long
ffz (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt(x & (~x - 1));
	return result;
}
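
/*
 * Worked example (illustrative): for x = 0xb (binary ...01011) the first
 * zero bit is bit 2.  ~x has its lowest set bit there, so ~x - 1 sets
 * bits 0..1 and clears bit 2; x & (~x - 1) therefore keeps exactly the
 * one-bits of x below the first zero, and ia64_popcnt() of that value
 * (here 0x3, giving 2) is the answer.
 */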

/**
 * __ffs - find the first set bit in a word.
 * @x: The word to search
 *
 * Undefined if no bit is set, so code should check against 0 first.
 */
static __inline__ unsigned long
__ffs (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt((x-1) & ~x);
	return result;
}
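
/*
 * Worked example (illustrative): for x = 0x18 (binary 11000) the first
 * set bit is bit 3.  x - 1 == 10111 sets the bits below it, and ANDing
 * with ~x strips the high bits that x and x - 1 share, leaving 0x7, so
 * ia64_popcnt(0x7) == 3 == __ffs(0x18).
 */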

#ifdef __KERNEL__

/*
 * Return bit number of last (most-significant) bit set.  Undefined
 * for x==0.  Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3).
 */
static inline unsigned long
ia64_fls (unsigned long x)
{
	long double d = x;
	long exp;

	exp = ia64_getf_exp(d);
	return exp - 0xffff;
}
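
/*
 * Illustrative note (not part of the original header): converting x to
 * long double uses the 80-bit extended format, whose 64-bit significand
 * holds any unsigned long exactly, so the value is normalised to
 * 1.xxx * 2^e without rounding.  ia64_getf_exp() reads back the biased
 * exponent, and subtracting the bias 0xffff leaves e, the index of the
 * most significant set bit; e.g. 9 == 1.001b * 2^3, so ia64_fls(9) == 3.
 */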

/*
 * Find the last (most significant) bit set.  Returns 0 for x==0 and
 * bits are numbered from 1..32 (e.g., fls(9) == 4).
 */
static inline int
fls (int t)
{
	unsigned long x = t & 0xffffffffu;

	if (!x)
		return 0;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return ia64_popcnt(x);
}
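
/*
 * Worked example (illustrative): fls(9): x == 0b1001, and the shift/OR
 * cascade smears the highest set bit downwards, giving 0b1111;
 * ia64_popcnt(0b1111) == 4, the 1-based index of that bit.  fls(0)
 * returns 0 via the early check, before the cascade runs.
 */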

/*
 * Find the last (most significant) bit set.  Undefined for x==0.
 * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
 */
static inline unsigned long
__fls (unsigned long x)
{
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x |= x >> 32;
	return ia64_popcnt(x) - 1;
}

#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/builtin-ffs.h>

/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */
static __inline__ unsigned long __arch_hweight64(unsigned long x)
{
	unsigned long result;
	result = ia64_popcnt(x);
	return result;
}

#define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful))
#define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful))
#define __arch_hweight8(x)  ((unsigned int) __arch_hweight64((x) & 0xfful))
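
/*
 * Worked example (illustrative): __arch_hweight32(0xf0f0) == 8, while
 * __arch_hweight8(0xf0f0) == 4, because each narrower variant masks its
 * argument to the corresponding width before counting.
 */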

#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#endif /* _ASM_IA64_BITOPS_H */
