usr/src/linux-headers-5.15.0-142/arch/ia64/include/uapi/asm/cmpxchg.h

/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_IA64_CMPXCHG_H
#define _UAPI_ASM_IA64_CMPXCHG_H

/*
 * Compare/Exchange, forked from asm/intrinsics.h
 * which was:
 *
 *	Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#ifndef __ASSEMBLY__

#include <linux/types.h>
/* include compiler specific intrinsics */
#include <asm/ia64regs.h>
#ifdef __INTEL_COMPILER
# include <asm/intel_intrin.h>
#else
# include <asm/gcc_intrin.h>
#endif

/*
 * This function doesn't exist, so you'll get a linker error if
 * something tries to do an invalid xchg().
 */
extern void ia64_xchg_called_with_bad_pointer(void);

#define __xchg(x, ptr, size)						\
({									\
	unsigned long __xchg_result;					\
									\
	switch (size) {							\
	case 1:								\
		__xchg_result = ia64_xchg1((__u8 *)ptr, x);		\
		break;							\
									\
	case 2:								\
		__xchg_result = ia64_xchg2((__u16 *)ptr, x);		\
		break;							\
									\
	case 4:								\
		__xchg_result = ia64_xchg4((__u32 *)ptr, x);		\
		break;							\
									\
	case 8:								\
		__xchg_result = ia64_xchg8((__u64 *)ptr, x);		\
		break;							\
	default:							\
		ia64_xchg_called_with_bad_pointer();			\
	}								\
	__xchg_result;							\
})

#ifndef __KERNEL__
#define xchg(ptr, x)							\
({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
#endif

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern long ia64_cmpxchg_called_with_bad_pointer(void);

#define ia64_cmpxchg(sem, ptr, old, new, size)				\
({									\
	__u64 _o_, _r_;							\
									\
	switch (size) {							\
	case 1:								\
		_o_ = (__u8) (long) (old);				\
		break;							\
	case 2:								\
		_o_ = (__u16) (long) (old);				\
		break;							\
	case 4:								\
		_o_ = (__u32) (long) (old);				\
		break;							\
	case 8:								\
		_o_ = (__u64) (long) (old);				\
		break;							\
	default:							\
		break;							\
	}								\
	switch (size) {							\
	case 1:								\
		_r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_);	\
		break;							\
									\
	case 2:								\
		_r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_);	\
		break;							\
									\
	case 4:								\
		_r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_);	\
		break;							\
									\
	case 8:								\
		_r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_);	\
		break;							\
									\
	default:							\
		_r_ = ia64_cmpxchg_called_with_bad_pointer();		\
		break;							\
	}								\
	(__typeof__(old)) _r_;						\
})

#define cmpxchg_acq(ptr, o, n)						\
	ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
#define cmpxchg_rel(ptr, o, n)						\
	ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))

/*
 * Worse still - early processor implementations actually just ignored
 * the acquire/release and did a full fence all the time.  Unfortunately
 * this meant a lot of badly written code that used .acq when they really
 * wanted .rel became legacy out in the wild - so when we made a cpu
 * that strictly did the .acq or .rel ... all that code started breaking - so
 * we had to back-pedal and keep the "legacy" behavior of a full fence :-(
 */

#ifndef __KERNEL__
/* for compatibility with other platforms: */
#define cmpxchg(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
#define cmpxchg64(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))

#define cmpxchg_local		cmpxchg
#define cmpxchg64_local		cmpxchg64
#endif

#ifdef CONFIG_IA64_DEBUG_CMPXCHG
# define CMPXCHG_BUGCHECK_DECL	int _cmpxchg_bugcheck_count = 128;
# define CMPXCHG_BUGCHECK(v)						\
do {									\
	if (_cmpxchg_bugcheck_count-- <= 0) {				\
		void *ip;						\
		extern int _printk(const char *fmt, ...);		\
		ip = (void *) ia64_getreg(_IA64_REG_IP);		\
		_printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n",	\
			ip, (v));					\
		break;							\
	}								\
} while (0)
#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
# define CMPXCHG_BUGCHECK_DECL
# define CMPXCHG_BUGCHECK(v)
#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */

#endif /* !__ASSEMBLY__ */

#endif /* _UAPI_ASM_IA64_CMPXCHG_H */
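The comment block above states the cmpxchg() contract: the macro returns whatever value was actually found in memory, and the caller detects success by comparing that return value against the expected OLD. A minimal sketch of the resulting retry-loop idiom, assuming a caller where cmpxchg_acq() from this header is visible (the function and variable names below are illustrative, not part of the header):

/* Hedged sketch: atomically add 'inc' to '*counter' via compare-and-exchange. */
static inline int sketch_add_return(int *counter, int inc)
{
	int old, seen;

	do {
		old = *counter;		/* snapshot the current value */
		/* returns the value actually found in memory */
		seen = cmpxchg_acq(counter, old, old + inc);
	} while (seen != old);		/* lost a race: retry with a fresh snapshot */

	return old + inc;
}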
usr/src/linux-headers-5.15.0-142/arch/arc/include/asm/cmpxchg.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H

#include <linux/build_bug.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/smp.h>

#ifdef CONFIG_ARC_HAS_LLSC

/*
 * if (*ptr == @old)
 *      *ptr = @new
 */
#define __cmpxchg(ptr, old, new)					\
({									\
	__typeof__(*(ptr)) _prev;					\
									\
	__asm__ __volatile__(						\
	"1:	llock  %0, [%1]	\n"					\
	"	brne   %0, %2, 2f	\n"				\
	"	scond  %3, [%1]	\n"					\
	"	bnz     1b		\n"				\
	"2:				\n"				\
	: "=&r"(_prev)	/* Early clobber prevents reg reuse */		\
	: "r"(ptr),	/* Not "m": llock only supports reg */		\
	  "ir"(old),							\
	  "r"(new)	/* Not "ir": scond can't take LIMM */		\
	: "cc",								\
	  "memory");	/* gcc knows memory is clobbered */		\
									\
	_prev;								\
})

#define arch_cmpxchg_relaxed(ptr, old, new)				\
({									\
	__typeof__(ptr) _p_ = (ptr);					\
	__typeof__(*(ptr)) _o_ = (old);					\
	__typeof__(*(ptr)) _n_ = (new);					\
	__typeof__(*(ptr)) _prev_;					\
									\
	switch (sizeof((_p_))) {					\
	case 4:								\
		_prev_ = __cmpxchg(_p_, _o_, _n_);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	_prev_;								\
})

#else

#define arch_cmpxchg(ptr, old, new)					\
({									\
	volatile __typeof__(ptr) _p_ = (ptr);				\
	__typeof__(*(ptr)) _o_ = (old);					\
	__typeof__(*(ptr)) _n_ = (new);					\
	__typeof__(*(ptr)) _prev_;					\
	unsigned long __flags;						\
									\
	BUILD_BUG_ON(sizeof(_p_) != 4);					\
									\
	/*								\
	 * spin lock/unlock provide the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(__flags);					\
	_prev_ = *_p_;							\
	if (_prev_ == _o_)						\
		*_p_ = _n_;						\
	atomic_ops_unlock(__flags);					\
	_prev_;								\
})

#endif

/*
 * xchg
 */
#ifdef CONFIG_ARC_HAS_LLSC

#define __xchg(ptr, val)						\
({									\
	__asm__ __volatile__(						\
	"	ex  %0, [%1]	\n"	/* set new value */		\
	: "+r"(val)							\
	: "r"(ptr)							\
	: "memory");							\
	_val_;		/* get old value */				\
})

#define arch_xchg_relaxed(ptr, val)					\
({									\
	__typeof__(ptr) _p_ = (ptr);					\
	__typeof__(*(ptr)) _val_ = (val);				\
									\
	switch (sizeof(*(_p_))) {					\
	case 4:								\
		_val_ = __xchg(_p_, _val_);				\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	_val_;								\
})

#else  /* !CONFIG_ARC_HAS_LLSC */

/*
 * The EX instruction is baseline and present in !LLSC too. But in this
 * regime it still needs to take the @atomic_ops_lock spinlock to allow
 * interop with cmpxchg(), which uses the spinlock in !LLSC
 * (llist.h uses xchg and cmpxchg on the same data).
 */
#define arch_xchg(ptr, val)						\
({									\
	__typeof__(ptr) _p_ = (ptr);					\
	__typeof__(*(ptr)) _val_ = (val);				\
									\
	unsigned long __flags;						\
									\
	atomic_ops_lock(__flags);					\
									\
	__asm__ __volatile__(						\
	"	ex  %0, [%1]	\n"					\
	: "+r"(_val_)							\
	: "r"(_p_)							\
	: "memory");							\
									\
	atomic_ops_unlock(__flags);					\
	_val_;								\
})

#endif

#endif
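For readers who don't speak ARC assembly, the llock/scond pair in __cmpxchg() above is the load-linked/store-conditional pattern. The following is a semantics-only sketch in plain C (the function name is illustrative; plain C loses the atomicity that the hardware link flag provides, so this documents the logic rather than replacing it):

/* Semantics-only sketch of the llock/scond sequence; NOT atomic as plain C. */
static inline unsigned int sketch_llsc_cmpxchg(volatile unsigned int *ptr,
					       unsigned int old,
					       unsigned int new)
{
	unsigned int prev = *ptr;  /* 1: llock %0, [%1]  - load, set link flag  */

	if (prev == old)           /*    brne %0, %2, 2f - bail out on mismatch */
		*ptr = new;        /*    scond %3, [%1]  - store only if link held; */
				   /*    bnz 1b          - retry when scond failed  */
	return prev;               /* 2: caller compares the result against 'old'   */
}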
usr/src/linux-headers-5.15.0-142/arch/parisc/include/asm/cmpxchg.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * forked from parisc asm/atomic.h which was:
 *	Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 *	Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_CMPXCHG_H_
#define _ASM_PARISC_CMPXCHG_H_

/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);

/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, volatile char *);
extern unsigned long __xchg32(int, volatile int *);
#ifdef CONFIG_64BIT
extern unsigned long __xchg64(unsigned long, volatile unsigned long *);
#endif

/* optimizer better get rid of switch since size is a constant */
static inline unsigned long
__xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
#ifdef CONFIG_64BIT
	case 8: return __xchg64(x, (volatile unsigned long *) ptr);
#endif
	case 4: return __xchg32((int) x, (volatile int *) ptr);
	case 1: return __xchg8((char) x, (volatile char *) ptr);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could CONFIG_64BIT code use LDCD too?
**
**	if (__builtin_constant_p(x) && (x == NULL))
**		if (((unsigned long)p & 0xf) == 0)
**			return __ldcw(p);
*/
#define arch_xchg(ptr, x)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) _x_ = (x);					\
	__ret = (__typeof__(*(ptr)))					\
		__xchg((unsigned long)_x_, (ptr), sizeof(*(ptr)));	\
	__ret;								\
})

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m,
				   unsigned int old, unsigned int new_);
extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);

/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch (size) {
#ifdef CONFIG_64BIT
	case 8: return __cmpxchg_u64((u64 *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32((unsigned int *)ptr,
				     (unsigned int)old, (unsigned int)new_);
	case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define arch_cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new_, int size)
{
	switch (size) {
#ifdef CONFIG_64BIT
	case 8:	return __cmpxchg_u64((u64 *)ptr, old, new_);
#endif
	case 4:	return __cmpxchg_u32(ptr, old, new_);
	default:
		return __generic_cmpxchg_local(ptr, old, new_, size);
	}
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define arch_cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#ifdef CONFIG_64BIT
#define arch_cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})
#else
#define arch_cmpxchg64_local(ptr, o, n)					\
	__generic_cmpxchg64_local((ptr), (o), (n))
#endif

#define arch_cmpxchg64(ptr, o, n)	__cmpxchg_u64(ptr, o, n)

#endif /* _ASM_PARISC_CMPXCHG_H_ */
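Since sizeof(*(ptr)) is a compile-time constant at every arch_cmpxchg() call site, the switch in __cmpxchg() above folds down to a single direct call into arch/parisc/lib/bitops.c, exactly as the "optimizer will get rid of most of this" comment promises. A hedged usage sketch (the variable and function names are illustrative only, not part of this header):

/* Illustrative caller: claim a one-shot 0 -> 1 transition on a 32-bit word. */
static unsigned int claim_flag;		/* hypothetical shared word */

static int sketch_try_claim(void)
{
	/* sizeof(*(&claim_flag)) == 4, so this folds to __cmpxchg_u32() */
	unsigned int was = arch_cmpxchg(&claim_flag, 0U, 1U);

	return was == 0U;		/* nonzero iff we won the transition */
}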