usr/src/linux-headers-5.15.0-142/arch/sh/include/asm/atomic-llsc.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

#define ATOMIC_OP(op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
 \
	__asm__ __volatile__ ( \
"1:	movli.l	@%2, %0		! atomic_" #op "\n" \
"	" #op "	%1, %0		\n" \
"	movco.l	%0, @%2		\n" \
"	bf	1b		\n" \
	: "=&z" (tmp) \
	: "r" (i), "r" (&v->counter) \
	: "t"); \
}

#define ATOMIC_OP_RETURN(op) \
static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
	unsigned long temp; \
 \
	__asm__ __volatile__ ( \
"1:	movli.l	@%2, %0		! atomic_" #op "_return	\n" \
"	" #op "	%1, %0		\n" \
"	movco.l	%0, @%2		\n" \
"	bf	1b		\n" \
"	synco			\n" \
	: "=&z" (temp) \
	: "r" (i), "r" (&v->counter) \
	: "t"); \
 \
	return temp; \
}

#define ATOMIC_FETCH_OP(op) \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
	unsigned long res, temp; \
 \
	__asm__ __volatile__ ( \
"1:	movli.l	@%3, %0		! atomic_fetch_" #op "	\n" \
"	mov	%0, %1		\n" \
"	" #op "	%2, %0		\n" \
"	movco.l	%0, @%3		\n" \
"	bf	1b		\n" \
"	synco			\n" \
	: "=&z" (temp), "=&r" (res) \
	: "r" (i), "r" (&v->counter) \
	: "t"); \
 \
	return res; \
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif /* __ASM_SH_ATOMIC_LLSC_H */
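The movli.l/movco.l pair above is a load-locked/store-conditional sequence: movco.l fails (clearing the T bit) if another CPU wrote the location in between, and "bf 1b" restarts the operation. As a rough portable analogue only -- not part of this header, and with a made-up function name -- the same retry shape can be written with the GCC/Clang __atomic builtins:

/* Sketch only: a portable analogue of the LL/SC retry loop in
 * ATOMIC_OP_RETURN above, using GCC/Clang __atomic builtins.
 * sketch_atomic_add_return is a hypothetical name, not a kernel API. */
static inline int sketch_atomic_add_return(int i, int *counter)
{
	int old = __atomic_load_n(counter, __ATOMIC_RELAXED);
	int new;

	do {
		new = old + i;
		/* A weak compare-exchange may fail spuriously, much like a
		 * failed store-conditional; on failure "old" is refreshed
		 * with the current value and the loop retries, playing the
		 * role of the "bf 1b" branch above. */
	} while (!__atomic_compare_exchange_n(counter, &old, new, 1,
					      __ATOMIC_SEQ_CST, __ATOMIC_RELAXED));

	return new;
}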
usr/src/linux-headers-5.15.0-133/arch/arc/include/asm/atomic-llsc.h

/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_ARC_ATOMIC_LLSC_H
#define _ASM_ARC_ATOMIC_LLSC_H

#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
	unsigned int val; \
 \
	__asm__ __volatile__( \
	"1:	llock   %[val], [%[ctr]]		\n" \
	"	" #asm_op " %[val], %[val], %[i]	\n" \
	"	scond   %[val], [%[ctr]]		\n" \
	"	bnz     1b				\n" \
	: [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
	: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
	  [i] "ir" (i) \
	: "cc", "memory"); \
} \

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
	unsigned int val; \
 \
	__asm__ __volatile__( \
	"1:	llock   %[val], [%[ctr]]		\n" \
	"	" #asm_op " %[val], %[val], %[i]	\n" \
	"	scond   %[val], [%[ctr]]		\n" \
	"	bnz     1b				\n" \
	: [val] "=&r" (val) \
	: [ctr] "r" (&v->counter), \
	  [i] "ir" (i) \
	: "cc", "memory"); \
 \
	return val; \
}

#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
	unsigned int val, orig; \
 \
	__asm__ __volatile__( \
	"1:	llock   %[orig], [%[ctr]]		\n" \
	"	" #asm_op " %[val], %[orig], %[i]	\n" \
	"	scond   %[val], [%[ctr]]		\n" \
	"	bnz     1b				\n" \
	: [val] "=&r" (val), \
	  [orig] "=&r" (orig) \
	: [ctr] "r" (&v->counter), \
	  [i] "ir" (i) \
	: "cc", "memory"); \
 \
	return orig; \
}

#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed
#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed

#define ATOMIC_OPS(op, c_op, asm_op) \
	ATOMIC_OP(op, c_op, asm_op) \
	ATOMIC_OP_RETURN(op, c_op, asm_op) \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#undef ATOMIC_OPS

#define ATOMIC_OPS(op, c_op, asm_op) \
	ATOMIC_OP(op, c_op, asm_op) \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

#define arch_atomic_andnot		arch_atomic_andnot

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif
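For reference, this is roughly what the preprocessor produces from ATOMIC_OP(add, +=, add) on ARC -- a hand expansion for illustration only; whitespace and comment placement differ from the real output, and the c_op argument (+=) is unused by this particular macro.

/* Hand expansion of ATOMIC_OP(add, +=, add) above, shown for
 * illustration only; the real definition is generated by the
 * preprocessor from the macros in this header. */
static inline void arch_atomic_add(int i, atomic_t *v)
{
	unsigned int val;

	__asm__ __volatile__(
	"1:	llock   %[val], [%[ctr]]	\n"
	"	add     %[val], %[val], %[i]	\n"
	"	scond   %[val], [%[ctr]]	\n"
	"	bnz     1b			\n"
	: [val] "=&r" (val)		/* early clobber: result must not alias an input reg */
	: [ctr] "r" (&v->counter),	/* register operand: llock cannot take an "m" operand */
	  [i] "ir" (i)
	: "cc", "memory");
}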