barrier.h.tar (/home/newsbmcs.com/public_html/static/img/logo/barrier.h.tar): a collection of arch-specific <asm/barrier.h> headers from linux-headers-5.15.0 source trees.

usr/src/linux-headers-5.15.0-142/arch/mips/include/asm/barrier.h:

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#include <asm/addrspace.h>
#include <asm/sync.h>

static inline void __sync(void)
{
	asm volatile(__SYNC(full, always) ::: "memory");
}

static inline void rmb(void)
{
	asm volatile(__SYNC(rmb, always) ::: "memory");
}
#define rmb rmb

static inline void wmb(void)
{
	asm volatile(__SYNC(wmb, always) ::: "memory");
}
#define wmb wmb

#define fast_mb()	__sync()

#define __fast_iob() \
	__asm__ __volatile__( \
		".set push\n\t" \
		".set noreorder\n\t" \
		"lw $0,%0\n\t" \
		"nop\n\t" \
		".set pop" \
		: /* no output */ \
		: "m" (*(int *)CKSEG1) \
		: "memory")

#ifdef CONFIG_CPU_CAVIUM_OCTEON
# define fast_iob()	do { } while (0)
#else /* ! CONFIG_CPU_CAVIUM_OCTEON */
# ifdef CONFIG_SGI_IP28
#  define fast_iob() \
	__asm__ __volatile__( \
		".set push\n\t" \
		".set noreorder\n\t" \
		"lw $0,%0\n\t" \
		"sync\n\t" \
		"lw $0,%0\n\t" \
		".set pop" \
		: /* no output */ \
		: "m" (*(int *)CKSEG1ADDR(0x1fa00004)) \
		: "memory")
# else
#  define fast_iob() \
	do { \
		__sync(); \
		__fast_iob(); \
	} while (0)
# endif
#endif /* CONFIG_CPU_CAVIUM_OCTEON */

#ifdef CONFIG_CPU_HAS_WB
#include <asm/wbflush.h>
#define mb()	wbflush()
#define iob()	wbflush()
#else /* !CONFIG_CPU_HAS_WB */
#define mb()	fast_mb()
#define iob()	fast_iob()
#endif /* !CONFIG_CPU_HAS_WB */

#if defined(CONFIG_WEAK_ORDERING)
# define __smp_mb()	__sync()
# define __smp_rmb()	rmb()
# define __smp_wmb()	wmb()
#else
# define __smp_mb()	barrier()
# define __smp_rmb()	barrier()
# define __smp_wmb()	barrier()
#endif

/*
 * When LL/SC does imply order, it must also be a compiler barrier to avoid the
 * compiler from reordering where the CPU will not. When it does not imply
 * order, the compiler is also free to reorder across the LL/SC loop and
 * ordering will be done by smp_llsc_mb() and friends.
 */
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
# define __WEAK_LLSC_MB		sync
# define smp_llsc_mb() \
	__asm__ __volatile__(__stringify(__WEAK_LLSC_MB) : : :"memory")
# define __LLSC_CLOBBER
#else
# define __WEAK_LLSC_MB
# define smp_llsc_mb()		do { } while (0)
# define __LLSC_CLOBBER		"memory"
#endif

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define smp_mb__before_llsc() smp_wmb()
#define __smp_mb__before_llsc() __smp_wmb()
/* Cause previous writes to become visible on all CPUs as soon as possible */
#define nudge_writes() __asm__ __volatile__(".set push\n\t" \
					    ".set arch=octeon\n\t" \
					    "syncw\n\t" \
					    ".set pop" : : : "memory")
#else
#define smp_mb__before_llsc() smp_llsc_mb()
#define __smp_mb__before_llsc() smp_llsc_mb()
#define nudge_writes() mb()
#endif

/*
 * In the Loongson3 LL/SC workaround case, all of our LL/SC loops already have
 * a completion barrier immediately preceding the LL instruction. Therefore we
 * can skip emitting a barrier from __smp_mb__before_atomic().
 */
#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS
# define __smp_mb__before_atomic()
#else
# define __smp_mb__before_atomic()	__smp_mb__before_llsc()
#endif

#define __smp_mb__after_atomic()	smp_llsc_mb()

static inline void sync_ginv(void)
{
	asm volatile(__SYNC(ginv, always));
}

#include <asm-generic/barrier.h>

#endif /* __ASM_BARRIER_H */

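The __smp_mb__before_atomic()/__smp_mb__after_atomic() hooks that the MIPS header wires up above back the generic smp_mb__before_atomic()/smp_mb__after_atomic() pair, which is normally used to add full ordering around an otherwise relaxed read-modify-write such as clear_bit(). A minimal illustrative sketch, not part of the header; the flag word and bit name are invented for the example:

#include <linux/atomic.h>
#include <linux/bitops.h>

static unsigned long my_flags;		/* hypothetical flag word   */
#define MY_BIT_BUSY	0		/* hypothetical bit number  */

static void release_busy(void)
{
	/* Order all prior stores before the RMW below. */
	smp_mb__before_atomic();
	clear_bit(MY_BIT_BUSY, &my_flags);	/* relaxed atomic RMW */
	/* Order the RMW before any later loads/stores. */
	smp_mb__after_atomic();
}
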
usr/src/linux-headers-5.15.0-142/arch/ia64/include/asm/barrier.h:

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Memory barrier definitions.  This is based on information published
 * in the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _ASM_IA64_BARRIER_H
#define _ASM_IA64_BARRIER_H

#include <linux/compiler.h>

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()		ia64_mf()
#define rmb()		mb()
#define wmb()		mb()

#define dma_rmb()	mb()
#define dma_wmb()	mb()

# define __smp_mb()	mb()

#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()

/*
 * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
 * need for asm trickery!
 */
#define __smp_store_release(p, v) \
do { \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	WRITE_ONCE(*p, v); \
} while (0)

#define __smp_load_acquire(p) \
({ \
	typeof(*p) ___p1 = READ_ONCE(*p); \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	___p1; \
})

/*
 * The group barrier in front of the rsm & ssm are necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */

#include <asm-generic/barrier.h>

#endif /* _ASM_IA64_BARRIER_H */

usr/src/linux-headers-5.15.0-141/arch/microblaze/include/asm/barrier.h:

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015 - 2020 Xilinx, Inc. All rights reserved.
 */

#ifndef _ASM_MICROBLAZE_BARRIER_H
#define _ASM_MICROBLAZE_BARRIER_H

#define mb()	__asm__ __volatile__ ("mbar 1" : : : "memory")

#include <asm-generic/barrier.h>

#endif /* _ASM_MICROBLAZE_BARRIER_H */

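The __smp_store_release()/__smp_load_acquire() definitions in the ia64 header above are the arch halves of the generic smp_store_release()/smp_load_acquire() pairing. A hedged sketch of the publish/consume pattern they are intended for; the variables and functions are invented for the example and are not part of the header:

#include <asm/barrier.h>

static int payload;
static int ready;

static void producer(void)
{
	payload = 42;			/* plain store                       */
	smp_store_release(&ready, 1);	/* publish: payload ordered before   */
}

static int consumer(void)
{
	if (smp_load_acquire(&ready))	/* acquire: ordered before the read  */
		return payload;		/* guaranteed to observe 42          */
	return -1;			/* not published yet                 */
}
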
usr/src/linux-headers-5.15.0-133/arch/arc/include/asm/barrier.h:

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifdef CONFIG_ISA_ARCV2

/*
 * ARCv2 based HS38 cores are in-order issue, but still weakly ordered
 * due to micro-arch buffering/queuing of load/store, cache hit vs. miss ...
 *
 * Explicit barrier provided by DMB instruction
 *  - Operand supports fine grained load/store/load+store semantics
 *  - Ensures that selected memory operation issued before it will complete
 *    before any subsequent memory operation of same type
 *  - DMB guarantees SMP as well as local barrier semantics
 *    (asm-generic/barrier.h ensures sane smp_*mb if not defined here, i.e.
 *    UP: barrier(), SMP: smp_*mb == *mb)
 *  - DSYNC provides DMB+completion_of_cache_bpu_maintenance_ops hence not needed
 *    in the general case. Plus it only provides full barrier.
 */
#define mb()	asm volatile("dmb 3\n" : : : "memory")
#define rmb()	asm volatile("dmb 1\n" : : : "memory")
#define wmb()	asm volatile("dmb 2\n" : : : "memory")

#else

/*
 * ARCompact based cores (ARC700) only have SYNC instruction which is super
 * heavy weight as it flushes the pipeline as well.
 * There are no real SMP implementations of such cores.
 */
#define mb()	asm volatile("sync\n" : : : "memory")

#endif

#include <asm-generic/barrier.h>

#endif

usr/src/linux-headers-5.15.0-133/arch/s390/include/asm/barrier.h:

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define __ASM_BARRIER "bcr 14,0\n"
#else
#define __ASM_BARRIER "bcr 15,0\n"
#endif

#define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0)

#define rmb()		barrier()
#define wmb()		barrier()
#define dma_rmb()	mb()
#define dma_wmb()	mb()
#define __smp_mb()	mb()
#define __smp_rmb()	rmb()
#define __smp_wmb()	wmb()

#define __smp_store_release(p, v) \
do { \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	WRITE_ONCE(*p, v); \
} while (0)

#define __smp_load_acquire(p) \
({ \
	typeof(*p) ___p1 = READ_ONCE(*p); \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	___p1; \
})

#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()

/**
 * array_index_mask_nospec - generate a mask for array_idx() that is
 * ~0UL when the bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 */
#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long index,
						    unsigned long size)
{
	unsigned long mask;

	if (__builtin_constant_p(size) && size > 0) {
		asm(" clgr %2,%1\n"
		    " slbgr %0,%0\n"
		    : "=d" (mask) : "d" (size-1), "d" (index) : "cc");
		return mask;
	}
	asm(" clgr %1,%2\n"
	    " slbgr %0,%0\n"
	    : "=d" (mask) : "d" (size), "d" (index) : "cc");
	return ~mask;
}

#include <asm-generic/barrier.h>

#endif /* __ASM_BARRIER_H */

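array_index_mask_nospec() in the s390 header above is the arch hook behind the generic array_index_nospec() helper (include/linux/nospec.h), which clamps an index under speculation after a bounds check. A brief illustrative sketch, with a made-up table and function name:

#include <linux/errno.h>	/* -EINVAL            */
#include <linux/kernel.h>	/* ARRAY_SIZE()       */
#include <linux/nospec.h>	/* array_index_nospec() */

static int my_table[16];	/* hypothetical lookup table */

static int my_lookup(unsigned long idx)
{
	if (idx >= ARRAY_SIZE(my_table))
		return -EINVAL;
	/*
	 * The bounds check above may be bypassed speculatively; the mask
	 * (~0UL when in bounds, 0 otherwise) forces idx to 0 in that case.
	 */
	idx = array_index_nospec(idx, ARRAY_SIZE(my_table));
	return my_table[idx];
}
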
usr/src/linux-headers-5.15.0-133/arch/parisc/include/asm/barrier.h:

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#include <asm/alternative.h>

#ifndef __ASSEMBLY__

/* The synchronize caches instruction executes as a nop on systems in
   which all memory references are performed in order. */
#define synchronize_caches() asm volatile("sync" \
	ALTERNATIVE(ALT_COND_NO_SMP, INSN_NOP) \
	: : : "memory")

#if defined(CONFIG_SMP)
#define mb()		do { synchronize_caches(); } while (0)
#define rmb()		mb()
#define wmb()		mb()
#define dma_rmb()	mb()
#define dma_wmb()	mb()
#else
#define mb()		barrier()
#define rmb()		barrier()
#define wmb()		barrier()
#define dma_rmb()	barrier()
#define dma_wmb()	barrier()
#endif

#define __smp_mb()	mb()
#define __smp_rmb()	mb()
#define __smp_wmb()	mb()

#define __smp_store_release(p, v) \
do { \
	typeof(p) __p = (p); \
	union { typeof(*p) __val; char __c[1]; } __u = \
		{ .__val = (__force typeof(*p)) (v) }; \
	compiletime_assert_atomic_type(*p); \
	switch (sizeof(*p)) { \
	case 1: \
		asm volatile("stb,ma %0,0(%1)" \
				: : "r"(*(__u8 *)__u.__c), "r"(__p) \
				: "memory"); \
		break; \
	case 2: \
		asm volatile("sth,ma %0,0(%1)" \
				: : "r"(*(__u16 *)__u.__c), "r"(__p) \
				: "memory"); \
		break; \
	case 4: \
		asm volatile("stw,ma %0,0(%1)" \
				: : "r"(*(__u32 *)__u.__c), "r"(__p) \
				: "memory"); \
		break; \
	case 8: \
		if (IS_ENABLED(CONFIG_64BIT)) \
			asm volatile("std,ma %0,0(%1)" \
				: : "r"(*(__u64 *)__u.__c), "r"(__p) \
				: "memory"); \
		break; \
	} \
} while (0)

#define __smp_load_acquire(p) \
({ \
	union { typeof(*p) __val; char __c[1]; } __u; \
	typeof(p) __p = (p); \
	compiletime_assert_atomic_type(*p); \
	switch (sizeof(*p)) { \
	case 1: \
		asm volatile("ldb,ma 0(%1),%0" \
				: "=r"(*(__u8 *)__u.__c) : "r"(__p) \
				: "memory"); \
		break; \
	case 2: \
		asm volatile("ldh,ma 0(%1),%0" \
				: "=r"(*(__u16 *)__u.__c) : "r"(__p) \
				: "memory"); \
		break; \
	case 4: \
		asm volatile("ldw,ma 0(%1),%0" \
				: "=r"(*(__u32 *)__u.__c) : "r"(__p) \
				: "memory"); \
		break; \
	case 8: \
		if (IS_ENABLED(CONFIG_64BIT)) \
			asm volatile("ldd,ma 0(%1),%0" \
				: "=r"(*(__u64 *)__u.__c) : "r"(__p) \
				: "memory"); \
		break; \
	} \
	__u.__val; \
})

#include <asm-generic/barrier.h>

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_BARRIER_H */

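The parisc header above maps dma_rmb()/dma_wmb() onto its mb() implementation; these barriers order CPU accesses to consistent DMA memory, typically when publishing a descriptor to a device. A minimal sketch of that pattern, assuming an invented descriptor layout and function name:

#include <linux/types.h>
#include <asm/barrier.h>

struct my_desc {			/* hypothetical DMA descriptor    */
	u32 addr;
	u32 len;
	u32 status;			/* nonzero: owned by the device   */
};

static void post_descriptor(struct my_desc *d, u32 addr, u32 len)
{
	d->addr = addr;
	d->len  = len;
	/* Make addr/len visible to the device before handing over ownership. */
	dma_wmb();
	d->status = 1;
}
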
usr/src/linux-headers-5.15.0-142/arch/sh/include/asm/barrier.h:

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
 * Copyright (C) 2002 Paul Mundt
 */
#ifndef __ASM_SH_BARRIER_H
#define __ASM_SH_BARRIER_H

#if defined(CONFIG_CPU_SH4A)
#include <asm/cache_insns.h>
#endif

/*
 * A brief note on ctrl_barrier(), the control register write barrier.
 *
 * Legacy SH cores typically require a sequence of 8 nops after
 * modification of a control register in order for the changes to take
 * effect. On newer cores (like the sh4a and sh5) this is accomplished
 * with icbi.
 *
 * Also note that on sh4a in the icbi case we can forego a synco for the
 * write barrier, as it's not necessary for control registers.
 *
 * Historically we have only done this type of barrier for the MMUCR, but
 * it's also necessary for the CCR, so we make it generic here instead.
 */
#if defined(CONFIG_CPU_SH4A)
#define mb()		__asm__ __volatile__ ("synco": : :"memory")
#define rmb()		mb()
#define wmb()		mb()
#define ctrl_barrier()	__icbi(PAGE_OFFSET)
#else
#if defined(CONFIG_CPU_J2) && defined(CONFIG_SMP)
#define __smp_mb()	do { int tmp = 0; __asm__ __volatile__ ("cas.l %0,%0,@%1" : "+r"(tmp) : "z"(&tmp) : "memory", "t"); } while(0)
#define __smp_rmb()	__smp_mb()
#define __smp_wmb()	__smp_mb()
#endif
#define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#endif

#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

#include <asm-generic/barrier.h>

#endif /* __ASM_SH_BARRIER_H */

usr/src/linux-headers-5.15.0-142/arch/parisc/include/asm/barrier.h:

(Contents identical to usr/src/linux-headers-5.15.0-133/arch/parisc/include/asm/barrier.h above.)

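The SH header above supplies __smp_store_mb() via xchg(); the generic smp_store_mb() built on it stores a value and then issues a full barrier, the first half of a store/load handshake (roughly what set_current_state() does before a wait condition is tested). A small illustrative sketch with invented flag names, not part of the header:

#include <linux/compiler.h>	/* READ_ONCE()     */
#include <asm/barrier.h>	/* smp_store_mb()  */

static int want_wakeup;		/* hypothetical: set by the waiter  */
static int event_pending;	/* hypothetical: set by the waker   */

static int waiter_should_sleep(void)
{
	/* Store our flag, then a full barrier, before reading theirs. */
	smp_store_mb(want_wakeup, 1);
	return !READ_ONCE(event_pending);
}
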