21 #if !defined(__TBB_machine_H) || defined(__TBB_machine_linux_ia32_H) 22 #error Do not #include this internal file directly; use public TBB headers instead. 25 #define __TBB_machine_linux_ia32_H 28 #include "gcc_ia32_common.h" 30 #define __TBB_WORDSIZE 4 31 #define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE 33 #define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory") 34 #define __TBB_control_consistency_helper() __TBB_compiler_fence() 35 #define __TBB_acquire_consistency_helper() __TBB_compiler_fence() 36 #define __TBB_release_consistency_helper() __TBB_compiler_fence() 37 #define __TBB_full_memory_fence() __asm__ __volatile__("mfence": : :"memory") 39 #if __TBB_ICC_ASM_VOLATILE_BROKEN 40 #define __TBB_VOLATILE 42 #define __TBB_VOLATILE volatile 45 #define __TBB_MACHINE_DEFINE_ATOMICS(S,T,X,R) \ 46 static inline T __TBB_machine_cmpswp##S (volatile void *ptr, T value, T comparand ) \ 50 __asm__ __volatile__("lock\ncmpxchg" X " %2,%1" \ 51 : "=a"(result), "=m"(*(__TBB_VOLATILE T*)ptr) \ 52 : "q"(value), "0"(comparand), "m"(*(__TBB_VOLATILE T*)ptr) \ 57 static inline T __TBB_machine_fetchadd##S(volatile void *ptr, T addend) \ 60 __asm__ __volatile__("lock\nxadd" X " %0,%1" \ 61 : R (result), "=m"(*(__TBB_VOLATILE T*)ptr) \ 62 : "0"(addend), "m"(*(__TBB_VOLATILE T*)ptr) \ 67 static inline T __TBB_machine_fetchstore##S(volatile void *ptr, T value) \ 70 __asm__ __volatile__("lock\nxchg" X " %0,%1" \ 71 : R (result), "=m"(*(__TBB_VOLATILE T*)ptr) \ 72 : "0"(value), "m"(*(__TBB_VOLATILE T*)ptr) \ 77 __TBB_MACHINE_DEFINE_ATOMICS(1,int8_t,
"",
"=q")
78 __TBB_MACHINE_DEFINE_ATOMICS(2,int16_t,"","=r")
79 __TBB_MACHINE_DEFINE_ATOMICS(4,int32_t,"l","=r")
82 #pragma warning( push ) 84 #pragma warning( disable: 998 ) 87 #if __TBB_GCC_CAS8_BUILTIN_INLINING_BROKEN 88 #define __TBB_IA32_CAS8_NOINLINE __attribute__ ((noinline)) 90 #define __TBB_IA32_CAS8_NOINLINE 93 static inline __TBB_IA32_CAS8_NOINLINE int64_t __TBB_machine_cmpswp8 (
volatile void *ptr, int64_t value, int64_t comparand ) {
95 #if (__TBB_GCC_BUILTIN_ATOMICS_PRESENT || (__TBB_GCC_VERSION >= 40102)) && !__TBB_GCC_64BIT_ATOMIC_BUILTINS_BROKEN 96 return __sync_val_compare_and_swap( reinterpret_cast<volatile int64_t*>(ptr), comparand, value );
109 __asm__ __volatile__ (
113 "lock\n\t cmpxchg8b %1\n\t" 115 "lock\n\t cmpxchg8b (%3)\n\t" 119 ,
"=m"(*(__TBB_VOLATILE int64_t *)ptr)
122 :
"m"(*(__TBB_VOLATILE int64_t *)ptr)
127 ,
"m"(i32[0]),
"c"(i32[1])
134 __asm__ __volatile__ (
135 "lock\n\t cmpxchg8b %1\n\t" 136 :
"=A"(result),
"=m"(*(__TBB_VOLATILE int64_t *)ptr)
137 :
"m"(*(__TBB_VOLATILE int64_t *)ptr)
139 ,
"b"(i32[0]),
"c"(i32[1])
147 #undef __TBB_IA32_CAS8_NOINLINE 150 #pragma warning( pop ) 151 #endif // warning 998 is back 153 static inline void __TBB_machine_or(
volatile void *ptr, uint32_t addend ) {
154 __asm__ __volatile__(
"lock\norl %1,%0" :
"=m"(*(__TBB_VOLATILE uint32_t *)ptr) :
"r"(addend),
"m"(*(__TBB_VOLATILE uint32_t *)ptr) :
"memory");
157 static inline void __TBB_machine_and(
volatile void *ptr, uint32_t addend ) {
158 __asm__ __volatile__(
"lock\nandl %1,%0" :
"=m"(*(__TBB_VOLATILE uint32_t *)ptr) :
"r"(addend),
"m"(*(__TBB_VOLATILE uint32_t *)ptr) :
"memory");
166 #define __TBB_fildq "fildll" 167 #define __TBB_fistpq "fistpll" 169 #define __TBB_fildq "fildq" 170 #define __TBB_fistpq "fistpq" 173 static inline int64_t __TBB_machine_aligned_load8 (
const volatile void *ptr) {
174 __TBB_ASSERT(tbb::internal::is_aligned(ptr,8),
"__TBB_machine_aligned_load8 should be used with 8 byte aligned locations only \n");
176 __asm__ __volatile__ ( __TBB_fildq
" %1\n\t" 177 __TBB_fistpq
" %0" :
"=m"(result) :
"m"(*(
const __TBB_VOLATILE uint64_t*)ptr) :
"memory" );
181 static inline void __TBB_machine_aligned_store8 (
volatile void *ptr, int64_t value ) {
182 __TBB_ASSERT(tbb::internal::is_aligned(ptr,8),
"__TBB_machine_aligned_store8 should be used with 8 byte aligned locations only \n");
184 __asm__ __volatile__ ( __TBB_fildq
" %1\n\t" 185 __TBB_fistpq
" %0" :
"=m"(*(__TBB_VOLATILE int64_t*)ptr) :
"m"(value) :
"memory" );
188 static inline int64_t __TBB_machine_load8 (
const volatile void *ptr) {
189 #if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN 190 if( tbb::internal::is_aligned(ptr,8)) {
192 return __TBB_machine_aligned_load8(ptr);
193 #if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN 196 return __TBB_machine_cmpswp8(const_cast<void*>(ptr),0,0);
203 extern "C" void __TBB_machine_store8_slow(
volatile void *ptr, int64_t value );
204 extern "C" void __TBB_machine_store8_slow_perf_warning(
volatile void *ptr );
206 static inline void __TBB_machine_store8(
volatile void *ptr, int64_t value) {
207 #if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN 208 if( tbb::internal::is_aligned(ptr,8)) {
210 __TBB_machine_aligned_store8(ptr,value);
211 #if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN 214 #if TBB_USE_PERFORMANCE_WARNINGS 215 __TBB_machine_store8_slow_perf_warning(ptr);
217 __TBB_machine_store8_slow(ptr,value);
// Machine-specific atomic bit operations.
#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V)
#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V)

// IA-32 has no native 64-bit fetch-add/fetch-store; request the generic
// CAS-based implementations and the generic fenced/relaxed load-store forms.
#define __TBB_USE_GENERIC_DWORD_FETCH_ADD                   1
#define __TBB_USE_GENERIC_DWORD_FETCH_STORE                 1
#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE           1
#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE            1
#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE                1
#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1