21 #if !defined(__TBB_machine_H) || defined(__TBB_machine_icc_generic_H) 22 #error Do not #include this internal file directly; use public TBB headers instead. 25 #if ! __TBB_ICC_BUILTIN_ATOMICS_PRESENT 26 #error "Intel C++ Compiler of at least 12.0 version is needed to use ICC intrinsics port" 29 #define __TBB_machine_icc_generic_H 33 #include "msvc_ia32_common.h" 35 #include "gcc_ia32_common.h" 42 #define __TBB_WORDSIZE 4 44 #define __TBB_WORDSIZE 8 46 #define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE 49 #ifndef __TBB_compiler_fence 52 #pragma intrinsic(_ReadWriteBarrier) 53 #define __TBB_compiler_fence() _ReadWriteBarrier() 55 #define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory") 59 #ifndef __TBB_full_memory_fence 62 #pragma intrinsic(_mm_mfence) 63 #define __TBB_full_memory_fence() _mm_mfence() 65 #define __TBB_full_memory_fence() __asm__ __volatile__("mfence": : :"memory") 69 #ifndef __TBB_control_consistency_helper 70 #define __TBB_control_consistency_helper() __TBB_compiler_fence() 78 typedef enum memory_order {
79 memory_order_relaxed, memory_order_consume, memory_order_acquire,
80 memory_order_release, memory_order_acq_rel, memory_order_seq_cst
namespace icc_intrinsics_port {
    // Pass non-pointer arguments through unchanged.
    template <typename T>
    T convert_argument(T value){
        return value;
    }
    // Overload for pointer arguments: explicitly convert T* to void* so the
    // ICC __atomic_* intrinsics accept the argument.
    // NOTE(review): function bodies were lost in extraction and are
    // reconstructed here — confirm against upstream TBB icc_generic.h.
    template <typename T>
    void* convert_argument(T* value){
        return (void*)value;
    }
}
97 template <
typename T,
size_t S>
99 static T load_with_acquire (
const volatile T& location ) {
100 return __atomic_load_explicit(&location, memory_order_acquire);
102 static void store_with_release (
volatile T &location, T value ) {
103 __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_release);
107 template <
typename T,
size_t S>
109 static inline T load (
const T& location ) {
110 return __atomic_load_explicit(&location, memory_order_relaxed);
112 static inline void store ( T& location, T value ) {
113 __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_relaxed);
117 template <
typename T,
size_t S>
119 static T load (
const volatile T& location ) {
120 return __atomic_load_explicit(&location, memory_order_seq_cst);
123 static void store (
volatile T &location, T value ) {
124 __atomic_store_explicit(&location, value, memory_order_seq_cst);
130 namespace tbb{
namespace internal {
namespace icc_intrinsics_port{
131 typedef enum memory_order_map {
132 relaxed = memory_order_relaxed,
133 acquire = memory_order_acquire,
134 release = memory_order_release,
139 #define __TBB_MACHINE_DEFINE_ATOMICS(S,T,M) \ 140 inline T __TBB_machine_cmpswp##S##M( volatile void *ptr, T value, T comparand ) { \ 141 __atomic_compare_exchange_strong_explicit( \ 145 , tbb::internal::icc_intrinsics_port::M \ 146 , tbb::internal::icc_intrinsics_port::M); \ 150 inline T __TBB_machine_fetchstore##S##M(volatile void *ptr, T value) { \ 151 return __atomic_exchange_explicit((T*)ptr, value, tbb::internal::icc_intrinsics_port::M); \ 154 inline T __TBB_machine_fetchadd##S##M(volatile void *ptr, T value) { \ 155 return __atomic_fetch_add_explicit((T*)ptr, value, tbb::internal::icc_intrinsics_port::M); \ 158 __TBB_MACHINE_DEFINE_ATOMICS(1,tbb::internal::int8_t,
full_fence)
159 __TBB_MACHINE_DEFINE_ATOMICS(1,tbb::internal::int8_t,
acquire)
160 __TBB_MACHINE_DEFINE_ATOMICS(1,tbb::internal::int8_t,
release)
161 __TBB_MACHINE_DEFINE_ATOMICS(1,tbb::internal::int8_t,
relaxed)
163 __TBB_MACHINE_DEFINE_ATOMICS(2,tbb::internal::int16_t,
full_fence)
164 __TBB_MACHINE_DEFINE_ATOMICS(2,tbb::internal::int16_t,
acquire)
165 __TBB_MACHINE_DEFINE_ATOMICS(2,tbb::internal::int16_t,
release)
166 __TBB_MACHINE_DEFINE_ATOMICS(2,tbb::internal::int16_t,
relaxed)
168 __TBB_MACHINE_DEFINE_ATOMICS(4,tbb::internal::int32_t,
full_fence)
169 __TBB_MACHINE_DEFINE_ATOMICS(4,tbb::internal::int32_t,
acquire)
170 __TBB_MACHINE_DEFINE_ATOMICS(4,tbb::internal::int32_t,
release)
171 __TBB_MACHINE_DEFINE_ATOMICS(4,tbb::internal::int32_t,
relaxed)
173 __TBB_MACHINE_DEFINE_ATOMICS(8,tbb::internal::int64_t,
full_fence)
174 __TBB_MACHINE_DEFINE_ATOMICS(8,tbb::internal::int64_t,
acquire)
175 __TBB_MACHINE_DEFINE_ATOMICS(8,tbb::internal::int64_t,
release)
176 __TBB_MACHINE_DEFINE_ATOMICS(8,tbb::internal::int64_t,
relaxed)
#undef __TBB_MACHINE_DEFINE_ATOMICS

#define __TBB_USE_FENCED_ATOMICS 1

namespace tbb { namespace internal {
#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN
// Generic fenced 8-byte load/store fallbacks for locations that may not be
// 8-byte aligned (the __atomic_* intrinsics require natural alignment).
// Note the deliberate acquire/release pairing on the middle pair: loads only
// need acquire, stores only need release.
__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(full_fence)
__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(full_fence)

__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(acquire)
__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(release)

__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(relaxed)
__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(relaxed)

// 8-byte specializations: use the intrinsics only when the location is
// 8-byte aligned, otherwise fall back to the generic fenced helpers above.
// NOTE(review): the 'struct ...<T,8>' headers, else-branches and closing
// braces were lost in extraction and are reconstructed here — confirm
// against upstream TBB icc_generic.h.
template <typename T>
struct machine_load_store<T,8> {
    static T load_with_acquire ( const volatile T& location ) {
        if( tbb::internal::is_aligned(&location,8)) {
            return __atomic_load_explicit(&location, memory_order_acquire);
        } else {
            return __TBB_machine_generic_load8acquire(&location);
        }
    }
    static void store_with_release ( volatile T &location, T value ) {
        if( tbb::internal::is_aligned(&location,8)) {
            __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_release);
        } else {
            return __TBB_machine_generic_store8release(&location,value);
        }
    }
};

template <typename T>
struct machine_load_store_relaxed<T,8> {
    static T load( const volatile T& location ) {
        if( tbb::internal::is_aligned(&location,8)) {
            return __atomic_load_explicit(&location, memory_order_relaxed);
        } else {
            return __TBB_machine_generic_load8relaxed(&location);
        }
    }
    static void store( volatile T &location, T value ) {
        if( tbb::internal::is_aligned(&location,8)) {
            __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_relaxed);
        } else {
            return __TBB_machine_generic_store8relaxed(&location,value);
        }
    }
};

template <typename T>
struct machine_load_store_seq_cst<T,8> {
    static T load ( const volatile T& location ) {
        if( tbb::internal::is_aligned(&location,8)) {
            return __atomic_load_explicit(&location, memory_order_seq_cst);
        } else {
            return __TBB_machine_generic_load8full_fence(&location);
        }
    }
    static void store ( volatile T &location, T value ) {
        if( tbb::internal::is_aligned(&location,8)) {
            __atomic_store_explicit(&location, value, memory_order_seq_cst);
        } else {
            return __TBB_machine_generic_store8full_fence(&location,value);
        }
    }
};
#endif // __TBB_FORCE_64BIT_ALIGNMENT_BROKEN
}} // namespace tbb::internal
253 template <
typename T>
254 inline void __TBB_machine_OR( T *operand, T addend ) {
255 __atomic_fetch_or_explicit(operand, addend, tbb::internal::memory_order_seq_cst);
258 template <
typename T>
259 inline void __TBB_machine_AND( T *operand, T addend ) {
260 __atomic_fetch_and_explicit(operand, addend, tbb::internal::memory_order_seq_cst);
Definition: icc_generic.h:118
Definition: gcc_armv7.h:186
Acquire.
Definition: atomic.h:47
Definition: icc_generic.h:98
No ordering.
Definition: atomic.h:51
Sequential consistency.
Definition: atomic.h:45
Definition: _flow_graph_async_msg_impl.h:32
Release.
Definition: atomic.h:49
The namespace tbb contains all components of the library.
Definition: parallel_for.h:44