21 #ifndef __TBB_atomic_H 22 #define __TBB_atomic_H 27 #define __TBB_LONG_LONG __int64 29 #define __TBB_LONG_LONG long long 32 #include "tbb_machine.h" 34 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) 36 #pragma warning (push) 37 #pragma warning (disable: 4244 4267 4512) 57 #if __TBB_ATTRIBUTE_ALIGNED_PRESENT 58 #define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f __attribute__ ((aligned(a))); 59 #elif __TBB_DECLSPEC_ALIGN_PRESENT 60 #define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f; 62 #error Do not know syntax for forcing alignment. 69 struct atomic_rep<1> {
73 struct atomic_rep<2> {
77 struct atomic_rep<4> {
78 #if _MSC_VER && !_WIN64 80 typedef intptr_t word;
85 #if __TBB_64BIT_ATOMICS 87 struct atomic_rep<8> {
//! Primary template: holder for a value_type aligned for atomic access.
//! Only the size-specific specializations below are ever defined; the
//! primary template is left undefined on purpose.
template<typename value_type, size_t size>
struct aligned_storage;
96 #if __TBB_ATOMIC_CTORS 97 #define ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(S) \ 98 template<typename value_type> \ 99 struct aligned_storage<value_type,S> { \ 100 __TBB_DECL_ATOMIC_FIELD(value_type,my_value,S) \ 101 aligned_storage() = default ; \ 102 constexpr aligned_storage(value_type value):my_value(value){} \ 106 #define ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(S) \ 107 template<typename value_type> \ 108 struct aligned_storage<value_type,S> { \ 109 __TBB_DECL_ATOMIC_FIELD(value_type,my_value,S) \ 114 template<
typename value_type>
115 struct aligned_storage<value_type,1> {
117 #if __TBB_ATOMIC_CTORS 118 aligned_storage() = default ;
119 constexpr aligned_storage(value_type value):my_value(value){}
123 ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(2)
124 ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(4)
125 #if __TBB_64BIT_ATOMICS 126 ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(8)
129 template<
size_t Size, memory_semantics M>
130 struct atomic_traits;
132 #define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M) \ 133 template<> struct atomic_traits<S,M> { \ 134 typedef atomic_rep<S>::word word; \ 135 inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \ 136 return __TBB_machine_cmpswp##S##M(location,new_value,comparand); \ 138 inline static word fetch_and_add( volatile void* location, word addend ) { \ 139 return __TBB_machine_fetchadd##S##M(location,addend); \ 141 inline static word fetch_and_store( volatile void* location, word value ) { \ 142 return __TBB_machine_fetchstore##S##M(location,value); \ 146 #define __TBB_DECL_ATOMIC_PRIMITIVES(S) \ 147 template<memory_semantics M> \ 148 struct atomic_traits<S,M> { \ 149 typedef atomic_rep<S>::word word; \ 150 inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \ 151 return __TBB_machine_cmpswp##S(location,new_value,comparand); \ 153 inline static word fetch_and_add( volatile void* location, word addend ) { \ 154 return __TBB_machine_fetchadd##S(location,addend); \ 156 inline static word fetch_and_store( volatile void* location, word value ) { \ 157 return __TBB_machine_fetchstore##S(location,value); \ 161 template<memory_semantics M>
162 struct atomic_load_store_traits;
164 #define __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(M) \ 165 template<> struct atomic_load_store_traits<M> { \ 166 template <typename T> \ 167 inline static T load( const volatile T& location ) { \ 168 return __TBB_load_##M( location ); \ 170 template <typename T> \ 171 inline static void store( volatile T& location, T value ) { \ 172 __TBB_store_##M( location, value ); \ 176 #if __TBB_USE_FENCED_ATOMICS 177 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,
full_fence)
178 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,
full_fence)
179 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,
full_fence)
180 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,
acquire)
181 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,
acquire)
182 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,
acquire)
183 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,
release)
184 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,
release)
185 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,
release)
186 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,
relaxed)
187 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,
relaxed)
188 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,
relaxed)
189 #if __TBB_64BIT_ATOMICS 190 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,
full_fence)
191 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,
acquire)
192 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,
release)
193 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,
relaxed)
196 __TBB_DECL_ATOMIC_PRIMITIVES(1)
197 __TBB_DECL_ATOMIC_PRIMITIVES(2)
198 __TBB_DECL_ATOMIC_PRIMITIVES(4)
199 #if __TBB_64BIT_ATOMICS 200 __TBB_DECL_ATOMIC_PRIMITIVES(8)
204 __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(
full_fence);
205 __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(
acquire);
206 __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(
release);
207 __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(
relaxed);
212 #define __TBB_MINUS_ONE(T) (T(T(0)-T(1))) 220 aligned_storage<T,sizeof(T)> my_storage;
224 template<
typename value_type>
226 typedef typename atomic_rep<sizeof(value_type)>::word bits_type;
228 converter(value_type a_value) : value(a_value) {}
233 template<
typename value_t>
234 static typename converter<value_t>::bits_type to_bits(value_t value){
235 return converter<value_t>(value).bits;
237 template<
typename value_t>
238 static value_t to_value(
typename converter<value_t>::bits_type bits){
239 converter<value_t> u;
244 template<
typename value_t>
247 template<
typename value_t>
248 union ptr_converter<value_t *> {
250 ptr_converter(value_t* a_value) : value(a_value) {}
256 template<
typename value_t>
257 static typename converter<value_t>::bits_type & to_bits_ref(value_t& value){
261 return *(
typename converter<value_t>::bits_type*)ptr_converter<value_t*>(&value).bits;
263 return *(
typename converter<value_t>::bits_type*)(&value);
269 typedef T value_type;
271 #if __TBB_ATOMIC_CTORS 272 atomic_impl() = default ;
273 constexpr atomic_impl(value_type value):my_storage(value){}
275 template<memory_semantics M>
276 value_type fetch_and_store( value_type value ) {
277 return to_value<value_type>(
278 internal::atomic_traits<sizeof(value_type),M>::fetch_and_store( &my_storage.my_value, to_bits(value) )
282 value_type fetch_and_store( value_type value ) {
283 return fetch_and_store<full_fence>(value);
286 template<memory_semantics M>
287 value_type compare_and_swap( value_type value, value_type comparand ) {
288 return to_value<value_type>(
289 internal::atomic_traits<sizeof(value_type),M>::compare_and_swap( &my_storage.my_value, to_bits(value), to_bits(comparand) )
293 value_type compare_and_swap( value_type value, value_type comparand ) {
294 return compare_and_swap<full_fence>(value,comparand);
297 operator value_type()
const volatile {
298 return to_value<value_type>(
299 __TBB_load_with_acquire( to_bits_ref(my_storage.my_value) )
303 template<memory_semantics M>
304 value_type load ()
const {
305 return to_value<value_type>(
306 internal::atomic_load_store_traits<M>::load( to_bits_ref(my_storage.my_value) )
310 value_type load ()
const {
311 return load<acquire>();
314 template<memory_semantics M>
315 void store ( value_type value ) {
316 internal::atomic_load_store_traits<M>::store( to_bits_ref(my_storage.my_value), to_bits(value));
319 void store ( value_type value ) {
320 store<release>( value );
324 value_type store_with_release( value_type rhs ) {
326 __TBB_store_with_release( to_bits_ref(my_storage.my_value), to_bits(rhs) );
335 template<
typename I,
typename D,
typename StepType>
336 struct atomic_impl_with_arithmetic: atomic_impl<I> {
338 typedef I value_type;
339 #if __TBB_ATOMIC_CTORS 340 atomic_impl_with_arithmetic() = default ;
341 constexpr atomic_impl_with_arithmetic(value_type value): atomic_impl<I>(value){}
343 template<memory_semantics M>
344 value_type fetch_and_add( D addend ) {
345 return value_type(internal::atomic_traits<
sizeof(value_type),M>::fetch_and_add( &this->my_storage.my_value, addend*
sizeof(StepType) ));
348 value_type fetch_and_add( D addend ) {
349 return fetch_and_add<full_fence>(addend);
352 template<memory_semantics M>
353 value_type fetch_and_increment() {
354 return fetch_and_add<M>(1);
357 value_type fetch_and_increment() {
358 return fetch_and_add(1);
361 template<memory_semantics M>
362 value_type fetch_and_decrement() {
363 return fetch_and_add<M>(__TBB_MINUS_ONE(D));
366 value_type fetch_and_decrement() {
367 return fetch_and_add(__TBB_MINUS_ONE(D));
371 value_type operator+=( D value ) {
372 return fetch_and_add(value)+value;
375 value_type operator-=( D value ) {
378 return operator+=(D(0)-value);
381 value_type operator++() {
382 return fetch_and_add(1)+1;
385 value_type operator--() {
386 return fetch_and_add(__TBB_MINUS_ONE(D))-1;
389 value_type operator++(
int) {
390 return fetch_and_add(1);
393 value_type operator--(
int) {
394 return fetch_and_add(__TBB_MINUS_ONE(D));
405 struct atomic: internal::atomic_impl<T> {
406 #if __TBB_ATOMIC_CTORS 408 constexpr
atomic(T arg): internal::atomic_impl<T>(arg) {}
410 T operator=( T rhs ) {
412 return this->store_with_release(rhs);
414 atomic<T>& operator=(
const atomic<T>& rhs ) {this->store_with_release(rhs);
return *
this;}
417 #if __TBB_ATOMIC_CTORS 418 #define __TBB_DECL_ATOMIC(T) \ 419 template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \ 420 atomic() = default; \ 421 constexpr atomic(T arg): internal::atomic_impl_with_arithmetic<T,T,char>(arg) {} \ 423 T operator=( T rhs ) {return store_with_release(rhs);} \ 424 atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \ 427 #define __TBB_DECL_ATOMIC(T) \ 428 template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \ 429 T operator=( T rhs ) {return store_with_release(rhs);} \ 430 atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \ 434 #if __TBB_64BIT_ATOMICS 436 __TBB_DECL_ATOMIC(__TBB_LONG_LONG)
437 __TBB_DECL_ATOMIC(
unsigned __TBB_LONG_LONG)
441 __TBB_DECL_ATOMIC(
long)
442 __TBB_DECL_ATOMIC(
unsigned long)
444 #if _MSC_VER && !_WIN64 445 #if __TBB_ATOMIC_CTORS 451 #define __TBB_DECL_ATOMIC_ALT(T,U) \ 452 template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \ 453 atomic() = default ; \ 454 constexpr atomic(T arg): internal::atomic_impl_with_arithmetic<T,T,char>(arg) {} \ 455 T operator=( U rhs ) {return store_with_release(T(rhs));} \ 456 atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \ 459 #define __TBB_DECL_ATOMIC_ALT(T,U) \ 460 template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \ 461 T operator=( U rhs ) {return store_with_release(T(rhs));} \ 462 atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \ 465 __TBB_DECL_ATOMIC_ALT(
unsigned,
size_t)
466 __TBB_DECL_ATOMIC_ALT(
int,ptrdiff_t)
468 __TBB_DECL_ATOMIC(
unsigned)
469 __TBB_DECL_ATOMIC(
int)
472 __TBB_DECL_ATOMIC(
unsigned short)
473 __TBB_DECL_ATOMIC(
short)
474 __TBB_DECL_ATOMIC(
char)
475 __TBB_DECL_ATOMIC(
signed char)
476 __TBB_DECL_ATOMIC(
unsigned char)
478 #if !_MSC_VER || defined(_NATIVE_WCHAR_T_DEFINED) 479 __TBB_DECL_ATOMIC(
wchar_t)
483 template<
typename T>
struct atomic<T*>: internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T> {
484 #if __TBB_ATOMIC_CTORS 486 constexpr
atomic(T* arg): internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T>(arg) {}
488 T* operator=( T* rhs ) {
490 return this->store_with_release(rhs);
493 this->store_with_release(rhs);
return *
this;
495 T* operator->()
const {
501 template<>
struct atomic<void*>: internal::atomic_impl<void*> {
502 #if __TBB_ATOMIC_CTORS 504 constexpr
atomic(
void* arg): internal::atomic_impl<void*>(arg) {}
506 void* operator=(
void* rhs ) {
508 return this->store_with_release(rhs);
511 this->store_with_release(rhs);
return *
this;
518 template <memory_semantics M,
typename T>
519 T load (
const atomic<T>& a ) {
return a.template load<M>(); }
521 template <memory_semantics M,
typename T>
522 void store (
atomic<T>& a, T value ) { a.template store<M>(value); }
524 namespace interface6{
533 using interface6::make_atomic;
536 template<memory_semantics M,
typename T >
538 T tmp = load<M>(lhs);
539 store<M>(lhs,load<M>(rhs));
552 #if _MSC_VER && !__INTEL_COMPILER 553 #pragma warning (pop) 554 #endif // warnings 4244, 4267 are back Acquire.
Definition: atomic.h:47
Specialization for atomic<T*> with arithmetic and operator->.
Definition: atomic.h:483
No ordering.
Definition: atomic.h:51
memory_semantics
Specifies memory semantics.
Definition: atomic.h:43
Sequential consistency.
Definition: atomic.h:45
Primary template for atomic.
Definition: atomic.h:405
Definition: _flow_graph_async_msg_impl.h:32
Release.
Definition: atomic.h:49
The namespace tbb contains all components of the library.
Definition: parallel_for.h:44
Specialization for atomic<void*>, for sake of not allowing arithmetic or operator->.
Definition: atomic.h:501