BRE12
atomic.h
1 /*
2  Copyright 2005-2016 Intel Corporation. All Rights Reserved.
3 
4  This file is part of Threading Building Blocks. Threading Building Blocks is free software;
5  you can redistribute it and/or modify it under the terms of the GNU General Public License
6  version 2 as published by the Free Software Foundation. Threading Building Blocks is
7  distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
8  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
9  See the GNU General Public License for more details. You should have received a copy of
10  the GNU General Public License along with Threading Building Blocks; if not, write to the
11  Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
12 
13  As a special exception, you may use this file as part of a free software library without
14  restriction. Specifically, if other files instantiate templates or use macros or inline
15  functions from this file, or you compile this file and link it with other files to produce
16  an executable, this file does not by itself cause the resulting executable to be covered
17  by the GNU General Public License. This exception does not however invalidate any other
18  reasons why the executable file might be covered by the GNU General Public License.
19 */
20 
21 #ifndef __TBB_atomic_H
22 #define __TBB_atomic_H
23 
24 #include <cstddef>
25 
26 #if _MSC_VER
27 #define __TBB_LONG_LONG __int64
28 #else
29 #define __TBB_LONG_LONG long long
30 #endif /* _MSC_VER */
31 
32 #include "tbb_machine.h"
33 
34 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
35  // Workaround for overzealous compiler warnings
36  #pragma warning (push)
37  #pragma warning (disable: 4244 4267 4512)
38 #endif
39 
40 namespace tbb {
41 
52 };
53 
55 namespace internal {
56 
57 #if __TBB_ATTRIBUTE_ALIGNED_PRESENT
58  #define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f __attribute__ ((aligned(a)));
59 #elif __TBB_DECLSPEC_ALIGN_PRESENT
60  #define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f;
61 #else
62  #error Do not know syntax for forcing alignment.
63 #endif
64 
//! Maps a size in bytes to the signed integral "word" type used to hold the atomic's bits.
template<size_t S>
struct atomic_rep;           // Primary template declared, but never defined.

template<>
struct atomic_rep<1> {       // Specialization
    typedef int8_t word;
};
template<>
struct atomic_rep<2> {       // Specialization
    typedef int16_t word;
};
template<>
struct atomic_rep<4> {       // Specialization
#if _MSC_VER && !_WIN64
    // Work-around that avoids spurious /Wp64 warnings
    typedef intptr_t word;
#else
    typedef int32_t word;
#endif
};
#if __TBB_64BIT_ATOMICS
template<>
struct atomic_rep<8> {       // Specialization
    typedef int64_t word;
};
#endif
91 
92 template<typename value_type, size_t size>
93 struct aligned_storage;
94 
95 //the specializations are needed to please MSVC syntax of __declspec(align()) which accept _literal_ constants only
96 #if __TBB_ATOMIC_CTORS
97  #define ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(S) \
98  template<typename value_type> \
99  struct aligned_storage<value_type,S> { \
100  __TBB_DECL_ATOMIC_FIELD(value_type,my_value,S) \
101  aligned_storage() = default ; \
102  constexpr aligned_storage(value_type value):my_value(value){} \
103  }; \
104 
105 #else
106  #define ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(S) \
107  template<typename value_type> \
108  struct aligned_storage<value_type,S> { \
109  __TBB_DECL_ATOMIC_FIELD(value_type,my_value,S) \
110  }; \
111 
112 #endif
113 
114 template<typename value_type>
115 struct aligned_storage<value_type,1> {
116  value_type my_value;
117 #if __TBB_ATOMIC_CTORS
118  aligned_storage() = default ;
119  constexpr aligned_storage(value_type value):my_value(value){}
120 #endif
121 };
122 
123 ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(2)
124 ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(4)
125 #if __TBB_64BIT_ATOMICS
126 ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(8)
127 #endif
128 
129 template<size_t Size, memory_semantics M>
130 struct atomic_traits; // Primary template declared, but not defined.
131 
132 #define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M) \
133  template<> struct atomic_traits<S,M> { \
134  typedef atomic_rep<S>::word word; \
135  inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \
136  return __TBB_machine_cmpswp##S##M(location,new_value,comparand); \
137  } \
138  inline static word fetch_and_add( volatile void* location, word addend ) { \
139  return __TBB_machine_fetchadd##S##M(location,addend); \
140  } \
141  inline static word fetch_and_store( volatile void* location, word value ) { \
142  return __TBB_machine_fetchstore##S##M(location,value); \
143  } \
144  };
145 
146 #define __TBB_DECL_ATOMIC_PRIMITIVES(S) \
147  template<memory_semantics M> \
148  struct atomic_traits<S,M> { \
149  typedef atomic_rep<S>::word word; \
150  inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \
151  return __TBB_machine_cmpswp##S(location,new_value,comparand); \
152  } \
153  inline static word fetch_and_add( volatile void* location, word addend ) { \
154  return __TBB_machine_fetchadd##S(location,addend); \
155  } \
156  inline static word fetch_and_store( volatile void* location, word value ) { \
157  return __TBB_machine_fetchstore##S(location,value); \
158  } \
159  };
160 
161 template<memory_semantics M>
162 struct atomic_load_store_traits; // Primary template declaration
163 
164 #define __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(M) \
165  template<> struct atomic_load_store_traits<M> { \
166  template <typename T> \
167  inline static T load( const volatile T& location ) { \
168  return __TBB_load_##M( location ); \
169  } \
170  template <typename T> \
171  inline static void store( volatile T& location, T value ) { \
172  __TBB_store_##M( location, value ); \
173  } \
174  }
175 
176 #if __TBB_USE_FENCED_ATOMICS
177 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,full_fence)
178 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,full_fence)
179 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,full_fence)
180 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,acquire)
181 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,acquire)
182 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire)
183 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,release)
184 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,release)
185 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,release)
186 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,relaxed)
187 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,relaxed)
188 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,relaxed)
189 #if __TBB_64BIT_ATOMICS
190 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,full_fence)
191 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,acquire)
192 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,release)
193 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,relaxed)
194 #endif
195 #else /* !__TBB_USE_FENCED_ATOMICS */
196 __TBB_DECL_ATOMIC_PRIMITIVES(1)
197 __TBB_DECL_ATOMIC_PRIMITIVES(2)
198 __TBB_DECL_ATOMIC_PRIMITIVES(4)
199 #if __TBB_64BIT_ATOMICS
200 __TBB_DECL_ATOMIC_PRIMITIVES(8)
201 #endif
202 #endif /* !__TBB_USE_FENCED_ATOMICS */
203 
204 __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(full_fence);
205 __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(acquire);
206 __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(release);
207 __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(relaxed);
208 
210 
212 #define __TBB_MINUS_ONE(T) (T(T(0)-T(1)))
213 
215 
217 template<typename T>
218 struct atomic_impl {
219 protected:
220  aligned_storage<T,sizeof(T)> my_storage;
221 private:
222  //TODO: rechecks on recent versions of gcc if union is still the _only_ way to do a conversion without warnings
224  template<typename value_type>
225  union converter {
226  typedef typename atomic_rep<sizeof(value_type)>::word bits_type;
227  converter(){}
228  converter(value_type a_value) : value(a_value) {}
229  value_type value;
230  bits_type bits;
231  };
232 
233  template<typename value_t>
234  static typename converter<value_t>::bits_type to_bits(value_t value){
235  return converter<value_t>(value).bits;
236  }
237  template<typename value_t>
238  static value_t to_value(typename converter<value_t>::bits_type bits){
239  converter<value_t> u;
240  u.bits = bits;
241  return u.value;
242  }
243 
244  template<typename value_t>
245  union ptr_converter; //Primary template declared, but never defined.
246 
247  template<typename value_t>
248  union ptr_converter<value_t *> {
249  ptr_converter(){}
250  ptr_converter(value_t* a_value) : value(a_value) {}
251  value_t* value;
252  uintptr_t bits;
253  };
254  //TODO: check if making to_bits accepting reference (thus unifying it with to_bits_ref)
255  //does not hurt performance
256  template<typename value_t>
257  static typename converter<value_t>::bits_type & to_bits_ref(value_t& value){
258  //TODO: this #ifdef is temporary workaround, as union conversion seems to fail
259  //on suncc for 64 bit types for 32 bit target
260  #if !__SUNPRO_CC
261  return *(typename converter<value_t>::bits_type*)ptr_converter<value_t*>(&value).bits;
262  #else
263  return *(typename converter<value_t>::bits_type*)(&value);
264  #endif
265  }
266 
267 
268 public:
269  typedef T value_type;
270 
271 #if __TBB_ATOMIC_CTORS
272  atomic_impl() = default ;
273  constexpr atomic_impl(value_type value):my_storage(value){}
274 #endif
275  template<memory_semantics M>
276  value_type fetch_and_store( value_type value ) {
277  return to_value<value_type>(
278  internal::atomic_traits<sizeof(value_type),M>::fetch_and_store( &my_storage.my_value, to_bits(value) )
279  );
280  }
281 
282  value_type fetch_and_store( value_type value ) {
283  return fetch_and_store<full_fence>(value);
284  }
285 
286  template<memory_semantics M>
287  value_type compare_and_swap( value_type value, value_type comparand ) {
288  return to_value<value_type>(
289  internal::atomic_traits<sizeof(value_type),M>::compare_and_swap( &my_storage.my_value, to_bits(value), to_bits(comparand) )
290  );
291  }
292 
293  value_type compare_and_swap( value_type value, value_type comparand ) {
294  return compare_and_swap<full_fence>(value,comparand);
295  }
296 
297  operator value_type() const volatile { // volatile qualifier here for backwards compatibility
298  return to_value<value_type>(
299  __TBB_load_with_acquire( to_bits_ref(my_storage.my_value) )
300  );
301  }
302 
303  template<memory_semantics M>
304  value_type load () const {
305  return to_value<value_type>(
306  internal::atomic_load_store_traits<M>::load( to_bits_ref(my_storage.my_value) )
307  );
308  }
309 
310  value_type load () const {
311  return load<acquire>();
312  }
313 
314  template<memory_semantics M>
315  void store ( value_type value ) {
316  internal::atomic_load_store_traits<M>::store( to_bits_ref(my_storage.my_value), to_bits(value));
317  }
318 
319  void store ( value_type value ) {
320  store<release>( value );
321  }
322 
323 protected:
324  value_type store_with_release( value_type rhs ) {
325  //TODO: unify with store<release>
326  __TBB_store_with_release( to_bits_ref(my_storage.my_value), to_bits(rhs) );
327  return rhs;
328  }
329 };
330 
332 
335 template<typename I, typename D, typename StepType>
336 struct atomic_impl_with_arithmetic: atomic_impl<I> {
337 public:
338  typedef I value_type;
339 #if __TBB_ATOMIC_CTORS
340  atomic_impl_with_arithmetic() = default ;
341  constexpr atomic_impl_with_arithmetic(value_type value): atomic_impl<I>(value){}
342 #endif
343  template<memory_semantics M>
344  value_type fetch_and_add( D addend ) {
345  return value_type(internal::atomic_traits<sizeof(value_type),M>::fetch_and_add( &this->my_storage.my_value, addend*sizeof(StepType) ));
346  }
347 
348  value_type fetch_and_add( D addend ) {
349  return fetch_and_add<full_fence>(addend);
350  }
351 
352  template<memory_semantics M>
353  value_type fetch_and_increment() {
354  return fetch_and_add<M>(1);
355  }
356 
357  value_type fetch_and_increment() {
358  return fetch_and_add(1);
359  }
360 
361  template<memory_semantics M>
362  value_type fetch_and_decrement() {
363  return fetch_and_add<M>(__TBB_MINUS_ONE(D));
364  }
365 
366  value_type fetch_and_decrement() {
367  return fetch_and_add(__TBB_MINUS_ONE(D));
368  }
369 
370 public:
371  value_type operator+=( D value ) {
372  return fetch_and_add(value)+value;
373  }
374 
375  value_type operator-=( D value ) {
376  // Additive inverse of value computed using binary minus,
377  // instead of unary minus, for sake of avoiding compiler warnings.
378  return operator+=(D(0)-value);
379  }
380 
381  value_type operator++() {
382  return fetch_and_add(1)+1;
383  }
384 
385  value_type operator--() {
386  return fetch_and_add(__TBB_MINUS_ONE(D))-1;
387  }
388 
389  value_type operator++(int) {
390  return fetch_and_add(1);
391  }
392 
393  value_type operator--(int) {
394  return fetch_and_add(__TBB_MINUS_ONE(D));
395  }
396 };
397 
398 } /* Internal */
400 
402 
404 template<typename T>
405 struct atomic: internal::atomic_impl<T> {
406 #if __TBB_ATOMIC_CTORS
407  atomic() = default;
408  constexpr atomic(T arg): internal::atomic_impl<T>(arg) {}
409 #endif
410  T operator=( T rhs ) {
411  // "this" required here in strict ISO C++ because store_with_release is a dependent name
412  return this->store_with_release(rhs);
413  }
414  atomic<T>& operator=( const atomic<T>& rhs ) {this->store_with_release(rhs); return *this;}
415 };
416 
//! Declares a full atomic<T> specialization (with arithmetic) for integral type T.
#if __TBB_ATOMIC_CTORS
    #define __TBB_DECL_ATOMIC(T)                                                             \
        template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {       \
            atomic() = default;                                                              \
            constexpr atomic(T arg): internal::atomic_impl_with_arithmetic<T,T,char>(arg) {} \
                                                                                             \
            T operator=( T rhs ) {return store_with_release(rhs);}                           \
            atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
        };
#else
    #define __TBB_DECL_ATOMIC(T)                                                             \
        template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {       \
            T operator=( T rhs ) {return store_with_release(rhs);}                           \
            atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
        };
#endif
433 
434 #if __TBB_64BIT_ATOMICS
435 //TODO: consider adding non-default (and atomic) copy constructor for 32bit platform
436 __TBB_DECL_ATOMIC(__TBB_LONG_LONG)
437 __TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG)
438 #else
439 // test_atomic will verify that sizeof(long long)==8
440 #endif
441 __TBB_DECL_ATOMIC(long)
442 __TBB_DECL_ATOMIC(unsigned long)
443 
444 #if _MSC_VER && !_WIN64
445 #if __TBB_ATOMIC_CTORS
446 /* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option.
447  It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T)
448  with an operator=(U) that explicitly converts the U to a T. Types T and U should be
449  type synonyms on the platform. Type U should be the wider variant of T from the
450  perspective of /Wp64. */
451 #define __TBB_DECL_ATOMIC_ALT(T,U) \
452  template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \
453  atomic() = default ; \
454  constexpr atomic(T arg): internal::atomic_impl_with_arithmetic<T,T,char>(arg) {} \
455  T operator=( U rhs ) {return store_with_release(T(rhs));} \
456  atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
457  };
458 #else
459 #define __TBB_DECL_ATOMIC_ALT(T,U) \
460  template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \
461  T operator=( U rhs ) {return store_with_release(T(rhs));} \
462  atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
463  };
464 #endif
465 __TBB_DECL_ATOMIC_ALT(unsigned,size_t)
466 __TBB_DECL_ATOMIC_ALT(int,ptrdiff_t)
467 #else
468 __TBB_DECL_ATOMIC(unsigned)
469 __TBB_DECL_ATOMIC(int)
470 #endif /* _MSC_VER && !_WIN64 */
471 
472 __TBB_DECL_ATOMIC(unsigned short)
473 __TBB_DECL_ATOMIC(short)
474 __TBB_DECL_ATOMIC(char)
475 __TBB_DECL_ATOMIC(signed char)
476 __TBB_DECL_ATOMIC(unsigned char)
477 
478 #if !_MSC_VER || defined(_NATIVE_WCHAR_T_DEFINED)
479 __TBB_DECL_ATOMIC(wchar_t)
480 #endif /* _MSC_VER||!defined(_NATIVE_WCHAR_T_DEFINED) */
481 
483 template<typename T> struct atomic<T*>: internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T> {
484 #if __TBB_ATOMIC_CTORS
485  atomic() = default ;
486  constexpr atomic(T* arg): internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T>(arg) {}
487 #endif
488  T* operator=( T* rhs ) {
489  // "this" required here in strict ISO C++ because store_with_release is a dependent name
490  return this->store_with_release(rhs);
491  }
492  atomic<T*>& operator=( const atomic<T*>& rhs ) {
493  this->store_with_release(rhs); return *this;
494  }
495  T* operator->() const {
496  return (*this);
497  }
498 };
499 
501 template<> struct atomic<void*>: internal::atomic_impl<void*> {
502 #if __TBB_ATOMIC_CTORS
503  atomic() = default ;
504  constexpr atomic(void* arg): internal::atomic_impl<void*>(arg) {}
505 #endif
506  void* operator=( void* rhs ) {
507  // "this" required here in strict ISO C++ because store_with_release is a dependent name
508  return this->store_with_release(rhs);
509  }
510  atomic<void*>& operator=( const atomic<void*>& rhs ) {
511  this->store_with_release(rhs); return *this;
512  }
513 };
514 
515 // Helpers to workaround ugly syntax of calling template member function of a
516 // template class with template argument dependent on template parameters.
517 
518 template <memory_semantics M, typename T>
519 T load ( const atomic<T>& a ) { return a.template load<M>(); }
520 
521 template <memory_semantics M, typename T>
522 void store ( atomic<T>& a, T value ) { a.template store<M>(value); }
523 
524 namespace interface6{
526 template<typename T>
527 atomic<T> make_atomic(T t) {
528  atomic<T> a;
529  store<relaxed>(a,t);
530  return a;
531 }
532 }
533 using interface6::make_atomic;
534 
535 namespace internal {
536 template<memory_semantics M, typename T >
537 void swap(atomic<T> & lhs, atomic<T> & rhs){
538  T tmp = load<M>(lhs);
539  store<M>(lhs,load<M>(rhs));
540  store<M>(rhs,tmp);
541 }
542 
543 // only to aid in the gradual conversion of ordinary variables to proper atomics
544 template<typename T>
545 inline atomic<T>& as_atomic( T& t ) {
546  return (atomic<T>&)t;
547 }
548 } // namespace tbb::internal
549 
550 } // namespace tbb
551 
552 #if _MSC_VER && !__INTEL_COMPILER
553  #pragma warning (pop)
554 #endif // warnings 4244, 4267 are back
555 
556 #endif /* __TBB_atomic_H */
Acquire.
Definition: atomic.h:47
Specialization for atomic<T*> with arithmetic and operator->.
Definition: atomic.h:483
No ordering.
Definition: atomic.h:51
memory_semantics
Specifies memory semantics.
Definition: atomic.h:43
Sequential consistency.
Definition: atomic.h:45
Primary template for atomic.
Definition: atomic.h:405
Definition: _flow_graph_async_msg_impl.h:32
Release.
Definition: atomic.h:49
The namespace tbb contains all components of the library.
Definition: parallel_for.h:44
Specialization for atomic<void*>, for sake of not allowing arithmetic or operator->.
Definition: atomic.h:501