BRE12
enumerable_thread_specific.h
1 /*
2  Copyright 2005-2016 Intel Corporation. All Rights Reserved.
3 
4  This file is part of Threading Building Blocks. Threading Building Blocks is free software;
5  you can redistribute it and/or modify it under the terms of the GNU General Public License
6  version 2 as published by the Free Software Foundation. Threading Building Blocks is
7  distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
8  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
9  See the GNU General Public License for more details. You should have received a copy of
10  the GNU General Public License along with Threading Building Blocks; if not, write to the
11  Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
12 
13  As a special exception, you may use this file as part of a free software library without
14  restriction. Specifically, if other files instantiate templates or use macros or inline
15  functions from this file, or you compile this file and link it with other files to produce
16  an executable, this file does not by itself cause the resulting executable to be covered
17  by the GNU General Public License. This exception does not however invalidate any other
18  reasons why the executable file might be covered by the GNU General Public License.
19 */
20 
21 #ifndef __TBB_enumerable_thread_specific_H
22 #define __TBB_enumerable_thread_specific_H
23 
24 #include "atomic.h"
25 #include "concurrent_vector.h"
26 #include "tbb_thread.h"
27 #include "tbb_allocator.h"
28 #include "cache_aligned_allocator.h"
29 #include "aligned_space.h"
30 #include "internal/_template_helpers.h"
31 #include "internal/_tbb_hash_compare_impl.h"
32 #include "tbb_profiling.h"
33 #include <string.h> // for memcpy
34 
35 #if _WIN32||_WIN64
36 #include "machine/windows_api.h"
37 #else
38 #include <pthread.h>
39 #endif
40 
41 #define __TBB_ETS_USE_CPP11 \
42  (__TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT \
43  && __TBB_CPP11_DECLTYPE_PRESENT && __TBB_CPP11_LAMBDAS_PRESENT)
44 
45 namespace tbb {
46 
//! Policy: whether each container instance reserves a native TLS key for fast lookup.
enum ets_key_usage_type { ets_key_per_instance, ets_no_key };
49 
50 namespace interface6 {
51 
52  // Forward declaration to use in internal classes
template <typename T, typename Allocator, ets_key_usage_type ETS_key_type>
class enumerable_thread_specific;
57  namespace internal {
58 
59  using namespace tbb::internal;
60 
//! Base for thread-local storage: a lock-free, grow-only hash table mapping
//! thread ids to per-thread element pointers. Segments form a linked list,
//! newest (largest) first; lookups probe newest-first.
template<ets_key_usage_type ETS_key_type>
class ets_base: tbb::internal::no_copy {
protected:
    typedef tbb_thread::id key_type;
#if __TBB_PROTECTED_NESTED_CLASS_BROKEN
public:
#endif
    struct slot;

    //! Header of one hash-table segment; slots are stored immediately after it.
    struct array {
        array* next;     // next (older, smaller) segment in the list
        size_t lg_size;  // log2 of the number of slots in this segment
        // Slot k lives directly past this header in the same allocation.
        slot& at( size_t k ) {
            return ((slot*)(void*)(this+1))[k];
        }
        size_t size() const {return size_t(1)<<lg_size;}
        size_t mask() const {return size()-1;}
        // Start probing from the TOP lg_size bits of the hash, so segments of
        // different sizes derive their probe start consistently.
        size_t start( size_t h ) const {
            return h>>(8*sizeof(size_t)-lg_size);
        }
    };
    //! One hash-table entry: a thread id plus that thread's element pointer.
    struct slot {
        key_type key;    // default-constructed key_type() means "empty"
        void* ptr;
        bool empty() const {return key == key_type();}
        bool match( key_type k ) const {return key == k;}
        // Atomically claim an empty slot for key k; true on success.
        bool claim( key_type k ) {
            // TODO: maybe claim ptr, because key_type is not guaranteed to fit into word size
            return atomic_compare_and_swap(key, k, key_type()) == key_type();
        }
    };
#if __TBB_PROTECTED_NESTED_CLASS_BROKEN
protected:
#endif

    atomic<array*> my_root;   // head of the segment list (newest/largest first)
    atomic<size_t> my_count;  // number of elements stored in the table
    virtual void* create_local() = 0;
    virtual void* create_array(size_t _size) = 0;          // _size in bytes
    virtual void free_array(void* ptr, size_t _size) = 0;  // _size in bytes
    //! Allocate a segment with 2^lg_size zero-initialized slots.
    array* allocate( size_t lg_size ) {
        size_t n = size_t(1)<<lg_size;
        array* a = static_cast<array*>(create_array( sizeof(array)+n*sizeof(slot) ));
        a->lg_size = lg_size;
        std::memset( a+1, 0, n*sizeof(slot) );
        return a;
    }
    void free(array* a) {
        size_t n = size_t(1)<<(a->lg_size);
        free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) );
    }

    ets_base() {my_root=NULL; my_count=0;}
    virtual ~ets_base(); // g++ complains if this is not virtual
    void* table_lookup( bool& exists );
    void table_clear();
    // The following functions are not used in concurrent context,
    // so we don't need synchronization and ITT annotations there.
    //! Rebuild this (empty) table from 'other', inserting a transformed copy of
    //! each element obtained via 'add_element'.
    void table_elementwise_copy( const ets_base& other,
                                 void*(*add_element)(ets_base&, void*) ) {
        __TBB_ASSERT(!my_root,NULL);
        __TBB_ASSERT(!my_count,NULL);
        if( !other.my_root ) return;
        // One segment as large as other's largest suffices to hold everything.
        array* root = my_root = allocate(other.my_root->lg_size);
        root->next = NULL;
        my_count = other.my_count;
        size_t mask = root->mask();
        for( array* r=other.my_root; r; r=r->next ) {
            for( size_t i=0; i<r->size(); ++i ) {
                slot& s1 = r->at(i);
                if( !s1.empty() ) {
                    // Linear-probe for the key; stop early if it was already
                    // copied from a newer segment of 'other'.
                    for( size_t j = root->start(tbb::tbb_hash<key_type>()(s1.key)); ; j=(j+1)&mask ) {
                        slot& s2 = root->at(j);
                        if( s2.empty() ) {
                            s2.ptr = add_element(*this, s1.ptr);
                            s2.key = s1.key;
                            break;
                        }
                        else if( s2.match(s1.key) )
                            break;
                    }
                }
            }
        }
    }
    void table_swap( ets_base& other ) {
        __TBB_ASSERT(this!=&other, "Don't swap an instance with itself");
        tbb::internal::swap<relaxed>(my_root, other.my_root);
        tbb::internal::swap<relaxed>(my_count, other.my_count);
    }
};
155 
//! Out-of-line destructor; the table itself must already have been cleared
//! (the derived class owns free_array, so it must call table_clear first).
template<ets_key_usage_type ETS_key_type>
ets_base<ETS_key_type>::~ets_base() {
    __TBB_ASSERT(!my_root, NULL);
}
160 
//! Free every segment and reset the element count.
//! Not safe to run concurrently with table_lookup.
template<ets_key_usage_type ETS_key_type>
void ets_base<ETS_key_type>::table_clear() {
    while( array* r = my_root ) {
        my_root = r->next;
        free(r);
    }
    my_count = 0;
}
169 
//! Find (or lazily create) the calling thread's element.
//! Lock-free: readers probe segments newest-first; on a miss the element is
//! created, a bigger root segment may be published via CAS, and the key is
//! claimed with an atomic slot CAS.
template<ets_key_usage_type ETS_key_type>
void* ets_base<ETS_key_type>::table_lookup( bool& exists ) {
    const key_type k = tbb::this_tbb_thread::get_id();

    __TBB_ASSERT(k != key_type(),NULL);
    void* found;
    size_t h = tbb::tbb_hash<key_type>()(k);
    // Search every segment, newest first.
    for( array* r=my_root; r; r=r->next ) {
        call_itt_notify(acquired,r);
        size_t mask=r->mask();
        for(size_t i = r->start(h); ;i=(i+1)&mask) {
            slot& s = r->at(i);
            if( s.empty() ) break;  // open addressing: an empty slot ends this probe chain
            if( s.match(k) ) {
                if( r==my_root ) {
                    // Success at top level
                    exists = true;
                    return s.ptr;
                } else {
                    // Success at some other level.  Need to insert at top level.
                    exists = true;
                    found = s.ptr;
                    goto insert;
                }
            }
        }
    }
    // Key does not yet exist. The density of slots in the table does not exceed 0.5,
    // for if this will occur a new table is allocated with double the current table
    // size, which is swapped in as the new root table. So an empty slot is guaranteed.
    exists = false;
    found = create_local();
    {
        size_t c = ++my_count;
        array* r = my_root;
        call_itt_notify(acquired,r);
        if( !r || c>r->size()/2 ) {
            // Need a (larger) root segment; find the smallest lg_size keeping
            // density at or below 0.5.
            size_t s = r ? r->lg_size : 2;
            while( c>size_t(1)<<(s-1) ) ++s;
            array* a = allocate(s);
            for(;;) {
                a->next = r;
                call_itt_notify(releasing,a);
                // Publish the new segment unless another thread raced us.
                array* new_r = my_root.compare_and_swap(a,r);
                if( new_r==r ) break;
                call_itt_notify(acquired, new_r);
                if( new_r->lg_size>=s ) {
                    // Another thread inserted an equal or bigger array, so our array is superfluous.
                    free(a);
                    break;
                }
                r = new_r;
            }
        }
    }
    insert:
    // Whether a slot has been found in an older table, or if it has been inserted at this level,
    // it has already been accounted for in the total. Guaranteed to be room for it, and it is
    // not present, so search for empty slot and use it.
    array* ir = my_root;
    call_itt_notify(acquired, ir);
    size_t mask = ir->mask();
    for(size_t i = ir->start(h);;i=(i+1)&mask) {
        slot& s = ir->at(i);
        if( s.empty() ) {
            if( s.claim(k) ) {
                s.ptr = found;
                return found;
            }
        }
    }
}
242 
//! Specialization that caches the lookup result in a native TLS slot,
//! making repeated lookups from the same thread O(1) with no hashing.
template <>
class ets_base<ets_key_per_instance>: protected ets_base<ets_no_key> {
    typedef ets_base<ets_no_key> super;
#if _WIN32||_WIN64
#if __TBB_WIN8UI_SUPPORT
    // Windows Store apps: fiber-local storage API.
    typedef DWORD tls_key_t;
    void create_key() { my_key = FlsAlloc(NULL); }
    void destroy_key() { FlsFree(my_key); }
    void set_tls(void * value) { FlsSetValue(my_key, (LPVOID)value); }
    void* get_tls() { return (void *)FlsGetValue(my_key); }
#else
    typedef DWORD tls_key_t;
    void create_key() { my_key = TlsAlloc(); }
    void destroy_key() { TlsFree(my_key); }
    void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); }
    void* get_tls() { return (void *)TlsGetValue(my_key); }
#endif
#else
    typedef pthread_key_t tls_key_t;
    void create_key() { pthread_key_create(&my_key, NULL); }
    void destroy_key() { pthread_key_delete(my_key); }
    // NOTE(review): the pthread variants are const while the Windows ones are
    // not -- harmless (all callers here are non-const) but inconsistent.
    void set_tls( void * value ) const { pthread_setspecific(my_key, value); }
    void* get_tls() const { return pthread_getspecific(my_key); }
#endif
    tls_key_t my_key;  // one native TLS key per container instance
    virtual void* create_local() = 0;
    virtual void* create_array(size_t _size) = 0;          // _size in bytes
    virtual void free_array(void* ptr, size_t _size) = 0;  // size in bytes
protected:
    ets_base() {create_key();}
    ~ets_base() {destroy_key();}
    //! Fast path: consult TLS; on a miss, do the full hash-table lookup and
    //! cache the result in TLS.
    void* table_lookup( bool& exists ) {
        void* found = get_tls();
        if( found ) {
            exists=true;
        } else {
            found = super::table_lookup(exists);
            set_tls(found);
        }
        return found;
    }
    //! Recreating the key invalidates every thread's cached TLS value at once.
    void table_clear() {
        destroy_key();
        create_key();
        super::table_clear();
    }
    void table_swap( ets_base& other ) {
        using std::swap;
        __TBB_ASSERT(this!=&other, "Don't swap an instance with itself");
        swap(my_key, other.my_key);
        super::table_swap(other);
    }
};
297 
//! Random-access iterator over the elements of an enumerable_thread_specific.
//! Container is expected to be an indexable sequence whose elements expose
//! value(), returning Value*.
template< typename Container, typename Value >
class enumerable_thread_specific_iterator
#if defined(_WIN64) && defined(_MSC_VER)
    // Ensure that Microsoft's internal template function _Val_type works correctly.
    : public std::iterator<std::random_access_iterator_tag,Value>
#endif /* defined(_WIN64) && defined(_MSC_VER) */
{
    Container *my_container;                 // underlying container (never owned)
    typename Container::size_type my_index;  // current position
    mutable Value *my_value;                 // cached element address; NULL when stale

    template<typename C, typename T>
    friend enumerable_thread_specific_iterator<C,T>
    operator+( ptrdiff_t offset, const enumerable_thread_specific_iterator<C,T>& v );

    template<typename C, typename T, typename U>
    friend bool operator==( const enumerable_thread_specific_iterator<C,T>& i,
                            const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend bool operator<( const enumerable_thread_specific_iterator<C,T>& i,
                           const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend ptrdiff_t operator-( const enumerable_thread_specific_iterator<C,T>& i,
                                const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename U>
    friend class enumerable_thread_specific_iterator;

public:

    enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) :
        my_container(&const_cast<Container &>(container)), my_index(index), my_value(NULL) {}

    //! Default constructor: a singular iterator.
    enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {}

    //! Converting constructor (e.g. iterator -> const_iterator).
    template<typename U>
    enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator<Container, U>& other ) :
        my_container( other.my_container ), my_index( other.my_index), my_value( const_cast<Value *>(other.my_value) ) {}

    enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const {
        return enumerable_thread_specific_iterator(*my_container, my_index + offset);
    }

    enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) {
        my_index += offset;
        my_value = NULL;  // invalidate the cached element address
        return *this;
    }

    enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const {
        return enumerable_thread_specific_iterator( *my_container, my_index-offset );
    }

    enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) {
        my_index -= offset;
        my_value = NULL;
        return *this;
    }

    Value& operator*() const {
        Value* value = my_value;
        if( !value ) {
            value = my_value = (*my_container)[my_index].value();
        }
        __TBB_ASSERT( value==(*my_container)[my_index].value(), "corrupt cache" );
        return *value;
    }

    //! Indexed access relative to the current position.
    // Fixed: must CALL value() and dereference the returned pointer; the
    // previous code named the member function (.value) without invoking it,
    // which fails to compile when instantiated.
    Value& operator[]( ptrdiff_t k ) const {
        return *(*my_container)[my_index + k].value();
    }

    Value* operator->() const {return &operator*();}

    enumerable_thread_specific_iterator& operator++() {
        ++my_index;
        my_value = NULL;
        return *this;
    }

    enumerable_thread_specific_iterator& operator--() {
        --my_index;
        my_value = NULL;
        return *this;
    }

    //! Post increment
    enumerable_thread_specific_iterator operator++(int) {
        enumerable_thread_specific_iterator result = *this;
        ++my_index;
        my_value = NULL;
        return result;
    }

    //! Post decrement
    enumerable_thread_specific_iterator operator--(int) {
        enumerable_thread_specific_iterator result = *this;
        --my_index;
        my_value = NULL;
        return result;
    }

    // STL support
    typedef ptrdiff_t difference_type;
    typedef Value value_type;
    typedef Value* pointer;
    typedef Value& reference;
    typedef std::random_access_iterator_tag iterator_category;
};
413 
414  template<typename Container, typename T>
415  enumerable_thread_specific_iterator<Container,T>
416  operator+( ptrdiff_t offset, const enumerable_thread_specific_iterator<Container,T>& v ) {
417  return enumerable_thread_specific_iterator<Container,T>( v.my_container, v.my_index + offset );
418  }
419 
420  template<typename Container, typename T, typename U>
421  bool operator==( const enumerable_thread_specific_iterator<Container,T>& i,
422  const enumerable_thread_specific_iterator<Container,U>& j ) {
423  return i.my_index==j.my_index && i.my_container == j.my_container;
424  }
425 
//! Inequality, expressed via operator== (this overload is not a friend).
template<typename Container, typename T, typename U>
bool operator!=( const enumerable_thread_specific_iterator<Container,T>& i,
                 const enumerable_thread_specific_iterator<Container,U>& j ) {
    return !(i==j);
}
431 
432  template<typename Container, typename T, typename U>
433  bool operator<( const enumerable_thread_specific_iterator<Container,T>& i,
434  const enumerable_thread_specific_iterator<Container,U>& j ) {
435  return i.my_index<j.my_index;
436  }
437 
//! Greater-than, expressed via the friend operator< (this overload is not a friend).
template<typename Container, typename T, typename U>
bool operator>( const enumerable_thread_specific_iterator<Container,T>& i,
                const enumerable_thread_specific_iterator<Container,U>& j ) {
    return j<i;
}
443 
//! Greater-or-equal, expressed via operator<.
template<typename Container, typename T, typename U>
bool operator>=( const enumerable_thread_specific_iterator<Container,T>& i,
                 const enumerable_thread_specific_iterator<Container,U>& j ) {
    return !(i<j);
}
449 
//! Less-or-equal, expressed via operator<.
template<typename Container, typename T, typename U>
bool operator<=( const enumerable_thread_specific_iterator<Container,T>& i,
                 const enumerable_thread_specific_iterator<Container,U>& j ) {
    return !(j<i);
}
455 
456  template<typename Container, typename T, typename U>
457  ptrdiff_t operator-( const enumerable_thread_specific_iterator<Container,T>& i,
458  const enumerable_thread_specific_iterator<Container,U>& j ) {
459  return i.my_index-j.my_index;
460  }
461 
//! Input iterator that walks a container-of-containers as one flat sequence,
//! skipping empty inner containers.
template<typename SegmentedContainer, typename Value >
class segmented_iterator
#if defined(_WIN64) && defined(_MSC_VER)
    : public std::iterator<std::input_iterator_tag, Value>
#endif
{
    template<typename C, typename T, typename U>
    friend bool operator==(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);

    template<typename C, typename T, typename U>
    friend bool operator!=(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);

    template<typename C, typename U>
    friend class segmented_iterator;

public:

    //! Unattached ("singular") iterator.
    segmented_iterator() {my_segcont = NULL;}

    //! Attach to a container, positioned at its outer end.
    segmented_iterator( const SegmentedContainer& _segmented_container ) :
        my_segcont(const_cast<SegmentedContainer*>(&_segmented_container)),
        outer_iter(my_segcont->end()) { }

    ~segmented_iterator() {}

    typedef typename SegmentedContainer::iterator outer_iterator;
    typedef typename SegmentedContainer::value_type InnerContainer;
    typedef typename InnerContainer::iterator inner_iterator;

    // STL support
    typedef ptrdiff_t difference_type;
    typedef Value value_type;
    typedef typename SegmentedContainer::size_type size_type;
    typedef Value* pointer;
    typedef Value& reference;
    typedef std::input_iterator_tag iterator_category;

    // Copy Constructor
    template<typename U>
    segmented_iterator(const segmented_iterator<SegmentedContainer, U>& other) :
        my_segcont(other.my_segcont),
        outer_iter(other.outer_iter),
        // can we assign a default-constructed iterator to inner if we're at the end?
        inner_iter(other.inner_iter)
    {}

    // assignment
    template<typename U>
    segmented_iterator& operator=( const segmented_iterator<SegmentedContainer, U>& other) {
        if(this != &other) {
            my_segcont = other.my_segcont;
            outer_iter = other.outer_iter;
            // inner_iter is only meaningful when not at the outer end
            if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter;
        }
        return *this;
    }

    // allow assignment of outer iterator to segmented iterator. Once it is
    // assigned, move forward until a non-empty inner container is found or
    // the end of the outer container is reached.
    segmented_iterator& operator=(const outer_iterator& new_outer_iter) {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        // check that this iterator points to something inside the segmented container
        for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) {
            if( !outer_iter->empty() ) {
                inner_iter = outer_iter->begin();
                break;
            }
        }
        return *this;
    }

    // pre-increment
    segmented_iterator& operator++() {
        advance_me();
        return *this;
    }

    // post-increment
    segmented_iterator operator++(int) {
        segmented_iterator tmp = *this;
        operator++();
        return tmp;
    }

    //! Compare against a raw outer iterator: equal only at the same outer
    //! position with inner_iter at that segment's begin (or at the outer end).
    bool operator==(const outer_iterator& other_outer) const {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        return (outer_iter == other_outer &&
                (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin()));
    }

    bool operator!=(const outer_iterator& other_outer) const {
        return !operator==(other_outer);

    }

    // (i)* RHS
    reference operator*() const {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing a pointer at end of container");
        __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // should never happen
        return *inner_iter;
    }

    // i->
    pointer operator->() const { return &operator*();}

private:
    SegmentedContainer* my_segcont;  // NULL when unattached
    outer_iterator outer_iter;       // current segment
    inner_iterator inner_iter;       // position within the current segment

    //! Step to the next element, hopping over empty segments.
    void advance_me() {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        __TBB_ASSERT(outer_iter != my_segcont->end(), NULL); // not true if there are no inner containers
        __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // not true if the inner containers are all empty.
        ++inner_iter;
        while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) {
            inner_iter = outer_iter->begin();
        }
    }
}; // segmented_iterator
584 
585  template<typename SegmentedContainer, typename T, typename U>
586  bool operator==( const segmented_iterator<SegmentedContainer,T>& i,
587  const segmented_iterator<SegmentedContainer,U>& j ) {
588  if(i.my_segcont != j.my_segcont) return false;
589  if(i.my_segcont == NULL) return true;
590  if(i.outer_iter != j.outer_iter) return false;
591  if(i.outer_iter == i.my_segcont->end()) return true;
592  return i.inner_iter == j.inner_iter;
593  }
594 
595  // !=
// !=
//! Inequality, expressed via operator==.
template<typename SegmentedContainer, typename T, typename U>
bool operator!=( const segmented_iterator<SegmentedContainer,T>& i,
                 const segmented_iterator<SegmentedContainer,U>& j ) {
    return !(i==j);
}
601 
602  template<typename T>
603  struct construct_by_default: tbb::internal::no_assign {
604  void construct(void*where) {new(where) T();} // C++ note: the () in T() ensure zero initialization.
605  construct_by_default( int ) {}
606  };
607 
608  template<typename T>
609  struct construct_by_exemplar: tbb::internal::no_assign {
610  const T exemplar;
611  void construct(void*where) {new(where) T(exemplar);}
612  construct_by_exemplar( const T& t ) : exemplar(t) {}
613 #if __TBB_ETS_USE_CPP11
614  construct_by_exemplar( T&& t ) : exemplar(std::move(t)) {}
615 #endif
616  };
617 
618  template<typename T, typename Finit>
619  struct construct_by_finit: tbb::internal::no_assign {
620  Finit f;
621  void construct(void* where) {new(where) T(f());}
622  construct_by_finit( const Finit& f_ ) : f(f_) {}
623 #if __TBB_ETS_USE_CPP11
624  construct_by_finit( Finit&& f_ ) : f(std::move(f_)) {}
625 #endif
626  };
627 
#if __TBB_ETS_USE_CPP11
//! Initialization policy: constructs each new T from a stored argument pack.
template<typename T, typename... P>
struct construct_by_args: tbb::internal::no_assign {
    internal::stored_pack<P...> pack;
    void construct(void* where) {
        // Unpack the stored arguments and pass them to T's constructor.
        internal::call( [where](const typename strip<P>::type&... args ){
            new(where) T(args...);
        }, pack );
    }
    construct_by_args( P&& ... args ) : pack(std::forward<P>(args)...) {}
};
#endif
640 
// storage for initialization function pointer
// TODO: consider removing the template parameter T here and in callback_leaf
//! Type-erased factory used to build each thread's element.
//! Instances are created by callback_leaf::make and released via destroy();
//! do not reorder the virtuals -- other translation units compiled against
//! this header rely on the vtable layout.
template<typename T>
class callback_base {
public:
    // Clone *this
    virtual callback_base* clone() const = 0;
    // Destruct and free *this (instances are allocator-managed, never deleted)
    virtual void destroy() = 0;
    // Need virtual destructor to satisfy GCC compiler warning
    virtual ~callback_base() { }
    // Construct T at where
    virtual void construct(void* where) = 0;
};
655 
//! Concrete callback: pairs the type-erased interface with one of the
//! construct_by_* policies (inherited privately as Constructor).
//! Constructors are private; instances are made only through make().
template <typename T, typename Constructor>
class callback_leaf: public callback_base<T>, Constructor {
#if __TBB_ETS_USE_CPP11
    template<typename... P> callback_leaf( P&& ... params ) : Constructor(std::forward<P>(params)...) {}
#else
    template<typename X> callback_leaf( const X& x ) : Constructor(x) {}
#endif
    // TODO: make the construction/destruction consistent (use allocator.construct/destroy)
    typedef typename tbb::tbb_allocator<callback_leaf> my_allocator_type;

    /*override*/ callback_base<T>* clone() const {
        return make(*this);  // copy-construct a new leaf via the factory
    }

    /*override*/ void destroy() {
        my_allocator_type().destroy(this);       // run the destructor...
        my_allocator_type().deallocate(this,1);  // ...then release the storage
    }

    /*override*/ void construct(void* where) {
        Constructor::construct(where);  // delegate to the policy
    }
public:
    //! Factory: allocates storage and placement-constructs a leaf.
#if __TBB_ETS_USE_CPP11
    template<typename... P>
    static callback_base<T>* make( P&& ... params ) {
        void* where = my_allocator_type().allocate(1);
        return new(where) callback_leaf( std::forward<P>(params)... );
    }
#else
    template<typename X>
    static callback_base<T>* make( const X& x ) {
        void* where = my_allocator_type().allocate(1);
        return new(where) callback_leaf(x);
    }
#endif
};
693 
695 
703  // TODO: make a constructor for ets_element that takes a callback_base. make is_built private
704  template<typename U>
705  struct ets_element {
706  tbb::aligned_space<U> my_space;
707  bool is_built;
708  ets_element() { is_built = false; } // not currently-built
709  U* value() { return my_space.begin(); }
710  U* value_committed() { is_built = true; return my_space.begin(); }
711  ~ets_element() {
712  if(is_built) {
713  my_space.begin()->~U();
714  is_built = false;
715  }
716  }
717  };
718 
// A predicate that can be used for a compile-time compatibility check of ETS instances
// Ideally, it should have been declared inside the ETS class, but unfortunately
// in that case VS2013 does not enable the variadic constructor.
// value is true only when ETS is an enumerable_thread_specific whose element type equals T.
template<typename T, typename ETS> struct is_compatible_ets { static const bool value = false; };
template<typename T, typename U, typename A, ets_key_usage_type C>
struct is_compatible_ets< T, enumerable_thread_specific<U,A,C> > { static const bool value = internal::is_same_type<T,U>::value; };
725 
#if __TBB_ETS_USE_CPP11
// A predicate that checks whether, for a variable 'foo' of type T, foo() is a valid expression
template <typename T>
class is_callable_no_args {
private:
    typedef char yes[1];
    typedef char no [2];

    // First overload participates only when U()() is well-formed
    // (SFINAE on the decltype of a zero-argument call).
    template<typename U> static yes& decide( decltype(declval<U>()())* );
    template<typename U> static no& decide(...);
public:
    static const bool value = (sizeof(decide<T>(NULL)) == sizeof(yes));
};
#endif
740 
741  } // namespace internal
743 
745 
764  template <typename T,
765  typename Allocator=cache_aligned_allocator<T>,
766  ets_key_usage_type ETS_key_type=ets_no_key >
767  class enumerable_thread_specific: internal::ets_base<ETS_key_type> {
768 
769  template<typename U, typename A, ets_key_usage_type C> friend class enumerable_thread_specific;
770 
771  typedef internal::padded< internal::ets_element<T> > padded_element;
772 
    //! A blocked_range over the flat element sequence, usable with parallel algorithms.
    template<typename I>
    class generic_range_type: public blocked_range<I> {
    public:
        typedef T value_type;
        typedef T& reference;
        typedef const T& const_reference;
        typedef I iterator;
        typedef ptrdiff_t difference_type;
        generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {}
        // Conversion between const and non-const range flavors.
        template<typename U>
        generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {}
        // Splitting constructor used by the parallel scheduler.
        generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
    };
787 
788  typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type;
    typedef tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type;
791  internal::callback_base<T> *my_construct_callback;
792 
793  internal_collection_type my_locals;
794 
    // TODO: consider unifying the callback mechanism for all create_local* methods below
    //   (likely non-compatible and requires interface version increase)
    //! Called by the base table on first access from a new thread: grows
    //! my_locals by one element and builds it via the stored callback.
    /*override*/ void* create_local() {
        padded_element& lref = *my_locals.grow_by(1);
        my_construct_callback->construct(lref.value());
        return lref.value_committed();  // marks the element as constructed
    }
802 
    //! Element hook passed to table_elementwise_copy(): copy-constructs *p
    //! into a fresh slot of the destination ETS.
    static void* create_local_by_copy( internal::ets_base<ets_no_key>& base, void* p ) {
        enumerable_thread_specific& ets = static_cast<enumerable_thread_specific&>(base);
        padded_element& lref = *ets.my_locals.grow_by(1);
        new(lref.value()) T(*static_cast<T*>(p));
        return lref.value_committed();
    }
809 
#if __TBB_ETS_USE_CPP11
    //! Element hook passed to table_elementwise_copy(): move-constructs *p
    //! into a fresh slot of the destination ETS.
    static void* create_local_by_move( internal::ets_base<ets_no_key>& base, void* p ) {
        enumerable_thread_specific& ets = static_cast<enumerable_thread_specific&>(base);
        padded_element& lref = *ets.my_locals.grow_by(1);
        new(lref.value()) T(std::move(*static_cast<T*>(p)));
        return lref.value_committed();
    }
#endif
818 
819  typedef typename Allocator::template rebind< uintptr_t >::other array_allocator_type;
820 
821  // _size is in bytes
822  /*override*/ void* create_array(size_t _size) {
823  size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
824  return array_allocator_type().allocate(nelements);
825  }
826 
827  /*override*/ void free_array( void* _ptr, size_t _size) {
828  size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
829  array_allocator_type().deallocate( reinterpret_cast<uintptr_t *>(_ptr),nelements);
830  }
831 
832  public:
833 
835  typedef Allocator allocator_type;
836  typedef T value_type;
837  typedef T& reference;
838  typedef const T& const_reference;
839  typedef T* pointer;
840  typedef const T* const_pointer;
841  typedef typename internal_collection_type::size_type size_type;
842  typedef typename internal_collection_type::difference_type difference_type;
843 
844  // Iterator types
845  typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator;
846  typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator;
847 
848  // Parallel range types
849  typedef generic_range_type< iterator > range_type;
850  typedef generic_range_type< const_iterator > const_range_type;
851 
    //! Default constructor: each thread-local element will be value-initialized.
    enumerable_thread_specific() : my_construct_callback(
        internal::callback_leaf<T,internal::construct_by_default<T> >::make(/*dummy argument*/0)
    ){}
856 
    //! Constructor taking a factory functor, invoked to build each thread's element.
    // NOTE(review): an enable_if clause of this template parameter list appears
    // to have been lost in extraction (the C++11 branch should constrain Finit
    // to be callable with no arguments) -- confirm against the original header.
    template <typename Finit
#if __TBB_ETS_USE_CPP11
#endif
    >
    enumerable_thread_specific( Finit finit ) : my_construct_callback(
        internal::callback_leaf<T,internal::construct_by_finit<T,Finit> >::make( tbb::internal::move(finit) )
    ){}
866 
    //! Constructor taking an exemplar; each element is copy-constructed from it.
    enumerable_thread_specific( const T& exemplar ) : my_construct_callback(
        internal::callback_leaf<T,internal::construct_by_exemplar<T> >::make( exemplar )
    ){}
871 
#if __TBB_ETS_USE_CPP11
    //! Constructor taking an rvalue exemplar, moved into the stored prototype.
    enumerable_thread_specific( T&& exemplar ) : my_construct_callback(
        internal::callback_leaf<T,internal::construct_by_exemplar<T> >::make( std::move(exemplar) )
    ){}

    //! Variadic constructor: the arguments are captured and forwarded to T's
    //! constructor for every new thread-local element.
    // NOTE(review): parts of the enable_if guard below appear to have been
    // lost in extraction (the surviving clause excludes P1 being a compatible
    // ETS) -- confirm against the original header before compiling.
    template <typename P1, typename... P,
        && !internal::is_compatible_ets<T, typename internal::strip<P1>::type>::value
    >::type>
    enumerable_thread_specific( P1&& arg1, P&& ... args ) : my_construct_callback(
        internal::callback_leaf<T,internal::construct_by_args<T,P1,P...> >::make( std::forward<P1>(arg1), std::forward<P>(args)... )
    ){}
#endif
887 
    // NOTE(review): the destructor's opening line (`~enumerable_thread_specific() {`)
    // appears to have been lost in extraction; the statements below are its body.
    // The callback may be NULL after a move (see internal_move).
    if(my_construct_callback) my_construct_callback->destroy();
    // Deallocate the hash table before overridden free_array() becomes inaccessible
    this->internal::ets_base<ets_no_key>::table_clear();
    }
894 
    //! Returns reference to the calling thread's element, creating it on first access.
    reference local() {
        bool exists;
        return local(exists);
    }
900 
902  reference local(bool& exists) {
903  void* ptr = this->table_lookup(exists);
904  return *(T*)ptr;
905  }
906 
    //! Number of thread-local elements created so far.
    size_type size() const { return my_locals.size(); }
909 
    //! True if no thread has created its local element yet.
    bool empty() const { return my_locals.empty(); }
912 
    //! Iterator to the first thread-local element.
    iterator begin() { return iterator( my_locals, 0 ); }
    //! Iterator one past the last thread-local element.
    iterator end() { return iterator(my_locals, my_locals.size() ); }
917 
    //! Const iterator to the first thread-local element.
    const_iterator begin() const { return const_iterator(my_locals, 0); }
920 
    //! Const iterator one past the last thread-local element.
    const_iterator end() const { return const_iterator(my_locals, my_locals.size()); }
923 
    //! Splittable range over all elements, for use with parallel algorithms.
    range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); }
926 
    //! Const splittable range over all elements.
    const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); }
929 
    //! Destroys all elements and the lookup table; the construction callback
    //! is retained so the container can be repopulated afterwards.
    void clear() {
        my_locals.clear();
        this->table_clear();
        // callback is not destroyed
    }
936 
937  private:
938 
    //! Copy-construction helper shared by both copy constructors; *this must be empty.
    template<typename A2, ets_key_usage_type C2>
    void internal_copy(const enumerable_thread_specific<T, A2, C2>& other) {
#if __TBB_ETS_USE_CPP11 && TBB_USE_ASSERT
        // this tests is_compatible_ets
        __TBB_STATIC_ASSERT( (internal::is_compatible_ets<T, typename internal::strip<decltype(other)>::type>::value), "is_compatible_ets fails" );
#endif
        // Initialize my_construct_callback first, so that it is valid even if rest of this routine throws an exception.
        my_construct_callback = other.my_construct_callback->clone();
        __TBB_ASSERT(my_locals.size()==0,NULL);
        // Reserve capacity up front for the elements about to be copied in.
        my_locals.reserve(other.size());
        this->table_elementwise_copy( other, create_local_by_copy );
    }
951 
952  void internal_swap(enumerable_thread_specific& other) {
953  using std::swap;
954  __TBB_ASSERT( this!=&other, NULL );
955  swap(my_construct_callback, other.my_construct_callback);
956  // concurrent_vector::swap() preserves storage space,
957  // so addresses to the vector kept in ETS hash table remain valid.
958  swap(my_locals, other.my_locals);
959  this->internal::ets_base<ETS_key_type>::table_swap(other);
960  }
961 
#if __TBB_ETS_USE_CPP11
//! Moves callback and elements out of a compatible ETS, leaving it without a callback.
template<typename A2, ets_key_usage_type C2>
void internal_move(enumerable_thread_specific<T, A2, C2>&& src) {
#if TBB_USE_ASSERT
    // Compile-time sanity check that 'src' is a compatible ETS specialization.
    __TBB_STATIC_ASSERT( (internal::is_compatible_ets<T, typename internal::strip<decltype(src)>::type>::value), "is_compatible_ets fails" );
#endif
    // Steal the callback pointer; null it in the source so the source's
    // destructor does not destroy it a second time.
    my_construct_callback = src.my_construct_callback;
    src.my_construct_callback = NULL;
    __TBB_ASSERT(my_locals.size()==0,NULL);
    my_locals.reserve(src.size());
    this->table_elementwise_copy( src, create_local_by_move );
}
#endif
976 
977  public:
978 
979  enumerable_thread_specific( const enumerable_thread_specific& other )
980  : internal::ets_base<ETS_key_type>() /* prevents GCC warnings with -Wextra */
981  {
982  internal_copy(other);
983  }
984 
985  template<typename Alloc, ets_key_usage_type Cachetype>
986  enumerable_thread_specific( const enumerable_thread_specific<T, Alloc, Cachetype>& other )
987  {
988  internal_copy(other);
989  }
990 
#if __TBB_ETS_USE_CPP11
//! Move constructor: O(1) swap of state with this freshly-constructed empty object.
enumerable_thread_specific( enumerable_thread_specific&& other ) : my_construct_callback()
{
    internal_swap(other);
}

//! Converting move constructor: element-wise move from a different ETS specialization.
template<typename Alloc, ets_key_usage_type Cachetype>
enumerable_thread_specific( enumerable_thread_specific<T, Alloc, Cachetype>&& other ) : my_construct_callback()
{
    internal_move(std::move(other));
}
#endif
1003 
1004  enumerable_thread_specific& operator=( const enumerable_thread_specific& other )
1005  {
1006  if( this != &other ) {
1007  this->clear();
1008  my_construct_callback->destroy();
1009  internal_copy( other );
1010  }
1011  return *this;
1012  }
1013 
1014  template<typename Alloc, ets_key_usage_type Cachetype>
1015  enumerable_thread_specific& operator=( const enumerable_thread_specific<T, Alloc, Cachetype>& other )
1016  {
1017  __TBB_ASSERT( static_cast<void*>(this)!=static_cast<const void*>(&other), NULL ); // Objects of different types
1018  this->clear();
1019  my_construct_callback->destroy();
1020  internal_copy(other);
1021  return *this;
1022  }
1023 
#if __TBB_ETS_USE_CPP11
//! Move assignment: O(1) state swap; 'other' is left holding this object's old state.
enumerable_thread_specific& operator=( enumerable_thread_specific&& other )
{
    if( this != &other )
        internal_swap(other);
    return *this;
}

//! Converting move assignment from a different ETS specialization.
template<typename Alloc, ets_key_usage_type Cachetype>
enumerable_thread_specific& operator=( enumerable_thread_specific<T, Alloc, Cachetype>&& other )
{
    __TBB_ASSERT( static_cast<void*>(this)!=static_cast<const void*>(&other), NULL ); // Objects of different types
    this->clear();
    my_construct_callback->destroy();
    internal_move(std::move(other));
    return *this;
}
#endif
1042 
1043  // combine_func_t has signature T(T,T) or T(const T&, const T&)
1044  template <typename combine_func_t>
1045  T combine(combine_func_t f_combine) {
1046  if(begin() == end()) {
1047  internal::ets_element<T> location;
1048  my_construct_callback->construct(location.value());
1049  return *location.value_committed();
1050  }
1051  const_iterator ci = begin();
1052  T my_result = *ci;
1053  while(++ci != end())
1054  my_result = f_combine( my_result, *ci );
1055  return my_result;
1056  }
1057 
1058  // combine_func_t takes T by value or by [const] reference, and returns nothing
1059  template <typename combine_func_t>
1060  void combine_each(combine_func_t f_combine) {
1061  for(iterator ci = begin(); ci != end(); ++ci) {
1062  f_combine( *ci );
1063  }
1064  }
1065 
1066  }; // enumerable_thread_specific
1067 
1068  template< typename Container >
1069  class flattened2d {
1070 
1071  // This intermediate typedef is to address issues with VC7.1 compilers
1072  typedef typename Container::value_type conval_type;
1073 
1074  public:
1075 
1077  typedef typename conval_type::size_type size_type;
1078  typedef typename conval_type::difference_type difference_type;
1079  typedef typename conval_type::allocator_type allocator_type;
1080  typedef typename conval_type::value_type value_type;
1081  typedef typename conval_type::reference reference;
1082  typedef typename conval_type::const_reference const_reference;
1083  typedef typename conval_type::pointer pointer;
1084  typedef typename conval_type::const_pointer const_pointer;
1085 
1086  typedef typename internal::segmented_iterator<Container, value_type> iterator;
1087  typedef typename internal::segmented_iterator<Container, const value_type> const_iterator;
1088 
1089  flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) :
1090  my_container(const_cast<Container*>(&c)), my_begin(b), my_end(e) { }
1091 
1092  flattened2d( const Container &c ) :
1093  my_container(const_cast<Container*>(&c)), my_begin(c.begin()), my_end(c.end()) { }
1094 
1095  iterator begin() { return iterator(*my_container) = my_begin; }
1096  iterator end() { return iterator(*my_container) = my_end; }
1097  const_iterator begin() const { return const_iterator(*my_container) = my_begin; }
1098  const_iterator end() const { return const_iterator(*my_container) = my_end; }
1099 
1100  size_type size() const {
1101  size_type tot_size = 0;
1102  for(typename Container::const_iterator i = my_begin; i != my_end; ++i) {
1103  tot_size += i->size();
1104  }
1105  return tot_size;
1106  }
1107 
1108  private:
1109 
1110  Container *my_container;
1111  typename Container::const_iterator my_begin;
1112  typename Container::const_iterator my_end;
1113 
1114  };
1115 
1116  template <typename Container>
1117  flattened2d<Container> flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) {
1118  return flattened2d<Container>(c, b, e);
1119  }
1120 
1121  template <typename Container>
1122  flattened2d<Container> flatten2d(const Container &c) {
1123  return flattened2d<Container>(c);
1124  }
1125 
1126 } // interface6
1127 
1128 namespace internal {
1129 using interface6::internal::segmented_iterator;
1130 }
1131 
1134 using interface6::flatten2d;
1135 
1136 } // namespace tbb
1137 
1138 #endif
Enables one or the other code branches.
Definition: _template_helpers.h:29
Definition: _tbb_hash_compare_impl.h:89
Detects whether two given types are the same.
Definition: _template_helpers.h:56
conval_type::size_type size_type
Basic types.
Definition: enumerable_thread_specific.h:1077
Definition: atomic.h:535
void clear()
Destroys local copies.
Definition: enumerable_thread_specific.h:931
const_iterator end() const
end const iterator
Definition: enumerable_thread_specific.h:922
const_iterator begin() const
begin const iterator
Definition: enumerable_thread_specific.h:919
const_range_type range(size_t grainsize=1) const
Get const range for parallel algorithms.
Definition: enumerable_thread_specific.h:928
Definition: _tbb_windef.h:37
bool empty() const
true if there have been no local copies created
Definition: enumerable_thread_specific.h:911
reference local()
returns reference to local, discarding exists
Definition: enumerable_thread_specific.h:896
A range over which to iterate.
Definition: blocked_range.h:40
Block of space aligned sufficiently to construct an array T with N elements.
Definition: aligned_space.h:33
reference local(bool &exists)
Returns reference to calling thread's local copy, creating one if necessary.
Definition: enumerable_thread_specific.h:902
The enumerable_thread_specific container.
Definition: enumerable_thread_specific.h:54
ets_key_usage_type
enum for selecting between single key and key-per-instance versions
Definition: enumerable_thread_specific.h:48
range_type range(size_t grainsize=1)
Get range for parallel algorithms.
Definition: enumerable_thread_specific.h:925
*/
Definition: material.h:665
Allocator allocator_type
Basic types.
Definition: enumerable_thread_specific.h:835
enumerable_thread_specific()
Default constructor. Each local instance of T is default constructed.
Definition: enumerable_thread_specific.h:853
Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.
Definition: cache_aligned_allocator.h:60
enumerable_thread_specific(const T &exemplar)
Constructor with exemplar. Each local instance of T is copy-constructed from the exemplar.
Definition: enumerable_thread_specific.h:868
T * begin()
Pointer to beginning of array.
Definition: aligned_space.h:39
Definition: enumerable_thread_specific.h:1069
Primary template for atomic.
Definition: atomic.h:405
Definition: _flow_graph_async_msg_impl.h:32
iterator grow_by(size_type delta)
Grow by "delta" elements.
Definition: concurrent_vector.h:789
Definition: tbb_thread.h:233
iterator end()
end iterator
Definition: enumerable_thread_specific.h:916
The namespace tbb contains all components of the library.
Definition: parallel_for.h:44
Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.
Definition: tbb_allocator.h:73
size_type size() const
Get the number of local copies.
Definition: enumerable_thread_specific.h:908
enumerable_thread_specific(Finit finit)
Constructor with initializer functor. Each local instance of T is constructed by T(finit()).
Definition: enumerable_thread_specific.h:863
Strips its template type argument from cv- and ref-qualifiers.
Definition: _template_helpers.h:33
~enumerable_thread_specific()
Destructor.
Definition: enumerable_thread_specific.h:889
iterator begin()
begin iterator
Definition: enumerable_thread_specific.h:914