// concurrent_vector.h
1 /*
2  Copyright 2005-2016 Intel Corporation. All Rights Reserved.
3 
4  This file is part of Threading Building Blocks. Threading Building Blocks is free software;
5  you can redistribute it and/or modify it under the terms of the GNU General Public License
6  version 2 as published by the Free Software Foundation. Threading Building Blocks is
7  distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
8  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
9  See the GNU General Public License for more details. You should have received a copy of
10  the GNU General Public License along with Threading Building Blocks; if not, write to the
11  Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
12 
13  As a special exception, you may use this file as part of a free software library without
14  restriction. Specifically, if other files instantiate templates or use macros or inline
15  functions from this file, or you compile this file and link it with other files to produce
16  an executable, this file does not by itself cause the resulting executable to be covered
17  by the GNU General Public License. This exception does not however invalidate any other
18  reasons why the executable file might be covered by the GNU General Public License.
19 */
20 
21 #ifndef __TBB_concurrent_vector_H
22 #define __TBB_concurrent_vector_H
23 
24 #include "tbb_stddef.h"
25 #include "tbb_exception.h"
26 #include "atomic.h"
27 #include "cache_aligned_allocator.h"
28 #include "blocked_range.h"
29 #include "tbb_machine.h"
30 #include "tbb_profiling.h"
31 #include <new>
32 #include <cstring> // for memset()
33 
34 #if !TBB_USE_EXCEPTIONS && _MSC_VER
35  // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
36  #pragma warning (push)
37  #pragma warning (disable: 4530)
38 #endif
39 
40 #include <algorithm>
41 #include <iterator>
42 
43 #if !TBB_USE_EXCEPTIONS && _MSC_VER
44  #pragma warning (pop)
45 #endif
46 
47 #if _MSC_VER==1500 && !__INTEL_COMPILER
48  // VS2008/VC9 seems to have an issue; limits pull in math.h
49  #pragma warning( push )
50  #pragma warning( disable: 4985 )
51 #endif
52 #include <limits> /* std::numeric_limits */
53 #if _MSC_VER==1500 && !__INTEL_COMPILER
54  #pragma warning( pop )
55 #endif
56 
57 #if __TBB_INITIALIZER_LISTS_PRESENT
58  #include <initializer_list>
59 #endif
60 
61 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
62  // Workaround for overzealous compiler warnings in /Wp64 mode
63  #pragma warning (push)
64 #if defined(_Wp64)
65  #pragma warning (disable: 4267)
66 #endif
67  #pragma warning (disable: 4127) //warning C4127: conditional expression is constant
68 #endif
69 
70 namespace tbb {
71 
72 template<typename T, class A = cache_aligned_allocator<T> >
74 
76 namespace internal {
77 
    //! Forward declaration of the random-access iterator over concurrent_vector.
    template<typename Container, typename Value>
    class vector_iterator;

    //! Sentinel value stored in a segment slot to mark a failed allocation.
    //! Valid segment pointers compare greater than this value, so "allocated"
    //! can be tested with a single pointer comparison (see segment_value_t).
    static void *const vector_allocation_error_flag = reinterpret_cast<void*>(size_t(63));
83 
85  template<typename T>
86  void handle_unconstructed_elements(T* array, size_t n_of_elements){
87  std::memset( array, 0, n_of_elements * sizeof( T ) );
88  }
89 
91 
    //! Type-erased base class of concurrent_vector.
    /** Owns the segment pointer table and the published element count.
        All element-type-independent growth/copy/clear logic lives in the
        exported out-of-line methods declared near the bottom. */
    class concurrent_vector_base_v3 {
    protected:

        // Basic types declarations
        typedef size_t segment_index_t;
        typedef size_t size_type;

        // Using enumerations due to Mac linking problems of static const variables
        enum {
            // Size constants
            default_initial_segments = 1, // 2 initial items
            // Number of segment slots embedded directly in the object
            pointers_per_short_table = 3, // to fit into 8 words of entire structure
            pointers_per_long_table = sizeof(segment_index_t) * 8 // one segment per bit
        };

        // Tag types naming the three states a segment slot can be in.
        struct segment_not_used {};
        struct segment_allocated {};
        struct segment_allocation_failed {};

        class segment_t;
        //! Snapshot of a segment slot; comparable against the three state tags.
        class segment_value_t {
            void* array;
        private:
            //TODO: More elegant way to grant access to selected functions _only_?
            friend class segment_t;
            explicit segment_value_t(void* an_array):array(an_array) {}
        public:
            // State tests: null => not used; == error flag => allocation failed;
            // any pointer above the flag => successfully allocated.
            friend bool operator==(segment_value_t const& lhs, segment_not_used ) { return lhs.array == 0;}
            friend bool operator==(segment_value_t const& lhs, segment_allocated) { return lhs.array > internal::vector_allocation_error_flag;}
            friend bool operator==(segment_value_t const& lhs, segment_allocation_failed) { return lhs.array == internal::vector_allocation_error_flag;}
            template<typename argument_type>
            friend bool operator!=(segment_value_t const& lhs, argument_type arg) { return ! (lhs == arg);}

            //! Reinterpret the slot as a typed element array.
            template<typename T>
            T* pointer() const { return static_cast<T*>(const_cast<void*>(array)); }
        };

        //! Throw (default: bad_last_alloc) unless the slot holds a live segment.
        friend void enforce_segment_allocated(segment_value_t const& s, internal::exception_id exception = eid_bad_last_alloc){
            if(s != segment_allocated()){
                internal::throw_exception(exception);
            }
        }

        // Segment pointer.
        class segment_t {
            atomic<void*> array;
        public:
            segment_t(){ store<relaxed>(segment_not_used());}
            //Copy ctor and assignment operator are defined to ease using of stl algorithms.
            //These algorithms usually not a synchronization point, so, semantic is
            //intentionally relaxed here.
            segment_t(segment_t const& rhs ){ array.store<relaxed>(rhs.array.load<relaxed>());}

            void swap(segment_t & rhs ){
                tbb::internal::swap<relaxed>(array, rhs.array);
            }

            segment_t& operator=(segment_t const& rhs ){
                array.store<relaxed>(rhs.array.load<relaxed>());
                return *this;
            }

            //! Read the slot with the caller-chosen memory ordering.
            template<memory_semantics M>
            segment_value_t load() const { return segment_value_t(array.load<M>());}

            //! Mark the slot empty.
            template<memory_semantics M>
            void store(segment_not_used) {
                array.store<M>(0);
            }

            //! Mark the slot as a failed allocation (never legal after success).
            template<memory_semantics M>
            void store(segment_allocation_failed) {
                __TBB_ASSERT(load<relaxed>() != segment_allocated(),"transition from \"allocated\" to \"allocation failed\" state looks non-logical");
                array.store<M>(internal::vector_allocation_error_flag);
            }

            //! Publish a real segment pointer; must not be one of the marker values.
            template<memory_semantics M>
            void store(void* allocated_segment_pointer) __TBB_NOEXCEPT(true) {
                __TBB_ASSERT(segment_value_t(allocated_segment_pointer) == segment_allocated(),
                    "other overloads of store should be used for marking segment as not_used or allocation_failed" );
                array.store<M>(allocated_segment_pointer);
            }

#if TBB_USE_ASSERT
            //! Debug-only check that clear() released this segment first.
            ~segment_t() {
                __TBB_ASSERT(load<relaxed>() != segment_allocated(), "should have been freed by clear" );
            }
#endif /* TBB_USE_ASSERT */
        };
        friend void swap(segment_t & , segment_t & ) __TBB_NOEXCEPT(true);

        // Data fields

        //! Allocation hook installed by the derived class (type-aware allocator).
        void* (*vector_allocator_ptr)(concurrent_vector_base_v3 &, size_t);

        //! Count of segments in the first block.
        atomic<size_type> my_first_block;

        //! Requested size of vector (may run ahead of what is fully constructed).
        atomic<size_type> my_early_size;

        //! Pointer to the segment table currently in use (embedded or heap).
        atomic<segment_t*> my_segment;

        //! Embedded storage for the first few segment pointers.
        segment_t my_storage[pointers_per_short_table];

        // Methods

        concurrent_vector_base_v3() {
            //Here the semantic is intentionally relaxed.
            //The reason this is next:
            //Object that is in middle of construction (i.e. its constructor is not yet finished)
            //cannot be used concurrently until the construction is finished.
            //Thus to flag other threads that construction is finished, some synchronization with
            //acquire-release semantic should be done by the (external) code that uses the vector.
            //So, no need to do the synchronization inside the vector.

            my_early_size.store<relaxed>(0);
            my_first_block.store<relaxed>(0); // here is not default_initial_segments
            my_segment.store<relaxed>(my_storage);
        }

        __TBB_EXPORTED_METHOD ~concurrent_vector_base_v3();

        //these helpers methods use the fact that segments are allocated so
        //that every segment size is a (increasing) power of 2.
        //with one exception 0 segment has size of 2 as well segment 1;
        //e.g. size of segment with index of 3 is 2^3=8;
        static segment_index_t segment_index_of( size_type index ) {
            return segment_index_t( __TBB_Log2( index|1 ) );
        }

        //! First element index covered by segment k.
        static segment_index_t segment_base( segment_index_t k ) {
            return (segment_index_t(1)<<k & ~segment_index_t(1));
        }

        //! Convert a global index to (segment, offset): returns the segment and
        //! rewrites index in place to the offset within that segment.
        static inline segment_index_t segment_base_index_of( segment_index_t &index ) {
            segment_index_t k = segment_index_of( index );
            index -= segment_base(k);
            return k;
        }

        static size_type segment_size( segment_index_t k ) {
            return segment_index_t(1)<<k; // fake value for k==0
        }


        static bool is_first_element_in_segment(size_type element_index){
            //check if element_index is a power of 2 that is at least 2.
            //The idea is to detect if the iterator crosses a segment boundary,
            //and 2 is the minimal index for which it's true
            __TBB_ASSERT(element_index, "there should be no need to call "
                "is_first_element_in_segment for 0th element" );
            return is_power_of_two_at_least( element_index, 2 );
        }

        //! An operation on an n-element array starting at begin.
        typedef void (__TBB_EXPORTED_FUNC *internal_array_op1)(void* begin, size_type n );

        //! An operation on an n-element destination and n-element source array.
        typedef void (__TBB_EXPORTED_FUNC *internal_array_op2)(void* dst, const void* src, size_type n );

        //! Internal structure for shrink/compact: a standalone segment table.
        struct internal_segments_table {
            segment_index_t first_block;
            segment_t table[pointers_per_long_table];
        };

        // Out-of-line, ABI-exported implementation entry points.
        void __TBB_EXPORTED_METHOD internal_reserve( size_type n, size_type element_size, size_type max_size );
        size_type __TBB_EXPORTED_METHOD internal_capacity() const;
        void internal_grow( size_type start, size_type finish, size_type element_size, internal_array_op2 init, const void *src );
        size_type __TBB_EXPORTED_METHOD internal_grow_by( size_type delta, size_type element_size, internal_array_op2 init, const void *src );
        void* __TBB_EXPORTED_METHOD internal_push_back( size_type element_size, size_type& index );
        segment_index_t __TBB_EXPORTED_METHOD internal_clear( internal_array_op1 destroy );
        void* __TBB_EXPORTED_METHOD internal_compact( size_type element_size, void *table, internal_array_op1 destroy, internal_array_op2 copy );
        void __TBB_EXPORTED_METHOD internal_copy( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op2 copy );
        void __TBB_EXPORTED_METHOD internal_assign( const concurrent_vector_base_v3& src, size_type element_size,
            internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy );
        void __TBB_EXPORTED_METHOD internal_throw_exception(size_type) const;
        void __TBB_EXPORTED_METHOD internal_swap(concurrent_vector_base_v3& v);

        void __TBB_EXPORTED_METHOD internal_resize( size_type n, size_type element_size, size_type max_size, const void *src,
            internal_array_op1 destroy, internal_array_op2 init );
        size_type __TBB_EXPORTED_METHOD internal_grow_to_at_least_with_result( size_type new_size, size_type element_size, internal_array_op2 init, const void *src );

        //! Deprecated entry point kept for binary compatibility.
        void __TBB_EXPORTED_METHOD internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op2 init, const void *src );
    private:
        //! Private functionality shared by the out-of-line implementation.
        class helper;
        friend class helper;

        template<typename Container, typename Value>
        friend class vector_iterator;

    };
292 
    //! ADL-visible swap for segment_t; forwards to the member swap, which
    //! exchanges the underlying atomics with relaxed ordering.
    inline void swap(concurrent_vector_base_v3::segment_t & lhs, concurrent_vector_base_v3::segment_t & rhs) __TBB_NOEXCEPT(true) {
        lhs.swap(rhs);
    }

    //! Alias for the current ABI version of the base class.
    typedef concurrent_vector_base_v3 concurrent_vector_base;
298 
300 
    //! Meets requirements of a random-access iterator over concurrent_vector.
    /** Caches a direct element pointer (my_item) that stays valid only within
        one segment; crossing a segment boundary invalidates the cache. */
    template<typename Container, typename Value>
    class vector_iterator
    {
        //! concurrent_vector over which we are iterating.
        Container* my_vector;

        //! Index of the element into the vector.
        size_t my_index;

        //! Caches my_vector->internal_subscript(my_index); NULL when the cached
        //! value is not available (mutable so operator*() const can fill it).
        mutable Value* my_item;

        template<typename C, typename T>
        friend vector_iterator<C,T> operator+( ptrdiff_t offset, const vector_iterator<C,T>& v );

        template<typename C, typename T, typename U>
        friend bool operator==( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );

        template<typename C, typename T, typename U>
        friend bool operator<( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );

        template<typename C, typename T, typename U>
        friend ptrdiff_t operator-( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );

        template<typename C, typename U>
        friend class internal::vector_iterator;

#if !__TBB_TEMPLATE_FRIENDS_BROKEN
        template<typename T, class A>
        friend class tbb::concurrent_vector;
#else
public:
#endif

        //! Private ctor used by the container; ptr optionally seeds the cache.
        vector_iterator( const Container& vector, size_t index, void *ptr = 0 ) :
            my_vector(const_cast<Container*>(&vector)),
            my_index(index),
            my_item(static_cast<Value*>(ptr))
        {}

    public:
        //! Default constructor: singular iterator.
        vector_iterator() : my_vector(NULL), my_index(~size_t(0)), my_item(NULL) {}

        //! Conversion from the non-const iterator (same container, mutable Value).
        vector_iterator( const vector_iterator<Container,typename Container::value_type>& other ) :
            my_vector(other.my_vector),
            my_index(other.my_index),
            my_item(other.my_item)
        {}

        vector_iterator operator+( ptrdiff_t offset ) const {
            return vector_iterator( *my_vector, my_index+offset );
        }
        vector_iterator &operator+=( ptrdiff_t offset ) {
            my_index+=offset;
            my_item = NULL;
            return *this;
        }
        vector_iterator operator-( ptrdiff_t offset ) const {
            return vector_iterator( *my_vector, my_index-offset );
        }
        vector_iterator &operator-=( ptrdiff_t offset ) {
            my_index-=offset;
            my_item = NULL;
            return *this;
        }
        Value& operator*() const {
            // Lazily resolve and cache the element address.
            Value* item = my_item;
            if( !item ) {
                item = my_item = &my_vector->internal_subscript(my_index);
            }
            __TBB_ASSERT( item==&my_vector->internal_subscript(my_index), "corrupt cache" );
            return *item;
        }
        Value& operator[]( ptrdiff_t k ) const {
            return my_vector->internal_subscript(my_index+k);
        }
        Value* operator->() const {return &operator*();}

        //! Pre increment
        vector_iterator& operator++() {
            size_t element_index = ++my_index;
            if( my_item ) {
                //TODO: consider using of knowledge about "first_block optimization" here as well?
                if( concurrent_vector_base::is_first_element_in_segment(element_index)) {
                    //if the iterator crosses a segment boundary, the pointer become invalid
                    //as possibly next segment is in another memory location
                    my_item= NULL;
                } else {
                    ++my_item;
                }
            }
            return *this;
        }

        //! Pre decrement
        vector_iterator& operator--() {
            __TBB_ASSERT( my_index>0, "operator--() applied to iterator already at beginning of concurrent_vector" );
            size_t element_index = my_index--;
            if( my_item ) {
                if(concurrent_vector_base::is_first_element_in_segment(element_index)) {
                    //if the iterator crosses a segment boundary, the pointer become invalid
                    //as possibly next segment is in another memory location
                    my_item= NULL;
                } else {
                    --my_item;
                }
            }
            return *this;
        }

        //! Post increment
        vector_iterator operator++(int) {
            vector_iterator result = *this;
            operator++();
            return result;
        }

        //! Post decrement
        vector_iterator operator--(int) {
            vector_iterator result = *this;
            operator--();
            return result;
        }

        // STL support

        typedef ptrdiff_t difference_type;
        typedef Value value_type;
        typedef Value* pointer;
        typedef Value& reference;
        typedef std::random_access_iterator_tag iterator_category;
    };
436 
437  template<typename Container, typename T>
438  vector_iterator<Container,T> operator+( ptrdiff_t offset, const vector_iterator<Container,T>& v ) {
439  return vector_iterator<Container,T>( *v.my_vector, v.my_index+offset );
440  }
441 
442  template<typename Container, typename T, typename U>
443  bool operator==( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
444  return i.my_index==j.my_index && i.my_vector == j.my_vector;
445  }
446 
    //! Negation of operator== (implemented in terms of it, since this overload
    //! is not a friend and cannot read the members directly).
    template<typename Container, typename T, typename U>
    bool operator!=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
        return !(i==j);
    }
451 
452  template<typename Container, typename T, typename U>
453  bool operator<( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
454  return i.my_index<j.my_index;
455  }
456 
    //! Derived comparisons, all expressed via operator< (the only ordering friend).
    template<typename Container, typename T, typename U>
    bool operator>( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
        return j<i;
    }

    template<typename Container, typename T, typename U>
    bool operator>=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
        return !(i<j);
    }

    template<typename Container, typename T, typename U>
    bool operator<=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
        return !(j<i);
    }
471 
472  template<typename Container, typename T, typename U>
473  ptrdiff_t operator-( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
474  return ptrdiff_t(i.my_index)-ptrdiff_t(j.my_index);
475  }
476 
477  template<typename T, class A>
478  class allocator_base {
479  public:
480  typedef typename A::template
481  rebind<T>::other allocator_type;
482  allocator_type my_allocator;
483 
484  allocator_base(const allocator_type &a = allocator_type() ) : my_allocator(a) {}
485 
486  };
487 
488 } // namespace internal
490 
492 
553 template<typename T, class A>
554 class concurrent_vector: protected internal::allocator_base<T, A>,
555  private internal::concurrent_vector_base {
556 private:
    //! Adapter giving blocked_range the typedefs that parallel algorithms expect.
    template<typename I>
    class generic_range_type: public blocked_range<I> {
    public:
        typedef T value_type;
        typedef T& reference;
        typedef const T& const_reference;
        typedef I iterator;
        typedef ptrdiff_t difference_type;
        generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {}
        //! Conversion between const and non-const range flavors.
        template<typename U>
        generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {}
        //! Splitting constructor for divide-and-conquer (delegates to blocked_range).
        generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
    };
570 
    template<typename C, typename U>
    friend class internal::vector_iterator;

public:
    //------------------------------------------------------------------------
    // STL compatible types
    //------------------------------------------------------------------------
    typedef internal::concurrent_vector_base_v3::size_type size_type;
    typedef typename internal::allocator_base<T, A>::allocator_type allocator_type;

    typedef T value_type;
    typedef ptrdiff_t difference_type;
    typedef T& reference;
    typedef const T& const_reference;
    typedef T *pointer;
    typedef const T *const_pointer;

    typedef internal::vector_iterator<concurrent_vector,T> iterator;
    typedef internal::vector_iterator<concurrent_vector,const T> const_iterator;

#if !defined(_MSC_VER) || _CPPLIB_VER>=300
    // Assume ISO standard definition of std::reverse_iterator
    typedef std::reverse_iterator<iterator> reverse_iterator;
    typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
#else
    // Use non-standard std::reverse_iterator
    typedef std::reverse_iterator<iterator,T,T&,T*> reverse_iterator;
    typedef std::reverse_iterator<const_iterator,T,const T&,const T*> const_reverse_iterator;
#endif /* defined(_MSC_VER) && (_MSC_VER<1300) */

    //------------------------------------------------------------------------
    // Parallel algorithm support
    //------------------------------------------------------------------------
    typedef generic_range_type<iterator> range_type;
    typedef generic_range_type<const_iterator> const_range_type;
606 
607  //------------------------------------------------------------------------
608  // STL compatible constructors & destructors
609  //------------------------------------------------------------------------
610 
    //! Construct empty vector.
    explicit concurrent_vector(const allocator_type &a = allocator_type())
        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
    {
        // Install the type-aware allocation hook used by the base's
        // out-of-line growth machinery.
        vector_allocator_ptr = &internal_allocator;
    }
617 
618  //Constructors are not required to have synchronization
619  //(for more details see comment in the concurrent_vector_base constructor).
620 #if __TBB_INITIALIZER_LISTS_PRESENT
621  concurrent_vector(std::initializer_list<T> init_list, const allocator_type &a = allocator_type())
623  : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
624  {
625  vector_allocator_ptr = &internal_allocator;
626  __TBB_TRY {
627  internal_assign_iterators(init_list.begin(), init_list.end());
628  } __TBB_CATCH(...) {
629  segment_t *table = my_segment.load<relaxed>();;
630  internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
631  __TBB_RETHROW();
632  }
633 
634  }
635 #endif //# __TBB_INITIALIZER_LISTS_PRESENT
636 
    //! Copying constructor.
    concurrent_vector( const concurrent_vector& vector, const allocator_type& a = allocator_type() )
        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
    {
        vector_allocator_ptr = &internal_allocator;
        __TBB_TRY {
            internal_copy(vector, sizeof(T), &copy_array);
        } __TBB_CATCH(...) {
            // Roll back: destroy copied elements and free segments, because the
            // destructor will not run for a partially constructed object.
            segment_t *table = my_segment.load<relaxed>();
            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
            __TBB_RETHROW();
        }
    }
650 
651 #if __TBB_CPP11_RVALUE_REF_PRESENT
652  //TODO add __TBB_NOEXCEPT(true) and static_assert(std::has_nothrow_move_constructor<A>::value)
655  : internal::allocator_base<T, A>(std::move(source)), internal::concurrent_vector_base()
656  {
657  vector_allocator_ptr = &internal_allocator;
658  concurrent_vector_base_v3::internal_swap(source);
659  }
660 
    //! Move constructor with a caller-supplied allocator.
    concurrent_vector( concurrent_vector&& source, const allocator_type& a)
        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
    {
        vector_allocator_ptr = &internal_allocator;
        //C++ standard requires instances of an allocator being compared for equality,
        //which means that memory allocated by one instance is possible to deallocate with the other one.
        if (a == source.my_allocator) {
            // Equal allocators: steal the storage wholesale.
            concurrent_vector_base_v3::internal_swap(source);
        } else {
            // Unequal allocators: must move element-by-element into new storage.
            __TBB_TRY {
                internal_copy(source, sizeof(T), &move_array);
            } __TBB_CATCH(...) {
                // Clean up partially moved state before propagating.
                segment_t *table = my_segment.load<relaxed>();
                internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
                __TBB_RETHROW();
            }
        }
    }
679 
680 #endif
681 
    //! Copying constructor for vector with a different allocator type.
    template<class M>
    concurrent_vector( const concurrent_vector<T, M>& vector, const allocator_type& a = allocator_type() )
        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
    {
        vector_allocator_ptr = &internal_allocator;
        __TBB_TRY {
            // Copy through the type-erased base, since concurrent_vector<T,M>
            // is a different class type.
            internal_copy(vector.internal_vector_base(), sizeof(T), &copy_array);
        } __TBB_CATCH(...) {
            segment_t *table = my_segment.load<relaxed>();
            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
            __TBB_RETHROW();
        }
    }
696 
    //! Construction with n default-constructed elements.
    explicit concurrent_vector(size_type n)
    {
        vector_allocator_ptr = &internal_allocator;
        __TBB_TRY {
            internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );
        } __TBB_CATCH(...) {
            // Destructor will not run for a partially built object; clean up here.
            segment_t *table = my_segment.load<relaxed>();
            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
            __TBB_RETHROW();
        }
    }
709 
    //! Construction with n copies of t.
    concurrent_vector(size_type n, const_reference t, const allocator_type& a = allocator_type())
        : internal::allocator_base<T, A>(a)
    {
        vector_allocator_ptr = &internal_allocator;
        __TBB_TRY {
            internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
        } __TBB_CATCH(...) {
            // Destructor will not run for a partially built object; clean up here.
            segment_t *table = my_segment.load<relaxed>();
            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
            __TBB_RETHROW();
        }
    }
723 
    //! Construction from the iterator range [first, last).
    template<class I>
    concurrent_vector(I first, I last, const allocator_type &a = allocator_type())
        : internal::allocator_base<T, A>(a)
    {
        vector_allocator_ptr = &internal_allocator;
        __TBB_TRY {
            // Tag dispatch distinguishes integral (n, value) calls from true iterators.
            internal_assign_range(first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );
        } __TBB_CATCH(...) {
            segment_t *table = my_segment.load<relaxed>();
            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
            __TBB_RETHROW();
        }
    }
738 
741  if( this != &vector )
742  internal_assign(vector, sizeof(T), &destroy_array, &assign_array, &copy_array);
743  return *this;
744  }
745 
746 #if __TBB_CPP11_RVALUE_REF_PRESENT
    //TODO: add __TBB_NOEXCEPT()
    //! Move assignment. Not thread safe.
    concurrent_vector& operator=( concurrent_vector&& other ) {
        __TBB_ASSERT(this != &other, "Move assignment to itself is prohibited ");
        typedef typename tbb::internal::allocator_traits<A>::propagate_on_container_move_assignment pocma_t;
        if(pocma_t::value || this->my_allocator == other.my_allocator) {
            // Old contents are handed to a temporary whose destructor frees them,
            // then other's storage is swapped in.
            concurrent_vector trash (std::move(*this));
            internal_swap(other);
            if (pocma_t::value) {
                this->my_allocator = std::move(other.my_allocator);
            }
        } else {
            // Allocators differ and do not propagate: move element-by-element.
            internal_assign(other, sizeof(T), &destroy_array, &move_assign_array, &move_array);
        }
        return *this;
    }
763 #endif
764  //TODO: add an template assignment operator? (i.e. with different element type)
765 
767  template<class M>
769  if( static_cast<void*>( this ) != static_cast<const void*>( &vector ) )
770  internal_assign(vector.internal_vector_base(),
771  sizeof(T), &destroy_array, &assign_array, &copy_array);
772  return *this;
773  }
774 
775 #if __TBB_INITIALIZER_LISTS_PRESENT
    //! Assignment from an initializer_list. Not thread safe.
    concurrent_vector& operator=( std::initializer_list<T> init_list ) {
        internal_clear(&destroy_array);
        internal_assign_iterators(init_list.begin(), init_list.end());
        return *this;
    }
782 #endif //#if __TBB_INITIALIZER_LISTS_PRESENT
783 
784  //------------------------------------------------------------------------
785  // Concurrent operations
786  //------------------------------------------------------------------------
788 
789  iterator grow_by( size_type delta ) {
790  return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size.load());
791  }
792 
794 
795  iterator grow_by( size_type delta, const_reference t ) {
796  return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast<const void*>(&t) ) : my_early_size.load());
797  }
798 
    //! Append a copy of [first, last); thread safe.
    //! Returns an iterator to the first appended element.
    template<typename I>
    iterator grow_by( I first, I last ) {
        typename std::iterator_traits<I>::difference_type delta = std::distance(first, last);
        __TBB_ASSERT( delta >= 0, NULL);

        return iterator(*this, delta ? internal_grow_by(delta, sizeof(T), &copy_range<I>, static_cast<const void*>(&first)) : my_early_size.load());
    }
807 
808 #if __TBB_INITIALIZER_LISTS_PRESENT
809 
    //! Append the contents of an initializer list; delegates to the range overload.
    iterator grow_by( std::initializer_list<T> init_list ) {
        return grow_by( init_list.begin(), init_list.end() );
    }
813 #endif //#if __TBB_INITIALIZER_LISTS_PRESENT
814 
816 
820  iterator grow_to_at_least( size_type n ) {
821  size_type m=0;
822  if( n ) {
823  m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL );
824  if( m>n ) m=n;
825  }
826  return iterator(*this, m);
827  };
828 
831  iterator grow_to_at_least( size_type n, const_reference t ) {
832  size_type m=0;
833  if( n ) {
834  m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array_by, &t);
835  if( m>n ) m=n;
836  }
837  return iterator(*this, m);
838  };
839 
841 
    //! Push item onto the end; thread safe. Returns iterator to the new element.
    iterator push_back( const_reference item )
    {
        // The helper reserves a slot and rolls it back if T's copy ctor throws.
        push_back_helper prolog(*this);
        new(prolog.internal_push_back_result()) T(item);
        return prolog.return_iterator_and_dismiss();
    }
848 
849 #if __TBB_CPP11_RVALUE_REF_PRESENT
850 
    //! Push item by move; thread safe. Returns iterator to the new element.
    iterator push_back( T&& item )
    {
        // The helper reserves a slot and rolls it back if T's move ctor throws.
        push_back_helper prolog(*this);
        new(prolog.internal_push_back_result()) T(std::move(item));
        return prolog.return_iterator_and_dismiss();
    }
858 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
859 
    //! Construct an element in place from args; thread safe.
    //! Returns iterator to the new element.
    template<typename... Args>
    iterator emplace_back( Args&&... args )
    {
        // The helper reserves a slot and rolls it back if T's ctor throws.
        push_back_helper prolog(*this);
        new(prolog.internal_push_back_result()) T(std::forward<Args>(args)...);
        return prolog.return_iterator_and_dismiss();
    }
868 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
869 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
870 
    //! Get reference to element at given index (no bounds checking).
    reference operator[]( size_type index ) {
        return internal_subscript(index);
    }

    //! Get const reference to element at given index (no bounds checking).
    const_reference operator[]( size_type index ) const {
        return internal_subscript(index);
    }

    //! Get reference to element at given index; throws on out-of-range.
    reference at( size_type index ) {
        return internal_subscript_with_exceptions(index);
    }

    //! Get const reference to element at given index; throws on out-of-range.
    const_reference at( size_type index ) const {
        return internal_subscript_with_exceptions(index);
    }

    //! Get range for iterating with parallel algorithms.
    range_type range( size_t grainsize = 1 ) {
        return range_type( begin(), end(), grainsize );
    }

    //! Get const range for iterating with parallel algorithms.
    const_range_type range( size_t grainsize = 1 ) const {
        return const_range_type( begin(), end(), grainsize );
    }
901 
902  //------------------------------------------------------------------------
903  // Capacity
904  //------------------------------------------------------------------------
906  size_type size() const {
907  size_type sz = my_early_size, cp = internal_capacity();
908  return cp < sz ? cp : sz;
909  }
910 
    //! Return true iff the vector holds no elements (published count is zero).
    bool empty() const {return !my_early_size;}

    //! Maximum size to which the array can grow without allocating more memory.
    size_type capacity() const {return internal_capacity();}

    //! Allocate enough space to grow to size n without allocating later.
    /** Not thread safe. The resulting capacity may exceed the request. */
    void reserve( size_type n ) {
        if( n )
            internal_reserve(n, sizeof(T), max_size());
    }

    //! Resize the vector, default-initializing new elements. Not thread safe.
    void resize( size_type n ) {
        internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );
    }

    //! Resize the vector, copying t into new elements. Not thread safe.
    void resize( size_type n, const_reference t ) {
        internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
    }

    //! Optimize memory usage and fragmentation. Defined out of line.
    void shrink_to_fit();

    //! Upper bound on the element count representable in size_type bytes.
    size_type max_size() const {return (~size_type(0))/sizeof(T);}
940 
941  //------------------------------------------------------------------------
942  // STL support
943  //------------------------------------------------------------------------
944 
946  iterator begin() {return iterator(*this,0);}
948  iterator end() {return iterator(*this,size());}
950  const_iterator begin() const {return const_iterator(*this,0);}
952  const_iterator end() const {return const_iterator(*this,size());}
954  const_iterator cbegin() const {return const_iterator(*this,0);}
956  const_iterator cend() const {return const_iterator(*this,size());}
958  reverse_iterator rbegin() {return reverse_iterator(end());}
960  reverse_iterator rend() {return reverse_iterator(begin());}
962  const_reverse_iterator rbegin() const {return const_reverse_iterator(end());}
964  const_reverse_iterator rend() const {return const_reverse_iterator(begin());}
966  const_reverse_iterator crbegin() const {return const_reverse_iterator(end());}
968  const_reverse_iterator crend() const {return const_reverse_iterator(begin());}
    //! the first item
    reference front() {
        __TBB_ASSERT( size()>0, NULL);
        // relaxed load suffices: the caller must already synchronize with the
        // thread that constructed the element (same reasoning as internal_subscript)
        const segment_value_t& segment_value = my_segment[0].template load<relaxed>();
        return (segment_value.template pointer<T>())[0];
    }
    //! the first item const
    const_reference front() const {
        __TBB_ASSERT( size()>0, NULL);
        // relaxed load suffices: the caller must already synchronize with the
        // thread that constructed the element (same reasoning as internal_subscript)
        const segment_value_t& segment_value = my_segment[0].template load<relaxed>();
        return (segment_value.template pointer<const T>())[0];
    }
    //! the last item
    reference back() {
        __TBB_ASSERT( size()>0, NULL);
        return internal_subscript( size()-1 );
    }
    //! the last item const
    const_reference back() const {
        __TBB_ASSERT( size()>0, NULL);
        return internal_subscript( size()-1 );
    }
    //! return allocator object
    allocator_type get_allocator() const { return this->my_allocator; }

    //! assign n items by copying t item
    void assign(size_type n, const_reference t) {
        clear();
        internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
    }

    //! assign range [first, last); integral (count, value) calls are dispatched via is_integer_tag
    template<class I>
    void assign(I first, I last) {
        clear(); internal_assign_range( first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );
    }
1005 
#if __TBB_INITIALIZER_LISTS_PRESENT
    //! assigns an initializer list
    void assign(std::initializer_list<T> init_list) {
        clear(); internal_assign_iterators( init_list.begin(), init_list.end());
    }
#endif // __TBB_INITIALIZER_LISTS_PRESENT
1012 
1014  void swap(concurrent_vector &vector) {
1015  using std::swap;
1016  if( this != &vector ) {
1017  concurrent_vector_base_v3::internal_swap(static_cast<concurrent_vector_base_v3&>(vector));
1018  swap(this->my_allocator, vector.my_allocator);
1019  }
1020  }
1021 
1023 
    //! Clear container while keeping memory allocated.
    /** Destroys all elements but keeps the segment table; use shrink_to_fit()
        afterwards to release the memory. Not thread-safe. */
    void clear() {
        internal_clear(&destroy_array);
    }
1027 
1030  segment_t *table = my_segment.load<relaxed>();
1031  internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
1032  // base class destructor call should be then
1033  }
1034 
    //! Expose the type-erased base representation for internal library use.
    const internal::concurrent_vector_base_v3 &internal_vector_base() const { return *this; }
private:
    //! Allocator callback: routes base-class segment allocation through this vector's my_allocator.
    static void *internal_allocator(internal::concurrent_vector_base_v3 &vb, size_t k) {
        return static_cast<concurrent_vector<T, A>&>(vb).my_allocator.allocate(k);
    }
    //! Deallocate segments [first_block, k) of table; slot 0, if allocated, holds the joined first block.
    void internal_free_segments(segment_t table[], segment_index_t k, segment_index_t first_block);

    //! Get reference to element at given index (errors reported via asserts only).
    T& internal_subscript( size_type index ) const;

    //! Get reference to element at given index, reporting errors via exceptions.
    T& internal_subscript_with_exceptions( size_type index ) const;

    //! assign n items by copying *p, or default-construct the items when p is NULL
    void internal_assign_n(size_type n, const_pointer p) {
        internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(p), &destroy_array, p? &initialize_array_by : &initialize_array );
    }
1054 
    //! Tag type used to dispatch assign(first, last) on whether I is an integral type.
    template<bool B> class is_integer_tag;

    //! assign(n, value) was actually called: first is the count, last is the value
    template<class I>
    void internal_assign_range(I first, I last, is_integer_tag<true> *) {
        internal_assign_n(static_cast<size_type>(first), &static_cast<T&>(last));
    }
    //! inclusive range [first, last) of genuine iterators
    template<class I>
    void internal_assign_range(I first, I last, is_integer_tag<false> *) {
        internal_assign_iterators(first, last);
    }
    //! Copy-construct elements from [first, last); the vector must be empty beforehand.
    template<class I>
    void internal_assign_iterators(I first, I last);
1071 
    //these functions are marked __TBB_EXPORTED_FUNC as they are called from within the library

    //! Construct n instances of T by default constructor, starting at begin.
    static void __TBB_EXPORTED_FUNC initialize_array( void* begin, const void*, size_type n );

    //! Construct n instances of T, each copied from *src, starting at begin.
    static void __TBB_EXPORTED_FUNC initialize_array_by( void* begin, const void* src, size_type n );

    //! Copy-construct n instances of T from the array src into dst.
    static void __TBB_EXPORTED_FUNC copy_array( void* dst, const void* src, size_type n );

#if __TBB_MOVE_IF_NOEXCEPT_PRESENT
    //! Move-construct from src into dst when T's move cannot throw, otherwise copy-construct.
    static void __TBB_EXPORTED_FUNC move_array_if_noexcept( void* dst, const void* src, size_type n );
#endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT

#if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Move-construct n instances of T from src into dst.
    static void __TBB_EXPORTED_FUNC move_array( void* dst, const void* src, size_type n );

    //! Move-assign n instances of T from src onto already-constructed elements in dst.
    static void __TBB_EXPORTED_FUNC move_assign_array( void* dst, const void* src, size_type n );
#endif
    //! Construct n instances of T from a type-erased iterator, starting at dst.
    template<typename Iterator>
    static void __TBB_EXPORTED_FUNC copy_range( void* dst, const void* p_type_erased_iterator, size_type n );

    //! Copy-assign n instances of T from src onto already-constructed elements in dst.
    static void __TBB_EXPORTED_FUNC assign_array( void* dst, const void* src, size_type n );

    //! Destroy n instances of T, starting at begin.
    static void __TBB_EXPORTED_FUNC destroy_array( void* begin, size_type n );
1104 
    //! Exception-safe element-by-element loop over a raw segment.
    /** Progress is recorded in i; if an element's constructor throws, the
        destructor hands the unconstructed tail [array+i, array+n) to
        internal::handle_unconstructed_elements so the segment remains destroyable. */
    class internal_loop_guide : internal::no_copy {
    public:
        const pointer array;  // start of the range being processed
        const size_type n;    // number of elements to process
        size_type i;          // elements successfully processed so far

        static const T* as_const_pointer(const void *ptr) { return static_cast<const T *>(ptr); }
        static T* as_pointer(const void *src) { return static_cast<T*>(const_cast<void *>(src)); }

        internal_loop_guide(size_type ntrials, void *ptr)
            : array(as_pointer(ptr)), n(ntrials), i(0) {}
        //! Default-construct each element in place.
        void init() { for(; i < n; ++i) new( &array[i] ) T(); }
        //! Copy-construct every element from the single value *src.
        void init(const void *src) { for(; i < n; ++i) new( &array[i] ) T(*as_const_pointer(src)); }
        //! Copy-construct element i from src[i].
        void copy(const void *src) { for(; i < n; ++i) new( &array[i] ) T(as_const_pointer(src)[i]); }
        //! Copy-assign src[i] onto the already-constructed element i.
        void assign(const void *src) { for(; i < n; ++i) array[i] = as_const_pointer(src)[i]; }
#if __TBB_CPP11_RVALUE_REF_PRESENT
        //! Move-assign src[i] onto the already-constructed element i.
        void move_assign(const void *src) { for(; i < n; ++i) array[i] = std::move(as_pointer(src)[i]); }
        //! Move-construct element i from src[i].
        void move_construct(const void *src) { for(; i < n; ++i) new( &array[i] ) T( std::move(as_pointer(src)[i]) ); }
#endif
#if __TBB_MOVE_IF_NOEXCEPT_PRESENT
        //! Move-construct when T's move cannot throw, otherwise copy-construct.
        void move_construct_if_noexcept(const void *src) { for(; i < n; ++i) new( &array[i] ) T( std::move_if_noexcept(as_pointer(src)[i]) ); }
#endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT

        //TODO: rename to construct_range
        //! Construct each element from successive values of iterator src (advanced in place).
        template<class I> void iterate(I &src) { for(; i < n; ++i, ++src) new( &array[i] ) T( *src ); }
        ~internal_loop_guide() {
            if(i < n) {// if an exception was raised, fill the rest of items with zeros
                internal::handle_unconstructed_elements(array+i, n-i);
            }
        }
    };
1137 
1138  struct push_back_helper : internal::no_copy{
1139  struct element_construction_guard : internal::no_copy{
1140  pointer element;
1141 
1142  element_construction_guard(pointer an_element) : element (an_element){}
1143  void dismiss(){ element = NULL; }
1145  if (element){
1146  internal::handle_unconstructed_elements(element, 1);
1147  }
1148  }
1149  };
1150 
1151  concurrent_vector & v;
1152  size_type k;
1154 
1155  push_back_helper(concurrent_vector & vector) :
1156  v(vector),
1157  g (static_cast<T*>(v.internal_push_back(sizeof(T),k)))
1158  {}
1159 
1160  pointer internal_push_back_result(){ return g.element;}
1161  iterator return_iterator_and_dismiss(){
1162  pointer ptr = g.element;
1163  g.dismiss();
1164  return iterator(v, k, ptr);
1165  }
1166  };
1167 };
1168 
1169 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1170 #pragma warning (push)
1171 #pragma warning (disable: 4701) // potentially uninitialized local variable "old"
1172 #endif
1173 template<typename T, class A>
1175  internal_segments_table old;
1176  __TBB_TRY {
1177  internal_array_op2 copy_or_move_array =
1178 #if __TBB_MOVE_IF_NOEXCEPT_PRESENT
1179  &move_array_if_noexcept
1180 #else
1181  &copy_array
1182 #endif
1183  ;
1184  if( internal_compact( sizeof(T), &old, &destroy_array, copy_or_move_array ) )
1185  internal_free_segments( old.table, pointers_per_long_table, old.first_block ); // free joined and unnecessary segments
1186  } __TBB_CATCH(...) {
1187  if( old.first_block ) // free segment allocated for compacting. Only for support of exceptions in ctor of user T[ype]
1188  internal_free_segments( old.table, 1, old.first_block );
1189  __TBB_RETHROW();
1190  }
1191 }
1192 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1193 #pragma warning (pop)
1194 #endif // warning 4701 is back
1195 
//! Release storage of segments [first_block, k), then the joined first block if present.
template<typename T, class A>
void concurrent_vector<T, A>::internal_free_segments(segment_t table[], segment_index_t k, segment_index_t first_block) {
    // Free the arrays
    // Segments at index >= first_block were allocated individually: walk down
    // from k, clearing each table slot before deallocating its storage.
    while( k > first_block ) {
        --k;
        segment_value_t segment_value = table[k].load<relaxed>();
        table[k].store<relaxed>(segment_not_used());
        if( segment_value == segment_allocated() ) // check for correct segment pointer
            this->my_allocator.deallocate( (segment_value.pointer<T>()), segment_size(k) );
    }
    // Segments [0, first_block) share one joined allocation whose pointer lives in
    // slot 0 and whose total size is segment_size(first_block).
    segment_value_t segment_value = table[0].load<relaxed>();
    if( segment_value == segment_allocated() ) {
        __TBB_ASSERT( first_block > 0, NULL );
        while(k > 0) table[--k].store<relaxed>(segment_not_used());
        this->my_allocator.deallocate( (segment_value.pointer<T>()), segment_size(first_block) );
    }
}
1213 
//! Get reference to element at given index; errors are reported via asserts only.
template<typename T, class A>
T& concurrent_vector<T, A>::internal_subscript( size_type index ) const {
    //TODO: unify both versions of internal_subscript
    __TBB_ASSERT( index < my_early_size, "index out of bounds" );
    size_type j = index;
    // segment_base_index_of returns the segment index and reduces j in place
    // to the offset within that segment (j is used as the final subscript below)
    segment_index_t k = segment_base_index_of( j );
    __TBB_ASSERT( my_segment.load<acquire>() != my_storage || k < pointers_per_short_table, "index is being allocated" );
    //no need in load with acquire (load<acquire>) since thread works in own space or gets
    //the information about added elements via some form of external synchronization
    //TODO: why not make a load of my_segment relaxed as well ?
    //TODO: add an assertion that my_segment[k] is properly aligned to please ITT
    segment_value_t segment_value = my_segment[k].template load<relaxed>();
    __TBB_ASSERT( segment_value != segment_allocation_failed(), "the instance is broken by bad allocation. Use at() instead" );
    __TBB_ASSERT( segment_value != segment_not_used(), "index is being allocated" );
    return (( segment_value.pointer<T>()))[j];
}
1230 
1231 template<typename T, class A>
1233  if( index >= my_early_size )
1234  internal::throw_exception(internal::eid_out_of_range); // throw std::out_of_range
1235  size_type j = index;
1236  segment_index_t k = segment_base_index_of( j );
1237  //TODO: refactor this condition into separate helper function, e.g. fits_into_small_table
1238  if( my_segment.load<acquire>() == my_storage && k >= pointers_per_short_table )
1239  internal::throw_exception(internal::eid_segment_range_error); // throw std::range_error
1240  // no need in load with acquire (load<acquire>) since thread works in own space or gets
1241  //the information about added elements via some form of external synchronization
1242  //TODO: why not make a load of my_segment relaxed as well ?
1243  //TODO: add an assertion that my_segment[k] is properly aligned to please ITT
1244  segment_value_t segment_value = my_segment[k].template load<relaxed>();
1245  enforce_segment_allocated(segment_value, internal::eid_index_range_error);
1246  return (segment_value.pointer<T>())[j];
1247 }
1248 
1249 template<typename T, class A> template<class I>
1251  __TBB_ASSERT(my_early_size == 0, NULL);
1252  size_type n = std::distance(first, last);
1253  if( !n ) return;
1254  internal_reserve(n, sizeof(T), max_size());
1255  my_early_size = n;
1256  segment_index_t k = 0;
1257  //TODO: unify segment iteration code with concurrent_base_v3::helper
1258  size_type sz = segment_size( my_first_block );
1259  while( sz < n ) {
1260  internal_loop_guide loop(sz, my_segment[k].template load<relaxed>().template pointer<void>());
1261  loop.iterate(first);
1262  n -= sz;
1263  if( !k ) k = my_first_block;
1264  else { ++k; sz <<= 1; }
1265  }
1266  internal_loop_guide loop(n, my_segment[k].template load<relaxed>().template pointer<void>());
1267  loop.iterate(first);
1268 }
1269 
1270 template<typename T, class A>
1271 void concurrent_vector<T, A>::initialize_array( void* begin, const void *, size_type n ) {
1272  internal_loop_guide loop(n, begin); loop.init();
1273 }
1274 
1275 template<typename T, class A>
1276 void concurrent_vector<T, A>::initialize_array_by( void* begin, const void *src, size_type n ) {
1277  internal_loop_guide loop(n, begin); loop.init(src);
1278 }
1279 
1280 template<typename T, class A>
1281 void concurrent_vector<T, A>::copy_array( void* dst, const void* src, size_type n ) {
1282  internal_loop_guide loop(n, dst); loop.copy(src);
1283 }
1284 
#if __TBB_CPP11_RVALUE_REF_PRESENT
//! Move-construct n elements from src into dst.
template<typename T, class A>
void concurrent_vector<T, A>::move_array( void* dst, const void* src, size_type n ) {
    internal_loop_guide loop(n, dst); loop.move_construct(src);
}
//! Move-assign n elements from src onto already-constructed elements in dst.
template<typename T, class A>
void concurrent_vector<T, A>::move_assign_array( void* dst, const void* src, size_type n ) {
    internal_loop_guide loop(n, dst); loop.move_assign(src);
}
#endif
1295 
#if __TBB_MOVE_IF_NOEXCEPT_PRESENT
//! Move-construct from src into dst when T's move cannot throw, otherwise copy-construct.
template<typename T, class A>
void concurrent_vector<T, A>::move_array_if_noexcept( void* dst, const void* src, size_type n ) {
    internal_loop_guide loop(n, dst); loop.move_construct_if_noexcept(src);
}
#endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT
1302 
1303 template<typename T, class A>
1304 template<typename I>
1305 void concurrent_vector<T, A>::copy_range( void* dst, const void* p_type_erased_iterator, size_type n ){
1306  I & iterator ((*const_cast<I*>(static_cast<const I*>(p_type_erased_iterator))));
1307  internal_loop_guide loop(n, dst); loop.iterate(iterator);
1308 }
1309 
1310 template<typename T, class A>
1311 void concurrent_vector<T, A>::assign_array( void* dst, const void* src, size_type n ) {
1312  internal_loop_guide loop(n, dst); loop.assign(src);
1313 }
1314 
1315 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1316  // Workaround for overzealous compiler warning
1317  #pragma warning (push)
1318  #pragma warning (disable: 4189)
1319 #endif
1320 template<typename T, class A>
1321 void concurrent_vector<T, A>::destroy_array( void* begin, size_type n ) {
1322  T* array = static_cast<T*>(begin);
1323  for( size_type j=n; j>0; --j )
1324  array[j-1].~T(); // destructors are supposed to not throw any exceptions
1325 }
1326 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1327  #pragma warning (pop)
1328 #endif // warning 4189 is back
1329 
1330 // concurrent_vector's template functions
1331 template<typename T, class A1, class A2>
1332 inline bool operator==(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b) {
1333  //TODO: call size() only once per vector (in operator==)
1334  // Simply: return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin());
1335  if(a.size() != b.size()) return false;
1336  typename concurrent_vector<T, A1>::const_iterator i(a.begin());
1337  typename concurrent_vector<T, A2>::const_iterator j(b.begin());
1338  for(; i != a.end(); ++i, ++j)
1339  if( !(*i == *j) ) return false;
1340  return true;
1341 }
1342 
//! Test vectors for inequality.
template<typename T, class A1, class A2>
inline bool operator!=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
{ return !(a == b); }

//! Lexicographical comparison: a < b.
template<typename T, class A1, class A2>
inline bool operator<(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
{ return (std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end())); }

//! Lexicographical comparison: a > b, expressed via operator<.
template<typename T, class A1, class A2>
inline bool operator>(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
{ return b < a; }

//! Lexicographical comparison: a <= b.
template<typename T, class A1, class A2>
inline bool operator<=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
{ return !(b < a); }

//! Lexicographical comparison: a >= b.
template<typename T, class A1, class A2>
inline bool operator>=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
{ return !(a < b); }

//! Swap contents of two vectors; free overload found by ADL for the swap idiom.
template<typename T, class A>
inline void swap(concurrent_vector<T, A> &a, concurrent_vector<T, A> &b)
{ a.swap( b ); }
1366 
1367 } // namespace tbb
1368 
1369 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1370  #pragma warning (pop)
1371 #endif // warning 4267,4127 are back
1372 
1373 #endif /* __TBB_concurrent_vector_H */
reverse_iterator rend()
reverse end iterator
Definition: concurrent_vector.h:960
~concurrent_vector()
Clear and destroy vector.
Definition: concurrent_vector.h:1029
iterator begin()
start iterator
Definition: concurrent_vector.h:946
reference front()
the first item
Definition: concurrent_vector.h:970
concurrent_vector & operator=(const concurrent_vector< T, M > &vector)
Assignment for vector with different allocator type.
Definition: concurrent_vector.h:768
concurrent_vector(I first, I last, const allocator_type &a=allocator_type())
Construction with copying iteration range and given allocator instance.
Definition: concurrent_vector.h:726
size_type size() const
Return size of vector. It may include elements under construction.
Definition: concurrent_vector.h:906
const_reverse_iterator rend() const
reverse end const iterator
Definition: concurrent_vector.h:964
const_iterator cbegin() const
start const iterator
Definition: concurrent_vector.h:954
void assign(I first, I last)
assign range [first, last)
Definition: concurrent_vector.h:1002
concurrent_vector & operator=(const concurrent_vector &vector)
Assignment.
Definition: concurrent_vector.h:740
void reserve(size_type n)
Allocate enough space to grow to size n without having to allocate more memory later.
Definition: concurrent_vector.h:920
allocator_type get_allocator() const
return allocator object
Definition: concurrent_vector.h:992
iterator push_back(const_reference item)
Push item.
Definition: concurrent_vector.h:842
const_reverse_iterator rbegin() const
reverse start const iterator
Definition: concurrent_vector.h:962
reverse_iterator rbegin()
reverse start iterator
Definition: concurrent_vector.h:958
size_type capacity() const
Maximum size to which array can grow without allocating more memory. Concurrent allocations are not included in the returned value.
Definition: concurrent_vector.h:915
concurrent_vector(const concurrent_vector< T, M > &vector, const allocator_type &a=allocator_type())
Copying constructor for vector with different allocator type.
Definition: concurrent_vector.h:684
Acquire.
Definition: atomic.h:47
const_reverse_iterator crbegin() const
reverse start const iterator
Definition: concurrent_vector.h:966
iterator grow_by(I first, I last)
Returns iterator pointing to the first new element.
Definition: concurrent_vector.h:801
const_reference back() const
the last item const
Definition: concurrent_vector.h:987
A range over which to iterate.
Definition: blocked_range.h:40
concurrent_vector(size_type n, const_reference t, const allocator_type &a=allocator_type())
Construction with initial size specified by argument n, initialization by copying of t...
Definition: concurrent_vector.h:711
concurrent_vector(const allocator_type &a=allocator_type())
Construct empty vector.
Definition: concurrent_vector.h:612
reference at(size_type index)
Get reference to element at given index. Throws exceptions on errors.
Definition: concurrent_vector.h:883
reference operator[](size_type index)
Get reference to element at given index.
Definition: concurrent_vector.h:873
void shrink_to_fit()
Optimize memory usage and fragmentation.
Definition: concurrent_vector.h:1174
concurrent_vector(const concurrent_vector &vector, const allocator_type &a=allocator_type())
Copying constructor.
Definition: concurrent_vector.h:638
const_range_type range(size_t grainsize=1) const
Get const range for iterating with parallel algorithms.
Definition: concurrent_vector.h:898
const_iterator begin() const
start const iterator
Definition: concurrent_vector.h:950
size_type max_size() const
Upper bound on argument to reserve.
Definition: concurrent_vector.h:939
No ordering.
Definition: atomic.h:51
Concurrent vector container.
Definition: concurrent_vector.h:73
iterator end()
end iterator
Definition: concurrent_vector.h:948
const_reference front() const
the first item const
Definition: concurrent_vector.h:976
const_reference operator[](size_type index) const
Get const reference to element at given index.
Definition: concurrent_vector.h:878
reference back()
the last item
Definition: concurrent_vector.h:982
const_iterator end() const
end const iterator
Definition: concurrent_vector.h:952
Primary template for atomic.
Definition: atomic.h:405
Definition: _flow_graph_async_msg_impl.h:32
void assign(size_type n, const_reference t)
assign n items by copying t item
Definition: concurrent_vector.h:995
iterator grow_by(size_type delta)
Grow by "delta" elements.
Definition: concurrent_vector.h:789
void resize(size_type n, const_reference t)
Resize the vector, copy t for new elements. Not thread-safe.
Definition: concurrent_vector.h:931
const_iterator cend() const
end const iterator
Definition: concurrent_vector.h:956
void resize(size_type n)
Resize the vector. Not thread-safe.
Definition: concurrent_vector.h:926
bool empty() const
Return false if vector is not empty or has elements under construction at least.
Definition: concurrent_vector.h:912
const_reference at(size_type index) const
Get const reference to element at given index. Throws exceptions on errors.
Definition: concurrent_vector.h:888
void clear()
Clear container while keeping memory allocated.
Definition: concurrent_vector.h:1024
void swap(concurrent_vector &vector)
swap two instances
Definition: concurrent_vector.h:1014
The namespace tbb contains all components of the library.
Definition: parallel_for.h:44
const_reverse_iterator crend() const
reverse end const iterator
Definition: concurrent_vector.h:968
iterator grow_to_at_least(size_type n, const_reference t)
Analogous to grow_to_at_least( size_type n ) with exception that the new elements are initialized by copying of t.
Definition: concurrent_vector.h:831
concurrent_vector(size_type n)
Construction with initial size specified by argument n.
Definition: concurrent_vector.h:698
iterator grow_to_at_least(size_type n)
Append minimal sequence of elements such that size()>=n.
Definition: concurrent_vector.h:820
range_type range(size_t grainsize=1)
Get range for iterating with parallel algorithms.
Definition: concurrent_vector.h:893
Specialization for atomic<void*>, for sake of not allowing arithmetic or operator->.
Definition: atomic.h:501
iterator grow_by(size_type delta, const_reference t)
Grow by "delta" elements using copying constructor.
Definition: concurrent_vector.h:795