BRE12
_concurrent_unordered_impl.h
1 /*
2  Copyright 2005-2016 Intel Corporation. All Rights Reserved.
3 
4  This file is part of Threading Building Blocks. Threading Building Blocks is free software;
5  you can redistribute it and/or modify it under the terms of the GNU General Public License
6  version 2 as published by the Free Software Foundation. Threading Building Blocks is
7  distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
8  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
9  See the GNU General Public License for more details. You should have received a copy of
10  the GNU General Public License along with Threading Building Blocks; if not, write to the
11  Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
12 
13  As a special exception, you may use this file as part of a free software library without
14  restriction. Specifically, if other files instantiate templates or use macros or inline
15  functions from this file, or you compile this file and link it with other files to produce
16  an executable, this file does not by itself cause the resulting executable to be covered
17  by the GNU General Public License. This exception does not however invalidate any other
18  reasons why the executable file might be covered by the GNU General Public License.
19 */
20 
21 /* Container implementations in this header are based on PPL implementations
22  provided by Microsoft. */
23 
24 #ifndef __TBB__concurrent_unordered_impl_H
25 #define __TBB__concurrent_unordered_impl_H
26 #if !defined(__TBB_concurrent_unordered_map_H) && !defined(__TBB_concurrent_unordered_set_H) && !defined(__TBB_concurrent_hash_map_H)
27 #error Do not #include this internal file directly; use public TBB headers instead.
28 #endif
29 
30 #include "../tbb_stddef.h"
31 
32 #if !TBB_USE_EXCEPTIONS && _MSC_VER
33  // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
34  #pragma warning (push)
35  #pragma warning (disable: 4530)
36 #endif
37 
38 #include <iterator>
39 #include <utility> // Need std::pair
40 #include <functional> // Need std::equal_to (in ../concurrent_unordered_*.h)
41 #include <string> // For tbb_hasher
42 #include <cstring> // Need std::memset
43 #include <algorithm> // Need std::swap
44 
45 #if !TBB_USE_EXCEPTIONS && _MSC_VER
46  #pragma warning (pop)
47 #endif
48 
49 #include "../atomic.h"
50 #include "../tbb_exception.h"
51 #include "../tbb_allocator.h"
52 
53 #if __TBB_INITIALIZER_LISTS_PRESENT
54  #include <initializer_list>
55 #endif
56 
57 #include "_tbb_hash_compare_impl.h"
58 
59 namespace tbb {
60 namespace interface5 {
62 namespace internal {
63 
// Forward declarations: the iterator classes below befriend these containers,
// which construct iterators from raw node pointers via protected constructors.
template <typename T, typename Allocator>
class split_ordered_list;
template <typename Traits>
class concurrent_unordered_base;
68 
69 // Forward list iterators (without skipping dummy elements)
// Raw forward-list iterator that visits every node in the split-ordered list,
// including the dummy bucket-marker nodes interleaved with the user elements.
// (solist_iterator below is the public-facing variant that skips dummies.)
// NOTE(review): std::iterator as a base is deprecated since C++17; kept here
// for compatibility with the compilers this header targets.
template<class Solist, typename Value>
class flist_iterator : public std::iterator<std::forward_iterator_tag, Value>
{
    // The containers and the sibling instantiation (iterator <-> const_iterator)
    // need access to the protected node-pointer constructor and my_node_ptr.
    template <typename T, typename Allocator>
    friend class split_ordered_list;
    template <typename Traits>
    friend class concurrent_unordered_base;
    template<class M, typename V>
    friend class flist_iterator;

    typedef typename Solist::nodeptr_t nodeptr_t;
public:
    typedef typename Solist::value_type value_type;
    typedef typename Solist::difference_type difference_type;
    typedef typename Solist::pointer pointer;
    typedef typename Solist::reference reference;

    // Default-constructed iterator holds a null node pointer (an "end"-like value).
    flist_iterator() : my_node_ptr(0) {}
    // Converting constructor: enables implicit iterator -> const_iterator conversion.
    flist_iterator( const flist_iterator<Solist, typename Solist::value_type> &other )
        : my_node_ptr(other.my_node_ptr) {}

    reference operator*() const { return my_node_ptr->my_element; }
    pointer operator->() const { return &**this; }

    // Pre-increment: advance to the next node, dummy or real.
    flist_iterator& operator++() {
        my_node_ptr = my_node_ptr->my_next;
        return *this;
    }

    // Post-increment: returns the pre-advance position.
    flist_iterator operator++(int) {
        flist_iterator tmp = *this;
        ++*this;
        return tmp;
    }

protected:
    // Wrap a raw node pointer; only the friends declared above may do this.
    flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {}
    nodeptr_t get_node_ptr() const { return my_node_ptr; }

    nodeptr_t my_node_ptr; // Current node; NULL designates the end of the list

    template<typename M, typename T, typename U>
    friend bool operator==( const flist_iterator<M,T> &i, const flist_iterator<M,U> &j );
    template<typename M, typename T, typename U>
    friend bool operator!=( const flist_iterator<M,T>& i, const flist_iterator<M,U>& j );
};
116 
117 template<typename Solist, typename T, typename U>
118 bool operator==( const flist_iterator<Solist,T> &i, const flist_iterator<Solist,U> &j ) {
119  return i.my_node_ptr == j.my_node_ptr;
120 }
121 template<typename Solist, typename T, typename U>
122 bool operator!=( const flist_iterator<Solist,T>& i, const flist_iterator<Solist,U>& j ) {
123  return i.my_node_ptr != j.my_node_ptr;
124 }
125 
126 // Split-order list iterators, needed to skip dummy elements
127 template<class Solist, typename Value>
128 class solist_iterator : public flist_iterator<Solist, Value>
129 {
130  typedef flist_iterator<Solist, Value> base_type;
131  typedef typename Solist::nodeptr_t nodeptr_t;
132  using base_type::get_node_ptr;
133  template <typename T, typename Allocator>
134  friend class split_ordered_list;
135  template<class M, typename V>
136  friend class solist_iterator;
137  template<typename M, typename T, typename U>
138  friend bool operator==( const solist_iterator<M,T> &i, const solist_iterator<M,U> &j );
139  template<typename M, typename T, typename U>
140  friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );
141 
142  const Solist *my_list_ptr;
143  solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {}
144 
145 public:
146  typedef typename Solist::value_type value_type;
147  typedef typename Solist::difference_type difference_type;
148  typedef typename Solist::pointer pointer;
149  typedef typename Solist::reference reference;
150 
151  solist_iterator() {}
152  solist_iterator(const solist_iterator<Solist, typename Solist::value_type> &other )
153  : base_type(other), my_list_ptr(other.my_list_ptr) {}
154 
155  reference operator*() const {
156  return this->base_type::operator*();
157  }
158 
159  pointer operator->() const {
160  return (&**this);
161  }
162 
163  solist_iterator& operator++() {
164  do ++(*(base_type *)this);
165  while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
166 
167  return (*this);
168  }
169 
170  solist_iterator operator++(int) {
171  solist_iterator tmp = *this;
172  do ++*this;
173  while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
174 
175  return (tmp);
176  }
177 };
178 
179 template<typename Solist, typename T, typename U>
180 bool operator==( const solist_iterator<Solist,T> &i, const solist_iterator<Solist,U> &j ) {
181  return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr;
182 }
183 template<typename Solist, typename T, typename U>
184 bool operator!=( const solist_iterator<Solist,T>& i, const solist_iterator<Solist,U>& j ) {
185  return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr;
186 }
187 
// Split-order key type. Real elements carry keys with the low bit set,
// while dummy bucket-marker nodes have the low bit clear (see node::is_dummy).
typedef size_t sokey_t;
190 
191 
// Forward list whose nodes are kept sorted by split-order key (sokey_t).
// Insertion is performed with a lock-free compare-and-swap on the predecessor's
// next pointer; erasure is NOT safe to run concurrently with other operations
// (see the comment inside insert_dummy). Dummy nodes (even order keys) are the
// per-bucket anchors used by the hash table built on top of this list.
template <typename T, typename Allocator>
class split_ordered_list
{
public:
    typedef split_ordered_list<T, Allocator> self_type;
    typedef typename Allocator::template rebind<T>::other allocator_type;
    struct node;
    typedef node *nodeptr_t;

    typedef typename allocator_type::size_type size_type;
    typedef typename allocator_type::difference_type difference_type;
    typedef typename allocator_type::pointer pointer;
    typedef typename allocator_type::const_pointer const_pointer;
    typedef typename allocator_type::reference reference;
    typedef typename allocator_type::const_reference const_reference;
    typedef typename allocator_type::value_type value_type;

    // "raw" iterators visit every node, including dummies; the plain iterators
    // skip dummies and are the ones exposed to users of the hash table.
    typedef solist_iterator<self_type, const value_type> const_iterator;
    typedef solist_iterator<self_type, value_type> iterator;
    typedef flist_iterator<self_type, const value_type> raw_const_iterator;
    typedef flist_iterator<self_type, value_type> raw_iterator;

    // Node that holds the element in a split-ordered list
    struct node : tbb::internal::no_assign
    {
    private:
        // Declared but never defined: suppresses default-constructor generation
        // for compilers that try to generate default constructors though they are not needed.
        node(); // VS 2008, 2010, 2012
    public:
        // Initialize the node with the given order key.
        // Note: my_element is deliberately NOT touched here; for dummy nodes it
        // is never constructed at all (see create_node/destroy_node).
        void init(sokey_t order_key) {
            my_order_key = order_key;
            my_next = NULL;
        }

        // Return the order key (needed for hashing)
        sokey_t get_order_key() const { // TODO: remove
            return my_order_key;
        }

        // Atomically link new_node after this node, but only if my_next still
        // equals current_node (the value the caller cached before preparing
        // new_node). Returns new_node on success, or the "interfering" node
        // another thread installed concurrently on failure.
        nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node)
        {
            // Try to change the next pointer on the current element to a new element, only if it still points to the cached next
            nodeptr_t exchange_node = tbb::internal::as_atomic(my_next).compare_and_swap(new_node, current_node);

            if (exchange_node == current_node) // CAS succeeded iff the observed value matched the expected one
            {
                // Operation succeeded, return the new node
                return new_node;
            }
            else
            {
                // Operation failed, return the "interfering" node
                return exchange_node;
            }
        }

        // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets
        // in the hash table to quickly index into the right subsection of the split-ordered list.
        // Dummy keys are even (low bit clear); real element keys are odd.
        bool is_dummy() const {
            return (my_order_key & 0x1) == 0;
        }


        nodeptr_t my_next;     // Next element in the list
        value_type my_element; // Element storage (constructed only for non-dummy nodes)
        sokey_t my_order_key;  // Order key for this element
    };

    // Allocate a new node with the given order key; used to allocate dummy nodes.
    // my_element is left unconstructed -- destroy_node() correspondingly skips
    // destruction for dummy nodes.
    nodeptr_t create_node(sokey_t order_key) {
        nodeptr_t pnode = my_node_allocator.allocate(1);
        pnode->init(order_key);
        return (pnode);
    }

    // Allocate a new node with the given order key and construct the element
    // from t. If the element constructor throws, the raw memory is returned to
    // the allocator and the exception is rethrown (strong guarantee).
    template<typename Arg>
    nodeptr_t create_node(sokey_t order_key, __TBB_FORWARDING_REF(Arg) t){
        nodeptr_t pnode = my_node_allocator.allocate(1);

        //TODO: use RAII scoped guard instead of explicit catch
        __TBB_TRY {
            new(static_cast<void*>(&pnode->my_element)) T(tbb::internal::forward<Arg>(t));
            pnode->init(order_key);
        } __TBB_CATCH(...) {
            my_node_allocator.deallocate(pnode, 1);
            __TBB_RETHROW();
        }

        return (pnode);
    }

    // Allocate a new node, constructing the element in place from args.
    // NOTE: unlike the overloads above, init() is NOT called here, so the
    // caller is responsible for setting my_order_key and my_next before the
    // node is linked into the list.
    template<typename __TBB_PARAMETER_PACK Args>
    nodeptr_t create_node_v( __TBB_FORWARDING_REF(Args) __TBB_PARAMETER_PACK args){
        nodeptr_t pnode = my_node_allocator.allocate(1);

        //TODO: use RAII scoped guard instead of explicit catch
        __TBB_TRY {
            new(static_cast<void*>(&pnode->my_element)) T(__TBB_PACK_EXPANSION(tbb::internal::forward<Args>(args)));
        } __TBB_CATCH(...) {
            my_node_allocator.deallocate(pnode, 1);
            __TBB_RETHROW();
        }

        return (pnode);
    }

    split_ordered_list(allocator_type a = allocator_type())
        : my_node_allocator(a), my_element_count(0)
    {
        // Immediately allocate a dummy node with order key of 0. This node
        // will always be the head of the list.
        my_head = create_node(sokey_t(0));
    }

    ~split_ordered_list()
    {
        // Clear the list
        clear();

        // Remove the head element which is not cleared by clear()
        nodeptr_t pnode = my_head;
        my_head = NULL;

        __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node");

        destroy_node(pnode);
    }

    // Common forward list functions

    allocator_type get_allocator() const {
        return (my_node_allocator);
    }

    // Destroy every node after the head (dummies included) and reset the
    // element count. The head dummy itself survives; only the destructor
    // releases it. Not safe to call concurrently with other operations.
    void clear() {
        nodeptr_t pnext;
        nodeptr_t pnode = my_head;

        __TBB_ASSERT(my_head != NULL, "Invalid head list node");
        // Detach the chain from the head first, then walk and destroy it.
        pnext = pnode->my_next;
        pnode->my_next = NULL;
        pnode = pnext;

        while (pnode != NULL)
        {
            pnext = pnode->my_next;
            destroy_node(pnode);
            pnode = pnext;
        }

        my_element_count = 0;
    }

    // Returns a first non-dummy element in the SOL
    iterator begin() {
        return first_real_iterator(raw_begin());
    }

    // Returns a first non-dummy element in the SOL
    const_iterator begin() const {
        return first_real_iterator(raw_begin());
    }

    iterator end() {
        return (iterator(0, this));
    }

    const_iterator end() const {
        return (const_iterator(0, this));
    }

    const_iterator cbegin() const {
        // Force the const overload of begin()
        return (((const self_type *)this)->begin());
    }

    const_iterator cend() const {
        // Force the const overload of end()
        return (((const self_type *)this)->end());
    }

    // Checks if the number of elements (non-dummy) is 0
    bool empty() const {
        return (my_element_count == 0);
    }

    // Returns the number of non-dummy elements in the list
    size_type size() const {
        return my_element_count;
    }

    // Returns the maximum size of the list, determined by the allocator
    size_type max_size() const {
        return my_node_allocator.max_size();
    }

    // Swaps 'this' list with the passed in one (head pointer and element
    // count only; allocators are not exchanged). Not concurrency-safe.
    void swap(self_type& other)
    {
        if (this == &other)
        {
            // Nothing to do
            return;
        }

        std::swap(my_element_count, other.my_element_count);
        std::swap(my_head, other.my_head);
    }

    // Split-order list functions

    // Returns a first element in the SOL, which is always a dummy
    raw_iterator raw_begin() {
        return raw_iterator(my_head);
    }

    // Returns a first element in the SOL, which is always a dummy
    raw_const_iterator raw_begin() const {
        return raw_const_iterator(my_head);
    }

    raw_iterator raw_end() {
        return raw_iterator(0);
    }

    raw_const_iterator raw_end() const {
        return raw_const_iterator(0);
    }

    static sokey_t get_order_key(const raw_const_iterator& it) {
        return it.get_node_ptr()->get_order_key();
    }

    // Like get_order_key, but maps the end iterator (null node) to the
    // maximum possible key so comparisons against it behave sensibly.
    static sokey_t get_safe_order_key(const raw_const_iterator& it) {
        if( !it.get_node_ptr() ) return ~sokey_t(0);
        return it.get_node_ptr()->get_order_key();
    }

    // Returns a public iterator version of the internal iterator. Public iterator must not
    // be a dummy private iterator.
    iterator get_iterator(raw_iterator it) {
        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
        return iterator(it.get_node_ptr(), this);
    }

    // Returns a public iterator version of the internal iterator. Public iterator must not
    // be a dummy private iterator.
    const_iterator get_iterator(raw_const_iterator it) const {
        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
        return const_iterator(it.get_node_ptr(), this);
    }

    // Returns a non-const version of the raw_iterator
    raw_iterator get_iterator(raw_const_iterator it) {
        return raw_iterator(it.get_node_ptr());
    }

    // Returns a non-const version of the iterator
    static iterator get_iterator(const_iterator it) {
        return iterator(it.my_node_ptr, it.my_list_ptr);
    }

    // Returns a public iterator version of a first non-dummy internal iterator at or after
    // the passed in internal iterator.
    iterator first_real_iterator(raw_iterator it)
    {
        // Skip all dummy, internal only iterators
        while (it != raw_end() && it.get_node_ptr()->is_dummy())
            ++it;

        return iterator(it.get_node_ptr(), this);
    }

    // Returns a public iterator version of a first non-dummy internal iterator at or after
    // the passed in internal iterator.
    const_iterator first_real_iterator(raw_const_iterator it) const
    {
        // Skip all dummy, internal only iterators
        while (it != raw_end() && it.get_node_ptr()->is_dummy())
            ++it;

        return const_iterator(it.get_node_ptr(), this);
    }

    // Erase an element using the allocator. Dummy nodes never had their
    // element constructed, so only non-dummies are destroyed before the
    // memory is deallocated.
    void destroy_node(nodeptr_t pnode) {
        if (!pnode->is_dummy()) my_node_allocator.destroy(pnode);
        my_node_allocator.deallocate(pnode, 1);
    }

    // Try to insert a new element in the list between previous and current_node.
    // If insert fails (another thread changed previous->my_next first), return
    // the node that was inserted instead.
    static nodeptr_t try_insert_atomic(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {
        new_node->my_next = current_node;
        return previous->atomic_set_next(new_node, current_node);
    }

    // Insert a new element between passed in iterators.
    // On success *new_count receives the element count reported by the atomic
    // fetch_and_increment (i.e., the value observed before this insertion);
    // on failure returns (end(), false) and pnode is left unlinked.
    std::pair<iterator, bool> try_insert(raw_iterator it, raw_iterator next, nodeptr_t pnode, size_type *new_count)
    {
        nodeptr_t inserted_node = try_insert_atomic(it.get_node_ptr(), pnode, next.get_node_ptr());

        if (inserted_node == pnode)
        {
            // If the insert succeeded, check that the order is correct and increment the element count
            check_range(it, next);
            *new_count = tbb::internal::as_atomic(my_element_count).fetch_and_increment();
            return std::pair<iterator, bool>(iterator(pnode, this), true);
        }
        else
        {
            return std::pair<iterator, bool>(end(), false);
        }
    }

    // Insert a new dummy element, starting search at a parent dummy element.
    // Returns an iterator to the dummy with the requested order key -- either
    // the newly created one or one that another thread inserted concurrently.
    raw_iterator insert_dummy(raw_iterator it, sokey_t order_key)
    {
        raw_iterator last = raw_end();
        raw_iterator where = it;

        __TBB_ASSERT(where != last, "Invalid head node");

        ++where;

        // Create a dummy element up front, even though it may be discarded (due to concurrent insertion)
        nodeptr_t dummy_node = create_node(order_key);

        for (;;)
        {
            __TBB_ASSERT(it != last, "Invalid head list node");

            // If the head iterator is at the end of the list, or past the point where this dummy
            // node needs to be inserted, then try to insert it.
            if (where == last || get_order_key(where) > order_key)
            {
                __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list");

                // Try to insert it in the right place
                nodeptr_t inserted_node = try_insert_atomic(it.get_node_ptr(), dummy_node, where.get_node_ptr());

                if (inserted_node == dummy_node)
                {
                    // Insertion succeeded, check the list for order violations
                    check_range(it, where);
                    return raw_iterator(dummy_node);
                }
                else
                {
                    // Insertion failed: either dummy node was inserted by another thread, or
                    // a real element was inserted at exactly the same place as dummy node.
                    // Proceed with the search from the previous location where order key was
                    // known to be larger (note: this is legal only because there is no safe
                    // concurrent erase operation supported).
                    where = it;
                    ++where;
                    continue;
                }
            }
            else if (get_order_key(where) == order_key)
            {
                // Another dummy node with the same value found, discard the new one.
                destroy_node(dummy_node);
                return where;
            }

            // Move the iterator forward
            it = where;
            ++where;
        }

    }

    // This erase function can handle both real and dummy nodes.
    // 'where' is advanced past the erased node as a side effect.
    // Not safe to run concurrently with any other list operation.
    void erase_node(raw_iterator previous, raw_const_iterator& where)
    {
        nodeptr_t pnode = (where++).get_node_ptr();
        nodeptr_t prevnode = previous.get_node_ptr();
        __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators");
        prevnode->my_next = pnode->my_next;

        destroy_node(pnode);
    }

    // Erase the element (previous node needs to be passed because this is a forward only list).
    // Returns an iterator to the first real element at or after the erased position.
    // Note the plain (non-atomic) decrement: erase is not concurrency-safe by design.
    iterator erase_node(raw_iterator previous, const_iterator where)
    {
        raw_const_iterator it = where;
        erase_node(previous, it);
        my_element_count--;

        return get_iterator(first_real_iterator(it));
    }

    // Move all elements from the passed in split-ordered list to this one.
    // Each source node (dummy or real) is re-created in this list and then
    // erased from the source. Appending starts right after my_head and each
    // CAS expects the predecessor's next to be NULL, so this list must be
    // empty past its head (asserted via "Insertion must succeed").
    void move_all(self_type& source)
    {
        raw_const_iterator first = source.raw_begin();
        raw_const_iterator last = source.raw_end();

        if (first == last)
            return;

        nodeptr_t previous_node = my_head;
        // Keep an iterator to the source head: every erase below removes the
        // node immediately following it.
        raw_const_iterator begin_iterator = first++;

        // Move all elements one by one, including dummy ones
        for (raw_const_iterator it = first; it != last;)
        {
            nodeptr_t pnode = it.get_node_ptr();

            // (Despite the name, dummy_node holds the copy of a real element too.)
            nodeptr_t dummy_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element);
            previous_node = try_insert_atomic(previous_node, dummy_node, NULL);
            __TBB_ASSERT(previous_node != NULL, "Insertion must succeed");
            raw_const_iterator where = it++;
            source.erase_node(get_iterator(begin_iterator), where);
        }
        check_range();
    }


private:
    //Need to setup private fields of split_ordered_list in move constructor and assignment of concurrent_unordered_base
    template <typename Traits>
    friend class concurrent_unordered_base;

    // Check the list for order violations (debug builds only): every node's
    // order key must be <= its successor's.
    void check_range( raw_iterator first, raw_iterator last )
    {
#if TBB_USE_ASSERT
        for (raw_iterator it = first; it != last; ++it)
        {
            raw_iterator next = it;
            ++next;

            __TBB_ASSERT(next == raw_end() || get_order_key(next) >= get_order_key(it), "!!! List order inconsistency !!!");
        }
#else
        tbb::internal::suppress_unused_warning(first, last);
#endif
    }
    // Check the entire list for order violations (debug builds only).
    void check_range()
    {
#if TBB_USE_ASSERT
        check_range( raw_begin(), raw_end() );
#endif
    }

    typename allocator_type::template rebind<node>::other my_node_allocator; // allocator object for nodes
    size_type my_element_count;                                              // Total item count, not counting dummy nodes
    nodeptr_t my_head;                                                       // pointer to head node (always a dummy with key 0)
};
647 
648 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
649 #pragma warning(push)
650 #pragma warning(disable: 4127) // warning C4127: conditional expression is constant
651 #endif
652 
653 template <typename Traits>
654 class concurrent_unordered_base : public Traits
655 {
656 protected:
657  // Type definitions
658  typedef concurrent_unordered_base<Traits> self_type;
659  typedef typename Traits::value_type value_type;
660  typedef typename Traits::key_type key_type;
661  typedef typename Traits::hash_compare hash_compare;
662  typedef typename Traits::value_compare value_compare;
663  typedef typename Traits::allocator_type allocator_type;
664  typedef typename hash_compare::hasher hasher;
665  typedef typename hash_compare::key_equal key_equal;
666  typedef typename allocator_type::pointer pointer;
667  typedef typename allocator_type::const_pointer const_pointer;
668  typedef typename allocator_type::reference reference;
669  typedef typename allocator_type::const_reference const_reference;
670  typedef typename allocator_type::size_type size_type;
671  typedef typename allocator_type::difference_type difference_type;
672  typedef split_ordered_list<value_type, typename Traits::allocator_type> solist_t;
673  typedef typename solist_t::nodeptr_t nodeptr_t;
674  // Iterators that walk the entire split-order list, including dummy nodes
675  typedef typename solist_t::raw_iterator raw_iterator;
676  typedef typename solist_t::raw_const_iterator raw_const_iterator;
677  typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets
678  typedef typename solist_t::const_iterator const_iterator;
679  typedef iterator local_iterator;
680  typedef const_iterator const_local_iterator;
681  using Traits::my_hash_compare;
682  using Traits::get_key;
683  using Traits::allow_multimapping;
684 
685  static const size_type initial_bucket_number = 8; // Initial number of buckets
686 private:
687  typedef std::pair<iterator, iterator> pairii_t;
688  typedef std::pair<const_iterator, const_iterator> paircc_t;
689 
690  static size_type const pointers_per_table = sizeof(size_type) * 8; // One bucket segment per bit
691  static const size_type initial_bucket_load = 4; // Initial maximum number of elements per bucket
692 
693  struct call_internal_clear_on_exit{
694  concurrent_unordered_base* my_instance;
695  call_internal_clear_on_exit(concurrent_unordered_base* instance) : my_instance(instance) {}
696  void dismiss(){ my_instance = NULL;}
697  ~call_internal_clear_on_exit(){
698  if (my_instance){
699  my_instance->internal_clear();
700  }
701  }
702  };
703 protected:
704  // Constructors/Destructors
705  concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number,
706  const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type())
707  : Traits(hc), my_solist(a),
708  my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load)
709  {
710  if( n_of_buckets == 0) ++n_of_buckets;
711  my_number_of_buckets = size_type(1)<<__TBB_Log2((uintptr_t)n_of_buckets*2-1); // round up to power of 2
712  internal_init();
713  }
714 
715  concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a)
716  : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
717  {
718  internal_init();
719  internal_copy(right);
720  }
721 
722  concurrent_unordered_base(const concurrent_unordered_base& right)
723  : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
724  {
725  //FIXME:exception safety seems to be broken here
726  internal_init();
727  internal_copy(right);
728  }
729 
730 #if __TBB_CPP11_RVALUE_REF_PRESENT
731  concurrent_unordered_base(concurrent_unordered_base&& right)
732  : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
733  {
734  internal_init();
735  swap(right);
736  }
737 
738  concurrent_unordered_base(concurrent_unordered_base&& right, const allocator_type& a)
739  : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
740  {
741  call_internal_clear_on_exit clear_buckets_on_exception(this);
742 
743  internal_init();
744  if (a == right.get_allocator()){
745  this->swap(right);
746  }else{
747  my_maximum_bucket_size = right.my_maximum_bucket_size;
748  my_number_of_buckets = right.my_number_of_buckets;
749  my_solist.my_element_count = right.my_solist.my_element_count;
750 
751  if (! right.my_solist.empty()){
752  nodeptr_t previous_node = my_solist.my_head;
753 
754  // Move all elements one by one, including dummy ones
755  for (raw_const_iterator it = ++(right.my_solist.raw_begin()), last = right.my_solist.raw_end(); it != last; ++it)
756  {
757  const nodeptr_t pnode = it.get_node_ptr();
758  nodeptr_t node;
759  if (pnode->is_dummy()) {
760  node = my_solist.create_node(pnode->get_order_key());
761  size_type bucket = __TBB_ReverseBits(pnode->get_order_key()) % my_number_of_buckets;
762  set_bucket(bucket, node);
763  }else{
764  node = my_solist.create_node(pnode->get_order_key(), std::move(pnode->my_element));
765  }
766 
767  previous_node = my_solist.try_insert_atomic(previous_node, node, NULL);
768  __TBB_ASSERT(previous_node != NULL, "Insertion of node failed. Concurrent inserts in constructor ?");
769  }
770  my_solist.check_range();
771  }
772  }
773 
774  clear_buckets_on_exception.dismiss();
775  }
776 
777 #endif // __TBB_CPP11_RVALUE_REF_PRESENT
778 
779  concurrent_unordered_base& operator=(const concurrent_unordered_base& right) {
780  if (this != &right)
781  internal_copy(right);
782  return (*this);
783  }
784 
785 #if __TBB_CPP11_RVALUE_REF_PRESENT
786  concurrent_unordered_base& operator=(concurrent_unordered_base&& other)
787  {
788  if(this != &other){
789  typedef typename tbb::internal::allocator_traits<allocator_type>::propagate_on_container_move_assignment pocma_t;
790  if(pocma_t::value || this->my_allocator == other.my_allocator) {
791  concurrent_unordered_base trash (std::move(*this));
792  swap(other);
793  if (pocma_t::value) {
794  using std::swap;
795  //TODO: swapping allocators here may be a problem, replace with single direction moving
796  swap(this->my_solist.my_node_allocator, other.my_solist.my_node_allocator);
797  swap(this->my_allocator, other.my_allocator);
798  }
799  } else {
800  concurrent_unordered_base moved_copy(std::move(other),this->my_allocator);
801  this->swap(moved_copy);
802  }
803  }
804  return *this;
805  }
806 
807 #endif // __TBB_CPP11_RVALUE_REF_PRESENT
808 
809 #if __TBB_INITIALIZER_LISTS_PRESENT
810  concurrent_unordered_base& operator=(std::initializer_list<value_type> il)
812  {
813  this->clear();
814  this->insert(il.begin(),il.end());
815  return (*this);
816  }
817 #endif // __TBB_INITIALIZER_LISTS_PRESENT
818 
819 
820  ~concurrent_unordered_base() {
821  // Delete all node segments
822  internal_clear();
823  }
824 
825 public:
826  allocator_type get_allocator() const {
827  return my_solist.get_allocator();
828  }
829 
830  // Size and capacity function
831  bool empty() const {
832  return my_solist.empty();
833  }
834 
835  size_type size() const {
836  return my_solist.size();
837  }
838 
839  size_type max_size() const {
840  return my_solist.max_size();
841  }
842 
843  // Iterators
844  iterator begin() {
845  return my_solist.begin();
846  }
847 
848  const_iterator begin() const {
849  return my_solist.begin();
850  }
851 
852  iterator end() {
853  return my_solist.end();
854  }
855 
856  const_iterator end() const {
857  return my_solist.end();
858  }
859 
860  const_iterator cbegin() const {
861  return my_solist.cbegin();
862  }
863 
864  const_iterator cend() const {
865  return my_solist.cend();
866  }
867 
    // Parallel traversal support
    //
    // A splittable range over the split-ordered list for use with TBB parallel
    // algorithms. Splitting happens at the dummy node of a hash bucket whose
    // reversed index lies between the range's begin and end order keys, so each
    // sub-range remains a contiguous slice of the list.
    class const_range_type : tbb::internal::no_assign {
        const concurrent_unordered_base &my_table;
        raw_const_iterator my_begin_node;               // first node covered by this range
        raw_const_iterator my_end_node;                 // one past the last node covered
        mutable raw_const_iterator my_midpoint_node;    // cached split point, recomputed by set_midpoint()
    public:
        //! Standard range typedefs
        typedef typename concurrent_unordered_base::size_type size_type;
        typedef typename concurrent_unordered_base::value_type value_type;
        typedef typename concurrent_unordered_base::reference reference;
        typedef typename concurrent_unordered_base::difference_type difference_type;
        typedef typename concurrent_unordered_base::const_iterator iterator;

        //! True if the range contains no nodes.
        bool empty() const {return my_begin_node == my_end_node;}

        //! True if the range can be partitioned into two non-empty subranges.
        bool is_divisible() const {
            return my_midpoint_node != my_end_node;
        }
        //! Splitting constructor: this range takes [midpoint, end), r keeps [begin, midpoint).
        const_range_type( const_range_type &r, split ) :
            my_table(r.my_table), my_end_node(r.my_end_node)
        {
            r.my_end_node = my_begin_node = r.my_midpoint_node;
            __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
            __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
            set_midpoint();
            r.set_midpoint();
        }
        //! Constructs a range covering the entire container.
        const_range_type( const concurrent_unordered_base &a_table ) :
            my_table(a_table), my_begin_node(a_table.my_solist.begin()),
            my_end_node(a_table.my_solist.end())
        {
            set_midpoint();
        }
        iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); }
        iterator end() const { return my_table.my_solist.get_iterator(my_end_node); }
        //! The grain size for this range.
        size_type grainsize() const { return 1; }

        //! Finds a bucket dummy node roughly halfway between begin and end and caches
        //! it as the split point; falls back to end (not divisible) if none exists.
        void set_midpoint() const {
            if( my_begin_node == my_end_node ) // not divisible
                my_midpoint_node = my_end_node;
            else {
                sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node);
                sokey_t end_key = solist_t::get_safe_order_key(my_end_node);
                // Candidate bucket whose dummy key is near the middle of [begin_key, end_key).
                size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets;
                // Walk up to the nearest initialized ancestor bucket.
                while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket);
                if(__TBB_ReverseBits(mid_bucket) > begin_key) {
                    // found a dummy_node between begin and end
                    my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket ));
                }
                else {
                    // didn't find a dummy node between begin and end.
                    my_midpoint_node = my_end_node;
                }
#if TBB_USE_ASSERT
                {
                    sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node);
                    __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" );
                    __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" );
                }
#endif // TBB_USE_ASSERT
            }
        }
    };
938 
    // Mutable counterpart of const_range_type: same traversal/splitting logic,
    // but begin()/end() yield non-const iterators.
    class range_type : public const_range_type {
    public:
        typedef typename concurrent_unordered_base::iterator iterator;
        //! Splitting constructor; delegates to the const range's split logic.
        range_type( range_type &r, split ) : const_range_type( r, split() ) {}
        //! Constructs a range covering the entire container.
        range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {}

        iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); }
        iterator end() const { return solist_t::get_iterator( const_range_type::end() ); }
    };
950 
    // Range over the whole container for parallel algorithms (mutable access).
    range_type range() {
        return range_type( *this );
    }
954 
    // Range over the whole container for parallel algorithms (read-only access).
    const_range_type range() const {
        return const_range_type( *this );
    }
958 
959  // Modifiers
    // Modifiers
    // Thread-safe insert by copy; returns the element's iterator and whether
    // a new element was actually inserted.
    std::pair<iterator, bool> insert(const value_type& value) {
        return internal_insert(value);
    }
963 
    // Hinted insert required by the standard interface; the hint is ignored
    // because the split-ordered list determines the position from the hash.
    iterator insert(const_iterator, const value_type& value) {
        // Ignore hint
        return insert(value).first;
    }
968 
969 #if __TBB_CPP11_RVALUE_REF_PRESENT
    // Thread-safe insert by move.
    std::pair<iterator, bool> insert(value_type&& value) {
        return internal_insert(std::move(value));
    }
973 
    // Hinted move-insert; the hint is ignored (position is derived from the hash).
    iterator insert(const_iterator, value_type&& value) {
        // Ignore hint
        return insert(std::move(value)).first;
    }
978 
979 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
    // Constructs an element in place from the forwarded arguments and inserts it.
    // The node is created up front so the value exists before ordering; it is
    // passed to internal_insert, which links it in or destroys it on a duplicate.
    template<typename... Args>
    std::pair<iterator, bool> emplace(Args&&... args) {
        nodeptr_t pnode = my_solist.create_node_v(tbb::internal::forward<Args>(args)...);
        const sokey_t hashed_element_key = (sokey_t) my_hash_compare(get_key(pnode->my_element));
        // Regular (non-dummy) order key: reversed hash with the low bit set.
        const sokey_t order_key = split_order_key_regular(hashed_element_key);
        pnode->init(order_key);

        return internal_insert(pnode->my_element, pnode);
    }
989 
    // Hinted emplace; the hint is ignored (position is derived from the hash).
    template<typename... Args>
    iterator emplace_hint(const_iterator, Args&&... args) {
        // Ignore hint
        return emplace(tbb::internal::forward<Args>(args)...).first;
    }
995 
996 #endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
997 #endif // __TBB_CPP11_RVALUE_REF_PRESENT
998 
999  template<class Iterator>
1000  void insert(Iterator first, Iterator last) {
1001  for (Iterator it = first; it != last; ++it)
1002  insert(*it);
1003  }
1004 
1005 #if __TBB_INITIALIZER_LISTS_PRESENT
    // Inserts every element of the initializer list.
    void insert(std::initializer_list<value_type> il) {
        insert(il.begin(), il.end());
    }
1010 #endif
1011 
    // Erases the element at 'where'; NOT safe against concurrent operations
    // (hence the unsafe_ prefix). Returns the iterator following the erased element.
    iterator unsafe_erase(const_iterator where) {
        return internal_erase(where);
    }
1015 
1016  iterator unsafe_erase(const_iterator first, const_iterator last) {
1017  while (first != last)
1018  unsafe_erase(first++);
1019  return my_solist.get_iterator(first);
1020  }
1021 
1022  size_type unsafe_erase(const key_type& key) {
1023  pairii_t where = equal_range(key);
1024  size_type item_count = internal_distance(where.first, where.second);
1025  unsafe_erase(where.first, where.second);
1026  return item_count;
1027  }
1028 
    // Swaps the complete contents of two tables (comparator, list, bucket
    // segments, bucket count, load-factor limit). NOT safe against concurrent
    // operations on either container.
    void swap(concurrent_unordered_base& right) {
        if (this != &right) {
            std::swap(my_hash_compare, right.my_hash_compare); // TODO: check what ADL meant here
            my_solist.swap(right.my_solist);
            internal_swap_buckets(right);
            std::swap(my_number_of_buckets, right.my_number_of_buckets);
            std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size);
        }
    }
1038 
1039  // Observers
    // Observers
    // Returns a copy of the hash functor in use.
    hasher hash_function() const {
        return my_hash_compare.my_hash_object;
    }
1043 
    // Returns a copy of the key-equality functor in use.
    key_equal key_eq() const {
        return my_hash_compare.my_key_compare_object;
    }
1047 
    // Removes all elements and releases all bucket segments, then re-creates
    // the mandatory dummy head for bucket 0 (the table invariant is that
    // bucket 0 is always initialized). NOT safe against concurrent operations.
    void clear() {
        // Clear list
        my_solist.clear();

        // Clear buckets
        internal_clear();

        // Initialize bucket 0
        __TBB_ASSERT(my_buckets[0] == NULL, NULL);
        raw_iterator dummy_node = my_solist.raw_begin();
        set_bucket(0, dummy_node);
    }
1060 
1061  // Lookup
    // Lookup
    // Returns an iterator to an element matching 'key', or end().
    iterator find(const key_type& key) {
        return internal_find(key);
    }
1065 
    // Const lookup; internal_find does not modify the table, so the
    // const_cast only reuses the non-const implementation.
    const_iterator find(const key_type& key) const {
        return const_cast<self_type*>(this)->internal_find(key);
    }
1069 
1070  size_type count(const key_type& key) const {
1071  if(allow_multimapping) {
1072  paircc_t answer = equal_range(key);
1073  size_type item_count = internal_distance(answer.first, answer.second);
1074  return item_count;
1075  } else {
1076  return const_cast<self_type*>(this)->internal_find(key) == end()?0:1;
1077  }
1078  }
1079 
    // Returns the [begin, end) pair of iterators over elements matching 'key'.
    std::pair<iterator, iterator> equal_range(const key_type& key) {
        return internal_equal_range(key);
    }
1083 
    // Const overload; reuses the non-const implementation, which does not
    // modify the table apart from lazily initializing the bucket.
    std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
        return const_cast<self_type*>(this)->internal_equal_range(key);
    }
1087 
1088  // Bucket interface - for debugging
    // Bucket interface - for debugging
    // Current number of buckets (a snapshot; may grow concurrently).
    size_type unsafe_bucket_count() const {
        return my_number_of_buckets;
    }
1092 
    // Upper bound on the bucket count: the size of the last possible segment.
    size_type unsafe_max_bucket_count() const {
        return segment_size(pointers_per_table-1);
    }
1096 
1097  size_type unsafe_bucket_size(size_type bucket) {
1098  size_type item_count = 0;
1099  if (is_initialized(bucket)) {
1100  raw_iterator it = get_bucket(bucket);
1101  ++it;
1102  for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it)
1103  ++item_count;
1104  }
1105  return item_count;
1106  }
1107 
1108  size_type unsafe_bucket(const key_type& key) const {
1109  sokey_t order_key = (sokey_t) my_hash_compare(key);
1110  size_type bucket = order_key % my_number_of_buckets;
1111  return bucket;
1112  }
1113 
    // If the bucket is initialized, return a first non-dummy element in it;
    // otherwise return end().
    local_iterator unsafe_begin(size_type bucket) {
        if (!is_initialized(bucket))
            return end();

        raw_iterator it = get_bucket(bucket); // bucket's dummy head
        return my_solist.first_real_iterator(it);
    }
1122 
    // If the bucket is initialized, return a first non-dummy element in it;
    // otherwise return end(). Const overload.
    const_local_iterator unsafe_begin(size_type bucket) const
    {
        if (!is_initialized(bucket))
            return end();

        raw_const_iterator it = get_bucket(bucket); // bucket's dummy head
        return my_solist.first_real_iterator(it);
    }
1132 
    // @REVIEW: Takes O(n)
    // Returns the iterator after the last non-dummy element in the bucket,
    // i.e. the first real element of the following bucket (or end()).
    local_iterator unsafe_end(size_type bucket)
    {
        if (!is_initialized(bucket))
            return end();

        raw_iterator it = get_bucket(bucket);

        // Find the end of the bucket, denoted by the dummy element
        do ++it;
        while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());

        // Return the first real element past the end of the bucket
        return my_solist.first_real_iterator(it);
    }
1149 
    // @REVIEW: Takes O(n)
    // Returns the iterator after the last non-dummy element in the bucket.
    // Const overload of unsafe_end(size_type).
    const_local_iterator unsafe_end(size_type bucket) const
    {
        if (!is_initialized(bucket))
            return end();

        raw_const_iterator it = get_bucket(bucket);

        // Find the end of the bucket, denoted by the dummy element
        do ++it;
        while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());

        // Return the first real element past the end of the bucket
        return my_solist.first_real_iterator(it);
    }
1166 
1167  const_local_iterator unsafe_cbegin(size_type bucket) const {
1168  return ((const self_type *) this)->unsafe_begin(bucket);
1169  }
1170 
1171  const_local_iterator unsafe_cend(size_type bucket) const {
1172  return ((const self_type *) this)->unsafe_end(bucket);
1173  }
1174 
1175  // Hash policy
1176  float load_factor() const {
1177  return (float) size() / (float) unsafe_bucket_count();
1178  }
1179 
    // Current maximum load factor; exceeding it triggers table growth on insert.
    float max_load_factor() const {
        return my_maximum_bucket_size;
    }
1183 
    // Sets the maximum load factor. Throws eid_invalid_load_factor for
    // negative or NaN input (newmax != newmax is the NaN test).
    void max_load_factor(float newmax) {
        if (newmax != newmax || newmax < 0)
            tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor);
        my_maximum_bucket_size = newmax;
    }
1189 
    // This function is a noop, because the underlying split-ordered list
    // is already sorted, so an increase in the bucket number will be
    // reflected next time this bucket is touched. Only ever grows the table;
    // the requested count is rounded up to the next power of two.
    void rehash(size_type buckets) {
        size_type current_buckets = my_number_of_buckets;
        if (current_buckets >= buckets)
            return;
        my_number_of_buckets = size_type(1)<<__TBB_Log2((uintptr_t)buckets*2-1); // round up to power of 2
    }
1199 
1200 private:
1201 
    // Initialize the hash and keep the first bucket open.
    // Called at construction time: zeroes the segment pointer table and
    // publishes the list's head dummy node as bucket 0.
    void internal_init() {
        // Allocate an array of segment pointers
        memset(my_buckets, 0, pointers_per_table * sizeof(void *));

        // Initialize bucket 0
        raw_iterator dummy_node = my_solist.raw_begin();
        set_bucket(0, dummy_node);
    }
1211 
1212  void internal_clear() {
1213  for (size_type index = 0; index < pointers_per_table; ++index) {
1214  if (my_buckets[index] != NULL) {
1215  size_type sz = segment_size(index);
1216  for (size_type index2 = 0; index2 < sz; ++index2)
1217  my_allocator.destroy(&my_buckets[index][index2]);
1218  my_allocator.deallocate(my_buckets[index], sz);
1219  my_buckets[index] = 0;
1220  }
1221  }
1222  }
1223 
    // Replaces this table's contents with a copy of 'right'. NOT
    // concurrency-safe. If an insertion throws, the element list is cleared
    // before the exception is rethrown, leaving the table empty.
    void internal_copy(const self_type& right) {
        clear();

        my_maximum_bucket_size = right.my_maximum_bucket_size;
        my_number_of_buckets = right.my_number_of_buckets;

        __TBB_TRY {
            insert(right.begin(), right.end());
            my_hash_compare = right.my_hash_compare;
        } __TBB_CATCH(...) {
            my_solist.clear();
            __TBB_RETHROW();
        }
    }
1238 
1239  void internal_swap_buckets(concurrent_unordered_base& right)
1240  {
1241  // Swap all node segments
1242  for (size_type index = 0; index < pointers_per_table; ++index)
1243  {
1244  raw_iterator * iterator_pointer = my_buckets[index];
1245  my_buckets[index] = right.my_buckets[index];
1246  right.my_buckets[index] = iterator_pointer;
1247  }
1248  }
1249 
1250  //TODO: why not use std::distance?
1251  // Hash APIs
1252  static size_type internal_distance(const_iterator first, const_iterator last)
1253  {
1254  size_type num = 0;
1255 
1256  for (const_iterator it = first; it != last; ++it)
1257  ++num;
1258 
1259  return num;
1260  }
1261 
    // Insert an element in the hash given its value.
    //
    // Core concurrent insertion. Hashes the key to pick a bucket (initializing
    // it lazily), then walks the split-ordered list from the bucket's dummy
    // node and links the new node in order-key order via try_insert (CAS).
    // If 'pnode' is supplied (the emplace path) it is reused; on a duplicate
    // key in the unique-mapping case it is destroyed instead.
    template< typename ValueType>
    std::pair<iterator, bool> internal_insert( __TBB_FORWARDING_REF(ValueType) value, nodeptr_t pnode = NULL)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(get_key(value));
        size_type bucket = order_key % my_number_of_buckets;

        //TODO:refactor the get_bucket related stuff into separate function something like acquire_bucket(key_type)
        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        size_type new_count = 0;
        order_key = split_order_key_regular(order_key);
        raw_iterator it = get_bucket(bucket);
        raw_iterator last = my_solist.raw_end();
        raw_iterator where = it;

        __TBB_ASSERT(where != last, "Invalid head node");

        // First node is a dummy node
        ++where;

        // Scan for the insertion point; 'it' trails one node behind 'where'.
        for (;;)
        {
            if (where == last || solist_t::get_order_key(where) > order_key ||
                // if multimapped, stop at the first item equal to us.
                (allow_multimapping && solist_t::get_order_key(where) == order_key &&
                !my_hash_compare(get_key(*where), get_key(value))))
            {
                // Create the node lazily, only once an insertion point is found.
                if (!pnode)
                    pnode = my_solist.create_node(order_key, tbb::internal::forward<ValueType>(value));

                // Try to insert 'pnode' between 'it' and 'where'
                std::pair<iterator, bool> result = my_solist.try_insert(it, where, pnode, &new_count);

                if (result.second)
                {
                    // Insertion succeeded, adjust the table size, if needed
                    adjust_table_size(new_count, my_number_of_buckets);
                    return result;
                }
                else
                {
                    // Insertion failed: either the same node was inserted by another thread, or
                    // another element was inserted at exactly the same place as this node.
                    // Proceed with the search from the previous location where order key was
                    // known to be larger (note: this is legal only because there is no safe
                    // concurrent erase operation supported).
                    where = it;
                    ++where;
                    continue;
                }
            }
            else if (!allow_multimapping && solist_t::get_order_key(where) == order_key &&
                my_hash_compare(get_key(*where), get_key(value)) == 0)
            { // Element already in the list, return it
                if (pnode)
                    my_solist.destroy_node(pnode);
                return std::pair<iterator, bool>(my_solist.get_iterator(where), false);
            }
            // Move the iterator forward
            it = where;
            ++where;
        }
    }
1328 
    // Find the element in the split-ordered list.
    // Walks forward from the bucket's dummy node. Because the list is sorted
    // by order key, the search can stop as soon as the current order key
    // exceeds the target's.
    iterator internal_find(const key_type& key)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);
        raw_iterator last = my_solist.raw_end();

        for (raw_iterator it = get_bucket(bucket); it != last; ++it)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // If the order key is smaller than the current order key, the element
                // is not in the hash.
                return end();
            }
            else if (solist_t::get_order_key(it) == order_key)
            {
                // The fact that order keys match does not mean that the element is found.
                // Key function comparison has to be performed to check whether this is the
                // right element. If not, keep searching while order key is the same.
                if (!my_hash_compare(get_key(*it), key))
                    return my_solist.get_iterator(it);
            }
        }

        return end();
    }
1362 
    // Erase an element from the list. This is not a concurrency safe function.
    // Re-locates 'it' by scanning its bucket so the predecessor node is known,
    // then unlinks via erase_node. Returns the following iterator, or end()
    // if 'it' is no longer present.
    iterator internal_erase(const_iterator it)
    {
        //const reference extends lifetime of possible temporary coming from get_key
        const key_type& key = get_key(*it);
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);

        raw_iterator previous = get_bucket(bucket);
        raw_iterator last = my_solist.raw_end();
        raw_iterator where = previous;

        __TBB_ASSERT(where != last, "Invalid head node");

        // First node is a dummy node
        ++where;

        // Scan for 'it', keeping 'previous' one node behind for unlinking.
        for (;;) {
            if (where == last)
                return end();
            else if (my_solist.get_iterator(where) == it)
                return my_solist.erase_node(previous, it);

            // Move the iterator forward
            previous = where;
            ++where;
        }
    }
1397 
    // Return the [begin, end) pair of iterators with the same key values.
    // This operation makes sense only if mapping is many-to-one.
    // Scans the bucket for the first match, then extends 'last' over the run
    // of equal keys (only when multimapping is allowed; equal elements are
    // adjacent because they share the same order key).
    pairii_t internal_equal_range(const key_type& key)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);
        raw_iterator end_it = my_solist.raw_end();

        for (raw_iterator it = get_bucket(bucket); it != end_it; ++it)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // There is no element with the given key
                return pairii_t(end(), end());
            }
            else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key))
            {
                iterator first = my_solist.get_iterator(it);
                iterator last = first;
                do ++last; while( allow_multimapping && last != end() && !my_hash_compare(get_key(*last), key) );
                return pairii_t(first, last);
            }
        }

        return pairii_t(end(), end());
    }
1430 
    // Bucket APIs
    // Lazily initializes 'bucket': recursively ensures its parent bucket is
    // initialized, inserts a dummy node after the parent's dummy (dummy order
    // keys have the low bit clear), and publishes it in the segment table.
    void init_bucket(size_type bucket)
    {
        // Bucket 0 has no parent.
        __TBB_ASSERT( bucket != 0, "The first bucket must always be initialized");

        size_type parent_bucket = get_parent(bucket);

        // All parent_bucket buckets have to be initialized before this bucket is
        if (!is_initialized(parent_bucket))
            init_bucket(parent_bucket);

        raw_iterator parent = get_bucket(parent_bucket);

        // Create a dummy first node in this bucket
        raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket));
        set_bucket(bucket, dummy_node);
    }
1449 
    // Doubles the bucket count when the observed load factor exceeds the
    // limit. The CAS ensures only one thread grows the table for a given
    // 'current_size' snapshot; losers simply do nothing.
    void adjust_table_size(size_type total_elements, size_type current_size)
    {
        // Grow the table by a factor of 2 if possible and needed
        if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size )
        {
            // Double the size of the hash only if size has not changed in between loads
            my_number_of_buckets.compare_and_swap(2u*current_size, current_size);
            //Simple "my_number_of_buckets.compare_and_swap( current_size<<1, current_size );" does not work for VC8
            //due to overzealous compiler warnings in /Wp64 mode
        }
    }
1461 
    // Parent bucket in the split-ordering scheme: the index with the most
    // significant set bit cleared (bucket 0 is its own root).
    size_type get_parent(size_type bucket) const
    {
        // Unsets bucket's most significant turned-on bit
        size_type msb = __TBB_Log2((uintptr_t)bucket);
        return bucket & ~(size_type(1) << msb);
    }
1468 
1469 
    // Dynamic sized array (segments)
    // Index of the segment containing flat array index 'index'
    // (index|1 makes indices 0 and 1 both land in segment 0).
    static size_type segment_index_of( size_type index ) {
        return size_type( __TBB_Log2( uintptr_t(index|1) ) );
    }
1475 
    // First flat array index belonging to segment 'k' (0 for k in {0,1}).
    static size_type segment_base( size_type k ) {
        return (size_type(1)<<k & ~size_type(1));
    }
1480 
    // Number of slots in segment 'k'; segment 0 holds two slots, segment k>0 holds 2^k.
    static size_type segment_size( size_type k ) {
        return k? size_type(1)<<k : 2;
    }
1485 
    // Returns the dummy-node iterator stored for 'bucket'. The bucket must
    // already be initialized (its segment allocated).
    raw_iterator get_bucket(size_type bucket) const {
        size_type segment = segment_index_of(bucket);
        bucket -= segment_base(segment); // offset within the segment
        __TBB_ASSERT( my_buckets[segment], "bucket must be in an allocated segment" );
        return my_buckets[segment][bucket];
    }
1492 
    // Publishes 'dummy_head' as the dummy node of 'bucket', allocating the
    // containing segment on first use. Concurrent allocation is resolved by
    // compare_and_swap: the losing thread deallocates its segment and uses
    // the winner's.
    void set_bucket(size_type bucket, raw_iterator dummy_head) {
        size_type segment = segment_index_of(bucket);
        bucket -= segment_base(segment); // offset within the segment

        if (my_buckets[segment] == NULL) {
            size_type sz = segment_size(segment);
            raw_iterator * new_segment = my_allocator.allocate(sz);
            std::memset(new_segment, 0, sz*sizeof(raw_iterator)); // zeroed slot == uninitialized bucket

            if (my_buckets[segment].compare_and_swap( new_segment, NULL) != NULL)
                my_allocator.deallocate(new_segment, sz);
        }

        my_buckets[segment][bucket] = dummy_head;
    }
1508 
1509  bool is_initialized(size_type bucket) const {
1510  size_type segment = segment_index_of(bucket);
1511  bucket -= segment_base(segment);
1512 
1513  if (my_buckets[segment] == NULL)
1514  return false;
1515 
1516  raw_iterator it = my_buckets[segment][bucket];
1517  return (it.get_node_ptr() != NULL);
1518  }
1519 
1520  // Utilities for keys
1521 
    // A regular order key has its original hash value reversed and the last bit set,
    // so real elements always sort after their bucket's dummy node.
    sokey_t split_order_key_regular(sokey_t order_key) const {
        return __TBB_ReverseBits(order_key) | 0x1;
    }
1526 
    // A dummy order key has its original hash value reversed and the last bit unset,
    // so a bucket's dummy node sorts before every real element of that bucket.
    sokey_t split_order_key_dummy(sokey_t order_key) const {
        return __TBB_ReverseBits(order_key) & ~sokey_t(0x1);
    }
1531 
1532  // Shared variables
1533  atomic<size_type> my_number_of_buckets; // Current table size
1534  solist_t my_solist; // List where all the elements are kept
1535  typename allocator_type::template rebind<raw_iterator>::other my_allocator; // Allocator object for segments
1536  float my_maximum_bucket_size; // Maximum size of the bucket
1537  atomic<raw_iterator*> my_buckets[pointers_per_table]; // The segment table
1538 };
1539 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1540 #pragma warning(pop) // warning 4127 is back
1541 #endif
1542 
1543 } // namespace internal
1545 } // namespace interface5
1546 } // namespace tbb
1547 #endif // __TBB__concurrent_unordered_impl_H
Definition: _flow_graph_async_msg_impl.h:32
The namespace tbb contains all components of the library.
Definition: parallel_for.h:44