#ifndef __TBB_concurrent_hash_map_H
#define __TBB_concurrent_hash_map_H

#include "tbb_stddef.h"

#if !TBB_USE_EXCEPTIONS && _MSC_VER
    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
    #pragma warning (push)
    #pragma warning (disable: 4530)
#endif

#include <iterator>
#include <utility>      // Need std::pair
#include <cstring>      // Need std::memset

#if !TBB_USE_EXCEPTIONS && _MSC_VER
    #pragma warning (pop)
#endif

#include "cache_aligned_allocator.h"
#include "tbb_allocator.h"
#include "spin_rw_mutex.h"
#include "atomic.h"
#include "tbb_exception.h"
#include "tbb_profiling.h"
#include "internal/_tbb_hash_compare_impl.h"
#if __TBB_INITIALIZER_LISTS_PRESENT
#include <initializer_list>
#endif
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
#include <typeinfo>
#endif

namespace tbb {

namespace interface5 {
template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>,
         typename A = tbb_allocator<std::pair<Key, T> > >
class concurrent_hash_map;
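// Illustrative sketch (not part of the original header): the HashCompare parameter above
// must provide hash() and equal() with the shapes shown below. MyStringCompare and the
// hashing constants are assumptions for the example only, not types defined by this file.
//
//     #include <string>
//
//     struct MyStringCompare {
//         static size_t hash( const std::string& s ) {
//             size_t h = 0;
//             for( const char* c = s.c_str(); *c; ++c )
//                 h = (h*17) ^ *c;
//             return h;
//         }
//         static bool equal( const std::string& x, const std::string& y ) {
//             return x == y;
//         }
//     };
//
//     typedef tbb::concurrent_hash_map<std::string, int, MyStringCompare> StringTable;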
namespace internal {
using namespace tbb::internal;

//! Type of a hash code.
typedef size_t hashcode_t;

//! Node base type
struct hash_map_node_base : tbb::internal::no_copy {
    //! Mutex type
    typedef spin_rw_mutex mutex_t;
    //! Scoped lock type for mutex
    typedef mutex_t::scoped_lock scoped_t;
    //! Next node in chain
    hash_map_node_base *next;
    mutex_t mutex;
};

//! Incompleteness flag value
static hash_map_node_base *const rehash_req = reinterpret_cast<hash_map_node_base*>(size_t(3));
//! Rehashed empty bucket flag
static hash_map_node_base *const empty_rehashed = reinterpret_cast<hash_map_node_base*>(size_t(0));
//! base class of concurrent_hash_map
class hash_map_base {
public:
    //! Size type
    typedef size_t size_type;
    //! Type of a hash code.
    typedef size_t hashcode_t;
    //! Segment index type
    typedef size_t segment_index_t;
    //! Node base type
    typedef hash_map_node_base node_base;
    //! Bucket type
    struct bucket : tbb::internal::no_copy {
        typedef spin_rw_mutex mutex_t;
        typedef mutex_t::scoped_lock scoped_t;
        mutex_t mutex;
        node_base *node_list;
    };
    //! Count of segments in the first block
    static size_type const embedded_block = 1;
    //! Count of buckets in the embedded segments
    static size_type const embedded_buckets = 1<<embedded_block;
    //! Count of segments in the first allocated block
    static size_type const first_block = 8;
    //! Size of a pointer / table size
    static size_type const pointers_per_table = sizeof(segment_index_t) * 8; // one segment per bit
    //! Segment pointer
    typedef bucket *segment_ptr_t;
    //! Segment pointers table type
    typedef segment_ptr_t segments_table_t[pointers_per_table];
    //! Hash mask = sum of allocated segment sizes - 1
    atomic<hashcode_t> my_mask;
    //! Segment pointers table. Also prevents false sharing between my_mask and my_size
    segments_table_t my_table;
    //! Size of container in stored items
    atomic<size_type> my_size; // must be in a separate cache line from my_mask
    //! Zero segment
    bucket my_embedded_segment[embedded_buckets];
#if __TBB_STATISTICS
    atomic<unsigned> my_info_resizes;           // concurrent ones
    mutable atomic<unsigned> my_info_restarts;  // race collisions
    atomic<unsigned> my_info_rehashes;          // invocations of rehash_bucket
#endif
    //! Constructor
    hash_map_base() {
        std::memset( this, 0, pointers_per_table*sizeof(segment_ptr_t)
            + sizeof(my_size) + sizeof(my_mask)
            + embedded_buckets*sizeof(bucket) );
        for( size_type i = 0; i < embedded_block; i++ ) // fill the table
            my_table[i] = my_embedded_segment + segment_base(i);
        my_mask = embedded_buckets - 1;
        __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
#if __TBB_STATISTICS
        my_info_resizes = 0;
        my_info_restarts = 0;
        my_info_rehashes = 0;
#endif
    }
    //! @return segment index of given index in the array
    static segment_index_t segment_index_of( size_type index ) {
        return segment_index_t( __TBB_Log2( index|1 ) );
    }

    //! @return the first array index of given segment
    static segment_index_t segment_base( segment_index_t k ) {
        return (segment_index_t(1)<<k & ~segment_index_t(1));
    }

    //! @return segment size except for @arg k == 0
    static size_type segment_size( segment_index_t k ) {
        return size_type(1)<<k; // fake value for k==0
    }
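    // Worked example (illustrative comment, not in the original header): with the helpers
    // above, bucket indices map onto segments as
    //     segment_index_of(0) == 0,  segment_base(0) == 0   -> buckets 0..1  (embedded)
    //     segment_index_of(2) == 1,  segment_base(1) == 2   -> buckets 2..3
    //     segment_index_of(4) == 2,  segment_base(2) == 4   -> buckets 4..7
    //     segment_index_of(8) == 3,  segment_base(3) == 8   -> buckets 8..15
    // i.e. segment k (k>0) holds segment_size(k) == 1<<k buckets starting at index 1<<k,
    // and index 1 folds into segment 0 because segment_index_of() works on index|1.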
    //! @return true if @arg ptr is a valid pointer
    static bool is_valid( void *ptr ) {
        return reinterpret_cast<uintptr_t>(ptr) > uintptr_t(63);
    }
    //! Initialize buckets
    static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
        if( is_initial ) std::memset( ptr, 0, sz*sizeof(bucket) );
        else for(size_type i = 0; i < sz; i++, ptr++) {
            *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
            ptr->node_list = rehash_req;
        }
    }
    //! Add node @arg n to bucket @arg b
    static void add_to_bucket( bucket *b, node_base *n ) {
        __TBB_ASSERT(b->node_list != rehash_req, NULL);
        n->next = b->node_list;
        b->node_list = n; // its under lock and flag is set
    }
    //! Exception safety helper
    struct enable_segment_failsafe : tbb::internal::no_copy {
        segment_ptr_t *my_segment_ptr;
        enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {}
        ~enable_segment_failsafe() {
            if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress
        }
    };
    //! Enable segment
    void enable_segment( segment_index_t k, bool is_initial = false ) {
        __TBB_ASSERT( k, "Zero segment must be embedded" );
        enable_segment_failsafe watchdog( my_table, k );
        cache_aligned_allocator<bucket> alloc;
        size_type sz;
        __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
        if( k >= first_block ) {
            sz = segment_size( k );
            segment_ptr_t ptr = alloc.allocate( sz );
            init_buckets( ptr, sz, is_initial );
            itt_hide_store_word( my_table[k], ptr );
        } else { // the first non-embedded block
            __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
            sz = segment_size( first_block );
            segment_ptr_t ptr = alloc.allocate( sz - embedded_buckets );
            init_buckets( ptr, sz - embedded_buckets, is_initial );
            ptr -= segment_base(embedded_block);
            for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets
                itt_hide_store_word( my_table[i], ptr + segment_base(i) );
        }
        itt_store_word_with_release( my_mask, sz-1 );
        watchdog.my_segment_ptr = 0; // means everything is OK
    }
    //! Get bucket by (masked) hashcode
    bucket *get_bucket( hashcode_t h ) const throw() {
        segment_index_t s = segment_index_of( h );
        h -= segment_base(s);
        segment_ptr_t seg = my_table[s];
        __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
        return &seg[h];
    }
    // internal serial rehashing helper
    void mark_rehashed_levels( hashcode_t h ) throw () {
        segment_index_t s = segment_index_of( h );
        while( segment_ptr_t seg = my_table[++s] )
            if( seg[h].node_list == rehash_req ) {
                seg[h].node_list = empty_rehashed;
                mark_rehashed_levels( h + ((hashcode_t)1<<s) ); // optimized segment_base(s)
            }
    }
    //! Check for mask race
    inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {
        hashcode_t m_now, m_old = m;
        m_now = (hashcode_t) itt_load_word_with_acquire( my_mask );
        if( m_old != m_now )
            return check_rehashing_collision( h, m_old, m = m_now );
        return false;
    }
    //! Process mask race, check for rehashing collision
    bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const {
        __TBB_ASSERT(m_old != m, NULL);
        if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
            // condition above proves that 'h' has some other bits set beside 'm_old'
            // find next applicable mask after m_old
            for( ++m_old; !(h & m_old); m_old <<= 1 ) // at maximum few rounds depending on the first block size
                ;
            m_old = (m_old<<1) - 1; // get full mask from a bit
            __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);
            // check whether it is rehashing/ed
            if( itt_load_word_with_acquire(get_bucket(h & m_old)->node_list) != rehash_req )
            {
#if __TBB_STATISTICS
                my_info_restarts++; // race collisions
#endif
                return true;
            }
        }
        return false;
    }
    //! Insert a node and check for load factor. @return segment index to enable.
    segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) {
        size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted
        add_to_bucket( b, n );
        // check load factor
        if( sz >= mask ) {
            segment_index_t new_seg = __TBB_Log2( mask+1 ); // optimized segment_index_of
            __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
            static const segment_ptr_t is_allocating = (segment_ptr_t)2;
            if( !itt_hide_load_word(my_table[new_seg])
              && as_atomic(my_table[new_seg]).compare_and_swap(is_allocating, NULL) == NULL )
                return new_seg; // the value must be processed
        }
        return 0;
    }
    //! Prepare enough segments for number of buckets
    void reserve(size_type buckets) {
        if( !buckets-- ) return;
        bool is_initial = !my_size;
        for( size_type m = my_mask; buckets > m; m = my_mask )
            enable_segment( segment_index_of( m+1 ), is_initial );
    }
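    // Worked example (illustrative comment, not in the original header): reserve() grows
    // one segment at a time until my_mask covers the request. Starting from the embedded
    // state (my_mask == 1, i.e. 2 buckets), reserve(1000) proceeds as
    //     enable_segment(1)  allocates segment_size(first_block)-embedded_buckets = 254
    //                        buckets and publishes my_mask = 255
    //     enable_segment(8)  adds 256 buckets, my_mask = 511
    //     enable_segment(9)  adds 512 buckets, my_mask = 1023 >= 999, so the loop stops
    // leaving the table with 1024 buckets.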
    //! Swap hash_map_bases
    void internal_swap(hash_map_base &table) {
        using std::swap;
        swap(this->my_mask, table.my_mask);
        swap(this->my_size, table.my_size);
        for(size_type i = 0; i < embedded_buckets; i++)
            swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
        for(size_type i = embedded_block; i < pointers_per_table; i++)
            swap(this->my_table[i], table.my_table[i]);
    }
};
template<typename Iterator>
class hash_map_range;
//! Meets requirements of a forward iterator for STL
/** Value is either the T or const T type of the container. */
template<typename Container, typename Value>
class hash_map_iterator
    : public std::iterator<std::forward_iterator_tag,Value>
{
    typedef Container map_type;
    typedef typename Container::node node;
    typedef hash_map_base::node_base node_base;
    typedef hash_map_base::bucket bucket;

    template<typename C, typename T, typename U>
    friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename U>
    friend class hash_map_iterator;

    template<typename I>
    friend class hash_map_range;
    void advance_to_next_bucket() {
        size_t k = my_index+1;
        while( my_bucket && k <= my_map->my_mask ) {
            // Following test uses 2's-complement wizardry
            if( k&(k-2) ) // not the beginning of a segment
                ++my_bucket;
            else my_bucket = my_map->get_bucket( k );
            my_node = static_cast<node*>( my_bucket->node_list );
            if( hash_map_base::is_valid(my_node) ) {
                my_index = k; return;
            }
            ++k;
        }
        my_bucket = 0; my_node = 0; my_index = k; // the end
    }
#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
    template<typename Key, typename T, typename HashCompare, typename A>
    friend class interface5::concurrent_hash_map;
#else
public: // workaround
#endif
    //! concurrent_hash_map over which we are iterating.
    const Container *my_map;

    //! Index in hash table for current item
    size_t my_index;

    //! Pointer to bucket
    const bucket *my_bucket;

    //! Pointer to node that has current item
    node *my_node;

    hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );
public:
    //! Construct undefined iterator
    hash_map_iterator() {}
    hash_map_iterator( const hash_map_iterator<Container,typename Container::value_type> &other ) :
        my_map(other.my_map),
        my_index(other.my_index),
        my_bucket(other.my_bucket),
        my_node(other.my_node)
    {}
    Value& operator*() const {
        __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
        return my_node->item;
    }
    Value* operator->() const {return &operator*();}
    hash_map_iterator& operator++();

    //! Post increment
    hash_map_iterator operator++(int) {
        hash_map_iterator old(*this);
        operator++();
        return old;
    }
};
template<typename Container, typename Value>
hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
    my_map(&map),
    my_index(index),
    my_bucket(b),
    my_node( static_cast<node*>(n) )
{
    if( b && !hash_map_base::is_valid(n) )
        advance_to_next_bucket();
}
template<typename Container, typename Value>
hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {
    my_node = static_cast<node*>( my_node->next );
    if( !my_node ) advance_to_next_bucket();
    return *this;
}
template<typename Container, typename T, typename U>
bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
    return i.my_node == j.my_node && i.my_map == j.my_map;
}
template<typename Container, typename T, typename U>
bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
    return i.my_node != j.my_node || i.my_map != j.my_map;
}
//! Range class used with concurrent_hash_map
template<typename Iterator>
class hash_map_range {
    typedef typename Iterator::map_type map_type;
    Iterator my_begin;
    Iterator my_end;
    mutable Iterator my_midpoint;
    size_t my_grainsize;
    //! Set my_midpoint to point approximately half way between my_begin and my_end.
    void set_midpoint() const;
    template<typename U> friend class hash_map_range;
public:
    //! Type for size of a range
    typedef std::size_t size_type;
    typedef typename Iterator::value_type value_type;
    typedef typename Iterator::reference reference;
    typedef typename Iterator::difference_type difference_type;
    typedef Iterator iterator;

    //! True if range is empty.
    bool empty() const {return my_begin==my_end;}

    //! True if range can be partitioned into two subranges.
    bool is_divisible() const {
        return my_midpoint!=my_end;
    }
    //! Split range.
    hash_map_range( hash_map_range& r, split ) :
        my_end(r.my_end),
        my_grainsize(r.my_grainsize)
    {
        r.my_end = my_begin = r.my_midpoint;
        __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
        __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
        set_midpoint();
        r.set_midpoint();
    }
    //! type conversion
    template<typename U>
    hash_map_range( hash_map_range<U>& r) :
        my_begin(r.my_begin),
        my_end(r.my_end),
        my_midpoint(r.my_midpoint),
        my_grainsize(r.my_grainsize)
    {}
    //! Init range with container and grainsize specified
    hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
        my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
        my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
        my_grainsize( grainsize_ )
    {
        __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
        set_midpoint();
    }
    const Iterator& begin() const {return my_begin;}
    const Iterator& end() const {return my_end;}
    //! The grain size for this range.
    size_type grainsize() const {return my_grainsize;}
};
template<typename Iterator>
void hash_map_range<Iterator>::set_midpoint() const {
    // Split by groups of nodes
    size_t m = my_end.my_index-my_begin.my_index;
    if( m > my_grainsize ) {
        m = my_begin.my_index + m/2u;
        hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
        my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
    } else {
        my_midpoint = my_end;
    }
    __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
        "my_begin is after my_midpoint" );
    __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
        "my_midpoint is after my_end" );
    __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
        "[my_begin, my_midpoint) range should not be empty" );
}
} // namespace internal

#if _MSC_VER && !defined(__INTEL_COMPILER)
    // Suppress "conditional expression is constant" warning.
    #pragma warning( push )
    #pragma warning( disable: 4127 )
#endif

//! Unordered map from Key to T.
template<typename Key, typename T, typename HashCompare, typename Allocator>
class concurrent_hash_map : protected internal::hash_map_base {
    template<typename Container, typename Value>
    friend class internal::hash_map_iterator;

    template<typename I>
    friend class internal::hash_map_range;
public:
    typedef Key key_type;
    typedef T mapped_type;
    typedef std::pair<const Key,T> value_type;
    typedef hash_map_base::size_type size_type;
    typedef ptrdiff_t difference_type;
    typedef value_type *pointer;
    typedef const value_type *const_pointer;
    typedef value_type &reference;
    typedef const value_type &const_reference;
    typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;
    typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;
    typedef internal::hash_map_range<iterator> range_type;
    typedef internal::hash_map_range<const_iterator> const_range_type;
    typedef Allocator allocator_type;

protected:
    friend class const_accessor;
    struct node;
    typedef typename Allocator::template rebind<node>::other node_allocator_type;
    node_allocator_type my_allocator;
    HashCompare my_hash_compare;
    struct node : public node_base {
        value_type item;
        node( const Key &key ) : item(key, T()) {}
        node( const Key &key, const T &t ) : item(key, t) {}
#if __TBB_CPP11_RVALUE_REF_PRESENT
        node( const Key &key, T &&t ) : item(key, std::move(t)) {}
        node( value_type&& i ) : item(std::move(i)){}
#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
        template<typename... Args>
        node( Args&&... args ) : item(std::forward<Args>(args)...) {}
#if __TBB_COPY_FROM_NON_CONST_REF_BROKEN
        node( value_type& i ) : item(const_cast<const value_type&>(i)) {}
#endif //__TBB_COPY_FROM_NON_CONST_REF_BROKEN
#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
#endif //__TBB_CPP11_RVALUE_REF_PRESENT
        node( const value_type& i ) : item(i) {}
        // exception-safe allocation, see C++ Standard 2003, clause 5.3.4p17
        void *operator new( size_t , node_allocator_type &a ) {
            void *ptr = a.allocate(1);
            if(!ptr)
                tbb::internal::throw_exception(tbb::internal::eid_bad_alloc);
            return ptr;
        }
        // match placement-new form above to be called if exception thrown in constructor
        void operator delete( void *ptr, node_allocator_type &a ) { a.deallocate(static_cast<node*>(ptr),1); }
    };
    void delete_node( node_base *n ) {
        my_allocator.destroy( static_cast<node*>(n) );
        my_allocator.deallocate( static_cast<node*>(n), 1);
    }
    static node* allocate_node_copy_construct(node_allocator_type& allocator, const Key &key, const T * t){
        return new( allocator ) node(key, *t);
    }

#if __TBB_CPP11_RVALUE_REF_PRESENT
    static node* allocate_node_move_construct(node_allocator_type& allocator, const Key &key, const T * t){
        return new( allocator ) node(key, std::move(*const_cast<T*>(t)));
    }
#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
    template<typename... Args>
    static node* allocate_node_emplace_construct(node_allocator_type& allocator, Args&&... args){
        return new( allocator ) node(std::forward<Args>(args)...);
    }
#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

    static node* allocate_node_default_construct(node_allocator_type& allocator, const Key &key, const T * ){
        return new( allocator ) node(key);
    }

    static node* do_not_allocate_node(node_allocator_type& , const Key &, const T * ){
        __TBB_ASSERT(false, "this dummy function should not be called");
        return NULL;
    }
    node *search_bucket( const key_type &key, bucket *b ) const {
        node *n = static_cast<node*>( b->node_list );
        while( is_valid(n) && !my_hash_compare.equal(key, n->item.first) )
            n = static_cast<node*>( n->next );
        __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
        return n;
    }
    //! bucket accessor is to find, rehash, acquire a lock, and access a bucket
    class bucket_accessor : public bucket::scoped_t {
        bucket *my_b;
    public:
        bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }
        //! find a bucket by masked hashcode, optionally rehash, and acquire the lock
        inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {
            my_b = base->get_bucket( h );
            if( itt_load_word_with_acquire(my_b->node_list) == internal::rehash_req
                && try_acquire( my_b->mutex, true ) )
            {
                if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); // recursive rehashing
            }
            else bucket::scoped_t::acquire( my_b->mutex, writer );
            __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL);
        }
        //! check whether bucket is locked for write
        bool is_writer() { return bucket::scoped_t::is_writer; }
        //! get bucket pointer
        bucket *operator() () { return my_b; }
    };
    void rehash_bucket( bucket *b_new, const hashcode_t h ) {
        __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)");
        __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
        __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed
        hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
#if __TBB_STATISTICS
        my_info_rehashes++; // invocations of rehash_bucket
#endif
        bucket_accessor b_old( this, h & mask );

        mask = (mask<<1) | 1; // get full mask for new bucket
        __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
    restart:
        for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
            hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->item.first );
#if TBB_USE_ASSERT
            hashcode_t bmask = h & (mask>>1);
            bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
            __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
#endif
            if( (c & mask) == h ) {
                if( !b_old.is_writer() )
                    if( !b_old.upgrade_to_writer() ) {
                        goto restart; // node ptr can be invalid due to concurrent erase
                    }
                *p = n->next; // exclude from b_old
                add_to_bucket( b_new, n );
            } else p = &n->next; // iterate to next item
        }
    }
    struct call_clear_on_leave {
        concurrent_hash_map* my_ch_map;
        call_clear_on_leave( concurrent_hash_map* a_ch_map ) : my_ch_map(a_ch_map) {}
        void dismiss() {my_ch_map = 0;}
        ~call_clear_on_leave(){
            if( my_ch_map )
                my_ch_map->clear();
        }
    };
public:

    class accessor;
    //! Combines data access, locking, and garbage collection.
    class const_accessor : private node::scoped_t /*which derived from no_copy*/ {
        friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
        friend class accessor;
    public:
        //! Type of value
        typedef const typename concurrent_hash_map::value_type value_type;

        //! True if result is empty.
        bool empty() const { return !my_node; }

        //! Set to null
        void release() {
            if( my_node ) {
                node::scoped_t::release();
                my_node = 0;
            }
        }

        //! Return reference to associated value in hash table.
        const_reference operator*() const {
            __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
            return my_node->item;
        }

        //! Return pointer to associated value in hash table.
        const_pointer operator->() const {
            return &operator*();
        }

        //! Create empty result
        const_accessor() : my_node(NULL) {}

        //! Destroy result after releasing the underlying reference.
        ~const_accessor() {
            my_node = NULL; // scoped lock's release() is called in its destructor
        }
    protected:
        bool is_writer() { return node::scoped_t::is_writer; }
        node *my_node;
        hashcode_t my_hash;
    };

    //! Allows write access to elements and combines data access, locking, and garbage collection.
    class accessor : public const_accessor {
    public:
        //! Type of value
        typedef typename concurrent_hash_map::value_type value_type;

        //! Return reference to associated value in hash table.
        reference operator*() const {
            __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
#pragma warning(suppress: 6011)
            return this->my_node->item;
        }

        //! Return pointer to associated value in hash table.
        pointer operator->() const {
            return &operator*();
        }
    };
    //! Construct empty table.
    concurrent_hash_map( const allocator_type &a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a)
    {}

    //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.
    concurrent_hash_map( size_type n, const allocator_type &a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a)
    {
        reserve( n );
    }

    //! Copy constructor
    concurrent_hash_map( const concurrent_hash_map &table, const allocator_type &a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a)
    {
        internal_copy(table);
    }

#if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Move constructor
    concurrent_hash_map( concurrent_hash_map &&table )
        : internal::hash_map_base(), my_allocator(std::move(table.get_allocator()))
    {
        swap(table);
    }

    //! Move constructor with given allocator instance
    concurrent_hash_map( concurrent_hash_map &&table, const allocator_type &a )
        : internal::hash_map_base(), my_allocator(a)
    {
        if( a == table.get_allocator() ) {
            this->swap(table);
        } else {
            call_clear_on_leave scope_guard(this);
            internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end()));
            scope_guard.dismiss();
        }
    }
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

    //! Construction with copying iteration range and given allocator instance
    template<typename I>
    concurrent_hash_map( I first, I last, const allocator_type &a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a)
    {
        reserve( std::distance(first, last) );
        internal_copy(first, last);
    }
#if __TBB_INITIALIZER_LISTS_PRESENT
    //! Construct table from initializer list
    concurrent_hash_map( std::initializer_list<value_type> il, const allocator_type &a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a)
    {
        reserve(il.size());
        internal_copy(il.begin(), il.end());
    }
#endif //__TBB_INITIALIZER_LISTS_PRESENT

    //! Assignment
    concurrent_hash_map& operator=( const concurrent_hash_map &table ) {
        if( this!=&table ) {
            clear();
            internal_copy(table);
        }
        return *this;
    }

#if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Move Assignment
    concurrent_hash_map& operator=( concurrent_hash_map &&table ) {
        if(this != &table) {
            typedef typename tbb::internal::allocator_traits<allocator_type>::propagate_on_container_move_assignment pocma_t;
            if(pocma_t::value || this->my_allocator == table.my_allocator) {
                concurrent_hash_map trash (std::move(*this));
                this->swap(table);
            } else {
                // do per-element move
                concurrent_hash_map moved_copy(std::move(table), this->my_allocator);
                this->swap(moved_copy);
            }
        }
        return *this;
    }
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

#if __TBB_INITIALIZER_LISTS_PRESENT
    //! Assignment from initializer list
    concurrent_hash_map& operator=( std::initializer_list<value_type> il ) {
        clear();
        reserve(il.size());
        internal_copy(il.begin(), il.end());
        return *this;
    }
#endif //__TBB_INITIALIZER_LISTS_PRESENT

    //! Rehashes and optionally resizes the whole table.
    /** Useful to optimize performance before or after concurrent operations.
        Also enables using of find() and count() concurrent methods in serial context. */
    void rehash(size_type n = 0);

    //! Clear table
    void clear();

    //! Clear table and destroy it.
    ~concurrent_hash_map() { clear(); }
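    // Usage sketch (illustrative, not part of the original header): rehash() is a serial
    // operation that completes any lazily deferred bucket rehashing, which can help before
    // a read-mostly phase; clear() is likewise not thread-safe. Names are example assumptions.
    //
    //     tbb::concurrent_hash_map<int,int> table;
    //     // ... concurrent insertions ...
    //     table.rehash();      // tidy buckets once, outside of concurrent use
    //     table.rehash(4096);  // or also grow to at least 4096 buckets
    //     table.clear();       // serial: removes all items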
    //------------------------------------------------------------------------
    // Parallel algorithm support
    //------------------------------------------------------------------------
    range_type range( size_type grainsize=1 ) {
        return range_type( *this, grainsize );
    }
    const_range_type range( size_type grainsize=1 ) const {
        return const_range_type( *this, grainsize );
    }

    //------------------------------------------------------------------------
    // STL support - not thread-safe methods
    //------------------------------------------------------------------------
    iterator begin() { return iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); }
    iterator end() { return iterator( *this, 0, 0, 0 ); }
    const_iterator begin() const { return const_iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); }
    const_iterator end() const { return const_iterator( *this, 0, 0, 0 ); }
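    // Usage sketch (illustrative, not part of the original header): begin()/end() give
    // ordinary forward iterators for serial traversal; they must not be used while other
    // threads insert or erase. Table/dump names are example assumptions.
    //
    //     #include <cstdio>
    //     typedef tbb::concurrent_hash_map<std::string,int> Table;
    //
    //     void dump( const Table& table ) {
    //         for( Table::const_iterator it = table.begin(); it != table.end(); ++it )
    //             std::printf( "%s -> %d\n", it->first.c_str(), it->second );
    //     }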
    std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range( key, end() ); }
    std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range( key, end() ); }

    //! Number of items in table.
    size_type size() const { return my_size; }

    //! True if size()==0.
    bool empty() const { return my_size == 0; }

    //! Upper bound on size.
    size_type max_size() const { return (~size_type(0))/sizeof(node); }

    //! Returns the current number of buckets
    size_type bucket_count() const { return my_mask+1; }

    //! return allocator object
    allocator_type get_allocator() const { return this->my_allocator; }

    //! swap two instances. Iterators are invalidated
    void swap(concurrent_hash_map &table);
    //! Return count of items (0 or 1)
    size_type count( const Key &key ) const {
        return const_cast<concurrent_hash_map*>(this)->lookup( false, key, NULL, NULL, false, &do_not_allocate_node );
    }

    //! Find item and acquire a read lock on the item.
    /** Return true if item is found, false otherwise. */
    bool find( const_accessor &result, const Key &key ) const {
        result.release();
        return const_cast<concurrent_hash_map*>(this)->lookup( false, key, NULL, &result, false, &do_not_allocate_node );
    }

    //! Find item and acquire a write lock on the item.
    /** Return true if item is found, false otherwise. */
    bool find( accessor &result, const Key &key ) {
        result.release();
        return lookup( false, key, NULL, &result, true, &do_not_allocate_node );
    }

    //! Insert item (if not already present) and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, const Key &key ) {
        result.release();
        return lookup( true, key, NULL, &result, false, &allocate_node_default_construct );
    }

    //! Insert item (if not already present) and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, const Key &key ) {
        result.release();
        return lookup( true, key, NULL, &result, true, &allocate_node_default_construct );
    }

    //! Insert item by copying if there is no such key present already and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, const value_type &value ) {
        result.release();
        return lookup( true, value.first, &value.second, &result, false, &allocate_node_copy_construct );
    }

    //! Insert item by copying if there is no such key present already and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, const value_type &value ) {
        result.release();
        return lookup( true, value.first, &value.second, &result, true, &allocate_node_copy_construct );
    }

    //! Insert item by copying if there is no such key present already.
    /** Returns true if item is inserted. */
    bool insert( const value_type &value ) {
        return lookup( true, value.first, &value.second, NULL, false, &allocate_node_copy_construct );
    }
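    // Usage sketch (illustrative, not part of the original header): accessor and
    // const_accessor act as smart pointers holding the per-element lock returned by
    // insert()/find(). The word-count style functions below are assumptions, not code
    // from this file.
    //
    //     typedef tbb::concurrent_hash_map<std::string,int> Counts;
    //
    //     void add_occurrence( Counts& counts, const std::string& word ) {
    //         Counts::accessor a;               // write lock held while 'a' is alive
    //         counts.insert( a, word );         // value-initialized to 0 if the key is new
    //         a->second += 1;
    //     }                                     // lock released when 'a' goes out of scope
    //
    //     bool lookup_occurrences( const Counts& counts, const std::string& word, int& out ) {
    //         Counts::const_accessor ca;        // read (shared) lock
    //         if( !counts.find( ca, word ) )
    //             return false;
    //         out = ca->second;
    //         return true;
    //     }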
#if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Insert item by moving if there is no such key present already and acquire a read lock on the item.
    bool insert( const_accessor &result, value_type && value ) {
        return generic_move_insert(result, std::move(value));
    }

    //! Insert item by moving if there is no such key present already and acquire a write lock on the item.
    bool insert( accessor &result, value_type && value ) {
        return generic_move_insert(result, std::move(value));
    }

    //! Insert item by moving if there is no such key present already.
    bool insert( value_type && value ) {
        return generic_move_insert(accessor_not_used(), std::move(value));
    }

#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
    //! Insert item by constructing it in place and acquire a read lock on the item.
    template<typename... Args>
    bool emplace( const_accessor &result, Args&&... args ) {
        return generic_emplace(result, std::forward<Args>(args)...);
    }

    //! Insert item by constructing it in place and acquire a write lock on the item.
    template<typename... Args>
    bool emplace( accessor &result, Args&&... args ) {
        return generic_emplace(result, std::forward<Args>(args)...);
    }

    //! Insert item by constructing it in place if there is no such key present already.
    template<typename... Args>
    bool emplace( Args&&... args ) {
        return generic_emplace(accessor_not_used(), std::forward<Args>(args)...);
    }
#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

    //! Insert range [first, last)
    template<typename I>
    void insert( I first, I last ) {
        for ( ; first != last; ++first )
            insert( *first );
    }
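    // Usage sketch (illustrative, not part of the original header): with C++11 support
    // enabled, emplace() forwards its arguments to the value_type constructor, and the
    // range overload above copies every element of [first,last). Names are examples only.
    //
    //     #include <vector>
    //     typedef tbb::concurrent_hash_map<int,std::string> Table;
    //
    //     Table table;
    //     table.emplace( 1, "one" );                      // builds pair<const int,string> in place
    //     std::vector<Table::value_type> v;
    //     v.push_back( Table::value_type( 2, "two" ) );
    //     v.push_back( Table::value_type( 3, "three" ) );
    //     table.insert( v.begin(), v.end() );             // insert range [first, last)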
#if __TBB_INITIALIZER_LISTS_PRESENT
    //! Insert initializer list
    void insert( std::initializer_list<value_type> il ) {
        insert( il.begin(), il.end() );
    }
#endif //__TBB_INITIALIZER_LISTS_PRESENT

    //! Erase item.
    /** Return true if item was erased by particularly this call. */
    bool erase( const Key& key );

    //! Erase item by const_accessor.
    /** Return true if item was erased by particularly this call. */
    bool erase( const_accessor& item_accessor ) {
        return exclude( item_accessor );
    }

    //! Erase item by accessor.
    /** Return true if item was erased by particularly this call. */
    bool erase( accessor& item_accessor ) {
        return exclude( item_accessor );
    }
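    // Usage sketch (illustrative, not part of the original header): erase() by key succeeds
    // for at most one caller; erase() through an accessor removes the element the accessor
    // currently locks. Table and function names are example assumptions.
    //
    //     typedef tbb::concurrent_hash_map<int,int> Table;
    //
    //     void remove_if_negative( Table& table, int key ) {
    //         Table::accessor a;
    //         if( table.find( a, key ) && a->second < 0 )
    //             table.erase( a );             // releases the lock and deletes the node
    //         // plain form:  table.erase( key );  returns true only for the erasing thread
    //     }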
protected:
    //! Insert or find item and optionally acquire a lock on the item.
    bool lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key &, const T * ), node *tmp_n = 0 ) ;

    struct accessor_not_used { void release(){} };
    friend const_accessor* accessor_location( accessor_not_used const& ) { return NULL; }
    friend const_accessor* accessor_location( const_accessor & a ) { return &a; }

    friend bool is_write_access_needed( accessor const& ) { return true; }
    friend bool is_write_access_needed( const_accessor const& ) { return false; }
    friend bool is_write_access_needed( accessor_not_used const& ) { return false; }
#if __TBB_CPP11_RVALUE_REF_PRESENT
    template<typename Accessor>
    bool generic_move_insert( Accessor && result, value_type && value ) {
        result.release();
        return lookup( true, value.first, &value.second, accessor_location(result), is_write_access_needed(result), &allocate_node_move_construct );
    }

#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
    template<typename Accessor, typename... Args>
    bool generic_emplace( Accessor && result, Args &&... args ) {
        result.release();
        node * node_ptr = allocate_node_emplace_construct(my_allocator, std::forward<Args>(args)...);
        return lookup( true, node_ptr->item.first, NULL, accessor_location(result), is_write_access_needed(result), &do_not_allocate_node, node_ptr );
    }
#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
#endif //__TBB_CPP11_RVALUE_REF_PRESENT
1073 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT 1074 #endif //__TBB_CPP11_RVALUE_REF_PRESENT 1080 template<
typename I>
1086 template<
typename I>
    //! Fast find when no concurrent erasure is used. For internal use inside TBB only!
    /** Return pointer to item with given key, or NULL if no such item exists.
        Must not be called concurrently with erasure operations. */
    const_pointer internal_fast_find( const Key& key ) const {
        hashcode_t h = my_hash_compare.hash( key );
        hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
        node *n;
    restart:
        __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
        bucket *b = get_bucket( h & m );
        if( itt_load_word_with_acquire(b->node_list) == internal::rehash_req )
        {
            bucket::scoped_t lock;
            if( lock.try_acquire( b->mutex, true ) ) {
                if( b->node_list == internal::rehash_req )
                    const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); // recursive rehashing
            }
            else lock.acquire( b->mutex, false );
            __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL);
        }
        n = search_bucket( key, b );
        if( n )
            return &n->item;
        else if( check_mask_race( h, m ) )
            goto restart;
        return 0;
    }
};
template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key&, const T*), node *tmp_n ) {
    __TBB_ASSERT( !result || !result->my_node, NULL );
    bool return_value;
    hashcode_t const h = my_hash_compare.hash( key );
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
    segment_index_t grow_segment = 0;
    node *n;
    restart:
    {//lock scope
        __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
        return_value = false;
        // get bucket
        bucket_accessor b( this, h & m );

        // find a node
        n = search_bucket( key, b() );
        if( op_insert ) {
            // [opt] insert a key
            if( !n ) {
                if( !tmp_n )
                    tmp_n = allocate_node(my_allocator, key, t);
                if( !b.is_writer() && !b.upgrade_to_writer() ) {
                    // Rerun search_bucket, in case another thread inserted the item during the upgrade.
                    n = search_bucket( key, b() );
                    if( is_valid(n) ) { // unfortunately, it did
                        b.downgrade_to_reader();
                        goto exists;
                    }
                }
                if( check_mask_race(h, m) )
                    goto restart; // b.release() is done in ~b().
                // insert and set flag to grow the container
                grow_segment = insert_new_node( b(), n = tmp_n, m );
                tmp_n = 0;
                return_value = true;
            }
        } else { // find or count
            if( !n ) {
                if( check_mask_race( h, m ) )
                    goto restart; // b.release() is done in ~b().
                return false;
            }
            return_value = true;
        }
    exists:
        if( !result ) goto check_growth;
        // acquire the item
        if( !result->try_acquire( n->mutex, write ) ) {
            for( tbb::internal::atomic_backoff backoff(true);; ) {
                if( result->try_acquire( n->mutex, write ) ) break;
                if( !backoff.bounded_pause() ) {
                    // the wait takes really long, restart the operation
                    b.release();
                    __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
                    __TBB_Yield();
                    m = (hashcode_t) itt_load_word_with_acquire( my_mask );
                    goto restart;
                }
            }
        }
    }//lock scope
    result->my_node = n;
    result->my_hash = h;
check_growth:
    // [opt] grow the container
    if( grow_segment ) {
#if __TBB_STATISTICS
        my_info_resizes++; // concurrent ones
#endif
        enable_segment( grow_segment );
    }
    if( tmp_n ) // if op_insert only
        delete_node( tmp_n );
    return return_value;
}
template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {
    hashcode_t h = my_hash_compare.hash( key );
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
    h &= m;
    bucket *b = get_bucket( h );
    while( b->node_list == internal::rehash_req ) {
        m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
        b = get_bucket( h &= m );
    }
    node *n = search_bucket( key, b );
    if( !n )
        return std::make_pair(end_, end_);
    iterator lower(*this, h, b, n), upper(lower);
    return std::make_pair(lower, ++upper);
}
template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::exclude( const_accessor &item_accessor ) {
    __TBB_ASSERT( item_accessor.my_node, NULL );
    node_base *const n = item_accessor.my_node;
    hashcode_t const h = item_accessor.my_hash;
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
    do {
        // get bucket
        bucket_accessor b( this, h & m, /*writer=*/true );
        node_base **p = &b()->node_list;
        while( *p && *p != n )
            p = &(*p)->next;
        if( !*p ) { // someone else was the first
            if( check_mask_race( h, m ) )
                continue;
            item_accessor.release();
            return false;
        }
        __TBB_ASSERT( *p == n, NULL );
        *p = n->next; // remove from container
        my_size--;
        break;
    } while(true);
    if( !item_accessor.is_writer() ) // need to get exclusive lock
        item_accessor.upgrade_to_writer(); // return value means nothing here
    item_accessor.my_node = NULL; // conceptually deallocated
    delete_node( n ); // only one thread can delete it
    return true;
}
template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::erase( const Key &key ) {
    node_base *n;
    hashcode_t const h = my_hash_compare.hash( key );
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
restart:
    {//lock scope
        // get bucket
        bucket_accessor b( this, h & m );
search:
        node_base **p = &b()->node_list;
        n = *p;
        while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->item.first ) ) {
            p = &n->next;
            n = *p;
        }
        if( !n ) { // not found, but mask could be changed
            if( check_mask_race( h, m ) )
                goto restart;
            return false;
        }
        else if( !b.is_writer() && !b.upgrade_to_writer() ) {
            if( check_mask_race( h, m ) ) // contended upgrade, check mask
                goto restart;
            goto search;
        }
        *p = n->next;
        my_size--;
    }
    {
        typename node::scoped_t item_locker( n->mutex, /*write=*/true );
    }
    // note: there should be no threads pretending to acquire this mutex again, do not try to upgrade const_accessor!
    delete_node( n ); // only one thread can delete it due to the write lock on the bucket
    return true;
}
template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::swap(concurrent_hash_map<Key,T,HashCompare,A> &table) {
    using std::swap;
    swap(this->my_allocator, table.my_allocator);
    swap(this->my_hash_compare, table.my_hash_compare);
    internal_swap(table);
}
template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::rehash(size_type sz) {
    reserve( sz );
    hashcode_t mask = my_mask;
    hashcode_t b = (mask+1)>>1; // size or first index of the last segment
    __TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2
    bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing
    for(; b <= mask; b++, bp++ ) {
        node_base *n = bp->node_list;
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req,
            "Broken internal structure" );
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0,
            "concurrent or unexpectedly terminated operation during rehash() execution" );
        if( n == internal::rehash_req ) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one
            hashcode_t h = b; bucket *b_old = bp;
            do {
                __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
                hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
                b_old = get_bucket( h &= m );
            } while( b_old->node_list == internal::rehash_req );
            // now h is the index of the root rehashed bucket b_old
            mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
            for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
                hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->item.first );
                if( (c & mask) != h ) { // should be rehashed
                    *p = q->next; // exclude from b_old
                    bucket *b_new = get_bucket( c & mask );
                    __TBB_ASSERT( b_new->node_list != internal::rehash_req,
                        "hash() function changed for key in table or internal error" );
                    add_to_bucket( b_new, q );
                } else p = &q->next; // iterate to next item
            }
        }
    }
#if TBB_USE_PERFORMANCE_WARNINGS
    int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
    static bool reported = false;
#endif
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
    for( b = 0; b <= mask; b++ ) {
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0,
            "concurrent or unexpectedly terminated operation during rehash() execution" );
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed,
            "Broken internal structure" );
#if TBB_USE_PERFORMANCE_WARNINGS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n->next ) overpopulated_buckets++;
#endif
#if TBB_USE_ASSERT
        for( ; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first ) & mask;
            __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
#if TBB_USE_PERFORMANCE_WARNINGS
    if( buckets > current_size) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets;
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
#if __TBB_USE_OPTIONAL_RTTI
            typeid(*this).name(),
#else
            "concurrent_hash_map",
#endif
            current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
}
template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::clear() {
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
    int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
    static bool reported = false;
#endif
    bucket *bp = 0;
    // check consistency
    for( segment_index_t b = 0; b <= m; b++ ) {
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req,
            "Broken internal structure" );
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0,
            "concurrent or unexpectedly terminated operation during clear() execution" );
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n == internal::rehash_req ) buckets--;
        else if( n->next ) overpopulated_buckets++;
#endif
#if __TBB_EXTRA_DEBUG
        for(; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first );
            h &= m;
            __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req,
                "hash() function changed for key in table or internal error" );
        }
#endif
    }
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
#if __TBB_STATISTICS
    printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d"
        " concurrent: resizes=%u rehashes=%u restarts=%u\n",
        current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets,
        unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) );
    my_info_resizes = 0;
    my_info_restarts = 0;
    my_info_rehashes = 0;
#endif
    if( buckets > current_size) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets;
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
#if __TBB_USE_OPTIONAL_RTTI
            typeid(*this).name(),
#else
            "concurrent_hash_map",
#endif
            current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
#endif//TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
    my_size = 0;
    segment_index_t s = segment_index_of( m );
    __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
    cache_aligned_allocator<bucket> alloc;
    do {
        __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
        segment_ptr_t buckets_ptr = my_table[s];
        size_type sz = segment_size( s ? s : 1 );
        for( segment_index_t i = 0; i < sz; i++ )
#pragma warning(suppress: 28182)
            for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
                buckets_ptr[i].node_list = n->next;
                delete_node( n );
            }
        if( s >= first_block )
            alloc.deallocate( buckets_ptr, sz );
#pragma warning(suppress: 6326)
        else if( s == embedded_block && embedded_block != first_block )
            alloc.deallocate( buckets_ptr, segment_size(first_block)-embedded_buckets );
        if( s >= embedded_block ) my_table[s] = 0;
    } while(s-- > 0);
    my_mask = embedded_buckets - 1;
}
template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( const concurrent_hash_map& source ) {
    reserve( source.my_size );
    hashcode_t mask = source.my_mask;
    if( my_mask == mask ) { // optimized version
        bucket *dst = 0, *src = 0;
        bool rehash_required = false;
        for( hashcode_t k = 0; k <= mask; k++ ) {
            if( k & (k-2) ) ++dst,src++; // not the beginning of a segment
            else { dst = get_bucket( k ); src = source.get_bucket( k ); }
            __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table");
            node *n = static_cast<node*>( src->node_list );
            if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets
                rehash_required = true;
                dst->node_list = internal::rehash_req;
            } else for(; n; n = static_cast<node*>( n->next ) ) {
                add_to_bucket( dst, new( my_allocator ) node(n->item.first, n->item.second) );
                ++my_size;
            }
        }
        if( rehash_required ) rehash();
    } else internal_copy( source.begin(), source.end() );
}
template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy(I first, I last) {
    hashcode_t m = my_mask;
    for(; first != last; ++first) {
        hashcode_t h = my_hash_compare.hash( (*first).first );
        bucket *b = get_bucket( h & m );
        __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table");
        node *n = new( my_allocator ) node(*first);
        add_to_bucket( b, n );
        ++my_size;
    }
}
} // namespace interface5

using interface5::concurrent_hash_map;

template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
    if(a.size() != b.size()) return false;
    typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
    typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
    for(; i != i_end; ++i) {
        j = b.equal_range(i->first).first;
        if( j == j_end || !(i->second == j->second) )
            return false;
    }
    return true;
}
template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
{    return !(a == b); }
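// Usage sketch (illustrative, not part of the original header): the comparisons above are
// element-wise and not thread-safe, so both tables must be quiescent. Names are examples.
//
//     #include <cassert>
//
//     tbb::concurrent_hash_map<int,int> a, b;
//     a.insert( std::make_pair(1, 10) );
//     b.insert( std::make_pair(1, 10) );
//     assert( a == b );
//     b.insert( std::make_pair(2, 20) );
//     assert( a != b );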
template<typename Key, typename T, typename HashCompare, typename A>
inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
{    a.swap( b ); }

#if _MSC_VER && !defined(__INTEL_COMPILER)
    #pragma warning( pop )
#endif // warning 4127 is back

} // namespace tbb

#endif /* __TBB_concurrent_hash_map_H */