#ifndef __TBB_concurrent_hash_map_H
#define __TBB_concurrent_hash_map_H

#include "tbb_stddef.h"

#if !TBB_USE_EXCEPTIONS && _MSC_VER
// Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
#pragma warning (push)
#pragma warning (disable: 4530)
#endif

#include <iterator>
#include <utility>      // Need std::pair
#include <cstring>      // Need std::memset

#if !TBB_USE_EXCEPTIONS && _MSC_VER
#pragma warning (pop)
#endif

#include "cache_aligned_allocator.h"
#include "tbb_allocator.h"
#include "spin_rw_mutex.h"
#include "atomic.h"
#include "aligned_space.h"
#include "tbb_exception.h"
#include "tbb_profiling.h"
#include "_concurrent_unordered_internal.h" // Need tbb_hasher
#if TBB_USE_PERFORMANCE_WARNINGS
#include <typeinfo>
#endif

namespace tbb {

//! hash_compare that is the default argument for concurrent_hash_map
template<typename Key>
struct tbb_hash_compare {
    static size_t hash( const Key& a ) { return tbb_hasher(a); }
    static bool equal( const Key& a, const Key& b ) { return a == b; }
};
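
// Illustrative sketch (not part of the original header): a user-defined
// HashCompare policy needs only the same two members. For std::string keys,
// for example, one might write:
//
//   struct StringHashCompare {
//       static size_t hash( const std::string& s ) {
//           size_t h = 0;
//           for( std::string::const_iterator c = s.begin(); c != s.end(); ++c )
//               h = h*31 + size_t(*c);        // simple multiplicative hash
//           return h;
//       }
//       static bool equal( const std::string& a, const std::string& b ) {
//           return a == b;
//       }
//   };
//   typedef tbb::concurrent_hash_map<std::string,int,StringHashCompare> StringTable;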

namespace interface4 {

    template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<Key, T> > >
    class concurrent_hash_map;

    //! @cond INTERNAL
    namespace internal {

    //! Type of a hash code.
    typedef size_t hashcode_t;
    //! Node base type
    struct hash_map_node_base : tbb::internal::no_copy {
        //! Mutex type
        typedef spin_rw_mutex mutex_t;
        //! Scoped lock type for mutex
        typedef mutex_t::scoped_lock scoped_t;
        //! Next node in chain
        hash_map_node_base *next;
        mutex_t mutex;
    };
    //! Incompleteness flag value: marks a bucket not yet rehashed from its parent
    static hash_map_node_base *const rehash_req = reinterpret_cast<hash_map_node_base*>(size_t(3));
    //! Rehashed empty bucket flag
    static hash_map_node_base *const empty_rehashed = reinterpret_cast<hash_map_node_base*>(size_t(0));
    //! Base class of concurrent_hash_map
    class hash_map_base {
    public:
        //! Size type
        typedef size_t size_type;
        //! Type of a hash code.
        typedef size_t hashcode_t;
        //! Segment index type
        typedef size_t segment_index_t;
        //! Node base type
        typedef hash_map_node_base node_base;
        //! Bucket type
        struct bucket : tbb::internal::no_copy {
            //! Mutex type for buckets
            typedef spin_rw_mutex mutex_t;
            //! Scoped lock type for mutex
            typedef mutex_t::scoped_lock scoped_t;
            mutex_t mutex;
            node_base *node_list;
        };
        //! Count of segments in the embedded (zeroth) block
        static size_type const embedded_block = 1;
        //! Count of buckets in the embedded block
        static size_type const embedded_buckets = 1<<embedded_block;
        //! Count of segments allocated together as the first block (includes embedded_block)
        static size_type const first_block = 8;
        //! Size of a pointer / table size: one segment per bit of the mask
        static size_type const pointers_per_table = sizeof(segment_index_t) * 8;
        //! Segment pointer
        typedef bucket *segment_ptr_t;
        //! Segment pointers table type
        typedef segment_ptr_t segments_table_t[pointers_per_table];
        //! Hash mask = sum of allocated segment sizes - 1
        atomic<hashcode_t> my_mask;
        //! Segment pointers table. Also prevents false sharing between my_mask and my_size
        segments_table_t my_table;
        //! Size of container in stored items
        atomic<size_type> my_size; // Kept in a separate cache line from my_mask
        //! Zero segment
        bucket my_embedded_segment[embedded_buckets];
#if __TBB_STATISTICS
        atomic<unsigned> my_info_resizes; // concurrent ones
        mutable atomic<unsigned> my_info_restarts; // race collisions
        atomic<unsigned> my_info_rehashes;  // invocations of rehash_bucket
#if !TBB_USE_PERFORMANCE_WARNINGS
#error Please enable TBB_USE_PERFORMANCE_WARNINGS as well
#endif
#endif
        //! Constructor
        hash_map_base() {
            // Zero my_table, my_size, my_mask, and the embedded buckets in one call;
            // relies on these members being laid out contiguously at the start of the object.
            std::memset( this, 0, pointers_per_table*sizeof(segment_ptr_t)
                + sizeof(my_size) + sizeof(my_mask)
                + embedded_buckets*sizeof(bucket) );
            for( size_type i = 0; i < embedded_block; i++ ) // fill the table
                my_table[i] = my_embedded_segment + segment_base(i);
            my_mask = embedded_buckets - 1;
            __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
#if __TBB_STATISTICS
            my_info_resizes = 0; // concurrent ones
            my_info_restarts = 0; // race collisions
            my_info_rehashes = 0;  // invocations of rehash_bucket
#endif
        }

        //! @return segment index of given index in the array
        static segment_index_t segment_index_of( size_type index ) {
            return segment_index_t( __TBB_Log2( index|1 ) );
        }

        //! @return the first array index of given segment
        static segment_index_t segment_base( segment_index_t k ) {
            return (segment_index_t(1)<<k & ~segment_index_t(1));
        }

        //! @return segment size except for @arg k == 0
        static size_type segment_size( segment_index_t k ) {
            return size_type(1)<<k; // fake value for k==0
        }

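        // Worked example (explanatory note, not in the original source): for
        // bucket index 9, segment_index_of(9) == __TBB_Log2(9|1) == 3,
        // segment_base(3) == (1<<3) & ~1 == 8, and segment_size(3) == 8, so
        // bucket 9 sits at offset 9-8 == 1 inside the 8-bucket segment
        // my_table[3]. Segment k (for k >= 1) thus covers bucket indices
        // [2^k, 2^(k+1)-1], and the table doubles by enabling one more segment.
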
        //! @return true if @arg ptr is a valid node pointer (not a flag value)
        static bool is_valid( void *ptr ) {
            return reinterpret_cast<size_t>(ptr) > size_t(63);
        }

        //! Initialize buckets
        static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
            if( is_initial ) std::memset(ptr, 0, sz*sizeof(bucket) );
            else for(size_type i = 0; i < sz; i++, ptr++) {
                *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
                ptr->node_list = rehash_req; // mark as not yet rehashed
            }
        }

        //! Add node @arg n to bucket @arg b
        static void add_to_bucket( bucket *b, node_base *n ) {
            __TBB_ASSERT(b->node_list != rehash_req, NULL);
            n->next = b->node_list;
            b->node_list = n; // bucket is under lock and its flag is set
        }

        //! Exception safety helper: resets the claimed table slot if allocation throws
        struct enable_segment_failsafe {
            segment_ptr_t *my_segment_ptr;
            enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {}
            ~enable_segment_failsafe() {
                if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress
            }
        };

        //! Enable segment
        void enable_segment( segment_index_t k, bool is_initial = false ) {
            __TBB_ASSERT( k, "Zero segment must be embedded" );
            enable_segment_failsafe watchdog( my_table, k );
            cache_aligned_allocator<bucket> alloc;
            size_type sz;
            __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
            if( k >= first_block ) { // not the first block
                sz = segment_size( k );
                segment_ptr_t ptr = alloc.allocate( sz );
                init_buckets( ptr, sz, is_initial );
                itt_hide_store_word( my_table[k], ptr );
                sz <<= 1; // double it to get the entire capacity of the container
            } else { // the first block
                __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
                sz = segment_size( first_block );
                segment_ptr_t ptr = alloc.allocate( sz - embedded_buckets );
                init_buckets( ptr, sz - embedded_buckets, is_initial );
                ptr -= segment_base(embedded_block);
                for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets
                    itt_hide_store_word( my_table[i], ptr + segment_base(i) );
            }
            itt_store_word_with_release( my_mask, sz-1 );
            watchdog.my_segment_ptr = 0; // discard the watchdog, segment created okay
        }

        //! Get bucket by (masked) hashcode
        bucket *get_bucket( hashcode_t h ) const throw() {
            segment_index_t s = segment_index_of( h );
            h -= segment_base(s);
            segment_ptr_t seg = my_table[s];
            __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
            return &seg[h];
        }

        //! Internal serial rehashing helper
        void mark_rehashed_levels( hashcode_t h ) throw () {
            segment_index_t s = segment_index_of( h );
            while( segment_ptr_t seg = my_table[++s] )
                if( seg[h].node_list == rehash_req ) {
                    seg[h].node_list = empty_rehashed;
                    mark_rehashed_levels( h + segment_base(s) );
                }
        }

        //! Check for mask race
        // Splitting into two functions should help inlining
        inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {
            hashcode_t m_now, m_old = m;
            m_now = (hashcode_t) itt_load_word_with_acquire( my_mask );
            if( m_old != m_now )
                return check_rehashing_collision( h, m_old, m = m_now );
            return false;
        }

        //! Process mask race, check for rehashing collision
        bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const {
            __TBB_ASSERT(m_old != m, NULL);
            if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
                // condition above proves that h has some other bits set besides m_old;
                // find the next applicable mask after m_old
                for( ++m_old; !(h & m_old); m_old <<= 1 ) // at most a few rounds depending on the first block size
                    ;
                m_old = (m_old<<1) - 1; // get full mask from a bit
                __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);
                // check whether the bucket is rehashing/rehashed
                if( itt_load_word_with_acquire(get_bucket(h & m_old)->node_list) != rehash_req )
                {
#if __TBB_STATISTICS
                    my_info_restarts++; // race collisions
#endif
                    return true;
                }
            }
            return false;
        }
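
        // Explanatory note (not in the original source): suppose a reader
        // computed h & m with an old mask m_old == 0x3 while the table grew so
        // that m is now 0x7, and h == 0x5. Then h & m_old == 1 but h & m == 5;
        // the loop above rebuilds the smallest full mask above m_old that
        // distinguishes h (here 0x7), and if bucket h & 0x7 has already been
        // rehashed, the item may have moved there, so the caller must restart
        // its search with the fresh mask.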

        //! Insert a node and check the load factor. @return segment index to enable if a new segment was created
        segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) {
            size_type sz = ++my_size; // prefix form enforces allocation after the first item is inserted
            add_to_bucket( b, n );
            // check load factor
            if( sz >= mask ) {
                segment_index_t new_seg = segment_index_of( mask+1 );
                __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
                if( !itt_hide_load_word(my_table[new_seg])
                  && __TBB_CompareAndSwapW(&my_table[new_seg], 2, 0) == 0 )
                    return new_seg; // The caller must process the new segment
            }
            return 0;
        }
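
        // Explanatory note (not in the original source): growth keeps the load
        // factor near one item per bucket. Once my_size reaches the current
        // mask, the inserting thread claims my_table[new_seg] by CAS-ing the
        // dummy value 2 into the empty slot; the caller then allocates the
        // segment via enable_segment(), which publishes the enlarged mask with
        // release semantics so readers never see a mask without its segment.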

        //! Prepare enough segments for the given number of buckets
        void reserve(size_type buckets) {
            if( !buckets-- ) return;
            bool is_initial = !my_size;
            for( size_type m = my_mask; buckets > m; m = my_mask )
                enable_segment( segment_index_of( m+1 ), is_initial );
        }
        //! Swap hash_map_bases
        void internal_swap(hash_map_base &table) {
            std::swap(this->my_mask, table.my_mask);
            std::swap(this->my_size, table.my_size);
            for(size_type i = 0; i < embedded_buckets; i++)
                std::swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
            for(size_type i = embedded_block; i < pointers_per_table; i++)
                std::swap(this->my_table[i], table.my_table[i]);
        }
    };

    template<typename Iterator>
    class hash_map_range;

    //! Meets requirements of a forward iterator for STL
    /** Value is either the T or const T type of the container. */
    template<typename Container, typename Value>
    class hash_map_iterator
        : public std::iterator<std::forward_iterator_tag,Value>
    {
        typedef Container map_type;
        typedef typename Container::node node;
        typedef hash_map_base::node_base node_base;
        typedef hash_map_base::bucket bucket;

        template<typename C, typename T, typename U>
        friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

        template<typename C, typename T, typename U>
        friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

        template<typename C, typename T, typename U>
        friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

        template<typename C, typename U>
        friend class hash_map_iterator;

        template<typename I>
        friend class hash_map_range;

        void advance_to_next_bucket() {
            size_t k = my_index+1;
            while( my_bucket && k <= my_map->my_mask ) {
                // Following test uses 2's-complement wizardry
                if( k & (k-2) ) // not the beginning of a segment
                    ++my_bucket;
                else my_bucket = my_map->get_bucket( k );
                my_node = static_cast<node*>( my_bucket->node_list );
                if( hash_map_base::is_valid(my_node) ) {
                    my_index = k; return;
                }
                ++k;
            }
            my_bucket = 0; my_node = 0; my_index = k; // the end
        }
#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
        template<typename Key, typename T, typename HashCompare, typename A>
        friend class interface4::concurrent_hash_map;
#else
    public: // workaround
#endif
        //! concurrent_hash_map over which we are iterating.
        const Container *my_map;

        //! Index in hash table for current item
        size_t my_index;

        //! Pointer to bucket
        const bucket *my_bucket;

        //! Pointer to node that has current item
        node *my_node;

        hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );

    public:
        //! Construct undefined iterator
        hash_map_iterator() {}
        hash_map_iterator( const hash_map_iterator<Container,typename Container::value_type> &other ) :
            my_map(other.my_map),
            my_index(other.my_index),
            my_bucket(other.my_bucket),
            my_node(other.my_node)
        {}
        Value& operator*() const {
            __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
            return my_node->item;
        }
        Value* operator->() const {return &operator*();}
        hash_map_iterator& operator++();

        //! Post increment
        hash_map_iterator operator++(int) {
            hash_map_iterator old(*this);
            operator++();
            return old;
        }
    };

    template<typename Container, typename Value>
    hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
        my_map(&map),
        my_index(index),
        my_bucket(b),
        my_node( static_cast<node*>(n) )
    {
        if( b && !hash_map_base::is_valid(n) )
            advance_to_next_bucket();
    }

    template<typename Container, typename Value>
    hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {
        my_node = static_cast<node*>( my_node->next );
        if( !my_node ) advance_to_next_bucket();
        return *this;
    }

    template<typename Container, typename T, typename U>
    bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
        return i.my_node == j.my_node && i.my_map == j.my_map;
    }

    template<typename Container, typename T, typename U>
    bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
        return i.my_node != j.my_node || i.my_map != j.my_map;
    }

    //! Range class used with concurrent_hash_map
    template<typename Iterator>
    class hash_map_range {
        typedef typename Iterator::map_type map_type;
        Iterator my_begin;
        Iterator my_end;
        mutable Iterator my_midpoint;
        size_t my_grainsize;
        //! Set my_midpoint to point approximately half way between my_begin and my_end.
        void set_midpoint() const;
        template<typename U> friend class hash_map_range;
    public:
        //! Type for size of a range
        typedef std::size_t size_type;
        typedef typename Iterator::value_type value_type;
        typedef typename Iterator::reference reference;
        typedef typename Iterator::difference_type difference_type;
        typedef Iterator iterator;

        //! True if range is empty.
        bool empty() const {return my_begin==my_end;}

        //! True if range can be partitioned into two subranges.
        bool is_divisible() const {
            return my_midpoint!=my_end;
        }
        //! Split range.
        hash_map_range( hash_map_range& r, split ) :
            my_end(r.my_end),
            my_grainsize(r.my_grainsize)
        {
            r.my_end = my_begin = r.my_midpoint;
            __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
            __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
            set_midpoint();
            r.set_midpoint();
        }
        //! Type conversion
        template<typename U>
        hash_map_range( hash_map_range<U>& r) :
            my_begin(r.my_begin),
            my_end(r.my_end),
            my_midpoint(r.my_midpoint),
            my_grainsize(r.my_grainsize)
        {}
#if TBB_DEPRECATED
        //! Init range with iterators and grainsize specified
        hash_map_range( const Iterator& begin_, const Iterator& end_, size_type grainsize_ = 1 ) :
            my_begin(begin_),
            my_end(end_),
            my_grainsize(grainsize_)
        {
            if(!my_end.my_index && !my_end.my_bucket) // end
                my_end.my_index = my_end.my_map->my_mask + 1;
            set_midpoint();
            __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
        }
#endif
        //! Init range with container and grainsize specified
        hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
            my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
            my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
            my_grainsize( grainsize_ )
        {
            __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
            set_midpoint();
        }
        const Iterator& begin() const {return my_begin;}
        const Iterator& end() const {return my_end;}
        //! The grain size for this range.
        size_type grainsize() const {return my_grainsize;}
    };

    template<typename Iterator>
    void hash_map_range<Iterator>::set_midpoint() const {
        // Split by groups of nodes
        size_t m = my_end.my_index-my_begin.my_index;
        if( m > my_grainsize ) {
            m = my_begin.my_index + m/2u;
            hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
            my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
        } else {
            my_midpoint = my_end;
        }
        __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
            "my_begin is after my_midpoint" );
        __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
            "my_midpoint is after my_end" );
        __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
            "[my_begin, my_midpoint) range should not be empty" );
    }
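
    // Illustrative usage sketch (not part of this header): hash_map_range is
    // what lets tbb::parallel_for traverse a concurrent_hash_map in parallel.
    // Assuming "MyTable" is a typedef for some concurrent_hash_map
    // instantiation, "table" is an instance, and tbb/parallel_for.h is
    // included, a Body functor could look like:
    //
    //   struct IncrementEach {
    //       void operator()( const MyTable::range_type &r ) const {
    //           for( MyTable::iterator i = r.begin(); i != r.end(); ++i )
    //               ++i->second;
    //       }
    //   };
    //   tbb::parallel_for( table.range(), IncrementEach() );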

    } // namespace internal
    //! @endcond

//! Unordered map from Key to T.
/** concurrent_hash_map is an associative container that permits concurrent
    insertion, lookup, and erasure. Accessors act as smart pointers that hold
    a per-item reader or writer lock for as long as they refer to the item. */
template<typename Key, typename T, typename HashCompare, typename Allocator>
class concurrent_hash_map : protected internal::hash_map_base {
    template<typename Container, typename Value>
    friend class internal::hash_map_iterator;

    template<typename I>
    friend class internal::hash_map_range;

public:
    typedef Key key_type;
    typedef T mapped_type;
    typedef std::pair<const Key,T> value_type;
    typedef hash_map_base::size_type size_type;
    typedef ptrdiff_t difference_type;
    typedef value_type *pointer;
    typedef const value_type *const_pointer;
    typedef value_type &reference;
    typedef const value_type &const_reference;
    typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;
    typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;
    typedef internal::hash_map_range<iterator> range_type;
    typedef internal::hash_map_range<const_iterator> const_range_type;
    typedef Allocator allocator_type;

protected:
    friend class const_accessor;
    struct node;
    typedef typename Allocator::template rebind<node>::other node_allocator_type;
    node_allocator_type my_allocator;
    HashCompare my_hash_compare;

    struct node : public node_base {
        value_type item;
        node( const Key &key ) : item(key, T()) {}
        node( const Key &key, const T &t ) : item(key, t) {}
        // exception-safe allocation
        void *operator new( size_t, node_allocator_type &a ) {
            void *ptr = a.allocate(1);
            if(!ptr)
                tbb::internal::throw_exception(tbb::internal::eid_bad_alloc);
            return ptr;
        }
        // matches the placement form above; called if an exception is thrown in the constructor
        void operator delete( void *ptr, node_allocator_type &a ) { a.deallocate(static_cast<node*>(ptr),1); }
    };

    void delete_node( node_base *n ) {
        my_allocator.destroy( static_cast<node*>(n) );
        my_allocator.deallocate( static_cast<node*>(n), 1);
    }

    node *search_bucket( const key_type &key, bucket *b ) const {
        node *n = static_cast<node*>( b->node_list );
        while( is_valid(n) && !my_hash_compare.equal(key, n->item.first) )
            n = static_cast<node*>( n->next );
        __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
        return n;
    }

    //! bucket accessor: finds a bucket by masked hashcode, optionally rehashes it, and acquires its lock
    class bucket_accessor : public bucket::scoped_t {
        bool my_is_writer;
        bucket *my_b;
    public:
        bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }
        //! find a bucket by masked hashcode, optionally rehash it, and acquire the lock
        inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {
            my_b = base->get_bucket( h );
            // If the bucket still needs rehashing, try to take the write lock and do it here
            if( itt_load_word_with_acquire(my_b->node_list) == internal::rehash_req
                && try_acquire( my_b->mutex, /*write=*/true ) )
            {
                if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); // recursive rehashing
                my_is_writer = true;
            }
            else bucket::scoped_t::acquire( my_b->mutex, /*write=*/my_is_writer = writer );
            __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL);
        }
        //! check whether bucket is locked for write
        bool is_writer() { return my_is_writer; }
        //! get bucket pointer
        bucket *operator() () { return my_b; }
        bool upgrade_to_writer() { my_is_writer = true; return bucket::scoped_t::upgrade_to_writer(); }
    };

    //! Move items that belong to bucket b_new out of its parent bucket
    void rehash_bucket( bucket *b_new, const hashcode_t h ) {
        __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)");
        __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
        __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed
        hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
#if __TBB_STATISTICS
        my_info_rehashes++; // invocations of rehash_bucket
#endif
        bucket_accessor b_old( this, h & mask );

        mask = (mask<<1) | 1; // get full mask for new bucket
        __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
    restart:
        for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
            hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->item.first );
#if TBB_USE_ASSERT
            hashcode_t bmask = h & (mask>>1);
            bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
            __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
#endif
            if( (c & mask) == h ) { // the item belongs to the new bucket
                if( !b_old.is_writer() )
                    if( !b_old.upgrade_to_writer() ) {
                        goto restart; // node pointer can be invalid due to concurrent erase
                    }
                *p = n->next; // exclude from b_old
                add_to_bucket( b_new, n );
            } else p = &n->next; // iterate to next item
        }
    }

public:

    class accessor;
    //! Combines data access, locking, and garbage collection.
    class const_accessor {
        friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
        friend class accessor;
        void operator=( const accessor & ) const; // Deny access
        const_accessor( const accessor & );       // Deny access
    public:
        //! Type of value
        typedef const typename concurrent_hash_map::value_type value_type;

        //! True if result is empty.
        bool empty() const {return !my_node;}

        //! Set to null
        void release() {
            if( my_node ) {
                my_lock.release();
                my_node = 0;
            }
        }

        //! Return reference to associated value in hash table.
        const_reference operator*() const {
            __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
            return my_node->item;
        }

        //! Return pointer to associated value in hash table.
        const_pointer operator->() const {
            return &operator*();
        }

        //! Create empty result
        const_accessor() : my_node(NULL) {}

        //! Destroy result after releasing the underlying reference.
        ~const_accessor() {
            my_node = NULL; // the scoped lock's release() is called in its destructor
        }
    private:
        node *my_node;
        typename node::scoped_t my_lock;
        hashcode_t my_hash;
    };

    //! Allows write access to elements and combines data access, locking, and garbage collection.
    class accessor: public const_accessor {
    public:
        //! Type of value
        typedef typename concurrent_hash_map::value_type value_type;

        //! Return reference to associated value in hash table.
        reference operator*() const {
            __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
            return this->my_node->item;
        }

        //! Return pointer to associated value in hash table.
        pointer operator->() const {
            return &operator*();
        }
    };
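
    // Illustrative usage sketch (not part of the original header): an accessor
    // behaves like a smart pointer that holds an element lock until it is
    // released or destroyed, e.g.
    //
    //   tbb::concurrent_hash_map<std::string,int> table;
    //   {
    //       tbb::concurrent_hash_map<std::string,int>::accessor a;
    //       table.insert( a, "key" ); // write lock held on the (possibly new) element
    //       a->second += 1;
    //   } // lock released when a goes out of scope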

    //! Construct empty table.
    concurrent_hash_map(const allocator_type &a = allocator_type())
        : internal::hash_map_base(), my_allocator(a)
    {}

    //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.
    concurrent_hash_map(size_type n, const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        reserve( n );
    }

    //! Copy constructor
    concurrent_hash_map( const concurrent_hash_map& table, const allocator_type &a = allocator_type())
        : internal::hash_map_base(), my_allocator(a)
    {
        internal_copy(table);
    }

    //! Construction with copying iteration range and given allocator instance
    template<typename I>
    concurrent_hash_map(I first, I last, const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        reserve( std::distance(first, last) );
        internal_copy(first, last);
    }

    //! Assignment
    concurrent_hash_map& operator=( const concurrent_hash_map& table ) {
        if( this!=&table ) {
            clear();
            internal_copy(table);
        }
        return *this;
    }

    //! Rehashes and optionally resizes the whole table.
    /** Useful to optimize performance before or after concurrent operations.
        Also enables using of find() and count() concurrent methods in serial context. */
    void rehash(size_type n = 0);

    //! Clear table
    void clear();

    //! Clear table and destroy it.
    ~concurrent_hash_map() { clear(); }

    //------------------------------------------------------------------------
    // Parallel algorithm support
    //------------------------------------------------------------------------
    range_type range( size_type grainsize=1 ) {
        return range_type( *this, grainsize );
    }
    const_range_type range( size_type grainsize=1 ) const {
        return const_range_type( *this, grainsize );
    }

    //------------------------------------------------------------------------
    // STL support - not thread-safe methods
    //------------------------------------------------------------------------
    iterator begin() {return iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);}
    iterator end() {return iterator(*this,0,0,0);}
    const_iterator begin() const {return const_iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);}
    const_iterator end() const {return const_iterator(*this,0,0,0);}
    std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range(key, end()); }
    std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range(key, end()); }

    //! Number of items in table.
    size_type size() const { return my_size; }

    //! True if size()==0.
    bool empty() const { return my_size == 0; }

    //! Upper bound on size.
    size_type max_size() const {return (~size_type(0))/sizeof(node);}

    //! Returns the current number of buckets
    size_type bucket_count() const { return my_mask+1; }

    //! return allocator object
    allocator_type get_allocator() const { return this->my_allocator; }

    //! swap two instances. Iterators are invalidated
    void swap(concurrent_hash_map &table);

    //------------------------------------------------------------------------
    // concurrent map operations
    //------------------------------------------------------------------------

    //! Return count of items (0 or 1)
    size_type count( const Key &key ) const {
        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false );
    }

    //! Find item and acquire a read lock on the item.
    /** Returns true if item is found, false otherwise. */
    bool find( const_accessor &result, const Key &key ) const {
        result.release();
        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false );
    }

    //! Find item and acquire a write lock on the item.
    /** Returns true if item is found, false otherwise. */
    bool find( accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/false, key, NULL, &result, /*write=*/true );
    }

    //! Insert item (if not already present) and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/true, key, NULL, &result, /*write=*/false );
    }

    //! Insert item (if not already present) and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/true, key, NULL, &result, /*write=*/true );
    }

    //! Insert item by copying if there is no such key present already and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, const value_type &value ) {
        result.release();
        return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false );
    }

    //! Insert item by copying if there is no such key present already and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, const value_type &value ) {
        result.release();
        return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true );
    }

    //! Insert item by copying if there is no such key present already
    /** Returns true if item is inserted. */
    bool insert( const value_type &value ) {
        return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false );
    }

    //! Insert range [first, last)
    template<typename I>
    void insert(I first, I last) {
        for(; first != last; ++first)
            insert( *first );
    }

    //! Erase item.
    /** Returns true if the item was actually erased by this call. */
    bool erase( const Key& key );

    //! Erase item by const_accessor.
    /** Returns true if the item was actually erased by this call. */
    bool erase( const_accessor& item_accessor ) {
        return exclude( item_accessor, /*readonly=*/true );
    }

    //! Erase item by accessor.
    /** Returns true if the item was actually erased by this call. */
    bool erase( accessor& item_accessor ) {
        return exclude( item_accessor, /*readonly=*/false );
    }

protected:
    //! Insert or find item and optionally acquire a lock on the item.
    bool lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write );

    //! delete item by accessor
    bool exclude( const_accessor &item_accessor, bool readonly );

    //! Returns the pair of iterators bounding the single item with the given key, or (end,end)
    template<typename I>
    std::pair<I, I> internal_equal_range( const Key& key, I end ) const;

    //! Copy "source" to *this, where *this must start out empty.
    void internal_copy( const concurrent_hash_map& source );

    template<typename I>
    void internal_copy(I first, I last);

    //! Fast find when no concurrent erasure is used. For internal use inside TBB only!
    /** Returns pointer to the item with the given key, or NULL if no such item exists.
        Must not be called concurrently with erasure operations. */
    const_pointer internal_fast_find( const Key& key ) const {
        hashcode_t h = my_hash_compare.hash( key );
        hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
        node *n;
    restart:
        __TBB_ASSERT((m&(m+1))==0, NULL);
        bucket *b = get_bucket( h & m );
        // If the bucket still needs rehashing, try to take the write lock and do it here
        if( itt_load_word_with_acquire(b->node_list) == internal::rehash_req )
        {
            bucket::scoped_t lock;
            if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
                if( b->node_list == internal::rehash_req)
                    const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); // recursive rehashing
            }
            else lock.acquire( b->mutex, /*write=*/false );
            __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL);
        }
        n = search_bucket( key, b );
        if( n )
            return &n->item;
        else if( check_mask_race( h, m ) )
            goto restart;
        return 0;
    }
};

#if _MSC_VER && !defined(__INTEL_COMPILER)
    // Suppress "conditional expression is constant" warning.
    #pragma warning( push )
    #pragma warning( disable: 4127 )
#endif

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write ) {
    __TBB_ASSERT( !result || !result->my_node, NULL );
    bool return_value;
    hashcode_t const h = my_hash_compare.hash( key );
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
    segment_index_t grow_segment = 0;
    node *n, *tmp_n = 0;
    restart:
    {//lock scope
        __TBB_ASSERT((m&(m+1))==0, NULL);
        return_value = false;
        // get bucket
        bucket_accessor b( this, h & m );

        // find a node
        n = search_bucket( key, b() );
        if( op_insert ) {
            // [opt] insert a key
            if( !n ) {
                if( !tmp_n ) {
                    if(t) tmp_n = new( my_allocator ) node(key, *t);
                    else  tmp_n = new( my_allocator ) node(key);
                }
                if( !b.is_writer() && !b.upgrade_to_writer() ) {
                    // Rerun search_bucket, in case another thread inserted the item during the upgrade.
                    n = search_bucket( key, b() );
                    if( is_valid(n) ) { // unfortunately, it did
                        b.downgrade_to_reader();
                        goto exists;
                    }
                }
                if( check_mask_race(h, m) ) // once per bucket lock
                    goto restart; // b.release() is done in ~b()
                // insert and set flag to grow the container
                grow_segment = insert_new_node( b(), n = tmp_n, m );
                tmp_n = 0;
                return_value = true;
            }
        } else { // find or count
            if( !n ) {
                if( check_mask_race( h, m ) )
                    goto restart; // b.release() is done in ~b()
                return false;
            }
            return_value = true;
        }
    exists:
        if( !result ) goto check_growth;
        // acquire the item
        if( !result->my_lock.try_acquire( n->mutex, write ) ) {
            // we are unlucky, prepare for longer wait
            tbb::internal::atomic_backoff trials;
            do {
                if( !trials.bounded_pause() ) {
                    // the wait takes really long, restart the operation
                    b.release();
                    __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
                    __TBB_Yield();
                    m = (hashcode_t) itt_load_word_with_acquire( my_mask );
                    goto restart;
                }
            } while( !result->my_lock.try_acquire( n->mutex, write ) );
        }
    }//lock scope
    result->my_node = n;
    result->my_hash = h;
    check_growth:
    // [opt] grow the container
    if( grow_segment ) {
#if __TBB_STATISTICS
        my_info_resizes++; // concurrent ones
#endif
        enable_segment( grow_segment );
    }
    if( tmp_n ) // if op_insert only
        delete_node( tmp_n );
    return return_value;
}

template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {
    hashcode_t h = my_hash_compare.hash( key );
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, NULL);
    h &= m;
    bucket *b = get_bucket( h );
    while( b->node_list == internal::rehash_req ) {
        m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
        b = get_bucket( h &= m );
    }
    node *n = search_bucket( key, b );
    if( !n )
        return std::make_pair(end_, end_);
    iterator lower(*this, h, b, n), upper(lower);
    return std::make_pair(lower, ++upper);
}

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::exclude( const_accessor &item_accessor, bool readonly ) {
    __TBB_ASSERT( item_accessor.my_node, NULL );
    node_base *const n = item_accessor.my_node;
    item_accessor.my_node = NULL; // we must release the accessor anyway
    hashcode_t const h = item_accessor.my_hash;
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
    do {
        // get and lock the bucket for write
        bucket_accessor b( this, h & m, /*writer=*/true );
        node_base **p = &b()->node_list;
        while( *p && *p != n )
            p = &(*p)->next;
        if( !*p ) { // someone else was the first
            if( check_mask_race( h, m ) )
                continue; // the item may be in another bucket now
            item_accessor.my_lock.release();
            return false;
        }
        __TBB_ASSERT( *p == n, NULL );
        *p = n->next; // remove from container
        my_size--;
        break;
    } while(true);
    if( readonly ) // need to get an exclusive lock
        item_accessor.my_lock.upgrade_to_writer(); // return value means nothing here
    item_accessor.my_lock.release();
    delete_node( n ); // Only one thread can delete it due to the write lock on the bucket
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::erase( const Key &key ) {
    node_base *n;
    hashcode_t const h = my_hash_compare.hash( key );
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
restart:
    {//lock scope
        // get and lock the bucket
        bucket_accessor b( this, h & m );
    search:
        node_base **p = &b()->node_list;
        n = *p;
        while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->item.first ) ) {
            p = &n->next;
            n = *p;
        }
        if( !n ) { // not found, but the mask could have changed
            if( check_mask_race( h, m ) )
                goto restart;
            return false;
        }
        else if( !b.is_writer() && !b.upgrade_to_writer() ) {
            if( check_mask_race( h, m ) ) // contended upgrade, check mask
                goto restart;
            goto search;
        }
        *p = n->next;
        my_size--;
    }
    {
        // wait for concurrent users of the item to drain before deleting it
        typename node::scoped_t item_locker( n->mutex, /*write=*/true );
    }
    // note: there should be no threads trying to acquire this mutex again; do not try to upgrade a const_accessor!
    delete_node( n ); // Only one thread can delete it due to the write lock on the bucket
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::swap(concurrent_hash_map<Key,T,HashCompare,A> &table) {
    std::swap(this->my_allocator, table.my_allocator);
    std::swap(this->my_hash_compare, table.my_hash_compare);
    internal_swap(table);
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::rehash(size_type sz) {
    reserve( sz );
    hashcode_t mask = my_mask;
    hashcode_t b = (mask+1)>>1; // size or first index of the last segment
    __TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2
    bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing
    for(; b <= mask; b++, bp++ ) {
        node_base *n = bp->node_list;
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
        if( n == internal::rehash_req ) { // rehash bucket; conditional because rehashing of a previous bucket may affect this one
            hashcode_t h = b; bucket *b_old = bp;
            do { // walk up to the root bucket that still holds the items
                __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
                hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
                b_old = get_bucket( h &= m );
            } while( b_old->node_list == internal::rehash_req );
            // now h is the index of the root rehashed bucket b_old
            mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
            for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
                hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->item.first );
                if( (c & mask) != h ) { // should be rehashed
                    *p = q->next; // exclude from b_old
                    bucket *b_new = get_bucket( c & mask );
                    __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" );
                    add_to_bucket( b_new, q );
                } else p = &q->next; // iterate to next item
            }
        }
    }
#if TBB_USE_PERFORMANCE_WARNINGS
    int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
    static bool reported = false;
#endif
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
    for( b = 0; b <= mask; b++ ) { // check the whole table
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" );
#if TBB_USE_PERFORMANCE_WARNINGS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n->next ) overpopulated_buckets++;
#endif
#if TBB_USE_ASSERT
        for( ; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first ) & mask;
            __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
#if TBB_USE_PERFORMANCE_WARNINGS
    if( buckets > current_size) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets;
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
            typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::clear() {
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, NULL);
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
#if TBB_USE_PERFORMANCE_WARNINGS
    int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
    static bool reported = false;
#endif
    bucket *bp = 0;
    // check consistency
    for( segment_index_t b = 0; b <= m; b++ ) {
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" );
#if TBB_USE_PERFORMANCE_WARNINGS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n == internal::rehash_req ) buckets--;
        else if( n->next ) overpopulated_buckets++;
#endif
#if __TBB_EXTRA_DEBUG
        for(; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first );
            h &= m;
            __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#if TBB_USE_PERFORMANCE_WARNINGS
#if __TBB_STATISTICS
    printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d"
        " concurrent: resizes=%u rehashes=%u restarts=%u\n",
        current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets,
        unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) );
    my_info_resizes = 0; // concurrent ones
    my_info_restarts = 0; // race collisions
    my_info_rehashes = 0;  // invocations of rehash_bucket
#endif
    if( buckets > current_size) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets;
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
            typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
    my_size = 0;
    segment_index_t s = segment_index_of( m );
    __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
    cache_aligned_allocator<bucket> alloc;
    do { // retire the segments from the last to the first
        __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
        segment_ptr_t buckets_ptr = my_table[s];
        size_type sz = segment_size( s ? s : 1 );
        for( segment_index_t i = 0; i < sz; i++ )
            for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
                buckets_ptr[i].node_list = n->next;
                delete_node( n );
            }
        if( s >= first_block) // the first segment or the next
            alloc.deallocate( buckets_ptr, sz );
        else if( s == embedded_block && embedded_block != first_block )
            alloc.deallocate( buckets_ptr, segment_size(first_block)-embedded_buckets );
        if( s >= embedded_block ) my_table[s] = 0;
    } while(s-- > 0);
    my_mask = embedded_buckets - 1;
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( const concurrent_hash_map& source ) {
    reserve( source.my_size );
    hashcode_t mask = source.my_mask;
    if( my_mask == mask ) { // optimized version: copy bucket by bucket
        bucket *dst = 0, *src = 0;
        bool rehash_required = false;
        for( hashcode_t k = 0; k <= mask; k++ ) {
            if( k & (k-2) ) ++dst,src++; // not the beginning of a segment
            else { dst = get_bucket( k ); src = source.get_bucket( k ); }
            __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table");
            node *n = static_cast<node*>( src->node_list );
            if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets
                rehash_required = true;
                dst->node_list = internal::rehash_req;
            } else for(; n; n = static_cast<node*>( n->next ) ) {
                add_to_bucket( dst, new( my_allocator ) node(n->item.first, n->item.second) );
                ++my_size;
            }
        }
        if( rehash_required ) rehash();
    } else internal_copy( source.begin(), source.end() );
}

template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy(I first, I last) {
    hashcode_t m = my_mask;
    for(; first != last; ++first) {
        hashcode_t h = my_hash_compare.hash( first->first );
        bucket *b = get_bucket( h & m );
        __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table");
        node *n = new( my_allocator ) node(first->first, first->second);
        add_to_bucket( b, n );
        ++my_size;
    }
}

} // namespace interface4

using interface4::concurrent_hash_map;


template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
    if(a.size() != b.size()) return false;
    typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
    typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
    for(; i != i_end; ++i) {
        j = b.equal_range(i->first).first;
        if( j == j_end || !(i->second == j->second) ) return false;
    }
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
{    return !(a == b); }

template<typename Key, typename T, typename HashCompare, typename A>
inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
{    a.swap( b ); }

#if _MSC_VER && !defined(__INTEL_COMPILER)
    #pragma warning( pop )
#endif // warning 4127 is back

} // namespace tbb

#endif /* __TBB_concurrent_hash_map_H */