#ifndef __TBB_concurrent_hash_map_H
#define __TBB_concurrent_hash_map_H

#include __TBB_STD_SWAP_HEADER

#if __TBB_INITIALIZER_LISTS_PRESENT
#include <initializer_list>
#endif
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
#include <typeinfo>
#endif
#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
#include <tuple>
#endif

namespace interface5 {

    template<typename Key, typename T,
             typename HashCompare = tbb_hash_compare<Key>,
             typename A = tbb_allocator<std::pair<const Key, T> > >
    class concurrent_hash_map;

    // ...

        //! Count of buckets in the embedded (zero) segment
        static size_type const embedded_buckets = 1<<embedded_block;

        // ...

        //! Zero segment
        bucket my_embedded_segment[embedded_buckets];
        //! Constructor
        hash_map_base() {
            std::memset( this, 0, pointers_per_table*sizeof(segment_ptr_t)
                + sizeof(my_size) + sizeof(my_mask)
                + embedded_buckets*sizeof(bucket) );
            for( size_type i = 0; i < embedded_block; i++ ) // fill the table
                my_table[i] = my_embedded_segment + segment_base(i);
            my_mask = embedded_buckets - 1;
            __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
#if __TBB_STATISTICS
            my_info_resizes = 0;  // concurrent ones
            my_info_restarts = 0; // race collisions
            my_info_rehashes = 0; // invocations of rehash_bucket
#endif
        }
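        // Illustrative note (not part of the original header): my_mask always
        // equals the live bucket count minus one, and bucket counts are powers
        // of two, so selecting a bucket is a single AND, roughly:
        //
        //     size_t bucket_index = hash_of_key & my_mask;
        //     // e.g. hash 0x2A (0b101010) & my_mask 7 (0b000111) == bucket 2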
        //! True if ptr is a valid pointer (i.e. not a small flag value such as rehash_req)
        static bool is_valid( void *ptr ) {
            return reinterpret_cast<uintptr_t>(ptr) > uintptr_t(63);
        }

        //! Initialize buckets
        static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
            if( is_initial ) std::memset( static_cast<void*>(ptr), 0, sz*sizeof(bucket) );
            else for( size_type i = 0; i < sz; i++, ptr++ ) {
                *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
                // ...
            }
        }

        // ...

        ~enable_segment_failsafe() {
            if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress
        }
        //! Enable segment
        template<typename Allocator>
        void enable_segment( segment_index_t k, const Allocator& allocator, bool is_initial = false ) {
            // ...
            bucket_allocator_type bucket_allocator(allocator);
            // ...
            __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
            if( k >= first_block ) {
                sz = segment_size( k );
                segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz);
                init_buckets( ptr, sz, is_initial );
                // ...
            } else { // the first non-embedded block is allocated as one chunk
                __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
                sz = segment_size( first_block );
                segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz - embedded_buckets);
                init_buckets( ptr, sz - embedded_buckets, is_initial );
                ptr -= segment_base(embedded_block);
                // ...
            }
            // ...
        }
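        // Illustrative note (not part of the original header): growth is
        // geometric.  Enabling segment k >= first_block adds segment_size(k)
        // buckets, which doubles the bucket count, so my_mask advances through
        // 2^k - 1 values (..., 63, 127, 255, ...).  Existing buckets never
        // move: new segments are only appended to my_table, which is what
        // lets readers proceed concurrently with growth.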
        //! Deallocate the segment
        template<typename Allocator>
        void delete_segment( segment_index_t s, const Allocator& allocator ) {
            // ...
            bucket_allocator_type bucket_allocator(allocator);
            // ...
            if( s >= first_block ) // the segment was allocated on its own
                bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr, sz);
            else if( s == embedded_block && embedded_block != first_block )
                bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr,
                    segment_size(first_block) - embedded_buckets);
            if( s >= embedded_block ) my_table[s] = 0;
        }
        //! Get bucket by (masked) hashcode
        bucket *get_bucket( hashcode_t h ) const throw() {
            segment_index_t s = segment_index_of( h );
            h -= segment_base(s);
            segment_ptr_t seg = my_table[s];
            __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
            return &seg[h];
        }
        //! Check for mask race
        bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {
            hashcode_t m_now, m_old = m;
            m_now = (hashcode_t) itt_load_word_with_acquire( my_mask );
            if( m_old != m_now )
                return check_rehashing_collision( h, m_old, m = m_now );
            return false;
        }

        //! Process mask race, check for rehashing collision
        bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const {
            __TBB_ASSERT(m_old != m, NULL);
            if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
                // ...
                for( ++m_old; !(h & m_old); m_old <<= 1 ) // find the lowest bit of h above m_old
                    ;
                m_old = (m_old<<1) - 1; // get full mask from a bit
                // ...
            }
            // ...
        }
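        // Worked example (illustrative): suppose a lookup read m_old == 0b0111
        // and a concurrent grow then published my_mask == 0b1111.  For
        // h == 0b1011, (h & m_old) == 0b0011 but (h & m) == 0b1011, so the item
        // may have been rehashed into a new bucket and the caller must
        // re-examine; for h == 0b0011 both masks pick the same bucket and no
        // restart is needed.  The loop then rebuilds the mask actually used:
        // ++m_old == 0b1000 already intersects h, so m_old becomes
        // (0b1000<<1) - 1 == 0b1111.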
        //! Insert a node and check for load factor. @return segment index to enable if no current growth happens
        segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) {
            // ...
            add_to_bucket( b, n );
            // ...check load factor and, if needed, claim the next segment for allocation...
            __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
            static const segment_ptr_t is_allocating = (segment_ptr_t)2;
            if( !itt_hide_load_word(my_table[new_seg])
                && as_atomic(my_table[new_seg]).compare_and_swap(is_allocating, NULL) == NULL )
                return new_seg; // The value must be processed
            // ...
        }
        //! Prepare enough segments for the given number of buckets
        template<typename Allocator>
        void reserve( size_type buckets, const Allocator& allocator ) {
            if( !buckets-- ) return;
            bool is_initial = !my_size;
            for( size_type m = my_mask; buckets > m; m = my_mask )
                enable_segment( segment_index_of( m+1 ), allocator, is_initial );
        }
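        // Worked example (illustrative): reserve(1000, a) first decrements the
        // request to 999 (a bucket index), then keeps enabling the next segment
        // while 999 > my_mask.  Starting from my_mask == 63 it grows the mask
        // through 127, 255, 511 to 1023, i.e. stops once 1024 buckets exist.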
        //! Swap hash_map_bases
        void internal_swap( hash_map_base &table ) {
            using std::swap;
            swap(this->my_mask, table.my_mask);
            swap(this->my_size, table.my_size);
            for( size_type i = 0; i < embedded_buckets; i++ )
                swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
            for( size_type i = embedded_block; i < pointers_per_table; i++ )
                swap(this->my_table[i], table.my_table[i]);
        }
#if __TBB_CPP11_RVALUE_REF_PRESENT
        void internal_move( hash_map_base&& other ) {
            my_mask = other.my_mask;
            other.my_mask = embedded_buckets - 1;
            my_size = other.my_size;
            other.my_size = 0;
            for( size_type i = 0; i < embedded_buckets; ++i ) {
                my_embedded_segment[i].node_list = other.my_embedded_segment[i].node_list;
                other.my_embedded_segment[i].node_list = NULL;
            }
            for( size_type i = embedded_block; i < pointers_per_table; ++i ) {
                my_table[i] = other.my_table[i];
                other.my_table[i] = NULL;
            }
        }
#endif // __TBB_CPP11_RVALUE_REF_PRESENT
    };

    // ...

    template<typename Iterator>
    class hash_map_range;
    //! Meets requirements of a forward iterator for STL
    /** Value is either the T or const T type of the container. */
    template<typename Container, typename Value>
    class hash_map_iterator
        : public std::iterator<std::forward_iterator_tag,Value>
    {
        typedef Container map_type;
        typedef typename Container::node node;
        // ...

        template<typename C, typename T, typename U>
        friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

        template<typename C, typename T, typename U>
        friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

        template<typename C, typename T, typename U>
        friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

        template<typename C, typename U>
        friend class hash_map_iterator;

        // ...

        void advance_to_next_bucket() { // TODO?: refactor to iterator_base class
            size_t k = my_index+1;
            __TBB_ASSERT( my_bucket, "advancing an invalid iterator?");
            while( k <= my_map->my_mask ) {
                if( k&(k-2) ) // not the beginning of a segment
                    ++my_bucket;
                else my_bucket = my_map->get_bucket( k );
                my_node = static_cast<node*>( my_bucket->node_list );
                if( hash_map_base::is_valid(my_node) ) {
                    my_index = k; return;
                }
                ++k;
            }
            my_bucket = 0; my_node = 0; my_index = k; // the end
        }
#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
        template<typename Key, typename T, typename HashCompare, typename A>
        friend class interface5::concurrent_hash_map;
#else
    public: // workaround
#endif
        //! concurrent_hash_map over which we are iterating.
        const Container *my_map;

        // ...

    public:
        //! Construct undefined iterator
        hash_map_iterator() {}
        hash_map_iterator( const hash_map_iterator<Container,typename Container::value_type> &other ) :
            my_map(other.my_map),
            my_index(other.my_index),
            my_bucket(other.my_bucket),
            my_node(other.my_node)
        {}
        Value& operator*() const {
            __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
            return my_node->value();
        }
        Value* operator->() const { return &operator*(); }
        // ...
    };
    template<typename Container, typename Value>
    hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
        my_map(&map),
        my_index(index),
        my_bucket(b),
        my_node( static_cast<node*>(n) )
    {
        if( b && !hash_map_base::is_valid(n) )
            advance_to_next_bucket();
    }

    template<typename Container, typename Value>
    hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {
        my_node = static_cast<node*>( my_node->next );
        if( !my_node ) advance_to_next_bucket();
        return *this;
    }
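    // Usage sketch (illustrative, not part of the original header; iteration
    // is intended for serial code -- it is not safe against concurrent erasure):
    //
    //     typedef tbb::concurrent_hash_map<int,std::string> table_t;
    //     for( table_t::iterator it = table.begin(); it != table.end(); ++it )
    //         std::printf( "%d -> %s\n", it->first, it->second.c_str() );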
    template<typename Container, typename T, typename U>
    bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
        return i.my_node == j.my_node && i.my_map == j.my_map;
    }

    template<typename Container, typename T, typename U>
    bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
        return i.my_node != j.my_node || i.my_map != j.my_map;
    }
    //! Range class used with concurrent_hash_map
    template<typename Iterator>
    class hash_map_range {
        // ...

    public:
        //! Split range.
        hash_map_range( hash_map_range& r, split ) :
            my_end(r.my_end),
            my_grainsize(r.my_grainsize)
        {
            r.my_end = my_begin = r.my_midpoint;
            __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
            __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
            set_midpoint();
            r.set_midpoint();
        }
        // ...

        //! Init range with container and grainsize specified
        hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
            my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
            my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
            my_grainsize( grainsize_ )
        {
            __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
            set_midpoint();
        }
        // ...
    };
    template<typename Iterator>
    void hash_map_range<Iterator>::set_midpoint() const {
        // Split by groups of nodes
        size_t m = my_end.my_index-my_begin.my_index;
        if( m > my_grainsize ) {
            m = my_begin.my_index + m/2u;
            bucket *b = my_begin.my_map->get_bucket(m);
            my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
        } else {
            my_midpoint = my_end;
        }
        __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
            "my_begin is after my_midpoint" );
        __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
            "my_midpoint is after my_end" );
        __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
            "[my_begin, my_midpoint) range should not be empty" );
    }
#if _MSC_VER && !defined(__INTEL_COMPILER)
    // Suppress "conditional expression is constant" warning.
    #pragma warning( push )
    #pragma warning( disable: 4127 )
#endif

//! Unordered map from Key to T.
template<typename Key, typename T, typename HashCompare, typename Allocator>
class concurrent_hash_map : protected internal::hash_map_base {
    template<typename Container, typename Value>
    friend class internal::hash_map_iterator;

    template<typename I>
    friend class internal::hash_map_range;

    // ...

    typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;
    typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;
#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
    template<typename... Args>
    static node* create_node(node_allocator_type& allocator, Args&&... args)
#else
    template<typename Arg1, typename Arg2>
    static node* create_node(node_allocator_type& allocator, __TBB_FORWARDING_REF(Arg1) arg1, __TBB_FORWARDING_REF(Arg2) arg2)
#endif
    {
        // ...allocate raw storage, then construct the node and its value pair...
    }

    // ...

#if __TBB_CPP11_RVALUE_REF_PRESENT
    static node* allocate_node_move_construct(node_allocator_type& allocator, const Key &key, const T * t){
        return create_node(allocator, key, std::move(*const_cast<T*>(t)));
    }
#endif

    static node* allocate_node_default_construct(node_allocator_type& allocator, const Key &key, const T * ){
#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
        // Emplace construct an empty T object inside the pair
        return create_node(allocator, std::piecewise_construct,
            std::forward_as_tuple(key), std::forward_as_tuple());
#else
        // ...
#endif
    }

    static node* do_not_allocate_node(node_allocator_type& , const Key &, const T * ){
        __TBB_ASSERT(false,"this dummy function should not be called");
        return NULL;
    }
    //! Returns the node by the key, or NULL if the key is absent.
    node* search_bucket( const key_type &key, bucket *b ) const {
        node *n = static_cast<node*>( b->node_list );
        while( is_valid(n) && !my_hash_compare.equal(key, n->value().first) )
            n = static_cast<node*>( n->next );
        // ...
        return n;
    }

    //! bucket accessor is to find, rehash, acquire a lock, and access a bucket
    class bucket_accessor : public bucket::scoped_t {
        bucket *my_b;
    public:
        bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }
        //! find a bucket by masked hashcode, optionally rehash, and acquire the lock
        void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {
            my_b = base->get_bucket( h );
            // if the bucket is still marked for rehashing, grab it for writing and rehash it in place
            if( itt_load_word_with_acquire(my_b->node_list) == internal::rehash_req
                && try_acquire( my_b->mutex, /*write=*/true ) )
            {
                // ...
            }
            // ...
        }
        //! check whether bucket is locked for write
        bool is_writer() { return bucket::scoped_t::is_writer; }
        //! get bucket pointer
        bucket *operator()() { return my_b; }
        // ...
    };
    void rehash_bucket( bucket *b_new, const hashcode_t h ) {
        __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
        // ...find the parent bucket b_old that still holds the items...
        bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
        __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
        // ...
        if( (c & mask) == h ) { // the item belongs to the new bucket
            // ...move it from b_old into b_new, upgrading the lock on b_old if needed...
            if( !b_old.upgrade_to_writer() ) {
                // ...the lock was released during the upgrade: rescan the chain...
            }
            // ...
        }
        // ...
    }
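    // Worked example (illustrative): if bmask == 0b0100, then
    // __TBB_Log2(bmask) == 2 and the expression above yields
    // (1u<<3) - 1 == 0b0111, i.e. the full mask in effect when the parent
    // bucket was populated; every stored hash must agree with h on those low
    // bits, otherwise hash() changed for a key while it was in the table.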
#if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Move constructor with a given allocator instance
    concurrent_hash_map( concurrent_hash_map &&table, const allocator_type &a )
        // ...
    {
        if (a == table.get_allocator()){
            this->internal_move(std::move(table));
        } else {
            // allocators differ: move the elements one by one instead of stealing segments
            internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end()), table.size());
        }
    }
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

    // ...

#if __TBB_INITIALIZER_LISTS_PRESENT
    //! Construct table from an initializer list
    concurrent_hash_map( std::initializer_list<value_type> il, const allocator_type &a = allocator_type() )
        // ...
    {
        call_clear_on_leave scope_guard(this);
        internal_copy(il.begin(), il.end(), il.size());
        scope_guard.dismiss();
    }
#endif //__TBB_INITIALIZER_LISTS_PRESENT

    // ...

#if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Move Assignment
    // ...
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

#if __TBB_INITIALIZER_LISTS_PRESENT
    //! Assignment from an initializer list
    // ...
#endif //__TBB_INITIALIZER_LISTS_PRESENT

    // ...

    //! Find item and acquire a read lock on the item.
    /** Return true if item is found, false otherwise. */
    bool find( const_accessor &result, const Key &key ) const {
        result.release();
        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false, &do_not_allocate_node );
    }
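    // Usage sketch (illustrative, not part of the original header):
    //
    //     typedef tbb::concurrent_hash_map<std::string,int> table_t;
    //     table_t::const_accessor ca;               // read-lock handle
    //     if( table.find( ca, "apples" ) )
    //         std::printf( "%d\n", ca->second );    // read lock held while ca lives
    //     // the lock is released when ca is destroyed or ca.release() is called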
#if __TBB_CPP11_RVALUE_REF_PRESENT
    // ...insert overloads taking value_type&&...

#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
    //! Insert item by copying if there is no such key present already
    /** Returns true if item is new. */
    template<typename... Args>
    bool emplace( Args&&... args ) {
        return generic_emplace(accessor_not_used(), std::forward<Args>(args)...);
    }

    //! Insert item by copying if there is no such key present already and acquire a read lock on the item.
    /** Returns true if item is new. */
    template<typename... Args>
    bool emplace( const_accessor &result, Args&&... args ) {
        return generic_emplace(result, std::forward<Args>(args)...);
    }

    //! Insert item by copying if there is no such key present already and acquire a write lock on the item.
    /** Returns true if item is new. */
    template<typename... Args>
    bool emplace( accessor &result, Args&&... args ) {
        return generic_emplace(result, std::forward<Args>(args)...);
    }
#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

    //! Insert range [first, last)
    template<typename I>
    void insert( I first, I last ) {
        for ( ; first != last; ++first )
            insert( *first );
    }

#if __TBB_INITIALIZER_LISTS_PRESENT
    //! Insert initializer list
    void insert( std::initializer_list<value_type> il ) {
        insert( il.begin(), il.end() );
    }
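    // Usage sketch (illustrative, not part of the original header):
    //
    //     typedef tbb::concurrent_hash_map<std::string,int> table_t;
    //     table_t table;
    //     table.insert( { {"a",1}, {"b",2} } );        // bulk insert from a braced list
    //
    //     table_t::accessor a;
    //     if( table.insert( a, "c" ) ) a->second = 3;  // new slot: T default-constructed
    //     else                         ++a->second;    // existing: update under write lock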
#endif //__TBB_INITIALIZER_LISTS_PRESENT

    // ...

    //! Erase item by const_accessor.
    /** Return true if item was erased by particular call. */
    bool erase( const_accessor& item_accessor ) {
        return exclude( item_accessor );
    }

    //! Erase item by accessor.
    /** Return true if item was erased by particular call. */
    bool erase( accessor& item_accessor ) {
        return exclude( item_accessor );
    }

    // ...

#if __TBB_CPP11_RVALUE_REF_PRESENT
    template<typename Accessor>
    bool generic_move_insert( Accessor&& result, value_type&& value ) {
        // ...
    }

#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
    template<typename Accessor, typename... Args>
    bool generic_emplace( Accessor&& result, Args&&... args ) {
        // ...
    }
#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

    //! Returns an iterator for an item defined by the key, or for the next item after it (if upper==true)
    template<typename I>
    std::pair<I, I> internal_equal_range( const Key& key, I end ) const;

    // ...

    template<typename I>
    void internal_copy( I first, I last, size_type reserve_size );

#if __TBB_CPP11_RVALUE_REF_PRESENT
    // Move assignment when the allocator does not propagate on move
    void internal_move_assign( concurrent_hash_map&& other, tbb::internal::traits_false_type ) {
        if (this->my_allocator == other.my_allocator) {
            // equal allocators: steal the other table's segments wholesale
            // ...
        } else {
            // unequal allocators: move the elements one by one
            internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()), other.size());
        }
    }
    //! Fast find when no concurrent erasure is used. For internal use inside TBB only!
    const_pointer internal_fast_find( const Key& key ) const {
        hashcode_t h = my_hash_compare.hash( key );
        hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
        // ...
        __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
        bucket *b = get_bucket( h & m );
        if( itt_load_word_with_acquire(b->node_list) == internal::rehash_req )
        {
            bucket::scoped_t lock;
            if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
                if( b->node_list == internal::rehash_req )
                    const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); // recursive rehashing
            }
            // ...
        }
        // ...
    }
#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
namespace internal {
// ...

template<template<typename...> typename Map, typename Key, typename T, typename... Args>
using hash_map_t = Map<
    Key, T,
    std::conditional_t< (sizeof...(Args)>0) && !is_allocator_v< pack_element_t<0, Args...> >,
                        pack_element_t<0, Args...>, tbb_hash_compare<Key> >,
    std::conditional_t< (sizeof...(Args)>0) && is_allocator_v< pack_element_t<sizeof...(Args)-1, Args...> >,
                        pack_element_t<sizeof...(Args)-1, Args...>, tbb_allocator<std::pair<const Key, T> > >
>;
}

// Deduction guide for the constructor from two iterators
template<typename I, typename... Args>
concurrent_hash_map(I, I, Args...)
-> internal::hash_map_t<concurrent_hash_map, internal::iterator_key_t<I>, internal::iterator_mapped_t<I>, Args...>;

// Deduction guide for the constructor from an initializer_list and hash_compare/allocator
template<typename Key, typename T, typename CompareOrAllocator>
concurrent_hash_map(std::initializer_list<std::pair<const Key, T>>, CompareOrAllocator)
-> internal::hash_map_t<concurrent_hash_map, Key, T, CompareOrAllocator>;

#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */
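// Usage sketch (illustrative, C++17):
//
//     std::vector<std::pair<const int, std::string>> v{ {1,"one"}, {2,"two"} };
//     tbb::concurrent_hash_map m( v.begin(), v.end() );
//     // deduces tbb::concurrent_hash_map<int, std::string,
//     //     tbb_hash_compare<int>, tbb_allocator<std::pair<const int, std::string>>>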
template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::lookup(
        bool op_insert, const Key &key, const T *t, const_accessor *result, bool write,
        node* (*allocate_node)(node_allocator_type&, const Key&, const T*), node *tmp_n )
{
    __TBB_ASSERT( !result || !result->my_node, NULL );
    bool return_value;
    hashcode_t const h = my_hash_compare.hash( key );
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
    segment_index_t grow_segment = 0;
    node *n;
restart:
    {//lock scope
        __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
        return_value = false;
        // get bucket
        bucket_accessor b( this, h & m );
        // find a node
        n = search_bucket( key, b() );
        if( op_insert ) {
            // [opt] insert a key
            if( !n ) {
                if( !tmp_n )
                    tmp_n = allocate_node(my_allocator, key, t);
                if( !b.is_writer() && !b.upgrade_to_writer() ) {
                    // Rerun search_bucket in case another thread inserted the item during the upgrade
                    n = search_bucket( key, b() );
                    if( is_valid(n) ) { // unfortunately, it did
                        b.downgrade_to_reader();
                        goto exists;
                    }
                }
                if( check_mask_race(h, m) )
                    goto restart; // b.release() is done in ~b()
                // insert and set flag to grow the container
                grow_segment = insert_new_node( b(), n = tmp_n, m );
                tmp_n = 0;
                return_value = true;
            }
        } else { // find or count
            if( !n ) {
                if( check_mask_race( h, m ) )
                    goto restart; // b.release() is done in ~b()
                return false;
            }
            return_value = true;
        }
    exists:
        if( !result ) goto check_growth;
        // acquire the item
        if( !result->try_acquire( n->mutex, write ) ) {
            for( tbb::internal::atomic_backoff backoff(true);; ) {
                if( result->try_acquire( n->mutex, write ) ) break;
                if( !backoff.bounded_pause() ) {
                    // the wait takes really long, restart the operation
                    b.release();
                    __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
                    __TBB_Yield();
                    m = (hashcode_t) itt_load_word_with_acquire( my_mask );
                    goto restart;
                }
            }
        }
    }//lock scope
    result->my_node = n;
    result->my_hash = h;
check_growth:
    // [opt] grow the container
    if( grow_segment ) {
#if __TBB_STATISTICS
        my_info_resizes++; // concurrent ones
#endif
        enable_segment( grow_segment, my_allocator );
    }
    if( tmp_n ) // if op_insert only
        delete_node( tmp_n );
    return return_value;
}
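// Usage sketch (illustrative, not part of the original header): lookup() backs
// both find() and the insert() family, so a caller never sees the retry logic:
//
//     typedef tbb::concurrent_hash_map<std::string,int> table_t;
//     table_t::accessor a;
//     bool is_new = table.insert( a, "key" );  // op_insert=true, write=true
//     if( is_new ) a->second = 1;              // this thread created the slot
//     else         ++a->second;                // another thread created it first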
template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {
    hashcode_t h = my_hash_compare.hash( key );
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
    h &= m;
    bucket *b = get_bucket( h );
    while( b->node_list == internal::rehash_req ) {
        m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
        b = get_bucket( h &= m );
    }
    node *n = search_bucket( key, b );
    if( !n )
        return std::make_pair(end_, end_);
    iterator lower(*this, h, b, n), upper(lower);
    return std::make_pair(lower, ++upper);
}
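// Usage sketch (illustrative, not part of the original header): keys are
// unique, so the returned range holds at most one element:
//
//     std::pair<table_t::iterator, table_t::iterator> p = table.equal_range( key );
//     for( table_t::iterator i = p.first; i != p.second; ++i )
//         visit( *i );    // executes zero or one time; visit() is hypothetical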
template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::exclude( const_accessor &item_accessor ) {
    __TBB_ASSERT( item_accessor.my_node, NULL );
    node_base *const n = item_accessor.my_node;
    hashcode_t const h = item_accessor.my_hash;
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
    do {
        // get bucket
        bucket_accessor b( this, h & m, /*writer=*/true );
        node_base **p = &b()->node_list;
        while( *p && *p != n )
            p = &(*p)->next;
        if( !*p ) { // someone else was the first
            if( check_mask_race( h, m ) )
                continue;
            item_accessor.release();
            return false;
        }
        // ...unlink n from the bucket and decrement my_size...
        break;
    } while(true);
    if( !item_accessor.is_writer() ) // need to get exclusive lock
        item_accessor.upgrade_to_writer(); // return value means nothing here
    // ...
    delete_node( n ); // Only one thread can delete it
    return true;
}
template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::erase( const Key &key ) {
    node_base *n;
    hashcode_t const h = my_hash_compare.hash( key );
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
restart:
    {//lock scope
        // get bucket
        bucket_accessor b( this, h & m );
    search:
        node_base **p = &b()->node_list;
        n = *p;
        while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->value().first ) ) {
            p = &n->next;
            n = *p;
        }
        if( !n ) { // not found, but mask could be changed
            if( check_mask_race( h, m ) )
                goto restart;
            return false;
        }
        else if( !b.is_writer() && !b.upgrade_to_writer() ) {
            if( check_mask_race( h, m ) ) // contended upgrade, check mask
                goto restart;
            goto search;
        }
        *p = n->next;
        my_size--;
    }
    {
        typename node::scoped_t item_locker( n->mutex, /*write=*/true );
    }
    // note: there should be no threads pretending to acquire this mutex again, do not try to upgrade const_accessor!
    delete_node( n ); // Only one thread can delete it due to write lock on the bucket
    return true;
}
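// Usage sketch (illustrative, not part of the original header):
//
//     if( table.erase( "apples" ) )   // true only for the call that removed it
//         std::puts( "removed" );
//
// Note the scoped write lock taken on the node's mutex above: erase() waits
// for accessors still holding the item before the node is deleted.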
template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::swap( concurrent_hash_map<Key,T,HashCompare,A> &table ) {
    // ...
    internal_swap(table);
    // ...
}
template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::rehash( size_type sz ) {
    reserve( sz, my_allocator ); // TODO: add reduction of number of buckets as well
    hashcode_t mask = my_mask;
    hashcode_t b = (mask+1)>>1; // size or first index of the last segment
    __TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2
    bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing
    for(; b <= mask; b++, bp++ ) {
        node_base *n = bp->node_list;
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
        if( n == internal::rehash_req ) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one
            hashcode_t h = b; bucket *b_old = bp;
            do {
                __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
                hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
                b_old = get_bucket( h &= m );
            } while( b_old->node_list == internal::rehash_req );
            // now h is the index of the root rehashed bucket b_old
            mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
            for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
                hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->value().first );
                if( (c & mask) != h ) { // should be rehashed
                    *p = q->next; // exclude from b_old
                    bucket *b_new = get_bucket( c & mask );
                    __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" );
                    add_to_bucket( b_new, q );
                } else p = &q->next; // iterate to next item
            }
        }
    }
#if TBB_USE_PERFORMANCE_WARNINGS
    int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0;
    static bool reported = false;
#endif
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
    for( b = 0; b <= mask; b++ ) {
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" );
#if TBB_USE_PERFORMANCE_WARNINGS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n->next ) overpopulated_buckets++;
#endif
#if TBB_USE_ASSERT
        for( ; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first ) & mask;
            __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
#if TBB_USE_PERFORMANCE_WARNINGS
    if( buckets > current_size) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets;
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
#if __TBB_USE_OPTIONAL_RTTI
            typeid(*this).name(),
#else
            "concurrent_hash_map",
#endif
            current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
}
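// Usage sketch (illustrative, not part of the original header): rehash() is a
// serial operation -- call it while no other thread uses the table, e.g. after
// a bulk load, to process all buckets whose rehashing was deferred:
//
//     table.rehash();                    // finish all rehash_req buckets
//     table.rehash( 2 * table.size() );  // also reserve roughly 2x buckets first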
template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::clear() {
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
    int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0;
    static bool reported = false;
#endif
    bucket *bp = 0;
    // check consistency
    for( segment_index_t b = 0; b <= m; b++ ) {
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" );
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n == internal::rehash_req ) buckets--;
        else if( n->next ) overpopulated_buckets++;
#endif
#if __TBB_EXTRA_DEBUG
        for(; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first );
            h &= m;
            __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
#if __TBB_STATISTICS
    printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d"
            " concurrent: resizes=%u rehashes=%u restarts=%u\n",
            current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets,
            unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) );
    my_info_resizes = 0;  // concurrent ones
    my_info_restarts = 0; // race collisions
    my_info_rehashes = 0; // invocations of rehash_bucket
#endif
    if( buckets > current_size) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets;
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
#if __TBB_USE_OPTIONAL_RTTI
            typeid(*this).name(),
#else
            "concurrent_hash_map",
#endif
            current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
    my_size = 0;
    segment_index_t s = segment_index_of( m );
    __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
    do { // retire the segments from the highest index down
        __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
        segment_ptr_t buckets_ptr = my_table[s];
        size_type sz = segment_size( s ? s : 1 );
        for( segment_index_t i = 0; i < sz; i++ )
            for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
                buckets_ptr[i].node_list = n->next;
                delete_node( n );
            }
        delete_segment(s, my_allocator);
    } while(s-- > 0);
    my_mask = embedded_buckets - 1;
}
template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( const concurrent_hash_map& source ) {
    hashcode_t mask = source.my_mask;
    if( my_mask == mask ) { // optimized version
        reserve( source.my_size, my_allocator ); // TODO: load_factor?
        bucket *dst = 0, *src = 0;
        bool rehash_required = false;
        for( hashcode_t k = 0; k <= mask; k++ ) {
            if( k & (k-2) ) ++dst,src++; // not the beginning of a segment
            else { dst = get_bucket( k ); src = source.get_bucket( k ); }
            node *n = static_cast<node*>( src->node_list );
            if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets
                rehash_required = true;
                dst->node_list = internal::rehash_req;
            } else for(; n; n = static_cast<node*>( n->next ) ) {
                node* node_ptr = create_node(my_allocator, n->value().first, n->value().second);
                add_to_bucket( dst, node_ptr);
                ++my_size; // TODO: API return of my_size
            }
        }
        if( rehash_required ) rehash();
    } else internal_copy( source.begin(), source.end(), source.my_size );
}
template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( I first, I last, size_type reserve_size ) {
    reserve( reserve_size, my_allocator ); // TODO: load_factor?
    hashcode_t m = my_mask;
    for(; first != last; ++first) {
        hashcode_t h = my_hash_compare.hash( (*first).first );
        bucket *b = get_bucket( h & m );
        __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table");
        node* node_ptr = create_node(my_allocator, (*first).first, (*first).second);
        add_to_bucket( b, node_ptr );
        ++my_size; // TODO: API return of my_size
    }
}
template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
    if(a.size() != b.size()) return false;
    typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
    typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
    for(; i != i_end; ++i) {
        j = b.equal_range(i->first).first;
        if( j == j_end || !(i->second == j->second) ) return false;
    }
    return true;
}
template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
{    return !(a == b); }

template<typename Key, typename T, typename HashCompare, typename A>
inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
{    a.swap( b ); }

#if _MSC_VER && !defined(__INTEL_COMPILER)
    #pragma warning( pop ) // warning 4127 is back
#endif

// ...

#endif /* __TBB_concurrent_hash_map_H */