Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
enumerable_thread_specific.h
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #ifndef __TBB_enumerable_thread_specific_H
18 #define __TBB_enumerable_thread_specific_H
19 
20 #include "atomic.h"
21 #include "concurrent_vector.h"
22 #include "tbb_thread.h"
23 #include "tbb_allocator.h"
24 #include "cache_aligned_allocator.h"
25 #include "aligned_space.h"
26 #include "internal/_template_helpers.h"
27 #include "internal/_tbb_hash_compare_impl.h"
28 #include "tbb_profiling.h"
29 #include <string.h> // for memcpy
30 
31 #if _WIN32||_WIN64
32 #include "machine/windows_api.h"
33 #else
34 #include <pthread.h>
35 #endif
36 
37 #define __TBB_ETS_USE_CPP11 \
38  (__TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT \
39  && __TBB_CPP11_DECLTYPE_PRESENT && __TBB_CPP11_LAMBDAS_PRESENT)
40 
41 namespace tbb {
42 
43 //! enum for selecting between single key and key-per-instance versions
44 enum ets_key_usage_type { ets_key_per_instance, ets_no_key };
45 
46 namespace interface6 {
47 
48  // Forward declaration to use in internal classes
49  template <typename T, typename Allocator, ets_key_usage_type ETS_key_type>
50  class enumerable_thread_specific;
51 
53  namespace internal {
54 
55  using namespace tbb::internal;
56 
57  template<ets_key_usage_type ETS_key_type>
58  class ets_base: tbb::internal::no_copy {
59  protected:
60  typedef tbb_thread::id key_type;
61 #if __TBB_PROTECTED_NESTED_CLASS_BROKEN
62  public:
63 #endif
64  struct slot;
65 
66  struct array {
67  array* next;
68  size_t lg_size;
69  slot& at( size_t k ) {
70  return ((slot*)(void*)(this+1))[k];
71  }
72  size_t size() const {return size_t(1)<<lg_size;}
73  size_t mask() const {return size()-1;}
74  size_t start( size_t h ) const {
75  return h>>(8*sizeof(size_t)-lg_size);
76  }
77  };
78  struct slot {
79  key_type key;
80  void* ptr;
81  bool empty() const {return key == key_type();}
82  bool match( key_type k ) const {return key == k;}
83  bool claim( key_type k ) {
84  // TODO: maybe claim ptr, because key_type is not guaranteed to fit into word size
85  return atomic_compare_and_swap(key, k, key_type()) == key_type();
86  }
87  };
88 #if __TBB_PROTECTED_NESTED_CLASS_BROKEN
89  protected:
90 #endif
91 
93 
95  atomic<array*> my_root;
96  atomic<size_t> my_count;
97  virtual void* create_local() = 0;
98  virtual void* create_array(size_t _size) = 0; // _size in bytes
99  virtual void free_array(void* ptr, size_t _size) = 0; // _size in bytes
100  array* allocate( size_t lg_size ) {
101  size_t n = size_t(1)<<lg_size;
102  array* a = static_cast<array*>(create_array( sizeof(array)+n*sizeof(slot) ));
103  a->lg_size = lg_size;
104  std::memset( a+1, 0, n*sizeof(slot) );
105  return a;
106  }
107  void free(array* a) {
108  size_t n = size_t(1)<<(a->lg_size);
109  free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) );
110  }
111 
112  ets_base() {my_root=NULL; my_count=0;}
113  virtual ~ets_base(); // g++ complains if this is not virtual
114  void* table_lookup( bool& exists );
115  void table_clear();
116  // The following functions are not used in concurrent context,
117  // so we don't need synchronization and ITT annotations there.
118  void table_elementwise_copy( const ets_base& other,
119  void*(*add_element)(ets_base&, void*) ) {
120  __TBB_ASSERT(!my_root,NULL);
121  __TBB_ASSERT(!my_count,NULL);
122  if( !other.my_root ) return;
123  array* root = my_root = allocate(other.my_root->lg_size);
124  root->next = NULL;
125  my_count = other.my_count;
126  size_t mask = root->mask();
127  for( array* r=other.my_root; r; r=r->next ) {
128  for( size_t i=0; i<r->size(); ++i ) {
129  slot& s1 = r->at(i);
130  if( !s1.empty() ) {
131  for( size_t j = root->start(tbb::tbb_hash<key_type>()(s1.key)); ; j=(j+1)&mask ) {
132  slot& s2 = root->at(j);
133  if( s2.empty() ) {
134  s2.ptr = add_element(*this, s1.ptr);
135  s2.key = s1.key;
136  break;
137  }
138  else if( s2.match(s1.key) )
139  break;
140  }
141  }
142  }
143  }
144  }
145  void table_swap( ets_base& other ) {
146  __TBB_ASSERT(this!=&other, "Don't swap an instance with itself");
147  tbb::internal::swap<relaxed>(my_root, other.my_root);
148  tbb::internal::swap<relaxed>(my_count, other.my_count);
149  }
150  };
151 
152  template<ets_key_usage_type ETS_key_type>
153  ets_base<ETS_key_type>::~ets_base() {
154  __TBB_ASSERT(!my_root, NULL);
155  }
156 
157  template<ets_key_usage_type ETS_key_type>
158  void ets_base<ETS_key_type>::table_clear() {
159  while( array* r = my_root ) {
160  my_root = r->next;
161  free(r);
162  }
163  my_count = 0;
164  }
165 
166  template<ets_key_usage_type ETS_key_type>
167  void* ets_base<ETS_key_type>::table_lookup( bool& exists ) {
168  const key_type k = tbb::this_tbb_thread::get_id();
169 
170  __TBB_ASSERT(k != key_type(),NULL);
171  void* found;
172  size_t h = tbb::tbb_hash<key_type>()(k);
173  for( array* r=my_root; r; r=r->next ) {
175  size_t mask=r->mask();
176  for(size_t i = r->start(h); ;i=(i+1)&mask) {
177  slot& s = r->at(i);
178  if( s.empty() ) break;
179  if( s.match(k) ) {
180  if( r==my_root ) {
181  // Success at top level
182  exists = true;
183  return s.ptr;
184  } else {
185  // Success at some other level. Need to insert at top level.
186  exists = true;
187  found = s.ptr;
188  goto insert;
189  }
190  }
191  }
192  }
193  // Key does not yet exist. The density of slots in the table never exceeds 0.5:
194  // if it would, a new table with double the current size is allocated and swapped
195  // in as the new root table. So an empty slot is guaranteed.
196  exists = false;
197  found = create_local();
198  {
199  size_t c = ++my_count;
200  array* r = my_root;
202  if( !r || c>r->size()/2 ) {
203  size_t s = r ? r->lg_size : 2;
204  while( c>size_t(1)<<(s-1) ) ++s;
205  array* a = allocate(s);
206  for(;;) {
207  a->next = r;
209  array* new_r = my_root.compare_and_swap(a,r);
210  if( new_r==r ) break;
211  call_itt_notify(acquired, new_r);
212  if( new_r->lg_size>=s ) {
213  // Another thread inserted an equal or bigger array, so our array is superfluous.
214  free(a);
215  break;
216  }
217  r = new_r;
218  }
219  }
220  }
221  insert:
222  // Whether the slot was found in an older table or the value was just created at this level,
223  // it has already been accounted for in my_count. The root table is guaranteed to have room
224  // and the key is not yet present there, so search for an empty slot and claim it.
225  array* ir = my_root;
227  size_t mask = ir->mask();
228  for(size_t i = ir->start(h);;i=(i+1)&mask) {
229  slot& s = ir->at(i);
230  if( s.empty() ) {
231  if( s.claim(k) ) {
232  s.ptr = found;
233  return found;
234  }
235  }
236  }
237  }
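 
 // A minimal sketch of the probe sequence used by table_lookup() above, assuming a table
 // of 2^lg_size slots: the first probe is taken from the top lg_size bits of the hash
 // (array::start), and collisions are resolved by linear probing that wraps around via the
 // mask. Because the load factor is kept at or below 1/2, the scan always reaches an empty
 // slot. The helper names below are illustrative and not part of TBB.
 inline size_t example_first_probe( size_t h, size_t lg_size ) {
     return h >> (8*sizeof(size_t) - lg_size);       // same formula as array::start()
 }
 inline size_t example_next_probe( size_t i, size_t lg_size ) {
     return (i + 1) & ((size_t(1) << lg_size) - 1);  // same wrap-around as (i+1)&mask
 }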
238 
240  template <>
241  class ets_base<ets_key_per_instance>: protected ets_base<ets_no_key> {
242  typedef ets_base<ets_no_key> super;
243 #if _WIN32||_WIN64
244 #if __TBB_WIN8UI_SUPPORT
245  typedef DWORD tls_key_t;
246  void create_key() { my_key = FlsAlloc(NULL); }
247  void destroy_key() { FlsFree(my_key); }
248  void set_tls(void * value) { FlsSetValue(my_key, (LPVOID)value); }
249  void* get_tls() { return (void *)FlsGetValue(my_key); }
250 #else
251  typedef DWORD tls_key_t;
252  void create_key() { my_key = TlsAlloc(); }
253  void destroy_key() { TlsFree(my_key); }
254  void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); }
255  void* get_tls() { return (void *)TlsGetValue(my_key); }
256 #endif
257 #else
258  typedef pthread_key_t tls_key_t;
259  void create_key() { pthread_key_create(&my_key, NULL); }
260  void destroy_key() { pthread_key_delete(my_key); }
261  void set_tls( void * value ) const { pthread_setspecific(my_key, value); }
262  void* get_tls() const { return pthread_getspecific(my_key); }
263 #endif
264  tls_key_t my_key;
265  virtual void* create_local() __TBB_override = 0;
266  virtual void* create_array(size_t _size) __TBB_override = 0; // _size in bytes
267  virtual void free_array(void* ptr, size_t _size) __TBB_override = 0; // size in bytes
268  protected:
269  ets_base() {create_key();}
270  ~ets_base() {destroy_key();}
271  void* table_lookup( bool& exists ) {
272  void* found = get_tls();
273  if( found ) {
274  exists=true;
275  } else {
276  found = super::table_lookup(exists);
277  set_tls(found);
278  }
279  return found;
280  }
281  void table_clear() {
282  destroy_key();
283  create_key();
284  super::table_clear();
285  }
286  void table_swap( ets_base& other ) {
287  using std::swap;
288  __TBB_ASSERT(this!=&other, "Don't swap an instance with itself");
289  swap(my_key, other.my_key);
290  super::table_swap(other);
291  }
292  };
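 
 // Usage sketch: the specialization above is selected by passing ets_key_per_instance as
 // the third template argument (the allocator is shown with its default; the typedef name
 // is illustrative). Each such container then caches lookups in one native TLS key.
 //
 //     typedef tbb::enumerable_thread_specific< int,
 //                                              tbb::cache_aligned_allocator<int>,
 //                                              tbb::ets_key_per_instance > fast_counter_t;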
293 
295  template< typename Container, typename Value >
296  class enumerable_thread_specific_iterator
297 #if defined(_WIN64) && defined(_MSC_VER)
298  // Ensure that Microsoft's internal template function _Val_type works correctly.
299  : public std::iterator<std::random_access_iterator_tag,Value>
300 #endif /* defined(_WIN64) && defined(_MSC_VER) */
301  {
303 
304  Container *my_container;
305  typename Container::size_type my_index;
306  mutable Value *my_value;
307 
308  template<typename C, typename T>
309  friend enumerable_thread_specific_iterator<C,T>
310  operator+( ptrdiff_t offset, const enumerable_thread_specific_iterator<C,T>& v );
311 
312  template<typename C, typename T, typename U>
313  friend bool operator==( const enumerable_thread_specific_iterator<C,T>& i,
314  const enumerable_thread_specific_iterator<C,U>& j );
315 
316  template<typename C, typename T, typename U>
317  friend bool operator<( const enumerable_thread_specific_iterator<C,T>& i,
318  const enumerable_thread_specific_iterator<C,U>& j );
319 
320  template<typename C, typename T, typename U>
321  friend ptrdiff_t operator-( const enumerable_thread_specific_iterator<C,T>& i,
322  const enumerable_thread_specific_iterator<C,U>& j );
323 
324  template<typename C, typename U>
325  friend class enumerable_thread_specific_iterator;
326 
327  public:
328 
329  enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) :
330  my_container(&const_cast<Container &>(container)), my_index(index), my_value(NULL) {}
331 
333  enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {}
334 
335  template<typename U>
336  enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator<Container, U>& other ) :
337  my_container( other.my_container ), my_index( other.my_index), my_value( const_cast<Value *>(other.my_value) ) {}
338 
339  enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const {
340  return enumerable_thread_specific_iterator(*my_container, my_index + offset);
341  }
342 
343  enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) {
344  my_index += offset;
345  my_value = NULL;
346  return *this;
347  }
348 
349  enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const {
350  return enumerable_thread_specific_iterator( *my_container, my_index-offset );
351  }
352 
353  enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) {
354  my_index -= offset;
355  my_value = NULL;
356  return *this;
357  }
358 
359  Value& operator*() const {
360  Value* value = my_value;
361  if( !value ) {
362  value = my_value = (*my_container)[my_index].value();
363  }
364  __TBB_ASSERT( value==(*my_container)[my_index].value(), "corrupt cache" );
365  return *value;
366  }
367 
368  Value& operator[]( ptrdiff_t k ) const {
369  return *(*my_container)[my_index + k].value();
370  }
371 
372  Value* operator->() const {return &operator*();}
373 
374  enumerable_thread_specific_iterator& operator++() {
375  ++my_index;
376  my_value = NULL;
377  return *this;
378  }
379 
380  enumerable_thread_specific_iterator& operator--() {
381  --my_index;
382  my_value = NULL;
383  return *this;
384  }
385 
387  enumerable_thread_specific_iterator operator++(int) {
388  enumerable_thread_specific_iterator result = *this;
389  ++my_index;
390  my_value = NULL;
391  return result;
392  }
393 
395  enumerable_thread_specific_iterator operator--(int) {
396  enumerable_thread_specific_iterator result = *this;
397  --my_index;
398  my_value = NULL;
399  return result;
400  }
401 
402  // STL support
403  typedef ptrdiff_t difference_type;
404  typedef Value value_type;
405  typedef Value* pointer;
406  typedef Value& reference;
407  typedef std::random_access_iterator_tag iterator_category;
408  };
409 
410  template<typename Container, typename T>
411  enumerable_thread_specific_iterator<Container,T>
412  operator+( ptrdiff_t offset, const enumerable_thread_specific_iterator<Container,T>& v ) {
413  return enumerable_thread_specific_iterator<Container,T>( *v.my_container, v.my_index + offset );
414  }
415 
416  template<typename Container, typename T, typename U>
417  bool operator==( const enumerable_thread_specific_iterator<Container,T>& i,
418  const enumerable_thread_specific_iterator<Container,U>& j ) {
419  return i.my_index==j.my_index && i.my_container == j.my_container;
420  }
421 
422  template<typename Container, typename T, typename U>
423  bool operator!=( const enumerable_thread_specific_iterator<Container,T>& i,
424  const enumerable_thread_specific_iterator<Container,U>& j ) {
425  return !(i==j);
426  }
427 
428  template<typename Container, typename T, typename U>
429  bool operator<( const enumerable_thread_specific_iterator<Container,T>& i,
430  const enumerable_thread_specific_iterator<Container,U>& j ) {
431  return i.my_index<j.my_index;
432  }
433 
434  template<typename Container, typename T, typename U>
435  bool operator>( const enumerable_thread_specific_iterator<Container,T>& i,
436  const enumerable_thread_specific_iterator<Container,U>& j ) {
437  return j<i;
438  }
439 
440  template<typename Container, typename T, typename U>
441  bool operator>=( const enumerable_thread_specific_iterator<Container,T>& i,
442  const enumerable_thread_specific_iterator<Container,U>& j ) {
443  return !(i<j);
444  }
445 
446  template<typename Container, typename T, typename U>
447  bool operator<=( const enumerable_thread_specific_iterator<Container,T>& i,
448  const enumerable_thread_specific_iterator<Container,U>& j ) {
449  return !(j<i);
450  }
451 
452  template<typename Container, typename T, typename U>
453  ptrdiff_t operator-( const enumerable_thread_specific_iterator<Container,T>& i,
454  const enumerable_thread_specific_iterator<Container,U>& j ) {
455  return i.my_index-j.my_index;
456  }
457 
458  template<typename SegmentedContainer, typename Value >
459  class segmented_iterator
460 #if defined(_WIN64) && defined(_MSC_VER)
461  : public std::iterator<std::input_iterator_tag, Value>
462 #endif
463  {
464  template<typename C, typename T, typename U>
465  friend bool operator==(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);
466 
467  template<typename C, typename T, typename U>
468  friend bool operator!=(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);
469 
470  template<typename C, typename U>
471  friend class segmented_iterator;
472 
473  public:
474 
475  segmented_iterator() {my_segcont = NULL;}
476 
477  segmented_iterator( const SegmentedContainer& _segmented_container ) :
478  my_segcont(const_cast<SegmentedContainer*>(&_segmented_container)),
479  outer_iter(my_segcont->end()) { }
480 
481  ~segmented_iterator() {}
482 
483  typedef typename SegmentedContainer::iterator outer_iterator;
484  typedef typename SegmentedContainer::value_type InnerContainer;
485  typedef typename InnerContainer::iterator inner_iterator;
486 
487  // STL support
488  typedef ptrdiff_t difference_type;
489  typedef Value value_type;
490  typedef typename SegmentedContainer::size_type size_type;
491  typedef Value* pointer;
492  typedef Value& reference;
493  typedef std::input_iterator_tag iterator_category;
494 
495  // Copy Constructor
496  template<typename U>
497  segmented_iterator(const segmented_iterator<SegmentedContainer, U>& other) :
498  my_segcont(other.my_segcont),
499  outer_iter(other.outer_iter),
500  // can we assign a default-constructed iterator to inner if we're at the end?
501  inner_iter(other.inner_iter)
502  {}
503 
504  // assignment
505  template<typename U>
506  segmented_iterator& operator=( const segmented_iterator<SegmentedContainer, U>& other) {
507  if(this != &other) {
508  my_segcont = other.my_segcont;
509  outer_iter = other.outer_iter;
510  if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter;
511  }
512  return *this;
513  }
514 
515  // allow assignment of outer iterator to segmented iterator. Once it is
516  // assigned, move forward until a non-empty inner container is found or
517  // the end of the outer container is reached.
518  segmented_iterator& operator=(const outer_iterator& new_outer_iter) {
519  __TBB_ASSERT(my_segcont != NULL, NULL);
520  // check that this iterator points to something inside the segmented container
521  for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) {
522  if( !outer_iter->empty() ) {
523  inner_iter = outer_iter->begin();
524  break;
525  }
526  }
527  return *this;
528  }
529 
530  // pre-increment
531  segmented_iterator& operator++() {
532  advance_me();
533  return *this;
534  }
535 
536  // post-increment
537  segmented_iterator operator++(int) {
538  segmented_iterator tmp = *this;
539  operator++();
540  return tmp;
541  }
542 
543  bool operator==(const outer_iterator& other_outer) const {
544  __TBB_ASSERT(my_segcont != NULL, NULL);
545  return (outer_iter == other_outer &&
546  (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin()));
547  }
548 
549  bool operator!=(const outer_iterator& other_outer) const {
550  return !operator==(other_outer);
551 
552  }
553 
554  // (i)* RHS
555  reference operator*() const {
556  __TBB_ASSERT(my_segcont != NULL, NULL);
557  __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing a pointer at end of container");
558  __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // should never happen
559  return *inner_iter;
560  }
561 
562  // i->
563  pointer operator->() const { return &operator*();}
564 
565  private:
566  SegmentedContainer* my_segcont;
567  outer_iterator outer_iter;
568  inner_iterator inner_iter;
569 
570  void advance_me() {
571  __TBB_ASSERT(my_segcont != NULL, NULL);
572  __TBB_ASSERT(outer_iter != my_segcont->end(), NULL); // not true if there are no inner containers
573  __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // not true if the inner containers are all empty.
574  ++inner_iter;
575  while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) {
576  inner_iter = outer_iter->begin();
577  }
578  }
579  }; // segmented_iterator
580 
581  template<typename SegmentedContainer, typename T, typename U>
582  bool operator==( const segmented_iterator<SegmentedContainer,T>& i,
583  const segmented_iterator<SegmentedContainer,U>& j ) {
584  if(i.my_segcont != j.my_segcont) return false;
585  if(i.my_segcont == NULL) return true;
586  if(i.outer_iter != j.outer_iter) return false;
587  if(i.outer_iter == i.my_segcont->end()) return true;
588  return i.inner_iter == j.inner_iter;
589  }
590 
591  // !=
592  template<typename SegmentedContainer, typename T, typename U>
593  bool operator!=( const segmented_iterator<SegmentedContainer,T>& i,
594  const segmented_iterator<SegmentedContainer,U>& j ) {
595  return !(i==j);
596  }
597 
598  template<typename T>
599  struct construct_by_default: tbb::internal::no_assign {
600  void construct(void*where) {new(where) T();} // C++ note: the () in T() ensures zero initialization.
601  construct_by_default( int ) {}
602  };
603 
604  template<typename T>
605  struct construct_by_exemplar: tbb::internal::no_assign {
606  const T exemplar;
607  void construct(void*where) {new(where) T(exemplar);}
608  construct_by_exemplar( const T& t ) : exemplar(t) {}
609 #if __TBB_ETS_USE_CPP11
610  construct_by_exemplar( T&& t ) : exemplar(std::move(t)) {}
611 #endif
612  };
613 
614  template<typename T, typename Finit>
615  struct construct_by_finit: tbb::internal::no_assign {
616  Finit f;
617  void construct(void* where) {new(where) T(f());}
618  construct_by_finit( const Finit& f_ ) : f(f_) {}
619 #if __TBB_ETS_USE_CPP11
620  construct_by_finit( Finit&& f_ ) : f(std::move(f_)) {}
621 #endif
622  };
623 
624 #if __TBB_ETS_USE_CPP11
625  template<typename T, typename... P>
626  struct construct_by_args: tbb::internal::no_assign {
627  internal::stored_pack<P...> pack;
628  void construct(void* where) {
629  internal::call( [where](const typename strip<P>::type&... args ){
630  new(where) T(args...);
631  }, pack );
632  }
633  construct_by_args( P&& ... args ) : pack(std::forward<P>(args)...) {}
634  };
635 #endif
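 
 // How the construct_by_* policies above surface in the public constructors of
 // enumerable_thread_specific (a sketch of user code; each thread's copy of T is built
 // lazily on its first call to local()). The lambda and the variadic form assume C++11
 // support, and std::string is used only for illustration.
 //
 //     tbb::enumerable_thread_specific<int> a;                     // construct_by_default:  T()
 //     tbb::enumerable_thread_specific<int> b( 42 );               // construct_by_exemplar: T(42)
 //     tbb::enumerable_thread_specific<int> c( []{ return 7; } );  // construct_by_finit:    T(finit())
 //     tbb::enumerable_thread_specific<std::string> d( 3, 'x' );   // construct_by_args:     T(3, 'x')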
636 
637  // storage for initialization function pointer
638  // TODO: consider removing the template parameter T here and in callback_leaf
639  template<typename T>
640  class callback_base {
641  public:
642  // Clone *this
643  virtual callback_base* clone() const = 0;
644  // Destruct and free *this
645  virtual void destroy() = 0;
646  // Need virtual destructor to satisfy GCC compiler warning
647  virtual ~callback_base() { }
648  // Construct T at where
649  virtual void construct(void* where) = 0;
650  };
651 
652  template <typename T, typename Constructor>
653  class callback_leaf: public callback_base<T>, Constructor {
654 #if __TBB_ETS_USE_CPP11
655  template<typename... P> callback_leaf( P&& ... params ) : Constructor(std::forward<P>(params)...) {}
656 #else
657  template<typename X> callback_leaf( const X& x ) : Constructor(x) {}
658 #endif
659  // TODO: make the construction/destruction consistent (use allocator.construct/destroy)
660  typedef typename tbb::tbb_allocator<callback_leaf> my_allocator_type;
661 
662  callback_base<T>* clone() const __TBB_override {
663  return make(*this);
664  }
665 
666  void destroy() __TBB_override {
667  my_allocator_type().destroy(this);
668  my_allocator_type().deallocate(this,1);
669  }
670 
671  void construct(void* where) __TBB_override {
672  Constructor::construct(where);
673  }
674  public:
675 #if __TBB_ETS_USE_CPP11
676  template<typename... P>
677  static callback_base<T>* make( P&& ... params ) {
678  void* where = my_allocator_type().allocate(1);
679  return new(where) callback_leaf( std::forward<P>(params)... );
680  }
681 #else
682  template<typename X>
683  static callback_base<T>* make( const X& x ) {
684  void* where = my_allocator_type().allocate(1);
685  return new(where) callback_leaf(x);
686  }
687 #endif
688  };
689 
691 
699  template<typename U>
700  struct ets_element {
701  tbb::aligned_space<U> my_space;
702  bool is_built;
703  ets_element() { is_built = false; } // not currently-built
704  U* value() { return my_space.begin(); }
705  U* value_committed() { is_built = true; return my_space.begin(); }
706  ~ets_element() {
707  if(is_built) {
708  my_space.begin()->~U();
709  is_built = false;
710  }
711  }
712  };
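 
 // Intended protocol for the element above, as used by create_local() and the
 // create_local_by_* helpers further below: construct T into the raw space first, then
 // mark the element as built so that ~ets_element() runs the destructor
 // (sketch; 'e' stands for some ets_element<T>).
 //
 //     new( e.value() ) T( /*args*/ );   // placement-construct into my_space
 //     e.value_committed();              // record that destruction is now required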
713 
714  // A predicate that can be used for a compile-time compatibility check of ETS instances
715  // Ideally, it should have been declared inside the ETS class, but unfortunately
716  // in that case VS2013 does not enable the variadic constructor.
717  template<typename T, typename ETS> struct is_compatible_ets { static const bool value = false; };
718  template<typename T, typename U, typename A, ets_key_usage_type C>
719  struct is_compatible_ets< T, enumerable_thread_specific<U,A,C> > { static const bool value = internal::is_same_type<T,U>::value; };
720 
721 #if __TBB_ETS_USE_CPP11
722  // A predicate that checks whether, for a variable 'foo' of type T, foo() is a valid expression
723  template <typename T>
724  class is_callable_no_args {
725  private:
726  typedef char yes[1];
727  typedef char no [2];
728 
729  template<typename U> static yes& decide( decltype(declval<U>()())* );
730  template<typename U> static no& decide(...);
731  public:
732  static const bool value = (sizeof(decide<T>(NULL)) == sizeof(yes));
733  };
734 #endif
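 
 // For illustration (hypothetical checks, valid only when __TBB_ETS_USE_CPP11 is set):
 // a zero-argument callable satisfies the predicate above while a plain value type does
 // not; this is what steers lambdas and function objects to the finit constructor of
 // enumerable_thread_specific rather than to the exemplar or variadic constructors.
 //
 //     __TBB_STATIC_ASSERT(  is_callable_no_args<int(*)()>::value, "zero-arg callable" );
 //     __TBB_STATIC_ASSERT( !is_callable_no_args<int>::value, "plain value is not callable" );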
735 
736  } // namespace internal
738 
740 
759  template <typename T,
760  typename Allocator=cache_aligned_allocator<T>,
761  ets_key_usage_type ETS_key_type=ets_no_key >
762  class enumerable_thread_specific: internal::ets_base<ETS_key_type> {
763 
764  template<typename U, typename A, ets_key_usage_type C> friend class enumerable_thread_specific;
765 
766  typedef internal::padded< internal::ets_element<T> > padded_element;
767 
769  template<typename I>
770  class generic_range_type: public blocked_range<I> {
771  public:
772  typedef T value_type;
773  typedef T& reference;
774  typedef const T& const_reference;
775  typedef I iterator;
776  typedef ptrdiff_t difference_type;
777  generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {}
778  template<typename U>
779  generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {}
781  };
782 
783  typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type;
784  typedef tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type;
785 
786  internal::callback_base<T> *my_construct_callback;
787 
788  internal_collection_type my_locals;
789 
790  // TODO: consider unifying the callback mechanism for all create_local* methods below
791  // (likely non-compatible and requires interface version increase)
792  void* create_local() __TBB_override {
793  padded_element& lref = *my_locals.grow_by(1);
794  my_construct_callback->construct(lref.value());
795  return lref.value_committed();
796  }
797 
798  static void* create_local_by_copy( internal::ets_base<ets_no_key>& base, void* p ) {
799  enumerable_thread_specific& ets = static_cast<enumerable_thread_specific&>(base);
800  padded_element& lref = *ets.my_locals.grow_by(1);
801  new(lref.value()) T(*static_cast<T*>(p));
802  return lref.value_committed();
803  }
804 
805 #if __TBB_ETS_USE_CPP11
806  static void* create_local_by_move( internal::ets_base<ets_no_key>& base, void* p ) {
807  enumerable_thread_specific& ets = static_cast<enumerable_thread_specific&>(base);
808  padded_element& lref = *ets.my_locals.grow_by(1);
809  new(lref.value()) T(std::move(*static_cast<T*>(p)));
810  return lref.value_committed();
811  }
812 #endif
813 
814  typedef typename Allocator::template rebind< uintptr_t >::other array_allocator_type;
815 
816  // _size is in bytes
817  void* create_array(size_t _size) __TBB_override {
818  size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
819  return array_allocator_type().allocate(nelements);
820  }
821 
822  void free_array( void* _ptr, size_t _size) __TBB_override {
823  size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
824  array_allocator_type().deallocate( reinterpret_cast<uintptr_t *>(_ptr),nelements);
825  }
826 
827  public:
828 
830  typedef Allocator allocator_type;
831  typedef T value_type;
832  typedef T& reference;
833  typedef const T& const_reference;
834  typedef T* pointer;
835  typedef const T* const_pointer;
836  typedef typename internal_collection_type::size_type size_type;
837  typedef typename internal_collection_type::difference_type difference_type;
838 
839  // Iterator types
840  typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator;
841  typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator;
842 
843  // Parallel range types
844  typedef generic_range_type< iterator > range_type;
845  typedef generic_range_type< const_iterator > const_range_type;
846 
848  enumerable_thread_specific() : my_construct_callback(
849  internal::callback_leaf<T,internal::construct_by_default<T> >::make(/*dummy argument*/0)
850  ){}
851 
853  template <typename Finit
854 #if __TBB_ETS_USE_CPP11
856 #endif
857  >
858  explicit enumerable_thread_specific( Finit finit ) : my_construct_callback(
859  internal::callback_leaf<T,internal::construct_by_finit<T,Finit> >::make( tbb::internal::move(finit) )
860  ){}
861 
863  explicit enumerable_thread_specific( const T& exemplar ) : my_construct_callback(
864  internal::callback_leaf<T,internal::construct_by_exemplar<T> >::make( exemplar )
865  ){}
866 
867 #if __TBB_ETS_USE_CPP11
868  explicit enumerable_thread_specific( T&& exemplar ) : my_construct_callback(
869  internal::callback_leaf<T,internal::construct_by_exemplar<T> >::make( std::move(exemplar) )
870  ){}
871 
873  template <typename P1, typename... P,
877  >::type>
878  enumerable_thread_specific( P1&& arg1, P&& ... args ) : my_construct_callback(
879  internal::callback_leaf<T,internal::construct_by_args<T,P1,P...> >::make( std::forward<P1>(arg1), std::forward<P>(args)... )
880  ){}
881 #endif
882 
884  ~enumerable_thread_specific() {
885  if(my_construct_callback) my_construct_callback->destroy();
886  // Deallocate the hash table before overridden free_array() becomes inaccessible
887  this->internal::ets_base<ets_no_key>::table_clear();
888  }
889 
890  //! returns reference to local, discarding exists
891  reference local() {
892  bool exists;
893  return local(exists);
894  }
895 
897  reference local(bool& exists) {
898  void* ptr = this->table_lookup(exists);
899  return *(T*)ptr;
900  }
901 
903  size_type size() const { return my_locals.size(); }
904 
906  bool empty() const { return my_locals.empty(); }
907 
909  iterator begin() { return iterator( my_locals, 0 ); }
911  iterator end() { return iterator(my_locals, my_locals.size() ); }
912 
914  const_iterator begin() const { return const_iterator(my_locals, 0); }
915 
917  const_iterator end() const { return const_iterator(my_locals, my_locals.size()); }
918 
920  range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); }
921 
923  const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); }
924 
926  void clear() {
927  my_locals.clear();
928  this->table_clear();
929  // callback is not destroyed
930  }
931 
932  private:
933 
934  template<typename A2, ets_key_usage_type C2>
935  void internal_copy( const enumerable_thread_specific<T, A2, C2>& other ) {
936 #if __TBB_ETS_USE_CPP11 && TBB_USE_ASSERT
937  // this tests is_compatible_ets
938  __TBB_STATIC_ASSERT( (internal::is_compatible_ets<T, typename internal::strip<decltype(other)>::type>::value), "is_compatible_ets fails" );
939 #endif
940  // Initialize my_construct_callback first, so that it is valid even if rest of this routine throws an exception.
941  my_construct_callback = other.my_construct_callback->clone();
942  __TBB_ASSERT(my_locals.size()==0,NULL);
943  my_locals.reserve(other.size());
944  this->table_elementwise_copy( other, create_local_by_copy );
945  }
946 
947  void internal_swap( enumerable_thread_specific& other ) {
948  using std::swap;
949  __TBB_ASSERT( this!=&other, NULL );
950  swap(my_construct_callback, other.my_construct_callback);
951  // concurrent_vector::swap() preserves storage space,
952  // so addresses to the vector kept in ETS hash table remain valid.
953  swap(my_locals, other.my_locals);
954  this->internal::ets_base<ETS_key_type>::table_swap(other);
955  }
956 
957 #if __TBB_ETS_USE_CPP11
958  template<typename A2, ets_key_usage_type C2>
959  void internal_move( enumerable_thread_specific<T, A2, C2>&& other ) {
960 #if TBB_USE_ASSERT
961  // this tests is_compatible_ets
962  __TBB_STATIC_ASSERT( (internal::is_compatible_ets<T, typename internal::strip<decltype(other)>::type>::value), "is_compatible_ets fails" );
963 #endif
964  my_construct_callback = other.my_construct_callback;
965  other.my_construct_callback = NULL;
966  __TBB_ASSERT(my_locals.size()==0,NULL);
967  my_locals.reserve(other.size());
968  this->table_elementwise_copy( other, create_local_by_move );
969  }
970 #endif
971 
972  public:
973 
974  enumerable_thread_specific( const enumerable_thread_specific& other )
975  : internal::ets_base<ETS_key_type>() /* prevents GCC warnings with -Wextra */
976  {
977  internal_copy(other);
978  }
979 
980  template<typename Alloc, ets_key_usage_type Cachetype>
981  enumerable_thread_specific( const enumerable_thread_specific<T, Alloc, Cachetype>& other )
982  {
983  internal_copy(other);
984  }
985 
986 #if __TBB_ETS_USE_CPP11
987  enumerable_thread_specific( enumerable_thread_specific&& other ) : my_construct_callback()
988  {
989  internal_swap(other);
990  }
991 
992  template<typename Alloc, ets_key_usage_type Cachetype>
993  enumerable_thread_specific( enumerable_thread_specific<T, Alloc, Cachetype>&& other ) : my_construct_callback()
994  {
995  internal_move(std::move(other));
996  }
997 #endif
998 
999  enumerable_thread_specific& operator=( const enumerable_thread_specific& other )
1000  {
1001  if( this != &other ) {
1002  this->clear();
1003  my_construct_callback->destroy();
1004  internal_copy( other );
1005  }
1006  return *this;
1007  }
1008 
1009  template<typename Alloc, ets_key_usage_type Cachetype>
1010  enumerable_thread_specific& operator=( const enumerable_thread_specific<T, Alloc, Cachetype>& other )
1011  {
1012  __TBB_ASSERT( static_cast<void*>(this)!=static_cast<const void*>(&other), NULL ); // Objects of different types
1013  this->clear();
1014  my_construct_callback->destroy();
1015  internal_copy(other);
1016  return *this;
1017  }
1018 
1019 #if __TBB_ETS_USE_CPP11
1020  enumerable_thread_specific& operator=( enumerable_thread_specific&& other )
1021  {
1022  if( this != &other )
1023  internal_swap(other);
1024  return *this;
1025  }
1026 
1027  template<typename Alloc, ets_key_usage_type Cachetype>
1028  enumerable_thread_specific& operator=( enumerable_thread_specific<T, Alloc, Cachetype>&& other )
1029  {
1030  __TBB_ASSERT( static_cast<void*>(this)!=static_cast<const void*>(&other), NULL ); // Objects of different types
1031  this->clear();
1032  my_construct_callback->destroy();
1033  internal_move(std::move(other));
1034  return *this;
1035  }
1036 #endif
1037 
1038  // combine_func_t has signature T(T,T) or T(const T&, const T&)
1039  template <typename combine_func_t>
1040  T combine(combine_func_t f_combine) {
1041  if(begin() == end()) {
1042  internal::ets_element<T> location;
1043  my_construct_callback->construct(location.value());
1044  return *location.value_committed();
1045  }
1046  const_iterator ci = begin();
1047  T my_result = *ci;
1048  while(++ci != end())
1049  my_result = f_combine( my_result, *ci );
1050  return my_result;
1051  }
1052 
1053  // combine_func_t takes T by value or by [const] reference, and returns nothing
1054  template <typename combine_func_t>
1055  void combine_each(combine_func_t f_combine) {
1056  for(iterator ci = begin(); ci != end(); ++ci) {
1057  f_combine( *ci );
1058  }
1059  }
1060 
1061  }; // enumerable_thread_specific
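 
 // Putting it together (a sketch of user code; assumes "tbb/parallel_for.h",
 // "tbb/blocked_range.h" and <functional> are also included, and that 'data' and 'n'
 // describe the input). Each thread lazily creates its own copy via local(), and the
 // copies are reduced afterwards with combine() (or visited in place with combine_each()).
 //
 //     typedef tbb::enumerable_thread_specific<long> counter_t;
 //     counter_t counter;                           // each local copy starts as long() == 0
 //     tbb::parallel_for( tbb::blocked_range<size_t>(0, n),
 //         [&]( const tbb::blocked_range<size_t>& r ) {
 //             long& c = counter.local();           // this thread's copy, created on demand
 //             for( size_t i = r.begin(); i != r.end(); ++i )
 //                 if( data[i] % 2 == 0 ) ++c;
 //         } );
 //     long total = counter.combine( std::plus<long>() );   // reduce all thread-local copies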
1062 
1063  template< typename Container >
1064  class flattened2d {
1065 
1066  // This intermediate typedef is to address issues with VC7.1 compilers
1067  typedef typename Container::value_type conval_type;
1068 
1069  public:
1070 
1072  typedef typename conval_type::size_type size_type;
1073  typedef typename conval_type::difference_type difference_type;
1074  typedef typename conval_type::allocator_type allocator_type;
1075  typedef typename conval_type::value_type value_type;
1076  typedef typename conval_type::reference reference;
1077  typedef typename conval_type::const_reference const_reference;
1078  typedef typename conval_type::pointer pointer;
1079  typedef typename conval_type::const_pointer const_pointer;
1080 
1081  typedef typename internal::segmented_iterator<Container, value_type> iterator;
1082  typedef typename internal::segmented_iterator<Container, const value_type> const_iterator;
1083 
1084  flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) :
1085  my_container(const_cast<Container*>(&c)), my_begin(b), my_end(e) { }
1086 
1087  explicit flattened2d( const Container &c ) :
1088  my_container(const_cast<Container*>(&c)), my_begin(c.begin()), my_end(c.end()) { }
1089 
1090  iterator begin() { return iterator(*my_container) = my_begin; }
1091  iterator end() { return iterator(*my_container) = my_end; }
1092  const_iterator begin() const { return const_iterator(*my_container) = my_begin; }
1093  const_iterator end() const { return const_iterator(*my_container) = my_end; }
1094 
1095  size_type size() const {
1096  size_type tot_size = 0;
1097  for(typename Container::const_iterator i = my_begin; i != my_end; ++i) {
1098  tot_size += i->size();
1099  }
1100  return tot_size;
1101  }
1102 
1103  private:
1104 
1105  Container *my_container;
1106  typename Container::const_iterator my_begin;
1107  typename Container::const_iterator my_end;
1108 
1109  };
1110 
1111  template <typename Container>
1112  flattened2d<Container> flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) {
1113  return flattened2d<Container>(c, b, e);
1114  }
1115 
1116  template <typename Container>
1117  flattened2d<Container> flatten2d(const Container &c) {
1118  return flattened2d<Container>(c);
1119  }
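 
 // Usage sketch for flatten2d(): when each thread collects results into its own container,
 // the flattened2d view iterates over the elements of all thread-local containers as one
 // flat sequence (assumes <vector> is included; 'per_thread' is an illustrative name).
 //
 //     typedef tbb::enumerable_thread_specific< std::vector<int> > ets_vectors;
 //     ets_vectors per_thread;
 //     // ... parallel work appends to per_thread.local() ...
 //     long total = 0;
 //     tbb::flattened2d<ets_vectors> flat = tbb::flatten2d( per_thread );
 //     for( tbb::flattened2d<ets_vectors>::iterator i = flat.begin(); i != flat.end(); ++i )
 //         total += *i;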
1120 
1121 } // interface6
1122 
1123 namespace internal {
1124 using interface6::internal::segmented_iterator;
1125 }
1126 
1127 using interface6::enumerable_thread_specific;
1128 using interface6::flattened2d;
1129 using interface6::flatten2d;
1130 
1131 } // namespace tbb
1132 
1133 #endif