Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
atomic.h
Go to the documentation of this file.
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #ifndef __TBB_atomic_H
18 #define __TBB_atomic_H
19 
20 #include <cstddef>
21 
22 #if _MSC_VER
23 #define __TBB_LONG_LONG __int64
24 #else
25 #define __TBB_LONG_LONG long long
26 #endif /* _MSC_VER */
27 
28 #include "tbb_machine.h"
29 
30 #if _MSC_VER && !__INTEL_COMPILER
31  // Suppress overzealous compiler warnings till the end of the file
32  #pragma warning (push)
33  #pragma warning (disable: 4244 4267 4512)
34 #endif
35 
36 namespace tbb {
37 
48 };
49 
51 namespace internal {
52 
53 #if __TBB_ALIGNAS_PRESENT
54  #define __TBB_DECL_ATOMIC_FIELD(t,f,a) alignas(a) t f;
55 #elif __TBB_ATTRIBUTE_ALIGNED_PRESENT
56  #define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f __attribute__ ((aligned(a)));
57 #elif __TBB_DECLSPEC_ALIGN_PRESENT
58  #define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f;
59 #else
60  #error Do not know syntax for forcing alignment.
61 #endif
62 
//! Mapping from an operand byte size to a same-sized signed integral "word" type.
/** Only sizes with hardware atomic support are specialized; using any other
    size fails at compile time because the primary template is never defined. */
template<size_t S>
struct atomic_rep; // Primary template declared, but never defined.

//! 1-byte operands.
template<>
struct atomic_rep<1> {
    typedef int8_t word;
};

//! 2-byte operands.
template<>
struct atomic_rep<2> {
    typedef int16_t word;
};

//! 4-byte operands.
template<>
struct atomic_rep<4> {
#if _MSC_VER && !_WIN64
    // Work-around that avoids spurious /Wp64 warnings: on 32-bit MSVC,
    // intptr_t is the same width as int32_t but keeps /Wp64 quiet.
    typedef intptr_t word;
#else
    typedef int32_t word;
#endif
};

#if __TBB_64BIT_ATOMICS
//! 8-byte operands, only on ports that provide 64-bit atomics.
template<>
struct atomic_rep<8> {
    typedef int64_t word;
};
#endif
89 
90 template<typename value_type, size_t size>
92 
// The aligned_storage partial specializations are stamped out with a macro
// because MSVC's __declspec(align()) accepts only _literal_ constants, so
// every alignment value needs its own textual occurrence.
#if __TBB_ATOMIC_CTORS
    // C++11 path: constexpr construction so atomics can be statically initialized.
    #define ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(SIZE)                       \
    template<typename value_type>                                             \
    struct aligned_storage<value_type,SIZE> {                                 \
        __TBB_DECL_ATOMIC_FIELD(value_type,my_value,SIZE)                     \
        aligned_storage() = default ;                                         \
        constexpr aligned_storage(value_type value):my_value(value){}         \
    };                                                                        \

#else
    // Pre-C++11 path: plain aggregate, no constructors.
    #define ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(SIZE)                       \
    template<typename value_type>                                             \
    struct aligned_storage<value_type,SIZE> {                                 \
        __TBB_DECL_ATOMIC_FIELD(value_type,my_value,SIZE)                     \
    };                                                                        \

#endif
111 
112 template<typename value_type>
113 struct aligned_storage<value_type,1> {
114  value_type my_value;
115 #if __TBB_ATOMIC_CTORS
116  aligned_storage() = default ;
117  constexpr aligned_storage(value_type value):my_value(value){}
118 #endif
119 };
120 
123 #if __TBB_64BIT_ATOMICS
125 #endif
126 
127 template<size_t Size, memory_semantics M>
128 struct atomic_traits; // Primary template declared, but not defined.
129 
130 #define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M) \
131  template<> struct atomic_traits<S,M> { \
132  typedef atomic_rep<S>::word word; \
133  inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \
134  return __TBB_machine_cmpswp##S##M(location,new_value,comparand); \
135  } \
136  inline static word fetch_and_add( volatile void* location, word addend ) { \
137  return __TBB_machine_fetchadd##S##M(location,addend); \
138  } \
139  inline static word fetch_and_store( volatile void* location, word value ) { \
140  return __TBB_machine_fetchstore##S##M(location,value); \
141  } \
142  };
143 
144 #define __TBB_DECL_ATOMIC_PRIMITIVES(S) \
145  template<memory_semantics M> \
146  struct atomic_traits<S,M> { \
147  typedef atomic_rep<S>::word word; \
148  inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \
149  return __TBB_machine_cmpswp##S(location,new_value,comparand); \
150  } \
151  inline static word fetch_and_add( volatile void* location, word addend ) { \
152  return __TBB_machine_fetchadd##S(location,addend); \
153  } \
154  inline static word fetch_and_store( volatile void* location, word value ) { \
155  return __TBB_machine_fetchstore##S(location,value); \
156  } \
157  };
158 
159 template<memory_semantics M>
160 struct atomic_load_store_traits; // Primary template declaration
161 
162 #define __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(M) \
163  template<> struct atomic_load_store_traits<M> { \
164  template <typename T> \
165  inline static T load( const volatile T& location ) { \
166  return __TBB_load_##M( location ); \
167  } \
168  template <typename T> \
169  inline static void store( volatile T& location, T value ) { \
170  __TBB_store_##M( location, value ); \
171  } \
172  }
173 
174 #if __TBB_USE_FENCED_ATOMICS
187 #if __TBB_64BIT_ATOMICS
192 #endif
193 #else /* !__TBB_USE_FENCED_ATOMICS */
197 #if __TBB_64BIT_ATOMICS
199 #endif
200 #endif /* !__TBB_USE_FENCED_ATOMICS */
201 
206 
208 
// Additive inverse of 1 for type T, computed with binary minus (T(0)-T(1))
// rather than unary minus, so unsigned T does not trigger
// "unary minus applied to unsigned" compiler warnings (same rationale as the
// comment in operator-= further down this file).
210 #define __TBB_MINUS_ONE(T) (T(T(0)-T(1)))
211 
213 
// Base class providing load/store, compare_and_swap and fetch_and_store for
// atomic<T>; arithmetic operations are layered on by
// atomic_impl_with_arithmetic below.
// NOTE(review): this Doxygen listing collapses several source lines
// (218, 221, 225, 227-228, 271, 274, 280, 285, 291, 297, 304, 314, 322, 324),
// so some member signatures and fields are not visible here. The index at the
// bottom of the page shows e.g. "aligned_storage< T, sizeof(T)> my_storage"
// (line 218) and the fetch_and_store/compare_and_swap signatures.
215 template<typename T>
216 struct atomic_impl {
217 protected:
219 private:
220  //TODO: rechecks on recent versions of gcc if union is still the _only_ way to do a conversion without warnings
// converter: union-based reinterpretation between value_type and its
// same-sized integral word (fields at collapsed lines 225/227-228).
222  template<typename value_type>
223  union converter {
224  typedef typename atomic_rep<sizeof(value_type)>::word bits_type;
226  converter(value_type a_value) : value(a_value) {}
229  };
230 
// to_bits: value -> integral word, via the converter union.
231  template<typename value_t>
232  static typename converter<value_t>::bits_type to_bits(value_t value){
233  return converter<value_t>(value).bits;
234  }
// to_value: integral word -> value, via the converter union.
235  template<typename value_t>
236  static value_t to_value(typename converter<value_t>::bits_type bits){
237  converter<value_t> u;
238  u.bits = bits;
239  return u.value;
240  }
241 
242  template<typename value_t>
243  union ptr_converter; //Primary template declared, but never defined.
244 
// Specialization only for pointer types: reinterpret a pointer as uintptr_t.
245  template<typename value_t>
246  union ptr_converter<value_t *> {
248  ptr_converter(value_t* a_value) : value(a_value) {}
249  value_t* value;
250  uintptr_t bits;
251  };
252  //TODO: check if making to_bits accepting reference (thus unifying it with to_bits_ref)
253  //does not hurt performance
// to_bits_ref: view the value's own storage as the integral word type
// (yields an lvalue, unlike to_bits which copies through the union).
254  template<typename value_t>
255  static typename converter<value_t>::bits_type & to_bits_ref(value_t& value){
256  //TODO: this #ifdef is temporary workaround, as union conversion seems to fail
257  //on suncc for 64 bit types for 32 bit target
258  #if !__SUNPRO_CC
259  return *(typename converter<value_t>::bits_type*)ptr_converter<value_t*>(&value).bits;
260  #else
261  return *(typename converter<value_t>::bits_type*)(&value);
262  #endif
263  }
264 
265 
266 public:
267  typedef T value_type;
268 
269 #if __TBB_ATOMIC_CTORS
270  atomic_impl() = default ;
// (constexpr atomic_impl(value_type) at collapsed line 271 — see index.)
272 #endif
// fetch_and_store<M>: atomically swap in the new value and return the old
// one (signature at collapsed line 274; per index: value_type fetch_and_store(value_type)).
273  template<memory_semantics M>
275  return to_value<value_type>(
276  internal::atomic_traits<sizeof(value_type),M>::fetch_and_store( &my_storage.my_value, to_bits(value) )
277  );
278  }
279 
// Non-template overload defaults to full_fence semantics (line 280 collapsed).
281  return fetch_and_store<full_fence>(value);
282  }
283 
// compare_and_swap<M>: CAS against comparand, returning the previous value
// (signature at collapsed line 285).
284  template<memory_semantics M>
286  return to_value<value_type>(
287  internal::atomic_traits<sizeof(value_type),M>::compare_and_swap( &my_storage.my_value, to_bits(value), to_bits(comparand) )
288  );
289  }
290 
// Non-template overload defaults to full_fence semantics (line 291 collapsed).
292  return compare_and_swap<full_fence>(value,comparand);
293  }
294 
// Implicit conversion reads the current value (load expression at collapsed
// line 297; presumably __TBB_load_with_acquire per the index — confirm).
295  operator value_type() const volatile { // volatile qualifier here for backwards compatibility
296  return to_value<value_type>(
298  );
299  }
300 
// load<M>: read with explicit memory semantics (body line 304 collapsed).
301  template<memory_semantics M>
302  value_type load () const {
303  return to_value<value_type>(
305  );
306  }
307 
// Plain load defaults to acquire semantics.
308  value_type load () const {
309  return load<acquire>();
310  }
311 
// store<M>: write with explicit memory semantics (body line 314 collapsed).
312  template<memory_semantics M>
313  void store ( value_type value ) {
315  }
316 
// Plain store defaults to release semantics.
317  void store ( value_type value ) {
318  store<release>( value );
319  }
320 
321 protected:
// store_with_release (signature at collapsed line 322/324; per index:
// value_type store_with_release(value_type rhs)): publish rhs, return it.
323  //TODO: unify with store<release>
325  return rhs;
326  }
327 };
328 
330 
// Adds fetch_and_add and the ++/--/+=/-= operator family on top of
// atomic_impl. I is the stored type, D the difference (addend) type,
// StepType the unit of a "step" (char for integrals; T for atomic<T*> so
// pointer arithmetic advances by whole objects).
// NOTE(review): this listing collapses the class head (line 334, which per
// the index derives from atomic_impl<I>) and the operator signatures
// (lines 339, 351, 355, 360, 364, 369, 373, 379, 383, 387, 391); only the
// bodies are visible below — the attributions in the comments are inferred
// from those bodies and the index, confirm against the real header.
333 template<typename I, typename D, typename StepType>
335 public:
336  typedef I value_type;
337 #if __TBB_ATOMIC_CTORS
338  atomic_impl_with_arithmetic() = default ;
// (constexpr atomic_impl_with_arithmetic(value_type) at collapsed line 339.)
340 #endif
// fetch_and_add<M>: atomic add returning the previous value; the addend is
// scaled by sizeof(StepType), which is 1 for the integral specializations.
341  template<memory_semantics M>
342  value_type fetch_and_add( D addend ) {
343  return value_type(internal::atomic_traits<sizeof(value_type),M>::fetch_and_add( &this->my_storage.my_value, addend*sizeof(StepType) ));
344  }
345 
// Non-template overload defaults to full_fence semantics.
346  value_type fetch_and_add( D addend ) {
347  return fetch_and_add<full_fence>(addend);
348  }
349 
// fetch_and_increment<M> (signature at collapsed line 351).
350  template<memory_semantics M>
352  return fetch_and_add<M>(1);
353  }
354 
// fetch_and_increment, full-fence default (line 355 collapsed).
356  return fetch_and_add(1);
357  }
358 
// fetch_and_decrement<M> (line 360 collapsed).
359  template<memory_semantics M>
361  return fetch_and_add<M>(__TBB_MINUS_ONE(D));
362  }
363 
// fetch_and_decrement, full-fence default (line 364 collapsed).
365  return fetch_and_add(__TBB_MINUS_ONE(D));
366  }
367 
368 public:
// operator+= (signature at collapsed line 369): returns the new value.
370  return fetch_and_add(value)+value;
371  }
372 
// operator-= (signature at collapsed line 373).
374  // Additive inverse of value computed using binary minus,
375  // instead of unary minus, for sake of avoiding compiler warnings.
376  return operator+=(D(0)-value);
377  }
378 
// Pre-increment (line 379 collapsed): returns the new value.
380  return fetch_and_add(1)+1;
381  }
382 
// Pre-decrement (line 383 collapsed): returns the new value.
384  return fetch_and_add(__TBB_MINUS_ONE(D))-1;
385  }
386 
// Post-increment (line 387 collapsed): returns the old value.
388  return fetch_and_add(1);
389  }
390 
// Post-decrement (line 391 collapsed): returns the old value.
392  return fetch_and_add(__TBB_MINUS_ONE(D));
393  }
394 };
395 
396 } /* Internal */
398 
400 
402 template<typename T>
403 struct atomic: internal::atomic_impl<T> {
404 #if __TBB_ATOMIC_CTORS
405  atomic() = default;
406  constexpr atomic(T arg): internal::atomic_impl<T>(arg) {}
407 #endif
408  T operator=( T rhs ) {
409  // "this" required here in strict ISO C++ because store_with_release is a dependent name
410  return this->store_with_release(rhs);
411  }
412  atomic<T>& operator=( const atomic<T>& rhs ) {this->store_with_release(rhs); return *this;}
413 };
414 
// Stamps out the full specialization of atomic<T> for an integral type T:
// arithmetic with difference type T and StepType char (sizeof(char)==1, so
// addends are not scaled).
#if __TBB_ATOMIC_CTORS
    #define __TBB_DECL_ATOMIC(T)                                                             \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {           \
        atomic() = default;                                                                  \
        constexpr atomic(T arg): internal::atomic_impl_with_arithmetic<T,T,char>(arg) {}     \
                                                                                             \
        T operator=( T rhs ) {return store_with_release(rhs);}                               \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
    };
#else
    #define __TBB_DECL_ATOMIC(T)                                                             \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {           \
        T operator=( T rhs ) {return store_with_release(rhs);}                               \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
    };
#endif
431 
432 #if __TBB_64BIT_ATOMICS
433 //TODO: consider adding non-default (and atomic) copy constructor for 32bit platform
436 #else
437 // test_atomic will verify that sizeof(long long)==8
438 #endif
439 __TBB_DECL_ATOMIC(long)
440 __TBB_DECL_ATOMIC(unsigned long)
441 
442 #if _MSC_VER && !_WIN64
#if __TBB_ATOMIC_CTORS
/* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from
   cl /Wp64. Identical to __TBB_DECL_ATOMIC(unsigned) except that operator=(T)
   is replaced by an operator=(U) that explicitly narrows U to T. T and U must
   be type synonyms on the platform, with U the wider variant from the
   perspective of /Wp64. */
#define __TBB_DECL_ATOMIC_ALT(T,U)                                                           \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {           \
        atomic() = default ;                                                                 \
        constexpr atomic(T arg): internal::atomic_impl_with_arithmetic<T,T,char>(arg) {}     \
        T operator=( U rhs ) {return store_with_release(T(rhs));}                            \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
    };
#else
// Same shape without the C++11 constructors.
#define __TBB_DECL_ATOMIC_ALT(T,U)                                                           \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {           \
        T operator=( U rhs ) {return store_with_release(T(rhs));}                            \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
    };
#endif
463 __TBB_DECL_ATOMIC_ALT(unsigned,size_t)
464 __TBB_DECL_ATOMIC_ALT(int,ptrdiff_t)
465 #else
466 __TBB_DECL_ATOMIC(unsigned)
468 #endif /* _MSC_VER && !_WIN64 */
469 
// Expand the arithmetic-capable atomic<T> specialization for the remaining
// integral types.
470 __TBB_DECL_ATOMIC(unsigned short)
471 __TBB_DECL_ATOMIC(short)
472 __TBB_DECL_ATOMIC(char)
473 __TBB_DECL_ATOMIC(signed char)
474 __TBB_DECL_ATOMIC(unsigned char)
475 
// wchar_t gets its own specialization only when it is a distinct type;
// presumably without _NATIVE_WCHAR_T_DEFINED MSVC treats wchar_t as a typedef
// of an integral type already covered above — confirm against MSVC docs.
476 #if !_MSC_VER || defined(_NATIVE_WCHAR_T_DEFINED)
477 __TBB_DECL_ATOMIC(wchar_t)
478 #endif /* _MSC_VER||!defined(_NATIVE_WCHAR_T_DEFINED) */
479 
481 template<typename T> struct atomic<T*>: internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T> {
482 #if __TBB_ATOMIC_CTORS
483  atomic() = default ;
484  constexpr atomic(T* arg): internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T>(arg) {}
485 #endif
486  T* operator=( T* rhs ) {
487  // "this" required here in strict ISO C++ because store_with_release is a dependent name
488  return this->store_with_release(rhs);
489  }
490  atomic<T*>& operator=( const atomic<T*>& rhs ) {
491  this->store_with_release(rhs); return *this;
492  }
493  T* operator->() const {
494  return (*this);
495  }
496 };
497 
// Specialization for atomic<void*>: derives from plain atomic_impl, so it
// deliberately offers no pointer arithmetic and no operator-> (unlike the
// atomic<T*> specialization above).
499 template<> struct atomic<void*>: internal::atomic_impl<void*> {
500 #if __TBB_ATOMIC_CTORS
501  atomic() = default ;
502  constexpr atomic(void* arg): internal::atomic_impl<void*>(arg) {}
503 #endif
504  void* operator=( void* rhs ) {
505  // "this" required here in strict ISO C++ because store_with_release is a dependent name
506  return this->store_with_release(rhs);
507  }
// NOTE(review): line 508 — the copy-assignment signature
// (atomic<void*>& operator=(const atomic<void*>&) per the index) — is
// collapsed in this listing; the body below belongs to it.
509  this->store_with_release(rhs); return *this;
510  }
511 };
512 
513 // Helpers to workaround ugly syntax of calling template member function of a
514 // template class with template argument dependent on template parameters.
515 
516 template <memory_semantics M, typename T>
517 T load ( const atomic<T>& a ) { return a.template load<M>(); }
518 
519 template <memory_semantics M, typename T>
520 void store ( atomic<T>& a, T value ) { a.template store<M>(value); }
521 
522 namespace interface6{
// make_atomic: build an atomic<T> holding t via a relaxed store — an
// alternative to zero-initialization when an atomic appears in an
// initialization list (per the index entry for atomic.h:525).
// NOTE(review): the function signature line (525, "atomic<T> make_atomic(T t)"
// per the index) is collapsed in this listing.
524 template<typename T>
526  atomic<T> a;
527  store<relaxed>(a,t);
528  return a;
529 }
530 }
532 
533 namespace internal {
534 template<memory_semantics M, typename T >
535 void swap(atomic<T> & lhs, atomic<T> & rhs){
536  T tmp = load<M>(lhs);
537  store<M>(lhs,load<M>(rhs));
538  store<M>(rhs,tmp);
539 }
540 
541 // only to aid in the gradual conversion of ordinary variables to proper atomics
542 template<typename T>
543 inline atomic<T>& as_atomic( T& t ) {
544  return (atomic<T>&)t;
545 }
546 } // namespace tbb::internal
547 
548 } // namespace tbb
549 
550 #if _MSC_VER && !__INTEL_COMPILER
551  #pragma warning (pop)
552 #endif // warnings are restored
553 
554 #endif /* __TBB_atomic_H */
T * operator=(T *rhs)
Definition: atomic.h:486
constexpr aligned_storage(value_type value)
Definition: atomic.h:117
value_type fetch_and_add(D addend)
Definition: atomic.h:346
value_type load() const
Definition: atomic.h:302
value_type load() const
Definition: atomic.h:308
value_type store_with_release(value_type rhs)
Definition: atomic.h:322
value_type compare_and_swap(value_type value, value_type comparand)
Definition: atomic.h:285
Base class that provides basic functionality for atomic<T> without fetch_and_add.
Definition: atomic.h:216
converter(value_type a_value)
Definition: atomic.h:226
constexpr atomic_impl(value_type value)
Definition: atomic.h:271
#define ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(S)
Definition: atomic.h:95
Primary template for atomic.
Definition: atomic.h:403
The graph class.
T operator=(T rhs)
Definition: atomic.h:408
Release.
Definition: atomic.h:45
aligned_storage< T, sizeof(T)> my_storage
Definition: atomic.h:218
#define __TBB_DECL_ATOMIC(T)
Definition: atomic.h:416
#define __TBB_DECL_ATOMIC_PRIMITIVES(S)
Definition: atomic.h:144
void * operator=(void *rhs)
Definition: atomic.h:504
atomic< T > & as_atomic(T &t)
Definition: atomic.h:543
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long value
Sequential consistency.
Definition: atomic.h:41
atomic< T > make_atomic(T t)
Make an atomic for use in an initialization (list), as an alternative to zero-initialization or norma...
Definition: atomic.h:525
void store(atomic< T > &a, T value)
Definition: atomic.h:520
#define __TBB_LONG_LONG
Definition: atomic.h:25
__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(full_fence)
Specialization for atomic<void*>, for sake of not allowing arithmetic or operator->.
Definition: atomic.h:499
Base class that provides basic functionality for atomic<T> with fetch_and_add.
Definition: atomic.h:334
void store(value_type value)
Definition: atomic.h:317
atomic_rep< sizeof(value_type)>::word bits_type
Definition: atomic.h:224
#define __TBB_MINUS_ONE(T)
Additive inverse of 1 for type T.
Definition: atomic.h:210
Union type used to convert type T to underlying integral type.
Definition: atomic.h:223
value_type fetch_and_store(value_type value)
Definition: atomic.h:274
atomic< void * > & operator=(const atomic< void * > &rhs)
Definition: atomic.h:508
static converter< value_t >::bits_type & to_bits_ref(value_t &value)
Definition: atomic.h:255
void __TBB_store_with_release(volatile T &location, V value)
Definition: tbb_machine.h:713
atomic< T > & operator=(const atomic< T > &rhs)
Definition: atomic.h:412
void store(value_type value)
Definition: atomic.h:313
value_type compare_and_swap(value_type value, value_type comparand)
Definition: atomic.h:291
T __TBB_load_with_acquire(const volatile T &location)
Definition: tbb_machine.h:709
Specialization for atomic<T*> with arithmetic and operator->.
Definition: atomic.h:481
atomic()=default
static value_t to_value(typename converter< value_t >::bits_type bits)
Definition: atomic.h:236
T load(const atomic< T > &a)
Definition: atomic.h:517
constexpr atomic(void *arg)
Definition: atomic.h:502
No ordering.
Definition: atomic.h:47
value_type fetch_and_add(D addend)
Definition: atomic.h:342
static converter< value_t >::bits_type to_bits(value_t value)
Definition: atomic.h:232
constexpr atomic_impl_with_arithmetic(value_type value)
Definition: atomic.h:339
atomic< T * > & operator=(const atomic< T * > &rhs)
Definition: atomic.h:490
constexpr atomic(T *arg)
Definition: atomic.h:484
memory_semantics
Specifies memory semantics.
Definition: atomic.h:39
constexpr atomic(T arg)
Definition: atomic.h:406
#define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S, M)
Definition: atomic.h:130
T * operator->() const
Definition: atomic.h:493
Acquire.
Definition: atomic.h:43
value_type fetch_and_store(value_type value)
Definition: atomic.h:280
void swap(atomic< T > &lhs, atomic< T > &rhs)
Definition: atomic.h:535

Copyright © 2005-2019 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.