atomic< T, Allocator >

Detailed Description

template<typename T, typename Allocator = safe_device_allocator<T>>
class stdgpu::atomic< T, Allocator >

A class to model an atomic object of type T on the GPU.

Template Parameters
T: The type of the atomically managed object
Allocator: The allocator type

Supported types:

  • unsigned int
  • int
  • unsigned long long int
  • float (experimental)

Differences from std::atomic:

  • Atomics must be modeled as containers since threads have to operate on the exact same object (which also requires copy and move constructors)
  • The container must be allocated and destroyed manually via createDeviceObject() and destroyDeviceObject() (see the example after this list)
  • All operations (including load() and store()) may follow stricter ordering than requested
  • Additional min and max functions for all supported integer and floating point types
  • Additional increment/decrement + modulo functions for unsigned int
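
A minimal lifecycle sketch, assuming the CUDA backend (the header name <stdgpu/atomic.cuh> and the kernel launch syntax below are backend-specific, and the explicit store(0) is only there to avoid relying on any particular initial value): the atomic is created on the host, passed by value into a kernel where every thread increments it, read back on the host, and destroyed manually.

    #include <stdgpu/atomic.cuh> // assumption: CUDA backend header

    // Every thread increments the shared counter. The atomic is passed by value;
    // all copies refer to the same device-side object (container semantics).
    __global__ void
    count_threads(stdgpu::atomic<int> counter)
    {
        counter.fetch_add(1);
    }

    int
    main()
    {
        // Manual allocation of the container
        stdgpu::atomic<int> counter = stdgpu::atomic<int>::createDeviceObject();
        counter.store(0); // store() and load() are host- and device-callable

        count_threads<<<64, 128>>>(counter);
        cudaDeviceSynchronize();

        int result = counter.load(); // expected: 64 * 128

        // Manual destruction of the container
        stdgpu::atomic<int>::destroyDeviceObject(counter);

        return (result == 64 * 128) ? 0 : 1;
    }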

Public Types

using allocator_type = Allocator
 
using difference_type = value_type
 
using value_type = T
 

Public Member Functions

 atomic () noexcept
 
STDGPU_DEVICE_ONLY bool compare_exchange_strong (T &expected, const T desired, const memory_order order=memory_order_seq_cst) noexcept
 
STDGPU_DEVICE_ONLY bool compare_exchange_weak (T &expected, const T desired, const memory_order order=memory_order_seq_cst) noexcept
 
STDGPU_DEVICE_ONLY T exchange (const T desired, const memory_order order=memory_order_seq_cst) noexcept
 
template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v< T >||std::is_floating_point_v< T >) >
STDGPU_DEVICE_ONLY T fetch_add (const T arg, const memory_order order=memory_order_seq_cst) noexcept
 
template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v< T >) >
STDGPU_DEVICE_ONLY T fetch_and (const T arg, const memory_order order=memory_order_seq_cst) noexcept
 
template<STDGPU_DETAIL_OVERLOAD_IF(std::is_same_v< T, unsigned int >) >
STDGPU_DEVICE_ONLY T fetch_dec_mod (const T arg, const memory_order order=memory_order_seq_cst) noexcept
 
template<STDGPU_DETAIL_OVERLOAD_IF(std::is_same_v< T, unsigned int >) >
STDGPU_DEVICE_ONLY T fetch_inc_mod (const T arg, const memory_order order=memory_order_seq_cst) noexcept
 
template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v< T >||std::is_floating_point_v< T >) >
STDGPU_DEVICE_ONLY T fetch_max (const T arg, const memory_order order=memory_order_seq_cst) noexcept
 
template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v< T >||std::is_floating_point_v< T >) >
STDGPU_DEVICE_ONLY T fetch_min (const T arg, const memory_order order=memory_order_seq_cst) noexcept
 
template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v< T >) >
STDGPU_DEVICE_ONLY T fetch_or (const T arg, const memory_order order=memory_order_seq_cst) noexcept
 
template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v< T >||std::is_floating_point_v< T >) >
STDGPU_DEVICE_ONLY T fetch_sub (const T arg, const memory_order order=memory_order_seq_cst) noexcept
 
template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v< T >) >
STDGPU_DEVICE_ONLY T fetch_xor (const T arg, const memory_order order=memory_order_seq_cst) noexcept
 
STDGPU_HOST_DEVICE allocator_type get_allocator () const noexcept
 
STDGPU_HOST_DEVICE bool is_lock_free () const noexcept
 
STDGPU_HOST_DEVICE T load (const memory_order order=memory_order_seq_cst) const
 
STDGPU_HOST_DEVICE operator T () const
 
template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v< T >) >
STDGPU_DEVICE_ONLY T operator&= (const T arg) noexcept
 
template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v< T >) >
STDGPU_DEVICE_ONLY T operator++ () noexcept
 
template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v< T >) >
STDGPU_DEVICE_ONLY T operator++ (int) noexcept
 
template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v< T >||std::is_floating_point_v< T >) >
STDGPU_DEVICE_ONLY T operator+= (const T arg) noexcept
 
template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v< T >) >
STDGPU_DEVICE_ONLY T operator-- () noexcept
 
template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v< T >) >
STDGPU_DEVICE_ONLY T operator-- (int) noexcept
 
template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v< T >||std::is_floating_point_v< T >) >
STDGPU_DEVICE_ONLY T operator-= (const T arg) noexcept
 
STDGPU_HOST_DEVICE T operator= (const T desired)
 
template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v< T >) >
STDGPU_DEVICE_ONLY T operator^= (const T arg) noexcept
 
template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v< T >) >
STDGPU_DEVICE_ONLY T operator|= (const T arg) noexcept
 
STDGPU_HOST_DEVICE void store (const T desired, const memory_order order=memory_order_seq_cst)
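
Read-modify-write operations that are not provided directly can be composed from compare_exchange_weak(). The device-side sketch below (the helper name atomic_scale is illustrative) assumes the std::atomic convention that a failed exchange writes the currently observed value back into expected, so the loop simply retries with fresh data.

    // Atomically multiplies the stored value by 'factor' using a CAS loop.
    // compare_exchange_weak() may fail spuriously, hence the retry loop.
    __device__ void
    atomic_scale(stdgpu::atomic<int>& a, const int factor)
    {
        int expected = a.load();
        while (!a.compare_exchange_weak(expected, expected * factor))
        {
            // On failure, 'expected' holds the current value; retry with it.
        }
    }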
 

Static Public Member Functions

static atomic createDeviceObject (const Allocator &allocator=Allocator())
 
template<typename ExecutionPolicy , STDGPU_DETAIL_OVERLOAD_IF(is_execution_policy_v< remove_cvref_t< ExecutionPolicy > >) >
static atomic createDeviceObject (ExecutionPolicy &&policy, const Allocator &allocator=Allocator())
 
static void destroyDeviceObject (atomic &device_object)
 
template<typename ExecutionPolicy , STDGPU_DETAIL_OVERLOAD_IF(is_execution_policy_v< remove_cvref_t< ExecutionPolicy > >) >
static void destroyDeviceObject (ExecutionPolicy &&policy, atomic &device_object)
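
The execution-policy overloads allow object creation and destruction to be scheduled with a caller-chosen policy. A hedged sketch of generic helpers follows (the helper names are illustrative; the concrete policy objects accepted by is_execution_policy_v depend on the stdgpu version and backend, so none are named here):

    #include <utility> // std::forward

    // Creates the atomic using whatever execution policy the caller supplies.
    template <typename ExecutionPolicy>
    stdgpu::atomic<int>
    make_counter(ExecutionPolicy&& policy)
    {
        return stdgpu::atomic<int>::createDeviceObject(std::forward<ExecutionPolicy>(policy));
    }

    // Destroys the atomic using the same (or another) execution policy.
    template <typename ExecutionPolicy>
    void
    free_counter(ExecutionPolicy&& policy, stdgpu::atomic<int>& counter)
    {
        stdgpu::atomic<int>::destroyDeviceObject(std::forward<ExecutionPolicy>(policy), counter);
    }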