atomic< T, Allocator >
Detailed Description
template<typename T, typename Allocator = safe_device_allocator<T>>
class stdgpu::atomic< T, Allocator >
A class to model an atomic object of type T on the GPU.
Template Parameters
- T: The type of the atomically managed object
- Allocator: The allocator type
Supported types:
- unsigned int
- int
- unsigned long long int
- float (experimental)
Differences to std::atomic:
- Atomics must be modeled as containers since threads have to operate on the exact same object (which also requires copy and move constructors)
- Manual allocation and destruction of the container is required (see the sketch following this list)
- All operations (including load() and store()) may follow stricter ordering than requested
- Additional min and max functions for all supported integer and floating point types
- Additional increment/decrement + modulo functions for unsigned int (a wrap-around counter sketch appears at the end of this page)
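A minimal lifecycle sketch, assuming the CUDA backend. The header names, the kernel count_above, and the launch configuration are illustrative assumptions rather than part of the documented interface; the sketch only demonstrates the manual createDeviceObject()/destroyDeviceObject() calls and a device-side fetch_add().

```cpp
// Illustrative sketch (CUDA backend assumed); kernel and launch configuration are hypothetical.
#include <cstdio>
#include <stdgpu/atomic.cuh> // assumed header providing stdgpu::atomic
#include <stdgpu/cstddef.h>  // assumed header providing stdgpu::index_t

// Count how many input values exceed a threshold.
// The atomic is passed by value: every thread operates on the exact same device-side state.
__global__ void
count_above(const float* d_values, stdgpu::index_t n, float threshold, stdgpu::atomic<unsigned int> counter)
{
    stdgpu::index_t i = static_cast<stdgpu::index_t>(blockIdx.x) * blockDim.x + threadIdx.x;
    if (i >= n)
        return;

    if (d_values[i] > threshold)
    {
        counter.fetch_add(1U); // device-only atomic read-modify-write
    }
}

void
count_example(const float* d_values, stdgpu::index_t n)
{
    // Manual allocation: the atomic is modeled as a container and must be created explicitly.
    stdgpu::atomic<unsigned int> counter = stdgpu::atomic<unsigned int>::createDeviceObject();
    counter.store(0); // store() and load() are callable from the host

    stdgpu::index_t threads = 128;
    stdgpu::index_t blocks = (n + threads - 1) / threads;
    count_above<<<static_cast<unsigned int>(blocks), static_cast<unsigned int>(threads)>>>(d_values, n, 0.5f, counter);
    cudaDeviceSynchronize();

    printf("%u values above threshold\n", counter.load());

    // Manual destruction is required as well.
    stdgpu::atomic<unsigned int>::destroyDeviceObject(counter);
}
```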
Public Types
- using allocator_type = Allocator
- using difference_type = value_type
- using value_type = T
Public Member Functions
- atomic() noexcept
- STDGPU_DEVICE_ONLY bool compare_exchange_strong(T &expected, const T desired, const memory_order order = memory_order_seq_cst) noexcept
- STDGPU_DEVICE_ONLY bool compare_exchange_weak(T &expected, const T desired, const memory_order order = memory_order_seq_cst) noexcept
- STDGPU_DEVICE_ONLY T exchange(const T desired, const memory_order order = memory_order_seq_cst) noexcept
- template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v<T> || std::is_floating_point_v<T>)>
  STDGPU_DEVICE_ONLY T fetch_add(const T arg, const memory_order order = memory_order_seq_cst) noexcept
- template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v<T>)>
  STDGPU_DEVICE_ONLY T fetch_and(const T arg, const memory_order order = memory_order_seq_cst) noexcept
- template<STDGPU_DETAIL_OVERLOAD_IF(std::is_same_v<T, unsigned int>)>
  STDGPU_DEVICE_ONLY T fetch_dec_mod(const T arg, const memory_order order = memory_order_seq_cst) noexcept
- template<STDGPU_DETAIL_OVERLOAD_IF(std::is_same_v<T, unsigned int>)>
  STDGPU_DEVICE_ONLY T fetch_inc_mod(const T arg, const memory_order order = memory_order_seq_cst) noexcept
- template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v<T> || std::is_floating_point_v<T>)>
  STDGPU_DEVICE_ONLY T fetch_max(const T arg, const memory_order order = memory_order_seq_cst) noexcept
- template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v<T> || std::is_floating_point_v<T>)>
  STDGPU_DEVICE_ONLY T fetch_min(const T arg, const memory_order order = memory_order_seq_cst) noexcept
- template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v<T>)>
  STDGPU_DEVICE_ONLY T fetch_or(const T arg, const memory_order order = memory_order_seq_cst) noexcept
- template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v<T> || std::is_floating_point_v<T>)>
  STDGPU_DEVICE_ONLY T fetch_sub(const T arg, const memory_order order = memory_order_seq_cst) noexcept
- template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v<T>)>
  STDGPU_DEVICE_ONLY T fetch_xor(const T arg, const memory_order order = memory_order_seq_cst) noexcept
- STDGPU_HOST_DEVICE allocator_type get_allocator() const noexcept
- STDGPU_HOST_DEVICE bool is_lock_free() const noexcept
- STDGPU_HOST_DEVICE T load(const memory_order order = memory_order_seq_cst) const
- template<typename ExecutionPolicy, STDGPU_DETAIL_OVERLOAD_IF(is_execution_policy_v<remove_cvref_t<ExecutionPolicy>>)>
  T load(ExecutionPolicy &&policy, const memory_order order = memory_order_seq_cst) const
- STDGPU_HOST_DEVICE operator T() const
- template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v<T>)>
  STDGPU_DEVICE_ONLY T operator&=(const T arg) noexcept
- template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v<T>)>
  STDGPU_DEVICE_ONLY T operator++() noexcept
- template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v<T>)>
  STDGPU_DEVICE_ONLY T operator++(int) noexcept
- template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v<T> || std::is_floating_point_v<T>)>
  STDGPU_DEVICE_ONLY T operator+=(const T arg) noexcept
- template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v<T>)>
  STDGPU_DEVICE_ONLY T operator--() noexcept
- template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v<T>)>
  STDGPU_DEVICE_ONLY T operator--(int) noexcept
- template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v<T> || std::is_floating_point_v<T>)>
  STDGPU_DEVICE_ONLY T operator-=(const T arg) noexcept
- STDGPU_HOST_DEVICE T operator=(const T desired)
- template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v<T>)>
  STDGPU_DEVICE_ONLY T operator^=(const T arg) noexcept
- template<STDGPU_DETAIL_OVERLOAD_IF(std::is_integral_v<T>)>
  STDGPU_DEVICE_ONLY T operator|=(const T arg) noexcept
- STDGPU_HOST_DEVICE void store(const T desired, const memory_order order = memory_order_seq_cst)
- template<typename ExecutionPolicy, STDGPU_DETAIL_OVERLOAD_IF(is_execution_policy_v<remove_cvref_t<ExecutionPolicy>>)>
  void store(ExecutionPolicy &&policy, const T desired, const memory_order order = memory_order_seq_cst)
Static Public Member Functions
- static atomic createDeviceObject(const Allocator &allocator = Allocator())
- template<typename ExecutionPolicy, STDGPU_DETAIL_OVERLOAD_IF(is_execution_policy_v<remove_cvref_t<ExecutionPolicy>>)>
  static atomic createDeviceObject(ExecutionPolicy &&policy, const Allocator &allocator = Allocator())
- static void destroyDeviceObject(atomic &device_object)
- template<typename ExecutionPolicy, STDGPU_DETAIL_OVERLOAD_IF(is_execution_policy_v<remove_cvref_t<ExecutionPolicy>>)>
  static void destroyDeviceObject(ExecutionPolicy &&policy, atomic &device_object)
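As a sketch of the unsigned int extensions mentioned above, the following hypothetical kernel uses fetch_inc_mod() as a wrap-around write index for a fixed-capacity ring buffer; the buffer layout, kernel, and header names are assumptions for illustration only.

```cpp
// Illustrative sketch (CUDA backend assumed); kernel and buffer layout are hypothetical.
#include <stdgpu/atomic.cuh> // assumed header providing stdgpu::atomic
#include <stdgpu/cstddef.h>  // assumed header providing stdgpu::index_t

// Each thread claims one slot of a fixed-capacity ring buffer.
__global__ void
record_samples(const float* d_samples, stdgpu::index_t n,
               float* d_ring, unsigned int capacity,
               stdgpu::atomic<unsigned int> write_index)
{
    stdgpu::index_t i = static_cast<stdgpu::index_t>(blockIdx.x) * blockDim.x + threadIdx.x;
    if (i >= n)
        return;

    // fetch_inc_mod(capacity) returns the previous index and increments the stored value,
    // wrapping it back to 0 once it reaches capacity, so concurrent writers obtain
    // successive slots in [0, capacity) that wrap around.
    unsigned int slot = write_index.fetch_inc_mod(capacity);
    d_ring[slot] = d_samples[i];
}
```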