11 #ifndef MULTI_WRAPPER_IMPLS_CONTEXT_HPP_ 12 #define MULTI_WRAPPER_IMPLS_CONTEXT_HPP_ 14 #include "../device.hpp" 15 #include "../stream.hpp" 16 #include "../event.hpp" 17 #include "../current_context.hpp" 18 #include "../current_device.hpp" 19 #include "../peer_to_peer.hpp" 20 #include "../memory.hpp" 21 #include "../context.hpp" 30 inline handle_t get_primary_for_same_device(
handle_t handle,
bool increase_refcount)
32 auto device_id = get_device_id(handle);
33 return device::primary_context::detail_::get_handle(device_id, increase_refcount);
38 auto context_device_id = context::detail_::get_device_id(handle);
39 if (context_device_id != device_id) {
42 static constexpr
const bool dont_increase_refcount {
false };
43 auto pc_handle = device::primary_context::detail_::get_handle(device_id, dont_increase_refcount);
44 return handle == pc_handle;
51 return context::detail_::is_primary_for_device(context.handle(), context.device_id());
56 return detail_::synchronize(context.device_id(), context.handle());
67 return cc_handle == device::primary_context::detail_::get_handle(current_context_device_id);
74 auto current_context =
get();
75 return detail_::is_primary(current_context.handle(), current_context.device_id());
80 inline scoped_override_t::scoped_override_t(
bool hold_primary_context_ref_unit,
device::id_t device_id,
handle_t context_handle)
81 : hold_primary_context_ref_unit_(hold_primary_context_ref_unit), device_id_or_0_(device_id)
83 if (hold_primary_context_ref_unit) { device::primary_context::detail_::increase_refcount(device_id); }
87 inline scoped_override_t::~scoped_override_t() DESTRUCTOR_EXCEPTION_SPEC
89 #if THROW_IN_DESTRUCTORS 92 pop_and_discard_nothrow();
94 if (hold_primary_context_ref_unit_) {
95 #if THROW_IN_DESTRUCTORS 96 device::primary_context::detail_::decrease_refcount(device_id_or_0_);
98 device::primary_context::detail_::decrease_refcount_nothrow(device_id_or_0_);
107 inline handle_t push_default_if_missing()
109 auto handle = detail_::get_handle();
110 if (handle != context::detail_::none) {
114 auto current_device_id = device::current::detail_::get_id();
115 auto pc_handle = device::primary_context::detail_::obtain_and_increase_refcount(current_device_id);
137 class scoped_existence_ensurer_t {
141 bool decrease_pc_refcount_on_destruct_;
143 explicit scoped_existence_ensurer_t(
bool avoid_pc_refcount_increase =
true)
145 auto status_and_handle = get_with_status();
146 if (status_and_handle.status == cuda::status::not_yet_initialized) {
147 context_handle = context::detail_::none;
151 context_handle = status_and_handle.handle;
153 if (context_handle == context::detail_::none) {
154 device_id_ = device::current::detail_::get_id();
155 context_handle = device::primary_context::detail_::obtain_and_increase_refcount(device_id_);
156 context::current::detail_::push(context_handle);
157 decrease_pc_refcount_on_destruct_ = avoid_pc_refcount_increase;
164 decrease_pc_refcount_on_destruct_ =
false;
168 ~scoped_existence_ensurer_t()
170 if (context_handle != context::detail_::none and decrease_pc_refcount_on_destruct_) {
171 #if THROW_IN_DESTRUCTORS 172 context::current::detail_::pop();
173 device::primary_context::detail_::decrease_refcount(device_id_);
175 context::current::detail_::pop_and_discard_nothrow();
176 device::primary_context::detail_::decrease_refcount_nothrow(device_id_);
185 : parent(primary_context.is_owning(), primary_context.device_id(), primary_context.handle()) {}
186 inline scoped_override_t::scoped_override_t(
const context_t& context) : parent(context.handle()) {}
187 inline scoped_override_t::scoped_override_t(
context_t&& context) : parent(context.handle()) {}
194 bool keep_larger_local_mem_after_resize)
196 auto handle = detail_::create_and_push(device.
id(), sync_scheduling_policy, keep_larger_local_mem_after_resize);
197 bool take_ownership =
true;
198 return context::wrap(device.
id(), handle, take_ownership);
204 bool keep_larger_local_mem_after_resize)
206 auto created = create_and_push(device, sync_scheduling_policy, keep_larger_local_mem_after_resize);
211 namespace peer_to_peer {
247 namespace peer_to_peer {
251 context::peer_to_peer::detail_::enable_access_to(peer_context.handle());
256 context::peer_to_peer::detail_::disable_access_to(peer_context.handle());
267 return memory::device::detail_::allocate(context_handle_, size_in_bytes);
273 return memory::managed::detail_::allocate(context_handle_, size_in_bytes, initial_visibility);
284 static constexpr
const bool non_owning {
false };
285 return cuda::context::wrap(device_id_, context_handle_, non_owning);
290 return context::current::detail_::is_primary(handle(), device_id());
307 inline device_t context_t::device()
const 313 bool will_synchronize_with_default_stream,
316 return stream::detail_::create(device_id_, handle_, will_synchronize_with_default_stream, priority);
320 bool uses_blocking_sync,
324 return cuda::event::detail_::create(
325 device_id_, handle_, do_not_hold_primary_context_refcount_unit,
326 uses_blocking_sync, records_timing, interprocess);
329 inline stream_t context_t::default_stream()
const 334 template <
typename Kernel,
typename ... KernelParameters>
335 void context_t::launch(
338 KernelParameters... parameters)
const 340 default_stream().enqueue.kernel_launch(kernel, launch_configuration, parameters...);
345 #endif // MULTI_WRAPPER_IMPLS_CONTEXT_HPP_ event_t create_event(bool uses_blocking_sync=event::sync_by_busy_waiting, bool records_timing=event::do_record_timings, bool interprocess=event::not_interprocess) const
Create a new event within this context; see cuda::event::create() for details regarding the parameter...
Definition: context.hpp:319
Proxy class for a CUDA stream.
Definition: stream.hpp:258
Wrapper class for a CUDA context.
Definition: context.hpp:249
Definitions and functionality wrapping CUDA APIs.
Definition: array.hpp:22
int priority_t
CUDA streams have a scheduling priority, with lower values meaning higher priority.
Definition: types.hpp:243
detail_::region_helper< memory::region_t > region_t
A child class of the generic region_t with some managed-memory-specific functionality.
Definition: memory.hpp:1974
memory::region_t allocate_managed(size_t size_in_bytes, cuda::memory::managed::initial_visibility_t initial_visibility=cuda::memory::managed::initial_visibility_t::to_supporters_of_concurrent_managed_access) const
Allocates memory on the device whose pointer is also visible on the host, and possibly on other devic...
Definition: context.hpp:270
The full set of possible configuration parameters for launching a kernel on a GPU.
Definition: launch_configuration.hpp:69
CUcontext handle_t
Raw CUDA driver handle for a context; see {context_t}.
Definition: types.hpp:880
Wrapper class for a CUDA event.
Definition: event.hpp:147
A class for holding the primary context of a CUDA device.
Definition: primary_context.hpp:122
void enable_bidirectional_access(context_t first, context_t second)
Enable access both by the first to the second context and the other way around.
Definition: context.hpp:228
void disable_access_to(const context_t &peer) const
Prevent kernels and memory operations within this context from involving memory allocated in a peer c...
Definition: context.hpp:302
CUdevice id_t
Numeric ID of a CUDA device used by the CUDA Runtime API.
Definition: types.hpp:852
device::id_t id() const noexcept
Return the proxied device's ID.
Definition: device.hpp:594
bool is_primary() const
Definition: context.hpp:288
device_t associated_device() const
Device on which the memory managed with this object is allocated.
Definition: context.hpp:277
host_thread_sync_scheduling_policy_t
Scheduling policies the CUDA driver may use when the host-side thread it is running in needs to wait ...
Definition: types.hpp:886
void initialize_driver()
Initializes the CUDA driver (the description "Obtains the CUDA Runtime version" appears to belong to a neighboring function — verify against miscellany.hpp).
Definition: miscellany.hpp:26
void synchronize(const context_t &context)
Waits for all previously-scheduled tasks on all streams (= queues) in a CUDA context to conclude...
Definition: context.hpp:980
stream_t create_stream(bool will_synchronize_with_default_stream, stream::priority_t priority=cuda::stream::default_priority) const
Create a new stream within this context; see cuda::stream::create() for details regarding the paramete...
Definition: context.hpp:312
device_t get(id_t id)
Returns a proxy for the CUDA device with a given id.
Definition: device.hpp:832
memory::region_t allocate(size_t size_in_bytes) const
Allocate a region of memory on the device.
Definition: context.hpp:265
stream_t wrap(device::id_t device_id, context::handle_t context_handle, handle_t stream_handle, bool take_ownership=false, bool hold_pc_refcount_unit=false) noexcept
Wrap an existing stream in a stream_t instance.
Definition: stream.hpp:1020
void enable_access_to(const context_t &peer_context)
Allows subsequently-executed memory operations and kernels to access the memory associated with the s...
Definition: context.hpp:249
CUarray handle_t
Raw CUDA driver handle for arrays (of any dimension)
Definition: array.hpp:34
void disable_access_to(const context_t &peer_context)
Prevents subsequently-executed memory operations and kernels from accessing the memory associated wit...
Definition: context.hpp:254
void enable_access_to(const context_t &peer) const
Allow kernels and memory operations within this context to involve memory allocated in a peer context...
Definition: context.hpp:297
void disable_access(context_t accessor, context_t peer)
Disable access by one CUDA device to the global memory of another.
Definition: context.hpp:223
void enable_access(context_t accessor, context_t peer)
Enable access by one CUDA device to the global memory of another.
Definition: context.hpp:218
Can be shared between processes. Must not be able to record timings.
Definition: constants.hpp:96
device_t wrap(id_t id) NOEXCEPT_IF_NDEBUG
Returns a wrapper for the CUDA device with a given id.
Definition: device.hpp:820
bool can_access(context_t accessor, context_t peer)
Check if a CUDA context can access the global memory of another CUDA context.
Definition: context.hpp:213
bool is_primary(const context_t &context)
Definition: context.hpp:49
const stream::handle_t default_stream_handle
The CUDA runtime provides a default stream on which work is scheduled when no stream is specified; fo...
Definition: constants.hpp:42
context_t associated_context() const
Context in which the memory managed with this object is recognized / usable.
Definition: context.hpp:282
void disable_bidirectional_access(context_t first, context_t second)
Disable access both by the first to the second context and the other way around.
Definition: context.hpp:235
Wrapper class for a CUDA device.
Definition: device.hpp:135
initial_visibility_t
The choices of which categories CUDA devices must a managed memory region be visible to...
Definition: types.hpp:755