context::handle_t | handle () const noexcept | The CUDA context ID this object is wrapping.
device::id_t | device_id () const noexcept | The device with which this context is associated.
device_t | device () const |
bool | is_owning () const noexcept | Is this wrapper responsible for destroying the wrapped CUDA context when it is itself destroyed?
size_t | total_memory () const | The total amount of global device memory available to this context, including memory already allocated.
size_t | free_memory () const | The amount of global device memory available to this context which has not yet been allocated.
multiprocessor_cache_preference_t | cache_preference () const | Determines the balance between L1 space and shared memory space set for kernels executing within this context.
size_t | stack_size () const |
context::limit_value_t | printf_buffer_size () const |
context::limit_value_t | memory_allocation_heap_size () const |
context::limit_value_t | maximum_depth_of_child_grid_sync_calls () const |
global_memory_type | memory () const |
context::limit_value_t | maximum_outstanding_kernel_launches () const |
context::shared_memory_bank_size_t | shared_memory_bank_size () const | Returns the shared memory bank size, as described in this Parallel-for-all blog entry.
bool | is_current () const | Determines whether this context is the system's current CUDA context.
bool | is_primary () const | Determines whether this context is the primary context for its associated device.
context::stream_priority_range_t | stream_priority_range () const |
context::limit_value_t | get_limit (context::limit_t limit_id) const |
version_t | api_version () const |
context::host_thread_sync_scheduling_policy_t | sync_scheduling_policy () const | Gets the synchronization policy to be used for threads synchronizing with this CUDA context.
bool | keeping_larger_local_mem_after_resize () const |
stream_t | create_stream (bool will_synchronize_with_default_stream, stream::priority_t priority=cuda::stream::default_priority) | See cuda::stream::create().
event_t | create_event (bool uses_blocking_sync=event::sync_by_busy_waiting, bool records_timing=event::do_record_timings, bool interprocess=event::not_interprocess) | See cuda::event::create().
template<typename ContiguousContainer , cuda::detail_::enable_if_t< detail_::is_kinda_like_contiguous_container< ContiguousContainer >::value, bool > = true> |
module_t | create_module (ContiguousContainer module_data, const link::options_t &link_options) const |
template<typename ContiguousContainer , cuda::detail_::enable_if_t< detail_::is_kinda_like_contiguous_container< ContiguousContainer >::value, bool > = true> |
module_t | create_module (ContiguousContainer module_data) const |
void | enable_access_to (const context_t &peer) const |
void | disable_access_to (const context_t &peer) const |
void | reset_persisting_l2_cache () const |
void | set_shared_memory_bank_size (context::shared_memory_bank_size_t bank_size) const | Sets the shared memory bank size, as described in this Parallel-for-all blog entry.
void | set_cache_preference (multiprocessor_cache_preference_t preference) const | Controls the balance between L1 space and shared memory space for kernels executing within this context.
void | set_limit (context::limit_t limit_id, context::limit_value_t new_value) const |
void | stack_size (context::limit_value_t new_value) const |
void | printf_buffer_size (context::limit_value_t new_value) const |
void | memory_allocation_heap_size (context::limit_value_t new_value) const |
void | set_maximum_depth_of_child_grid_sync_calls (context::limit_value_t new_value) const |
void | set_maximum_outstanding_kernel_launches (context::limit_value_t new_value) const |
void | synchronize () const | Have the calling thread wait - either busy-waiting or blocking - and return only after all pending actions within this context have concluded.
| context_t (const context_t &other) |
| context_t (context_t &&other) noexcept |
context_t & | operator= (const context_t &)=delete |
context_t & | operator= (context_t &&other) noexcept |
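The following is a minimal usage sketch of several of the members listed above; it is not taken from the library's own documentation. The header name <cuda/api.hpp> and the way the context_t is obtained are assumptions, and only member functions appearing in the listing are used.

    // Sketch only: assumes the library's main header is <cuda/api.hpp> and that a
    // context_t has already been obtained elsewhere.
    #include <cuda/api.hpp>
    #include <iostream>

    void inspect_and_use(const cuda::context_t& context)
    {
        // Property getters from the listing above
        std::cout
            << "Context on device " << context.device_id() << ": "
            << context.free_memory() << " of " << context.total_memory()
            << " bytes of global memory are not yet allocated.\n";

        // Resource-limit setters; note that they are const member functions
        context.stack_size(4 * 1024);         // per-thread stack size, in bytes
        context.printf_buffer_size(1 << 20);  // in-kernel printf() buffer size, in bytes

        // Create a stream in this context which does not synchronize with the
        // default stream, using the default priority
        auto stream = context.create_stream(false);

        // ... enqueue work on the stream, record events from create_event(), etc. ...

        // Block the calling thread until all pending work in this context has concluded
        context.synchronize();
    }

Note that the mutating members are called on a const reference; this is in line with the "reference type" remark in the class description below.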
Wrapper class for a CUDA context.
Use this class - built around a context id - to perform all context-related operations the CUDA Driver (or, in fact, Runtime) API is capable of.
Note
- By default, this class has RAII semantics: it creates a context on construction and destroys it on destruction, rather than being merely an ephemeral wrapper one could apply and discard. The second kind of semantics is also supported, however, through the context_t::holds_refcount_unit_ field.
- A context is specific to a device; see, therefore, also device_t.
- This class is a "reference type", not a "value type". Therefore, making changes to properties of the context is a const-respecting operation on this class.
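As an illustration of the ownership and reference-semantics notes above, here is a hedged sketch; the header name and the way the original wrapper is obtained are assumptions, and only members from the listing are used.

    #include <cuda/api.hpp>
    #include <iostream>

    void describe_ownership(const cuda::context_t& context)
    {
        // Copy construction is allowed (copy-assignment is deleted); given the class'
        // reference semantics, the copy should wrap the same underlying CUDA context -
        // we check via the wrapped handles rather than assume it.
        cuda::context_t another_wrapper { context };
        bool same_context = (another_wrapper.handle() == context.handle());

        std::cout
            << "The two wrappers refer to "
            << (same_context ? "the same" : "different") << " CUDA context(s).\n"
            << "The original wrapper " << (context.is_owning() ? "is" : "is not")
            << " responsible for destroying the context, and the context "
            << (context.is_primary() ? "is" : "is not")
            << " the primary context of device " << context.device_id() << ".\n";
    }

Whether the copy is itself an owning wrapper is governed by the library's ownership rules rather than by this sketch; its is_owning() can be queried just like the original's.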