cuda-api-wrappers
Thin C++-flavored wrappers for the CUDA Runtime API
Public Member Functions | Friends | List of all members
cuda::device::primary_context_t Class Reference

A class for holding the primary context of a CUDA device (device_t). More...

#include <primary_context.hpp>

Inheritance diagram for cuda::device::primary_context_t:
Inheritance graph
[legend]
Collaboration diagram for cuda::device::primary_context_t:
Collaboration graph
[legend]

Public Member Functions

stream_t default_stream () const noexcept
 
 primary_context_t (const primary_context_t &other)
 
 primary_context_t (primary_context_t &&other) noexcept=default
 
primary_context_t & operator= (const primary_context_t &other)=delete
 
primary_context_t & operator= (primary_context_t &&other)=default
 
void set_synch_scheduling_policy (context::host_thread_synch_scheduling_policy_t new_policy) const
 
bool keeping_larger_local_mem_after_resize () const
 
void keep_larger_local_mem_after_resize (bool keep=true) const
 
void dont_keep_larger_local_mem_after_resize () const
 
- Public Member Functions inherited from cuda::context_t
context::handle_t handle () const noexcept
 The CUDA context ID this object is wrapping.
 
device::id_t device_id () const noexcept
 The device with which this context is associated.
 
device_t device () const
 
bool is_owning () const noexcept
 Is this wrapper responsible for having the wrapped CUDA context destroyed on destruction?
 
size_t total_memory () const
 The amount of total global device memory available to this context, including memory already allocated.
 
size_t free_memory () const
 The amount of global device memory available to this context which has not yet been allocated. More...
 
multiprocessor_cache_preference_t cache_preference () const
 Determines the balance between L1 space and shared memory space set for kernels executing within this context.
 
size_t stack_size () const
 
context::limit_value_t printf_buffer_size () const
 
context::limit_value_t memory_allocation_heap_size () const
 
context::limit_value_t maximum_depth_of_child_grid_synch_calls () const
 
global_memory_type memory () const
 
context::limit_value_t maximum_outstanding_kernel_launches () const
 
context::shared_memory_bank_size_t shared_memory_bank_size () const
 Returns the shared memory bank size, as described in this Parallel-for-all blog entry More...
 
bool is_current () const
 Determine if this context is the system's current CUDA context.
 
bool is_primary () const
 Determine if this context is the primary context for its associated device.
 
context::stream_priority_range_t stream_priority_range () const
 
context::limit_value_t get_limit (context::limit_t limit_id) const
 
version_t api_version () const
 
context::host_thread_synch_scheduling_policy_t synch_scheduling_policy () const
 Gets the synchronization policy to be used for threads synchronizing with this CUDA context. More...
 
bool keeping_larger_local_mem_after_resize () const
 
stream_t create_stream (bool will_synchronize_with_default_stream, stream::priority_t priority=cuda::stream::default_priority)
 See cuda::stream::create()
 
event_t create_event (bool uses_blocking_sync=event::sync_by_busy_waiting, bool records_timing=event::do_record_timings, bool interprocess=event::not_interprocess)
 See cuda::event::create()
 
template<typename ContiguousContainer , cuda::detail_::enable_if_t< detail_::is_kinda_like_contiguous_container< ContiguousContainer >::value, bool > = true>
module_t create_module (ContiguousContainer module_data, link::options_t link_options) const
 
template<typename ContiguousContainer , cuda::detail_::enable_if_t< detail_::is_kinda_like_contiguous_container< ContiguousContainer >::value, bool > = true>
module_t create_module (ContiguousContainer module_data) const
 
void enable_access_to (const context_t &peer) const
 
void disable_access_to (const context_t &peer) const
 
void reset_persisting_l2_cache () const
 
void set_shared_memory_bank_size (context::shared_memory_bank_size_t bank_size) const
 Sets the shared memory bank size, described in this Parallel-for-all blog entry More...
 
void set_cache_preference (multiprocessor_cache_preference_t preference) const
 Controls the balance between L1 space and shared memory space for kernels executing within this context. More...
 
void set_limit (context::limit_t limit_id, context::limit_value_t new_value) const
 
void stack_size (context::limit_value_t new_value) const
 
void printf_buffer_size (context::limit_value_t new_value) const
 
void memory_allocation_heap_size (context::limit_value_t new_value) const
 
void set_maximum_depth_of_child_grid_synch_calls (context::limit_value_t new_value) const
 
void set_maximum_outstanding_kernel_launches (context::limit_value_t new_value) const
 
void synchronize () const
 Have the calling thread wait - either busy-waiting or blocking - and return only after all pending actions within this context have concluded.
 
 context_t (const context_t &other)
 
 context_t (context_t &&other) noexcept
 
context_t & operator= (const context_t &)=delete
 
context_t & operator= (context_t &&other) noexcept
 

Friends

class device_t
 

Additional Inherited Members

- Public Types inherited from cuda::context_t
using scoped_setter_type = context::current::detail_::scoped_override_t
 
using flags_type = context::flags_t
 

Detailed Description

A class for holding the primary context of a CUDA device (device_t).

Note
Since the runtime API tends to make such contexts active and not let them go inactive very easily, this class assumes the primary context is active already on construction. Limiting constructor accessibility will help ensure this invariant is indeed maintained.

The documentation for this class was generated from the following files: