cuda-api-wrappers
Thin C++-flavored wrappers for the CUDA Runtime API
context.hpp
/**
 * @file
 *
 * @brief Implementations of context-related functions and methods which require
 * the definitions of multiple CUDA entity proxy classes.
 */
#pragma once
#ifndef MULTI_WRAPPER_IMPLS_CONTEXT_HPP_
#define MULTI_WRAPPER_IMPLS_CONTEXT_HPP_

#include "../device.hpp"
#include "../stream.hpp"
#include "../event.hpp"
#include "../kernel.hpp"
#include "../virtual_memory.hpp"
#include "../current_context.hpp"
#include "../current_device.hpp"
#include "../peer_to_peer.hpp"
#include "../memory.hpp"
#include "../context.hpp"


namespace cuda {

namespace context {

namespace detail_ {

inline handle_t get_primary_for_same_device(handle_t handle, bool increase_refcount)
{
    auto device_id = get_device_id(handle);
    return device::primary_context::detail_::get_handle(device_id, increase_refcount);
}

inline bool is_primary_for_device(handle_t handle, device::id_t device_id)
{
    auto context_device_id = context::detail_::get_device_id(handle);
    if (context_device_id != device_id) {
        return false;
    }
    static constexpr const bool dont_increase_refcount { false };
    auto pc_handle = device::primary_context::detail_::get_handle(device_id, dont_increase_refcount);
    return handle == pc_handle;
}

} // namespace detail_

inline bool is_primary(const context_t& context)
{
    return context::detail_::is_primary_for_device(context.handle(), context.device_id());
}

inline void synchronize(const context_t& context)
{
    return detail_::synchronize(context.device_id(), context.handle());
}
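
// A brief usage sketch (illustration only, not part of the library): given some context
// proxy `ctx` - e.g. one created with cuda::context::create() further below - one can
// check whether it is its device's primary context, and wait for all of its streams to
// conclude their scheduled work:
//
//     if (cuda::context::is_primary(ctx)) { /* ... it is the device's primary context ... */ }
//     cuda::context::synchronize(ctx); // blocks the calling host thread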

namespace current {

namespace detail_ {

inline bool is_primary(handle_t cc_handle, device::id_t current_context_device_id)
{
    // Note: we assume current_context_device_id really is the device ID for cc_handle;
    // if that could not be assumed, is_primary_for_device() would have to be used instead
    return cc_handle == device::primary_context::detail_::get_handle(current_context_device_id);
}

} // namespace detail_

inline bool is_primary()
{
    auto current_context = get();
    return detail_::is_primary(current_context.handle(), current_context.device_id());
}

namespace detail_ {

inline scoped_override_t::scoped_override_t(bool hold_primary_context_ref_unit, device::id_t device_id, handle_t context_handle)
    : hold_primary_context_ref_unit_(hold_primary_context_ref_unit), device_id_or_0_(device_id)
{
    if (hold_primary_context_ref_unit) { device::primary_context::detail_::increase_refcount(device_id); }
    push(context_handle);
}

inline scoped_override_t::~scoped_override_t() noexcept(false)
{
    if (hold_primary_context_ref_unit_) { device::primary_context::detail_::decrease_refcount(device_id_or_0_); }
    pop();
}


// If no context is current, push the current device's primary context onto the context
// stack (obtaining it, and increasing its reference count, as necessary); returns the
// handle of the context which is current after this call.
inline handle_t push_default_if_missing()
{
    auto handle = detail_::get_handle();
    if (handle != context::detail_::none) {
        return handle;
    }
    // TODO: consider using cudaSetDevice here instead
    auto current_device_id = device::current::detail_::get_id();
    auto pc_handle = device::primary_context::detail_::obtain_and_increase_refcount(current_device_id);
    push(pc_handle);
    return pc_handle;
}

// An RAII-style mechanism for ensuring that _some_ context exists and is current for the
// lifetime of this object: if no context is current on construction, the current device's
// primary context is made current (pushed onto the stack) - and, by default, this is
// undone on destruction.
class scoped_existence_ensurer_t {
public:
    context::handle_t context_handle;
    device::id_t device_id_;
    bool decrease_pc_refcount_on_destruct_;

    explicit scoped_existence_ensurer_t(bool avoid_pc_refcount_increase = true)
    {
        auto status_and_handle = get_with_status();
        if (status_and_handle.status == cuda::status::not_yet_initialized) {
            context_handle = context::detail_::none;
            initialize_driver(); // (the handle, too, has just been "initialized" - to none)
        }
        else {
            context_handle = status_and_handle.handle;
        }
        if (context_handle == context::detail_::none) {
            device_id_ = device::current::detail_::get_id();
            context_handle = device::primary_context::detail_::obtain_and_increase_refcount(device_id_);
            context::current::detail_::push(context_handle);
            decrease_pc_refcount_on_destruct_ = avoid_pc_refcount_increase;
        }
        else {
            // Some compilers fail to notice that device_id_ is only ever used after
            // having been initialized, and warn about possibly-uninitialized use -
            // so we give it a dummy value here.
            device_id_ = 0;
            decrease_pc_refcount_on_destruct_ = false;
        }
    }

    ~scoped_existence_ensurer_t()
    {
        if (context_handle != context::detail_::none and decrease_pc_refcount_on_destruct_) {
            context::current::detail_::pop();
            device::primary_context::detail_::decrease_refcount(device_id_);
        }
    }
};
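
// A brief usage sketch (illustration only): wrapper code which merely needs _some_ context
// to be current - without caring which one - can use this class as a scope guard:
//
//     {
//         context::current::detail_::scoped_existence_ensurer_t context_ensurer{};
//         // ... driver calls which require a current context ...
//     } // if a primary context had to be pushed above, it is popped and released here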

} // namespace detail_

inline scoped_override_t::scoped_override_t(device::primary_context_t&& primary_context)
    : parent(primary_context.is_owning(), primary_context.device_id(), primary_context.handle()) {}
inline scoped_override_t::scoped_override_t(const context_t& context) : parent(context.handle()) {}
inline scoped_override_t::scoped_override_t(context_t&& context) : parent(context.handle()) {}

} // namespace current

inline context_t create_and_push(
    const device_t& device,
    host_thread_sync_scheduling_policy_t sync_scheduling_policy,
    bool keep_larger_local_mem_after_resize)
{
    auto handle = detail_::create_and_push(device.id(), sync_scheduling_policy, keep_larger_local_mem_after_resize);
    bool take_ownership = true;
    return context::wrap(device.id(), handle, take_ownership);
}

inline context_t create(
    const device_t& device,
    host_thread_sync_scheduling_policy_t sync_scheduling_policy,
    bool keep_larger_local_mem_after_resize)
{
    auto created = create_and_push(device, sync_scheduling_policy, keep_larger_local_mem_after_resize);
    current::pop();
    return created;
}
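
// A brief usage sketch (illustration only): creating a new context on a device and making
// it current within a scope. cuda::device::get() and the default arguments of create()
// are declared elsewhere in the library (device.hpp and the main context.hpp).
//
//     auto device = cuda::device::get(0);
//     auto context = cuda::context::create(device);
//     {
//         cuda::context::current::scoped_override_t use_it{context};
//         // ... work scheduled here is associated with `context` ...
//     }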

namespace peer_to_peer {

inline bool can_access(context_t accessor, context_t peer)
{
    return device::peer_to_peer::detail_::can_access(accessor.device_id(), peer.device_id());
}

inline void enable_access(context_t accessor, context_t peer)
{
    detail_::enable_access(accessor.handle(), peer.handle());
}

inline void disable_access(context_t accessor, context_t peer)
{
    detail_::disable_access(accessor.handle(), peer.handle());
}

inline void enable_bidirectional_access(context_t first, context_t second)
{
    // Note: What happens when first and second are the same context, or are on the same device?
    enable_access(first, second);
    enable_access(second, first);
}

inline void disable_bidirectional_access(context_t first, context_t second)
{
    // Note: What happens when first and second are the same context, or are on the same device?
    disable_access(first, second);
    disable_access(second, first);
}
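
// A brief usage sketch (illustration only), assuming `ctx_a` and `ctx_b` are contexts on
// two different, peer-capable devices:
//
//     if (cuda::context::peer_to_peer::can_access(ctx_a, ctx_b)) {
//         cuda::context::peer_to_peer::enable_bidirectional_access(ctx_a, ctx_b);
//         // ... kernels in either context may now access memory allocated in the other ...
//     }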


} // namespace peer_to_peer

namespace current {

namespace peer_to_peer {

inline void enable_access_to(const context_t &peer_context)
{
    context::peer_to_peer::detail_::enable_access_to(peer_context.handle());
}

inline void disable_access_to(const context_t &peer_context)
{
    context::peer_to_peer::detail_::disable_access_to(peer_context.handle());
}

} // namespace peer_to_peer

} // namespace current

} // namespace context

inline memory::region_t context_t::global_memory_type::allocate(size_t size_in_bytes)
{
    return memory::device::detail_::allocate(context_handle_, size_in_bytes);
}

inline memory::region_t context_t::global_memory_type::allocate_managed(
    size_t size_in_bytes, memory::managed::initial_visibility_t initial_visibility)
{
    return memory::managed::detail_::allocate(context_handle_, size_in_bytes, initial_visibility);
}


inline device_t context_t::global_memory_type::associated_device() const
{
    return cuda::device::get(device_id_);
}

inline context_t context_t::global_memory_type::associated_context() const
{
    static constexpr const bool non_owning { false };
    return cuda::context::wrap(device_id_, context_handle_, non_owning);
}
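
// A brief usage sketch (illustration only): allocating global device memory through a
// context's memory proxy object; the memory() accessor and cuda::memory::device::free()
// are assumed here, being declared elsewhere in the library.
//
//     auto region = context.memory().allocate(1024); // 1024 bytes of global device memory
//     // ... use the region ...
//     cuda::memory::device::free(region.start());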

inline bool context_t::is_primary() const
{
    return context::current::detail_::is_primary(handle(), device_id());
}

// Note: The context_t::create_module() member functions are defined in module.hpp,
// for better separation of runtime-originating and driver-originating headers; see
// issue #320 on the issue tracker.

inline void context_t::enable_access_to(const context_t& peer) const
{
    context::peer_to_peer::enable_access(*this, peer);
}

inline void context_t::disable_access_to(const context_t& peer) const
{
    context::peer_to_peer::disable_access(*this, peer);
}
inline device_t context_t::device() const
{
    return device::wrap(device_id_);
}

inline stream_t context_t::create_stream(
    bool will_synchronize_with_default_stream,
    stream::priority_t priority) const
{
    return stream::detail_::create(device_id_, handle_, will_synchronize_with_default_stream, priority);
}

inline event_t context_t::create_event(
    bool uses_blocking_sync,
    bool records_timing,
    bool interprocess) const
{
    return cuda::event::detail_::create(
        device_id_, handle_, do_not_hold_primary_context_refcount_unit,
        uses_blocking_sync, records_timing, interprocess);
}

inline stream_t context_t::default_stream() const
{
    return stream::wrap(device_id_, handle_, stream::default_stream_handle, do_not_take_ownership);
}

template <typename Kernel, typename ... KernelParameters>
void context_t::launch(
    Kernel kernel,
    launch_configuration_t launch_configuration,
    KernelParameters... parameters) const
{
    default_stream().enqueue.kernel_launch(kernel, launch_configuration, parameters...);
}
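
// A brief usage sketch (illustration only), assuming a context_t `context`, a launch
// configuration `config`, a __global__ kernel `my_kernel(int*)` and a device pointer
// `device_buffer` (all hypothetical names):
//
//     auto stream = context.create_stream(false); // won't synchronize with the default stream
//     auto event  = context.create_event();
//     context.launch(my_kernel, config, device_buffer); // enqueued on the context's default stream
//     cuda::context::synchronize(context); // wait for all of the context's work to conclude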

} // namespace cuda

#endif // MULTI_WRAPPER_IMPLS_CONTEXT_HPP_