cuda-api-wrappers
Thin C++-flavored wrappers for the CUDA Runtime API
memory.hpp
Go to the documentation of this file.
1 
7 #pragma once
8 #ifndef MULTI_WRAPPER_IMPLS_MEMORY_HPP_
9 #define MULTI_WRAPPER_IMPLS_MEMORY_HPP_
10 
11 #include "context.hpp"
12 #include "ipc.hpp"
13 
14 #include "../memory.hpp"
15 #include "../array.hpp"
16 #include "../device.hpp"
17 #include "../pointer.hpp"
18 #include "../stream.hpp"
19 #include "../primary_context.hpp"
20 #include "../memory_pool.hpp"
21 
22 #include <driver_types.h>
23 
24 namespace cuda {
25 
26 namespace memory {
27 
28 template <typename T, dimensionality_t NumDimensions>
29 inline void copy(array_t<T, NumDimensions>& destination, span<T const> source, optional_ref<const stream_t> stream)
30 {
31  if (not stream) {
32  memory::copy<T, NumDimensions>(destination, source);
33  return;
34  }
35 #ifndef NDEBUG
36  if (source.size() != destination.size()) {
37  throw ::std::invalid_argument(
38  "Attempt to copy " + ::std::to_string(source.size()) +
39  " elements into an array of " + ::std::to_string(destination.size()) + " elements");
40  }
41 #endif
42  detail_::copy<T, NumDimensions>(destination, source.data(), stream->handle());
43 }
44 
// Note: Assumes the destination, source and stream are all usable in the same context
/// Copy the contents of a CUDA array into a plain region of memory.
///
/// @param destination pointer to the target memory region
/// @param source the CUDA array to copy from
/// @param stream if engaged, the copy is enqueued asynchronously on this stream,
///     which must belong to the same context as the source array; otherwise the
///     copy is synchronous, in the context associated with the destination pointer
///
/// @throws ::std::invalid_argument if the stream's context differs from the array's
template <typename T, dimensionality_t NumDimensions>
inline void copy(T* destination, const array_t<T, NumDimensions>& source, optional_ref<const stream_t> stream)
{
 if (not stream) {
 memory::copy(context_of(destination), destination, source);
 return;
 }
 if (stream->context_handle() != source.context_handle()) {
 throw ::std::invalid_argument("Attempt to copy an array in"
 + context::detail_::identify(source.context_handle()) + " via "
 + stream::detail_::identify(*stream));
 }
 detail_::copy<T, NumDimensions>(destination, source, stream->handle());
}
60 
61 template<dimensionality_t NumDimensions>
62 void copy(copy_parameters_t<NumDimensions> params, optional_ref<const stream_t> stream)
63 {
64  stream::handle_t stream_handle = stream ? stream->handle() : nullptr;
65  status_t status = detail_::multidim_copy(params, stream_handle);
66  throw_if_error_lazy(status, "Copying using a general copy parameters structure");
67 }
68 
69 
70 template <typename T>
71 void copy_single(T* destination, const T* source, optional_ref<const stream_t> stream)
72 {
73  memory::copy(destination, source, sizeof(T), stream);
74 }
75 
76 // Note: Assumes the source pointer is valid in the stream's context
77 template <typename T, dimensionality_t NumDimensions>
78 inline void copy(array_t<T, NumDimensions>& destination, const T* source, optional_ref<const stream_t> stream)
79 {
80  if (not stream) {
81  memory::copy(destination, context_of(source), source);
82  return;
83  }
84  detail_::copy<T, NumDimensions>(destination, source, stream->handle());
85 }
86 
87 inline void copy(void *destination, const void *source, size_t num_bytes, optional_ref<const stream_t> stream)
88 {
89  if (not stream) {
90  context::current::detail_::scoped_existence_ensurer_t ensure_some_context{};
91  auto result = cuMemcpy(device::address(destination), device::address(source), num_bytes);
92  // TODO: Determine whether it was from host to device, device to host etc and
93  // add this information to the error string
94  throw_if_error_lazy(result, "Synchronously copying data");
95  return;
96  }
97  detail_::copy(destination, source, num_bytes, stream->handle());
98 }
99 
100 namespace device {
101 
/// Allocate device-side (global) memory within the given context.
///
/// @param context the context in which the allocation is made
/// @param size_in_bytes number of bytes to allocate
/// @return the allocated region
inline region_t allocate(const context_t& context, size_t size_in_bytes)
{
 return detail_::allocate(context.handle(), size_in_bytes);
}
106 
107 inline region_t allocate(const device_t& device, size_t size_in_bytes)
108 {
109  auto pc = device.primary_context();
110  return allocate(pc, size_in_bytes);
111 }
112 
#if CUDA_VERSION >= 11020
/// Allocate device-side memory - stream-ordered (on the stream's context) if a
/// stream is given, otherwise synchronously in the current context.
inline region_t allocate(size_t size_in_bytes, optional_ref<const stream_t> stream = {})
{
	if (stream) {
		return detail_::allocate(stream->context().handle(), size_in_bytes, stream->handle());
	}
	return detail_::allocate_in_current_context(size_in_bytes);
}

#endif // CUDA_VERSION >= 11020
122 
#if CUDA_VERSION >= 11020
/// Free a region of device-side memory.
///
/// @param region_start beginning of the region to free, as returned by the allocation
/// @param stream if engaged, the de-allocation is enqueued on this stream
///     (stream-ordered deallocation, available since CUDA 11.2); otherwise
///     the region is freed synchronously
inline void free(void* region_start, optional_ref<const stream_t> stream)
#else
inline void free(void* region_start)
#endif // CUDA_VERSION >= 11020
{
#if CUDA_VERSION >= 11020
 if (stream) {
 // Stream-ordered (asynchronous) deallocation
 detail_::free_on_stream(region_start, stream->handle());
 return;
 }
#endif
 // Synchronous path; the driver requires _some_ current context - not necessarily
 // the one in which the region was allocated
 context::current::detail_::scoped_existence_ensurer_t ensurer;
 detail_::free_in_current_context(ensurer.context_handle,region_start);
}
138 
139 } // namespace device
140 
141 namespace inter_context {
142 
143 inline void copy(
144  void * destination,
145  const context_t& destination_context,
146  const void * source,
147  const context_t& source_context,
148  size_t num_bytes,
149  optional_ref<const stream_t> stream = {})
150 {
151  auto status = stream ?
152  cuMemcpyPeer(
153  device::address(destination),
154  destination_context.handle(),
155  device::address(source),
156  source_context.handle(),
157  num_bytes) :
158  cuMemcpyPeerAsync(
159  device::address(destination),
160  destination_context.handle(),
161  device::address(source),
162  source_context.handle(),
163  num_bytes,
164  stream->handle());
165 
166  // TODO: Determine whether it was from host to device, device to host etc and
167  // add this information to the error string
168  throw_if_error_lazy(status,
169  ::std::string("Failed copying data between devices: From address ")
170  + cuda::detail_::ptr_as_hex(source) + " in "
171  + context::detail_::identify(source_context.handle()) + " to address "
172  + cuda::detail_::ptr_as_hex(destination) + " in "
173  + context::detail_::identify(destination_context.handle()) +
174  (stream ? " on " + stream::detail_::identify(*stream) : ""));
175 }
176 
177 } // namespace inter_context
178 
179 namespace managed {
180 
181 namespace detail_ {
182 
183 template <typename GenericRegion>
184 inline device_t region_helper<GenericRegion>::preferred_location() const
185 {
186  auto device_id = range::detail_::get_scalar_attribute<bool>(*this, CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION);
187  return cuda::device::get(device_id);
188 }
189 
190 template <typename GenericRegion>
191 inline void region_helper<GenericRegion>::set_preferred_location(device_t& device) const
192 {
193  range::detail_::set_attribute(*this,CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION, device.id());
194 }
195 
196 template <typename GenericRange>
197 inline void region_helper<GenericRange>::clear_preferred_location() const
198 {
199  range::detail_::unset_attribute(*this, CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION);
200 }
201 
202 } // namespace detail_
203 
205 {
206  range::detail_::advise(region, CU_MEM_ADVISE_SET_ACCESSED_BY, device.id());
207 }
208 
210 {
211  range::detail_::advise(region, CU_MEM_ADVISE_UNSET_ACCESSED_BY, device.id());
212 }
213 
/// Obtain the set of devices which the CUDA driver has been advised to expect
/// will access the given managed-memory region.
///
/// @param region the managed-memory region being queried
/// @param allocator the allocator for the returned vector
/// @return proxies for the devices expected to access @p region
template <typename Allocator>
::std::vector<device_t, Allocator> expected_accessors(const_region_t region, const Allocator& allocator)
{
 auto num_devices = cuda::device::count();
 // The driver writes raw device IDs directly into the vector's storage; this relies
 // on device_t being layout-compatible with (and the same size as) a device ID.
 // NOTE(review): verify this assumption about device_t's layout
 ::std::vector<device_t, Allocator> devices(num_devices, allocator);
 auto device_ids = reinterpret_cast<cuda::device::id_t *>(devices.data());

 auto status = cuMemRangeGetAttribute(
 device_ids, sizeof(device_t) * devices.size(),
 CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY, device::address(region.start()), region.size());
 throw_if_error_lazy(status, "Obtaining the IDs of devices with access to the managed memory range at "
 + cuda::detail_::ptr_as_hex(region.start()));
 // Unused trailing entries are assumed to hold an invalid device ID, and the valid
 // IDs are assumed sorted (lower_bound requires this).
 // NOTE(review): cudaInvalidDeviceId is a Runtime-API constant used in otherwise
 // Driver-API-based code - confirm it matches what cuMemRangeGetAttribute writes
 auto first_invalid_element = ::std::lower_bound(device_ids, device_ids + num_devices, cudaInvalidDeviceId);
 // We may have gotten fewer results than the set of all devices, so let's whittle that down

 if (first_invalid_element - device_ids != num_devices) {
 devices.resize(first_invalid_element - device_ids);
 }

 return devices;
}
235 
236 inline void prefetch(
237  const_region_t region,
238  const cuda::device_t& destination,
239  const stream_t& stream)
240 {
241  detail_::prefetch(region, destination.id(), stream.handle());
242 }
243 
244 inline void prefetch_to_host(const_region_t region, const stream_t& stream)
245 {
246  detail_::prefetch(region, CU_DEVICE_CPU, stream.handle());
247 }
248 
250  const context_t& context,
251  size_t num_bytes,
252  initial_visibility_t initial_visibility)
253 {
254  return detail_::allocate(context.handle(), num_bytes, initial_visibility);
255 }
256 
258  const device_t& device,
259  size_t num_bytes,
260  initial_visibility_t initial_visibility)
261 {
262  auto pc = device.primary_context();
263  return allocate(pc, num_bytes, initial_visibility);
264 }
265 
/// Allocate managed memory, visible to all devices, in the current context -
/// pushing a primary context if no context is current.
inline region_t allocate(size_t num_bytes)
{
 auto context_handle = context::current::detail_::get_with_fallback_push();
 // NOTE(review): context_handle is a raw context::handle_t, while the allocate()
 // overloads visible here take a context_t - presumably another overload (or an
 // implicit conversion) accepts the raw handle; verify this call resolves as intended
 return allocate(context_handle, num_bytes, initial_visibility_t::to_all_devices);
}
271 
272 } // namespace managed
273 
274 namespace mapped {
275 
277  const cuda::device_t& device,
278  size_t size_in_bytes,
279  allocation_options options)
280 {
281  auto pc = device.primary_context();
282  return cuda::memory::mapped::detail_::allocate(pc.handle(), size_in_bytes, options);
283 }
284 
285 
287  const cuda::context_t& context,
288  size_t size_in_bytes,
289  allocation_options options)
290 {
291  return cuda::memory::mapped::detail_::allocate(context.handle(), size_in_bytes, options);
292 }
293 
294 } // namespace mapped
295 
296 namespace host {
297 
298 namespace detail_ {
299 
306 inline region_t allocate_in_current_context(
307  size_t size_in_bytes,
308  allocation_options options)
309 {
310  void* allocated = nullptr;
311  auto flags = memory::detail_::make_cuda_host_alloc_flags(options);
312  auto result = cuMemHostAlloc(&allocated, size_in_bytes, flags);
313  if (is_success(result) && allocated == nullptr) {
314  // Can this even happen? hopefully not
315  result = static_cast<status_t>(status::named_t::unknown);
316  }
317  throw_if_error_lazy(result, "Failed allocating " + ::std::to_string(size_in_bytes) + " bytes of host memory");
318  return { allocated, size_in_bytes };
319 }
320 
/// Allocate pinned host memory, with the specified context made current for
/// the duration of the allocation call.
///
/// @param context_handle raw handle of the context to make current
/// @param size_in_bytes amount of memory to allocate
/// @param options host-allocation options (portability, device-mapping etc.)
/// @return the allocated region
inline region_t allocate(
 context::handle_t context_handle,
 size_t size_in_bytes,
 allocation_options options)
{
 CAW_SET_SCOPE_CONTEXT(context_handle);
 return allocate_in_current_context(size_in_bytes, options);
}
329 
330 } // namespace detail_
331 
341  size_t size_in_bytes,
342  allocation_options options)
343 {
344  static constexpr const bool dont_decrease_pc_refcount_on_destruct { false };
345  context::current::detail_::scoped_existence_ensurer_t context_ensurer{ dont_decrease_pc_refcount_on_destruct };
346  // Note: We allow a PC to leak here, in case no other context existed, so as not to risk
347  // the allocation being invalidated by the only CUDA context getting destroyed when
348  // leaving this function
349  return detail_::allocate_in_current_context(size_in_bytes, options);
350 }
351 
352 } // namespace host
353 
354 namespace pointer {
355 namespace detail_ {
356 
357 template<attribute_t attribute>
358 status_and_attribute_value<attribute> get_attribute_with_status(const void *ptr)
359 {
360  context::current::detail_::scoped_existence_ensurer_t ensure_we_have_some_context;
361  attribute_value_t <attribute> attribute_value;
362  auto status = cuPointerGetAttribute(&attribute_value, attribute, device::address(ptr));
363  return { status, attribute_value };
364 }
365 
366 
367 template<attribute_t attribute>
368 attribute_value_t<attribute> get_attribute(const void *ptr)
369 {
370  auto status_and_attribute_value = get_attribute_with_status<attribute>(ptr);
371  throw_if_error_lazy(status_and_attribute_value.status,
372  "Obtaining attribute " + ::std::to_string(static_cast<int>(attribute))
373  + " for pointer " + cuda::detail_::ptr_as_hex(ptr) );
374  return status_and_attribute_value.value;
375 }
376 
377 // TODO: Consider switching to a span with C++20
378 inline void get_attributes(unsigned num_attributes, pointer::attribute_t* attributes, void** value_ptrs, const void* ptr)
379 {
380  context::current::detail_::scoped_existence_ensurer_t ensure_we_have_some_context;
381  auto status = cuPointerGetAttributes( num_attributes, attributes, value_ptrs, device::address(ptr) );
382  throw_if_error_lazy(status, "Obtaining multiple attributes for pointer " + cuda::detail_::ptr_as_hex(ptr));
383 }
384 
385 } // namespace detail_
386 } // namespace pointer
387 
388 namespace device {
389 
390 template <typename T>
391 inline void typed_set(T* start, const T& value, size_t num_elements, optional_ref<const stream_t> stream)
392 {
393  if (stream) {
394  detail_::set(start, value, num_elements, stream->handle());
395  }
396  context::current::detail_::scoped_existence_ensurer_t ensure_some_context{};
397  static_assert(::std::is_trivially_copyable<T>::value, "Non-trivially-copyable types cannot be used for setting memory");
398  static_assert(sizeof(T) == 1 or sizeof(T) == 2 or sizeof(T) == 4,
399  "Unsupported type size - only sizes 1, 2 and 4 are supported");
400  // TODO: Consider checking for alignment when compiling without NDEBUG
401  status_t result {CUDA_SUCCESS};
402  switch(sizeof(T)) {
403  case 1: result = stream ?
404  cuMemsetD8Async (address(start), reinterpret_cast<const ::std::uint8_t& >(value), num_elements, stream->handle()) :
405  cuMemsetD8 (address(start), reinterpret_cast<const ::std::uint8_t& >(value), num_elements); break;
406  case 2: result = stream ?
407  cuMemsetD16Async(address(start), reinterpret_cast<const ::std::uint16_t&>(value), num_elements, stream->handle()) :
408  cuMemsetD16 (address(start), reinterpret_cast<const ::std::uint16_t&>(value), num_elements); break;
409  case 4: result = stream ?
410  cuMemsetD32Async(address(start), reinterpret_cast<const ::std::uint32_t&>(value), num_elements, stream->handle()) :
411  cuMemsetD32 (address(start), reinterpret_cast<const ::std::uint32_t&>(value), num_elements); break;
412  }
413  throw_if_error_lazy(result, "Setting global device memory bytes");
414 }
415 
416 } // namespace device
417 
/// Set all bytes of a memory region to a fixed value, dispatching on the
/// CUDA memory type of the given pointer.
///
/// @param ptr start of the region; may point to device, unified or host memory
/// @param byte_value the value to write into every byte
/// @param num_bytes size of the region to set
/// @param stream if engaged, the set is enqueued asynchronously on this stream -
///     supported for device-side memory only
///
/// @throws ::std::invalid_argument on an asynchronous set of host-side memory
/// @throws runtime_error if the driver reports an unrecognized memory type
inline void set(void* ptr, int byte_value, size_t num_bytes, optional_ref<const stream_t> stream)
{
 switch ( type_of(ptr) ) {
 case device_:
// case managed_:
 case unified_:
 memory::device::set(ptr, byte_value, num_bytes, stream); break;
// case unregistered_:
 case host_:
 if (stream) {
 throw ::std::invalid_argument("Asynchronous host-memory set's not currently supported");
 } else { ::std::memset(ptr, byte_value, num_bytes); }
 break;
 default:
 throw runtime_error(
 cuda::status::invalid_value,
 "CUDA returned an invalid memory type for the pointer 0x" + cuda::detail_::ptr_as_hex(ptr));
 }
}
437 
438 #if CUDA_VERSION >= 11020
439 namespace pool {
440 
441 template<shared_handle_kind_t SharedHandleKind>
442 pool_t create(const cuda::device_t& device)
443 {
444  return detail_::create<SharedHandleKind>(device.id());
445 }
446 
447 
448 inline region_t allocate(const pool_t& pool, const stream_t &stream, size_t num_bytes)
449 {
450  CUdeviceptr dptr;
451  auto status = cuMemAllocFromPoolAsync(&dptr, num_bytes, pool.handle(), stream.handle());
452  throw_if_error_lazy(status, "Failed scheduling an allocation of " + ::std::to_string(num_bytes)
453  + " bytes of memory from " + detail_::identify(pool) + ", on " + stream::detail_::identify(stream));
454  return {as_pointer(dptr), num_bytes };
455 }
456 
457 namespace ipc {
458 
459 template <shared_handle_kind_t Kind>
460 shared_handle_t<Kind> export_(const pool_t& pool)
461 {
462  shared_handle_t<Kind> result;
463  static constexpr const unsigned long long flags { 0 };
464  auto status = cuMemPoolExportToShareableHandle(&result, pool.handle(), static_cast<CUmemAllocationHandleType>(Kind), flags);
465  throw_if_error_lazy(status, "Exporting " + pool::detail_::identify(pool) +" for inter-process use");
466  return result;
467 }
468 
469 template <shared_handle_kind_t Kind>
470 pool_t import(const device_t& device, const shared_handle_t<Kind>& shared_pool_handle)
471 {
472  auto handle = detail_::import<Kind>(shared_pool_handle);
473  // TODO: MUST SUPPORT SAYING THIS POOL CAN'T ALLOCATE - NOT AN EXTRA FLAG IN THE POOL CLASS
474  return memory::pool::wrap(device.id(), handle, do_not_take_ownership);
475 }
476 
477 } // namespace ipc
478 
479 
480 } // namespace pool
481 
/// Schedule an allocation of @p num_bytes from this pool on the given stream.
inline region_t pool_t::allocate(const stream_t& stream, size_t num_bytes) const
{
 return pool::allocate(*this, stream, num_bytes);
}
486 
/// Obtain (a non-owning proxy of) the device on whose memory this pool allocates.
inline cuda::device_t pool_t::device() const noexcept
{
 return cuda::device::wrap(device_id_);
}
491 
/// Import a pointer which another process has exported from this (imported) pool.
inline pool::ipc::imported_ptr_t pool_t::import(const memory::pool::ipc::ptr_handle_t& exported_handle) const
{
 return pool::ipc::import_ptr(*this, exported_handle);
}
496 
/// Obtain the given device's access permissions to the memory in the given pool.
inline permissions_t get_permissions(const cuda::device_t& device, const pool_t& pool)
{
 return cuda::memory::detail_::get_permissions(device.id(), pool.handle());
}
501 
502 inline void set_permissions(const cuda::device_t& device, const pool_t& pool, permissions_t permissions)
503 {
504  if (pool.device_id() == device.id()) {
505  throw ::std::invalid_argument("Cannot change the access get_permissions to a pool of the device "
506  "on which the pool's memory is allocated (" + cuda::device::detail_::identify(device.id()) + ')');
507  }
508  cuda::memory::detail_::set_permissions(device.id(), pool.handle(), permissions);
509 }
510 
511 template <typename DeviceRange>
512 void set_permissions(DeviceRange devices, const pool_t& pool, permissions_t permissions)
513 {
514  // Not depending on unique_span here :-(
515  auto device_ids = ::std::unique_ptr<cuda::device::id_t[]>(new cuda::device::id_t[devices.size()]);
516  auto device_to_id = [](device_t const& device){ return device.id(); };
517  ::std::transform(::std::begin(devices), ::std::end(devices), device_ids.get(), device_to_id);
518  cuda::memory::detail_::set_permissions( { device_ids.get(), devices.size() }, pool.handle(), permissions);
519 }
520 #endif // #if CUDA_VERSION >= 11020
521 
522 } // namespace memory
523 
524 #if CUDA_VERSION >= 11020
525 
/// Create a new memory pool on this device, shareable via handles of the given kind.
template <memory::pool::shared_handle_kind_t Kind>
memory::pool_t device_t::create_memory_pool() const
{
 return cuda::memory::pool::detail_::create<Kind>(id_);
}
531 
/// Enqueue an allocation from the given memory pool on this stream.
inline memory::region_t stream_t::enqueue_t::allocate(const memory::pool_t& pool, size_t num_bytes) const
{
 return memory::pool::allocate(pool, associated_stream, num_bytes);
}
536 
537 inline memory::pool_t device_t::default_memory_pool() const
538 {
539  memory::pool::handle_t handle;
540  auto status = cuDeviceGetDefaultMemPool(&handle, id_);
541  throw_if_error_lazy(status, "Failed obtaining the default memory pool for " + device::detail_::identify(id_));
542  return memory::pool::wrap(id_, handle, do_not_take_ownership);
543 }
544 
545 #endif // CUDA_VERSION >= 11020
546 } // namespace cuda
547 
548 #endif // MULTI_WRAPPER_IMPLS_MEMORY_HPP_
549 
Proxy class for a CUDA stream.
Definition: stream.hpp:258
void prefetch_to_host(const_region_t region, const stream_t &stream)
Prefetches a region of managed memory into host memory.
Definition: memory.hpp:244
stream::handle_t handle() const noexcept
The raw CUDA handle for a stream which this class wraps.
Definition: stream.hpp:269
::std::vector< device_t, Allocator > expected_accessors(const_region_t region, const Allocator &allocator=Allocator())
Definition: memory.hpp:215
Wrapper class for a CUDA context.
Definition: context.hpp:249
Definitions and functionality wrapping CUDA APIs.
Definition: array.hpp:22
device::id_t count()
Get the number of CUDA devices usable on the system (with the current CUDA library and kernel driver)...
Definition: miscellany.hpp:63
detail_::region_helper< memory::region_t > region_t
A child class of the generic region_t with some managed-memory-specific functionality.
Definition: memory.hpp:1974
CUcontext handle_t
Raw CUDA driver handle for a context; see {context_t}.
Definition: types.hpp:880
region_t allocate(const context_t &context, size_t size_in_bytes)
Allocate device-side memory on a CUDA device context.
Definition: memory.hpp:102
Owning wrapper for CUDA 2D and 3D arrays.
Definition: array.hpp:29
void typed_set(T *start, const T &value, size_t num_elements, optional_ref< const stream_t > stream={})
Sets consecutive elements of a region of memory to a fixed value of some width.
Definition: memory.hpp:391
Implementations of inter-processing-communications related functions and classes requiring the defini...
CUdevice id_t
Numeric ID of a CUDA device used by the CUDA Runtime API.
Definition: types.hpp:852
void advise_expected_access_by(const_region_t region, device_t &device)
Advise the CUDA driver that device is expected to access region.
Definition: memory.hpp:204
device::id_t id() const noexcept
Return the proxied device's ID.
Definition: device.hpp:594
memory::type_t type_of(const void *ptr)
Determine the type of memory at a given address vis-a-vis the CUDA ecosystem: Was it allocated by the...
Definition: pointer.hpp:112
void free(void *ptr)
Free a region of device-side memory (regardless of how it was allocated)
Definition: memory.hpp:126
context_t context_of(const void *ptr)
Obtain (a non-owning wrapper for) the CUDA context with which a memory address is associated (e...
Definition: pointer.hpp:50
void start()
Start CUDA profiling for the current process.
Definition: profiling.hpp:229
CUpointer_attribute attribute_t
Raw CUDA driver choice type for attributes of pointers.
Definition: types.hpp:664
Implementations requiring the definitions of multiple CUDA entity proxy classes, and which regard con...
void copy(span< T > destination, c_array< const T, N > const &source, optional_ref< const stream_t > stream={})
Copy the contents of a C-style array into a span of same-type elements.
Definition: memory.hpp:627
options accepted by CUDA's allocator of memory with a host-side aspect (host-only or managed memory)...
Definition: memory.hpp:93
A (base?) class for exceptions raised by CUDA code; these errors are thrown by essentially all CUDA R...
Definition: error.hpp:282
A pair of memory regions, one in system (=host) memory and one on a CUDA device's memory - mapped to ...
Definition: memory.hpp:160
device::primary_context_t primary_context(bool hold_pc_refcount_unit=false) const
Produce a proxy for the device&#39;s primary context - the one used by runtime API calls.
Definition: device.hpp:152
void set(void *start, int byte_value, size_t num_bytes, optional_ref< const stream_t > stream={})
Sets all bytes in a region of memory to a fixed value.
Definition: memory.hpp:387
void set(region_t region, int byte_value)
Definition: memory.hpp:1822
device_t get(id_t id)
Returns a proxy for the CUDA device with a given id.
Definition: device.hpp:832
#define throw_if_error_lazy(status__,...)
A macro for only throwing an error if we&#39;ve failed - which also ensures no string is constructed unle...
Definition: error.hpp:327
CUarray handle_t
Raw CUDA driver handle for arrays (of any dimension)
Definition: array.hpp:34
region_pair_t allocate(const cuda::device_t &device, size_t size_in_bytes, allocation_options options=allocation_options{})
Allocate a memory region on the host, which is also mapped to a memory region in the global memory of...
Definition: memory.hpp:276
array_t< T, NumDimensions > wrap(device::id_t device_id, context::handle_t context_handle, handle_t handle, dimensions_t< NumDimensions > dimensions) noexcept
Wrap an existing CUDA array in an array_t instance.
Definition: array.hpp:271
A builder-ish subclass template around the basic 2D or 3D copy parameters which CUDA's complex copyin...
Definition: copy_parameters.hpp:68
detail_::region_helper< memory::const_region_t > const_region_t
A child class of the generic const_region_t with some managed-memory-specific functionality.
Definition: memory.hpp:1976
address_t address(const void *device_ptr) noexcept
Definition: types.hpp:684
CUstream handle_t
The CUDA driver's raw handle for streams.
Definition: types.hpp:236
void * as_pointer(device::address_t address) noexcept
Definition: types.hpp:702
void advise_no_access_expected_by(const_region_t region, device_t &device)
Advise the CUDA driver that device is not expected to access region.
Definition: memory.hpp:209
device_t wrap(id_t id) NOEXCEPT_IF_NDEBUG
Returns a wrapper for the CUDA device with a given id.
Definition: device.hpp:820
void copy_single(T *destination, const T *source, optional_ref< const stream_t > stream={})
Synchronously copies a single (typed) value between two memory locations.
Definition: memory.hpp:71
void prefetch(const_region_t region, const cuda::device_t &destination, const stream_t &stream)
Prefetches a region of managed memory to a specific device, so it can later be used there without wai...
Definition: memory.hpp:236
detail_::all_devices devices()
Definition: devices.hpp:224
Wrapper class for a CUDA device.
Definition: device.hpp:135
initial_visibility_t
The choices of which categories CUDA devices must a managed memory region be visible to...
Definition: types.hpp:755
constexpr bool is_success(status_t status)
Determine whether the API call returning the specified status had succeeded.
Definition: error.hpp:214
CUresult status_t
Indicates either the result (success or error index) of a CUDA Runtime or Driver API call...
Definition: types.hpp:74