author     Andrzej Janik <[email protected]>   2024-12-10 21:48:10 +0100
committer  GitHub <[email protected]>          2024-12-10 21:48:10 +0100
commit     7ac67a89e9ac08d743242627cacefda518cefd68 (patch)
tree       5fdb6c1519256268ef1b72a83728fb72b813c78c
parent     7a6df9dcbf59edef371e7f63c16c64916ddb0c0b (diff)
Enable Geekbench 5 (#304)
-rw-r--r--  .devcontainer/Dockerfile | 1
-rw-r--r--  comgr/src/lib.rs | 20
-rw-r--r--  cuda_base/Cargo.toml | 2
-rw-r--r--  cuda_base/src/cuda.rs | 3671
-rw-r--r--  cuda_base/src/lib.rs | 13
-rw-r--r--  cuda_base/src/nvml.rs | 7857
-rw-r--r--  cuda_types/src/cuda.rs | 8110
-rw-r--r--  cuda_types/src/lib.rs | 8112
-rw-r--r--  cuda_types/src/nvml.rs | 4185
-rw-r--r--  ptx/lib/zluda_ptx_impl.bc | bin 4816 -> 7524 bytes
-rw-r--r--  ptx/lib/zluda_ptx_impl.cpp | 28
-rw-r--r--  ptx/src/pass/emit_llvm.rs | 80
-rw-r--r--  ptx/src/pass/insert_explicit_load_store.rs | 42
-rw-r--r--  ptx/src/pass/mod.rs | 4
-rw-r--r--  ptx/src/pass/replace_instructions_with_function_calls.rs | 3
-rw-r--r--  ptx/src/pass/replace_known_functions.rs | 38
-rw-r--r--  ptx/src/test/spirv_run/mod.rs | 6
-rw-r--r--  zluda/src/impl/context.rs | 2
-rw-r--r--  zluda/src/impl/device.rs | 123
-rw-r--r--  zluda/src/impl/driver.rs | 4
-rw-r--r--  zluda/src/impl/memory.rs | 9
-rw-r--r--  zluda/src/impl/mod.rs | 7
-rw-r--r--  zluda/src/impl/module.rs | 2
-rw-r--r--  zluda/src/impl/pointer.rs | 2
-rw-r--r--  zluda/src/lib.rs | 8
-rw-r--r--  zluda_bindgen/src/main.rs | 129
-rw-r--r--  zluda_dump/src/dark_api.rs | 2
-rw-r--r--  zluda_dump/src/format.rs | 158
-rw-r--r--  zluda_dump/src/format_generated.rs | 4444
-rw-r--r--  zluda_dump/src/lib.rs | 6
-rw-r--r--  zluda_dump/src/log.rs | 5
-rw-r--r--  zluda_dump/src/os_unix.rs | 2
-rw-r--r--  zluda_dump/src/side_by_side.rs | 2
-rw-r--r--  zluda_dump/src/trace.rs | 2
-rw-r--r--  zluda_inject/Cargo.toml | 2
-rw-r--r--  zluda_inject/build.rs | 3
-rw-r--r--  zluda_inject/tests/helpers/do_cuinit_early.rs | 2
-rw-r--r--  zluda_ml/Cargo.toml | 4
-rw-r--r--  zluda_ml/README | 3
-rw-r--r--  zluda_ml/src/impl.rs | 35
-rw-r--r--  zluda_ml/src/lib.rs | 37
-rw-r--r--  zluda_ml/src/nvml.rs | 3171
42 files changed, 24893 insertions, 15443 deletions
diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index 99fb890..ca8cc77 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -27,6 +27,7 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get update -y && DEBIAN_FRONTEND=noninter
libgl-dev libegl-dev libvdpau-dev \
nvidia-utils-${CUDA_DRIVER} \
cuda-cudart-dev-${CUDA_PKG_VERSION} \
+ cuda-nvml-dev-${CUDA_PKG_VERSION} \
cuda-cudart-${CUDA_PKG_VERSION} \
cuda-profiler-api-${CUDA_PKG_VERSION} \
cuda-nvcc-${CUDA_PKG_VERSION}
diff --git a/comgr/src/lib.rs b/comgr/src/lib.rs
index 94ba6ef..0ff838b 100644
--- a/comgr/src/lib.rs
+++ b/comgr/src/lib.rs
@@ -1,5 +1,5 @@
use amd_comgr_sys::*;
-use std::{ffi::CStr, iter, mem, ptr};
+use std::{ffi::CStr, mem, ptr};
struct Data(amd_comgr_data_t);
@@ -137,7 +137,8 @@ pub fn compile_bitcode(
link_with_device_libs_info.set_isa_name(gcn_arch)?;
link_with_device_libs_info.set_language(amd_comgr_language_t::AMD_COMGR_LANGUAGE_LLVM_IR)?;
// This makes no sense, but it makes ockl linking work
- link_with_device_libs_info.set_options([c"-Xclang", c"-mno-link-builtin-bitcode-postopt"].into_iter())?;
+ link_with_device_libs_info
+ .set_options([c"-Xclang", c"-mno-link-builtin-bitcode-postopt"].into_iter())?;
let with_device_libs = do_action(
&linked_data_set,
&link_with_device_libs_info,
@@ -145,7 +146,20 @@ pub fn compile_bitcode(
)?;
let compile_action_info = ActionInfo::new()?;
compile_action_info.set_isa_name(gcn_arch)?;
- compile_action_info.set_options(iter::once(c"-O3"))?;
+ let common_options = [c"-O3", c"-mno-wavefrontsize64", c"-mcumode"].into_iter();
+ let opt_options = if cfg!(debug_assertions) {
+ [c"-g", c"", c"", c"", c""]
+ } else {
+ [
+ c"-g0",
+ // default inlining threshold times 10
+ c"-mllvm",
+ c"-inline-threshold=2250",
+ c"-mllvm",
+ c"-inlinehint-threshold=3250",
+ ]
+ };
+ compile_action_info.set_options(common_options.chain(opt_options))?;
let reloc_data_set = do_action(
&with_device_libs,
&compile_action_info,
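Aside on the comgr hunk above: both arms of the `if cfg!(debug_assertions)` must produce an array of the same type, so the debug arm pads with empty C strings to match the five-element release arm, and the whole chained iterator is handed to set_options. A minimal, self-contained sketch of that pattern (illustrative only, not the comgr API; compile_options and the printing loop are made up for this example):

// Sketch of the build-profile option selection used above (requires Rust 1.77+
// for c"..." literals). Both branches yield a [&'static CStr; 5], so the debug
// branch pads with empty strings.
use std::ffi::CStr;

fn compile_options(debug: bool) -> impl Iterator<Item = &'static CStr> {
    let common = [c"-O3", c"-mno-wavefrontsize64", c"-mcumode"];
    let profile = if debug {
        [c"-g", c"", c"", c"", c""]
    } else {
        [
            c"-g0",
            // default inlining threshold times 10
            c"-mllvm",
            c"-inline-threshold=2250",
            c"-mllvm",
            c"-inlinehint-threshold=3250",
        ]
    };
    common.into_iter().chain(profile)
}

fn main() {
    // Mirrors the diff: the profile is picked by the build configuration.
    for opt in compile_options(cfg!(debug_assertions)) {
        println!("{}", opt.to_string_lossy());
    }
}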
diff --git a/cuda_base/Cargo.toml b/cuda_base/Cargo.toml
index 9c9d531..b2bbdaa 100644
--- a/cuda_base/Cargo.toml
+++ b/cuda_base/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
[dependencies]
quote = "1.0"
-syn = { version = "2.0", features = ["full", "visit-mut"] }
+syn = { version = "2.0", features = ["full", "visit-mut", "extra-traits"] }
proc-macro2 = "1.0"
rustc-hash = "1.1.0"
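The added "extra-traits" feature enables Debug (and PartialEq/Eq/Hash) implementations on syn's syntax-tree types, which a bindings generator can use to inspect or compare parsed items. A tiny stand-alone illustration of what the feature buys (not taken from zluda_bindgen; the function body is hypothetical):

// Assumes syn 2.0 with the "full" and "extra-traits" features, as in the
// Cargo.toml above.
use syn::ItemFn;

fn main() {
    let item: ItemFn = syn::parse_quote! {
        fn example() {}
    };
    // ItemFn only implements Debug when "extra-traits" is enabled; without it,
    // this println! does not compile.
    println!("{:#?}", item);
}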
diff --git a/cuda_base/src/cuda.rs b/cuda_base/src/cuda.rs
index 2cc5a56..37aadf1 100644
--- a/cuda_base/src/cuda.rs
+++ b/cuda_base/src/cuda.rs
@@ -20,9 +20,9 @@ extern "system" {
::CUresult,
::cudaGetErrorString*/
fn cuGetErrorString(
- error: cuda_types::CUresult,
+ error: cuda_types::cuda::CUresult,
pStr: *mut *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the string representation of an error code enum name
Sets \p *pStr to the address of a NULL-terminated string representation
@@ -41,9 +41,9 @@ extern "system" {
::CUresult,
::cudaGetErrorName*/
fn cuGetErrorName(
- error: cuda_types::CUresult,
+ error: cuda_types::cuda::CUresult,
pStr: *mut *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Initialize the CUDA driver API
Initializes the driver API and must be called before any other function from
the driver API in the current process. Currently, the \p Flags parameter must be 0. If ::cuInit()
@@ -59,7 +59,7 @@ extern "system" {
::CUDA_ERROR_SYSTEM_DRIVER_MISMATCH,
::CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE
\notefnerr*/
- fn cuInit(Flags: ::core::ffi::c_uint) -> cuda_types::CUresult;
+ fn cuInit(Flags: ::core::ffi::c_uint) -> cuda_types::cuda::CUresult;
/** \brief Returns the latest CUDA version supported by driver
Returns in \p *driverVersion the version of CUDA supported by
@@ -82,7 +82,7 @@ extern "system" {
::cudaRuntimeGetVersion*/
fn cuDriverGetVersion(
driverVersion: *mut ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a handle to a compute device
Returns in \p *device a device handle given an ordinal in the range <b>[0,
@@ -109,9 +109,9 @@ extern "system" {
::cuDeviceTotalMem,
::cuDeviceGetExecAffinitySupport*/
fn cuDeviceGet(
- device: *mut cuda_types::CUdevice,
+ device: *mut cuda_types::cuda::CUdevice,
ordinal: ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the number of compute-capable devices
Returns in \p *count the number of devices with compute capability greater
@@ -137,7 +137,7 @@ extern "system" {
::cuDeviceTotalMem,
::cuDeviceGetExecAffinitySupport,
::cudaGetDeviceCount*/
- fn cuDeviceGetCount(count: *mut ::core::ffi::c_int) -> cuda_types::CUresult;
+ fn cuDeviceGetCount(count: *mut ::core::ffi::c_int) -> cuda_types::cuda::CUresult;
/** \brief Returns an identifier string for the device
Returns an ASCII string identifying the device \p dev in the NULL-terminated
@@ -169,8 +169,8 @@ extern "system" {
fn cuDeviceGetName(
name: *mut ::core::ffi::c_char,
len: ::core::ffi::c_int,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Return an UUID for the device
Note there is a later version of this API, ::cuDeviceGetUuid_v2. It will
@@ -201,9 +201,9 @@ extern "system" {
::cuDeviceGetExecAffinitySupport,
::cudaGetDeviceProperties*/
fn cuDeviceGetUuid(
- uuid: *mut cuda_types::CUuuid,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ uuid: *mut cuda_types::cuda::CUuuid,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Return an UUID for the device (11.4+)
Returns 16-octets identifying the device \p dev in the structure
@@ -230,9 +230,9 @@ extern "system" {
::cuDeviceTotalMem,
::cudaGetDeviceProperties*/
fn cuDeviceGetUuid_v2(
- uuid: *mut cuda_types::CUuuid,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ uuid: *mut cuda_types::cuda::CUuuid,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Return an LUID and device node mask for the device
Return identifying information (\p luid and \p deviceNodeMask) to allow
@@ -261,8 +261,8 @@ extern "system" {
fn cuDeviceGetLuid(
luid: *mut ::core::ffi::c_char,
deviceNodeMask: *mut ::core::ffi::c_uint,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the total amount of memory on the device
Returns in \p *bytes the total amount of memory available on the device
@@ -290,8 +290,8 @@ extern "system" {
::cudaMemGetInfo*/
fn cuDeviceTotalMem_v2(
bytes: *mut usize,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the maximum number of elements allocatable in a 1D linear texture for a given texture element size.
Returns in \p maxWidthInElements the maximum number of texture elements allocatable in a 1D linear texture
@@ -321,10 +321,10 @@ extern "system" {
::cuDeviceTotalMem*/
fn cuDeviceGetTexture1DLinearMaxWidth(
maxWidthInElements: *mut usize,
- format: cuda_types::CUarray_format,
+ format: cuda_types::cuda::CUarray_format,
numChannels: ::core::ffi::c_uint,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns information about the device
Returns in \p *pi the integer value of the attribute \p attrib on device
@@ -546,9 +546,9 @@ extern "system" {
::cudaGetDeviceProperties*/
fn cuDeviceGetAttribute(
pi: *mut ::core::ffi::c_int,
- attrib: cuda_types::CUdevice_attribute,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ attrib: cuda_types::cuda::CUdevice_attribute,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Return NvSciSync attributes that this device can support.
Returns in \p nvSciSyncAttrList, the properties of NvSciSync that
@@ -610,9 +610,9 @@ extern "system" {
::cuWaitExternalSemaphoresAsync*/
fn cuDeviceGetNvSciSyncAttributes(
nvSciSyncAttrList: *mut ::core::ffi::c_void,
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
flags: ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the current memory pool of a device
The memory pool must be local to the specified device.
@@ -628,9 +628,9 @@ extern "system" {
\sa ::cuDeviceGetDefaultMemPool, ::cuDeviceGetMemPool, ::cuMemPoolCreate, ::cuMemPoolDestroy, ::cuMemAllocFromPoolAsync*/
fn cuDeviceSetMemPool(
- dev: cuda_types::CUdevice,
- pool: cuda_types::CUmemoryPool,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ pool: cuda_types::cuda::CUmemoryPool,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the current mempool for a device
Returns the last pool provided to ::cuDeviceSetMemPool for this device
@@ -644,9 +644,9 @@ extern "system" {
\sa ::cuDeviceGetDefaultMemPool, ::cuMemPoolCreate, ::cuDeviceSetMemPool*/
fn cuDeviceGetMemPool(
- pool: *mut cuda_types::CUmemoryPool,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ pool: *mut cuda_types::cuda::CUmemoryPool,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the default mempool of a device
The default mempool of a device contains device memory from that device.
@@ -662,9 +662,9 @@ extern "system" {
\sa ::cuMemAllocAsync, ::cuMemPoolTrimTo, ::cuMemPoolGetAttribute, ::cuMemPoolSetAttribute, cuMemPoolSetAccess, ::cuDeviceGetMemPool, ::cuMemPoolCreate*/
fn cuDeviceGetDefaultMemPool(
- pool_out: *mut cuda_types::CUmemoryPool,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ pool_out: *mut cuda_types::cuda::CUmemoryPool,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns information about the execution affinity support of the device.
Returns in \p *pi whether execution affinity type \p type is supported by device \p dev.
@@ -694,9 +694,9 @@ extern "system" {
::cuDeviceTotalMem*/
fn cuDeviceGetExecAffinitySupport(
pi: *mut ::core::ffi::c_int,
- type_: cuda_types::CUexecAffinityType,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ type_: cuda_types::cuda::CUexecAffinityType,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Blocks until remote writes are visible to the specified scope
Blocks until GPUDirect RDMA writes to the target context via mappings
@@ -725,9 +725,9 @@ extern "system" {
\notefnerr
*/
fn cuFlushGPUDirectRDMAWrites(
- target: cuda_types::CUflushGPUDirectRDMAWritesTarget,
- scope: cuda_types::CUflushGPUDirectRDMAWritesScope,
- ) -> cuda_types::CUresult;
+ target: cuda_types::cuda::CUflushGPUDirectRDMAWritesTarget,
+ scope: cuda_types::cuda::CUflushGPUDirectRDMAWritesScope,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns properties for a selected device
\deprecated
@@ -789,9 +789,9 @@ int textureAlign
::cuDeviceGet,
::cuDeviceTotalMem*/
fn cuDeviceGetProperties(
- prop: *mut cuda_types::CUdevprop,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ prop: *mut cuda_types::cuda::CUdevprop,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the compute capability of the device
\deprecated
@@ -825,8 +825,8 @@ int textureAlign
fn cuDeviceComputeCapability(
major: *mut ::core::ffi::c_int,
minor: *mut ::core::ffi::c_int,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Retain the primary context on the GPU
Retains the primary context on the device.
@@ -874,9 +874,9 @@ int textureAlign
::cuCtxSetLimit,
::cuCtxSynchronize*/
fn cuDevicePrimaryCtxRetain(
- pctx: *mut cuda_types::CUcontext,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ pctx: *mut cuda_types::cuda::CUcontext,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Release the primary context on the GPU
Releases the primary context interop on the device.
@@ -914,7 +914,9 @@ int textureAlign
::cuCtxSetCacheConfig,
::cuCtxSetLimit,
::cuCtxSynchronize*/
- fn cuDevicePrimaryCtxRelease_v2(dev: cuda_types::CUdevice) -> cuda_types::CUresult;
+ fn cuDevicePrimaryCtxRelease_v2(
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Set flags for the primary context
Sets the flags for the primary context on the device overwriting perviously
@@ -1008,9 +1010,9 @@ int textureAlign
::cuCtxSetFlags,
::cudaSetDeviceFlags*/
fn cuDevicePrimaryCtxSetFlags_v2(
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Get the state of the primary context
Returns in \p *flags the flags for the primary context of \p dev, and in
@@ -1035,10 +1037,10 @@ int textureAlign
::cuCtxSetFlags,
::cudaGetDeviceFlags*/
fn cuDevicePrimaryCtxGetState(
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
flags: *mut ::core::ffi::c_uint,
active: *mut ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroy all allocations and reset all state on the primary context
Explicitly destroys and cleans up all resources associated with the current
@@ -1075,7 +1077,9 @@ int textureAlign
::cuCtxSetLimit,
::cuCtxSynchronize,
::cudaDeviceReset*/
- fn cuDevicePrimaryCtxReset_v2(dev: cuda_types::CUdevice) -> cuda_types::CUresult;
+ fn cuDevicePrimaryCtxReset_v2(
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a CUDA context
\note In most cases it is recommended to use ::cuDevicePrimaryCtxRetain.
@@ -1201,10 +1205,10 @@ int textureAlign
::cuCoredumpSetAttribute,
::cuCtxSynchronize*/
fn cuCtxCreate_v2(
- pctx: *mut cuda_types::CUcontext,
+ pctx: *mut cuda_types::cuda::CUcontext,
flags: ::core::ffi::c_uint,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a CUDA context with execution affinity
Creates a new CUDA context with execution affinity and associates it with
@@ -1338,12 +1342,12 @@ int textureAlign
::cuCoredumpSetAttribute,
::CUexecAffinityParam*/
fn cuCtxCreate_v3(
- pctx: *mut cuda_types::CUcontext,
- paramsArray: *mut cuda_types::CUexecAffinityParam,
+ pctx: *mut cuda_types::cuda::CUcontext,
+ paramsArray: *mut cuda_types::cuda::CUexecAffinityParam,
numParams: ::core::ffi::c_int,
flags: ::core::ffi::c_uint,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroy a CUDA context
Destroys the CUDA context specified by \p ctx. The context \p ctx will be
@@ -1385,7 +1389,7 @@ int textureAlign
::cuCtxSetCacheConfig,
::cuCtxSetLimit,
::cuCtxSynchronize*/
- fn cuCtxDestroy_v2(ctx: cuda_types::CUcontext) -> cuda_types::CUresult;
+ fn cuCtxDestroy_v2(ctx: cuda_types::cuda::CUcontext) -> cuda_types::cuda::CUresult;
/** \brief Pushes a context on the current CPU thread
Pushes the given context \p ctx onto the CPU thread's stack of current
@@ -1416,7 +1420,9 @@ int textureAlign
::cuCtxSetCacheConfig,
::cuCtxSetLimit,
::cuCtxSynchronize*/
- fn cuCtxPushCurrent_v2(ctx: cuda_types::CUcontext) -> cuda_types::CUresult;
+ fn cuCtxPushCurrent_v2(
+ ctx: cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Pops the current CUDA context from the current CPU thread.
Pops the current CUDA context from the CPU thread and passes back the
@@ -1447,7 +1453,9 @@ int textureAlign
::cuCtxSetCacheConfig,
::cuCtxSetLimit,
::cuCtxSynchronize*/
- fn cuCtxPopCurrent_v2(pctx: *mut cuda_types::CUcontext) -> cuda_types::CUresult;
+ fn cuCtxPopCurrent_v2(
+ pctx: *mut cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Binds the specified CUDA context to the calling CPU thread
Binds the specified CUDA context to the calling CPU thread.
@@ -1474,7 +1482,7 @@ int textureAlign
::cuCtxCreate,
::cuCtxDestroy,
::cudaSetDevice*/
- fn cuCtxSetCurrent(ctx: cuda_types::CUcontext) -> cuda_types::CUresult;
+ fn cuCtxSetCurrent(ctx: cuda_types::cuda::CUcontext) -> cuda_types::cuda::CUresult;
/** \brief Returns the CUDA context bound to the calling CPU thread.
Returns in \p *pctx the CUDA context bound to the calling CPU thread.
@@ -1494,7 +1502,9 @@ int textureAlign
::cuCtxCreate,
::cuCtxDestroy,
::cudaGetDevice*/
- fn cuCtxGetCurrent(pctx: *mut cuda_types::CUcontext) -> cuda_types::CUresult;
+ fn cuCtxGetCurrent(
+ pctx: *mut cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the device ID for the current context
Returns in \p *device the ordinal of the current context's device.
@@ -1521,7 +1531,9 @@ int textureAlign
::cuCtxSetLimit,
::cuCtxSynchronize,
::cudaGetDevice*/
- fn cuCtxGetDevice(device: *mut cuda_types::CUdevice) -> cuda_types::CUresult;
+ fn cuCtxGetDevice(
+ device: *mut cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the flags for the current context
Returns in \p *flags the flags of the current context. See ::cuCtxCreate
@@ -1547,7 +1559,7 @@ int textureAlign
::cuCtxGetStreamPriorityRange,
::cuCtxSetFlags,
::cudaGetDeviceFlags*/
- fn cuCtxGetFlags(flags: *mut ::core::ffi::c_uint) -> cuda_types::CUresult;
+ fn cuCtxGetFlags(flags: *mut ::core::ffi::c_uint) -> cuda_types::cuda::CUresult;
/** \brief Sets the flags for the current context
Sets the flags for the current context overwriting previously set ones. See
@@ -1574,7 +1586,7 @@ int textureAlign
::cuCtxGetFlags,
::cudaGetDeviceFlags,
::cuDevicePrimaryCtxSetFlags,*/
- fn cuCtxSetFlags(flags: ::core::ffi::c_uint) -> cuda_types::CUresult;
+ fn cuCtxSetFlags(flags: ::core::ffi::c_uint) -> cuda_types::cuda::CUresult;
/** \brief Returns the unique Id associated with the context supplied
Returns in \p ctxId the unique Id which is associated with a given context.
@@ -1603,9 +1615,9 @@ int textureAlign
::cuCtxGetLimit,
::cuCtxPushCurrent*/
fn cuCtxGetId(
- ctx: cuda_types::CUcontext,
+ ctx: cuda_types::cuda::CUcontext,
ctxId: *mut ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Block for a context's tasks to complete
Blocks until the device has completed all preceding requested tasks.
@@ -1632,7 +1644,7 @@ int textureAlign
::cuCtxSetCacheConfig,
::cuCtxSetLimit,
::cudaDeviceSynchronize*/
- fn cuCtxSynchronize() -> cuda_types::CUresult;
+ fn cuCtxSynchronize() -> cuda_types::cuda::CUresult;
/** \brief Set resource limits
Setting \p limit to \p value is a request by the application to update
@@ -1728,7 +1740,10 @@ int textureAlign
::cuCtxSetCacheConfig,
::cuCtxSynchronize,
::cudaDeviceSetLimit*/
- fn cuCtxSetLimit(limit: cuda_types::CUlimit, value: usize) -> cuda_types::CUresult;
+ fn cuCtxSetLimit(
+ limit: cuda_types::cuda::CUlimit,
+ value: usize,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns resource limits
Returns in \p *pvalue the current size of \p limit. The supported
@@ -1769,8 +1784,8 @@ int textureAlign
::cudaDeviceGetLimit*/
fn cuCtxGetLimit(
pvalue: *mut usize,
- limit: cuda_types::CUlimit,
- ) -> cuda_types::CUresult;
+ limit: cuda_types::cuda::CUlimit,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the preferred cache configuration for the current context.
On devices where the L1 cache and shared memory use the same hardware
@@ -1812,8 +1827,8 @@ int textureAlign
::cuFuncSetCacheConfig,
::cudaDeviceGetCacheConfig*/
fn cuCtxGetCacheConfig(
- pconfig: *mut cuda_types::CUfunc_cache,
- ) -> cuda_types::CUresult;
+ pconfig: *mut cuda_types::cuda::CUfunc_cache,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the preferred cache configuration for the current context.
On devices where the L1 cache and shared memory use the same hardware
@@ -1862,7 +1877,9 @@ int textureAlign
::cuFuncSetCacheConfig,
::cudaDeviceSetCacheConfig,
::cuKernelSetCacheConfig*/
- fn cuCtxSetCacheConfig(config: cuda_types::CUfunc_cache) -> cuda_types::CUresult;
+ fn cuCtxSetCacheConfig(
+ config: cuda_types::cuda::CUfunc_cache,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the context's API version.
Returns a version number in \p version corresponding to the capabilities of
@@ -1898,9 +1915,9 @@ int textureAlign
::cuCtxSetLimit,
::cuCtxSynchronize*/
fn cuCtxGetApiVersion(
- ctx: cuda_types::CUcontext,
+ ctx: cuda_types::cuda::CUcontext,
version: *mut ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns numerical values that correspond to the least and
greatest stream priorities.
@@ -1940,7 +1957,7 @@ int textureAlign
fn cuCtxGetStreamPriorityRange(
leastPriority: *mut ::core::ffi::c_int,
greatestPriority: *mut ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Resets all persisting lines in cache to normal status.
::cuCtxResetPersistingL2Cache Resets all persisting lines in cache to normal
@@ -1953,7 +1970,7 @@ int textureAlign
\sa
::CUaccessPolicyWindow*/
- fn cuCtxResetPersistingL2Cache() -> cuda_types::CUresult;
+ fn cuCtxResetPersistingL2Cache() -> cuda_types::cuda::CUresult;
/** \brief Returns the execution affinity setting for the current context.
Returns in \p *pExecAffinity the current value of \p type. The supported
@@ -1975,9 +1992,9 @@ int textureAlign
\sa
::CUexecAffinityParam*/
fn cuCtxGetExecAffinity(
- pExecAffinity: *mut cuda_types::CUexecAffinityParam,
- type_: cuda_types::CUexecAffinityType,
- ) -> cuda_types::CUresult;
+ pExecAffinity: *mut cuda_types::cuda::CUexecAffinityParam,
+ type_: cuda_types::cuda::CUexecAffinityType,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Increment a context's usage-count
\deprecated
@@ -2016,9 +2033,9 @@ int textureAlign
::cuCtxSetLimit,
::cuCtxSynchronize*/
fn cuCtxAttach(
- pctx: *mut cuda_types::CUcontext,
+ pctx: *mut cuda_types::cuda::CUcontext,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Decrement a context's usage-count
\deprecated
@@ -2051,7 +2068,7 @@ int textureAlign
::cuCtxSetCacheConfig,
::cuCtxSetLimit,
::cuCtxSynchronize*/
- fn cuCtxDetach(ctx: cuda_types::CUcontext) -> cuda_types::CUresult;
+ fn cuCtxDetach(ctx: cuda_types::cuda::CUcontext) -> cuda_types::cuda::CUresult;
/** \brief Returns the current shared memory configuration for the current context.
\deprecated
@@ -2093,8 +2110,8 @@ int textureAlign
::cuFuncSetCacheConfig,
::cudaDeviceGetSharedMemConfig*/
fn cuCtxGetSharedMemConfig(
- pConfig: *mut cuda_types::CUsharedconfig,
- ) -> cuda_types::CUresult;
+ pConfig: *mut cuda_types::cuda::CUsharedconfig,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the shared memory configuration for the current context.
\deprecated
@@ -2147,8 +2164,8 @@ int textureAlign
::cuFuncSetCacheConfig,
::cudaDeviceSetSharedMemConfig*/
fn cuCtxSetSharedMemConfig(
- config: cuda_types::CUsharedconfig,
- ) -> cuda_types::CUresult;
+ config: cuda_types::cuda::CUsharedconfig,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Loads a compute module
Takes a filename \p fname and loads the corresponding module \p module into
@@ -2187,9 +2204,9 @@ int textureAlign
::cuModuleLoadFatBinary,
::cuModuleUnload*/
fn cuModuleLoad(
- module: *mut cuda_types::CUmodule,
+ module: *mut cuda_types::cuda::CUmodule,
fname: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Load a module's data
Takes a pointer \p image and loads the corresponding module \p module into
@@ -2223,9 +2240,9 @@ int textureAlign
::cuModuleLoadFatBinary,
::cuModuleUnload*/
fn cuModuleLoadData(
- module: *mut cuda_types::CUmodule,
+ module: *mut cuda_types::cuda::CUmodule,
image: *const ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Load a module's data with options
Takes a pointer \p image and loads the corresponding module \p module into
@@ -2262,12 +2279,12 @@ int textureAlign
::cuModuleLoadFatBinary,
::cuModuleUnload*/
fn cuModuleLoadDataEx(
- module: *mut cuda_types::CUmodule,
+ module: *mut cuda_types::cuda::CUmodule,
image: *const ::core::ffi::c_void,
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Load a module's data
Takes a pointer \p fatCubin and loads the corresponding module \p module
@@ -2308,9 +2325,9 @@ int textureAlign
::cuModuleLoadDataEx,
::cuModuleUnload*/
fn cuModuleLoadFatBinary(
- module: *mut cuda_types::CUmodule,
+ module: *mut cuda_types::cuda::CUmodule,
fatCubin: *const ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unloads a module
Unloads a module \p hmod from the current context. Attempting to unload
@@ -2336,7 +2353,7 @@ int textureAlign
::cuModuleLoadData,
::cuModuleLoadDataEx,
::cuModuleLoadFatBinary*/
- fn cuModuleUnload(hmod: cuda_types::CUmodule) -> cuda_types::CUresult;
+ fn cuModuleUnload(hmod: cuda_types::cuda::CUmodule) -> cuda_types::cuda::CUresult;
/** \brief Query lazy loading mode
Returns lazy loading mode
@@ -2352,8 +2369,8 @@ int textureAlign
\sa
::cuModuleLoad,*/
fn cuModuleGetLoadingMode(
- mode: *mut cuda_types::CUmoduleLoadingMode,
- ) -> cuda_types::CUresult;
+ mode: *mut cuda_types::cuda::CUmoduleLoadingMode,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a function handle
Returns in \p *hfunc the handle of the function of name \p name located in
@@ -2381,10 +2398,10 @@ int textureAlign
::cuModuleLoadFatBinary,
::cuModuleUnload*/
fn cuModuleGetFunction(
- hfunc: *mut cuda_types::CUfunction,
- hmod: cuda_types::CUmodule,
+ hfunc: *mut cuda_types::cuda::CUfunction,
+ hmod: cuda_types::cuda::CUmodule,
name: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the number of functions within a module
Returns in \p count the number of functions in \p mod.
@@ -2398,8 +2415,8 @@ int textureAlign
::CUDA_ERROR_INVALID_VALUE*/
fn cuModuleGetFunctionCount(
count: *mut ::core::ffi::c_uint,
- mod_: cuda_types::CUmodule,
- ) -> cuda_types::CUresult;
+ mod_: cuda_types::cuda::CUmodule,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the function handles within a module.
Returns in \p functions a maximum number of \p numFunctions function handles within \p mod. When
@@ -2424,10 +2441,10 @@ int textureAlign
::cuFuncIsLoaded,
::cuFuncLoad*/
fn cuModuleEnumerateFunctions(
- functions: *mut cuda_types::CUfunction,
+ functions: *mut cuda_types::cuda::CUfunction,
numFunctions: ::core::ffi::c_uint,
- mod_: cuda_types::CUmodule,
- ) -> cuda_types::CUresult;
+ mod_: cuda_types::cuda::CUmodule,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a global pointer from a module
Returns in \p *dptr and \p *bytes the base pointer and size of the
@@ -2460,11 +2477,11 @@ int textureAlign
::cudaGetSymbolAddress,
::cudaGetSymbolSize*/
fn cuModuleGetGlobal_v2(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytes: *mut usize,
- hmod: cuda_types::CUmodule,
+ hmod: cuda_types::cuda::CUmodule,
name: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a pending JIT linker invocation.
If the call is successful, the caller owns the returned CUlinkState, which
@@ -2505,10 +2522,10 @@ int textureAlign
::cuLinkDestroy*/
fn cuLinkCreate_v2(
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
- stateOut: *mut cuda_types::CUlinkState,
- ) -> cuda_types::CUresult;
+ stateOut: *mut cuda_types::cuda::CUlinkState,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Add an input to a pending linker invocation
Ownership of \p data is retained by the caller. No reference is retained to any
@@ -2545,15 +2562,15 @@ int textureAlign
::cuLinkComplete,
::cuLinkDestroy*/
fn cuLinkAddData_v2(
- state: cuda_types::CUlinkState,
- type_: cuda_types::CUjitInputType,
+ state: cuda_types::cuda::CUlinkState,
+ type_: cuda_types::cuda::CUjitInputType,
data: *mut ::core::ffi::c_void,
size: usize,
name: *const ::core::ffi::c_char,
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Add a file input to a pending linker invocation
No reference is retained to any inputs after this call returns.
@@ -2591,13 +2608,13 @@ int textureAlign
::cuLinkComplete,
::cuLinkDestroy*/
fn cuLinkAddFile_v2(
- state: cuda_types::CUlinkState,
- type_: cuda_types::CUjitInputType,
+ state: cuda_types::cuda::CUlinkState,
+ type_: cuda_types::cuda::CUjitInputType,
path: *const ::core::ffi::c_char,
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Complete a pending linker invocation
Completes the pending linker action and returns the cubin image for the linked
@@ -2620,10 +2637,10 @@ int textureAlign
::cuLinkDestroy,
::cuModuleLoadData*/
fn cuLinkComplete(
- state: cuda_types::CUlinkState,
+ state: cuda_types::cuda::CUlinkState,
cubinOut: *mut *mut ::core::ffi::c_void,
sizeOut: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys state for a JIT linker invocation.
\param state State object for the linker invocation
@@ -2633,7 +2650,7 @@ int textureAlign
::CUDA_ERROR_INVALID_HANDLE
\sa ::cuLinkCreate*/
- fn cuLinkDestroy(state: cuda_types::CUlinkState) -> cuda_types::CUresult;
+ fn cuLinkDestroy(state: cuda_types::cuda::CUlinkState) -> cuda_types::cuda::CUresult;
/** \brief Returns a handle to a texture reference
\deprecated
@@ -2667,10 +2684,10 @@ int textureAlign
::cuModuleLoadFatBinary,
::cuModuleUnload*/
fn cuModuleGetTexRef(
- pTexRef: *mut cuda_types::CUtexref,
- hmod: cuda_types::CUmodule,
+ pTexRef: *mut cuda_types::cuda::CUtexref,
+ hmod: cuda_types::cuda::CUmodule,
name: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a handle to a surface reference
\deprecated
@@ -2702,10 +2719,10 @@ int textureAlign
::cuModuleLoadFatBinary,
::cuModuleUnload*/
fn cuModuleGetSurfRef(
- pSurfRef: *mut cuda_types::CUsurfref,
- hmod: cuda_types::CUmodule,
+ pSurfRef: *mut cuda_types::cuda::CUsurfref,
+ hmod: cuda_types::cuda::CUmodule,
name: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Load a library with specified code and options
Takes a pointer \p code and loads the corresponding library \p library based on
@@ -2758,15 +2775,15 @@ int textureAlign
::cuModuleLoadData,
::cuModuleLoadDataEx*/
fn cuLibraryLoadData(
- library: *mut cuda_types::CUlibrary,
+ library: *mut cuda_types::cuda::CUlibrary,
code: *const ::core::ffi::c_void,
- jitOptions: *mut cuda_types::CUjit_option,
+ jitOptions: *mut cuda_types::cuda::CUjit_option,
jitOptionsValues: *mut *mut ::core::ffi::c_void,
numJitOptions: ::core::ffi::c_uint,
- libraryOptions: *mut cuda_types::CUlibraryOption,
+ libraryOptions: *mut cuda_types::cuda::CUlibraryOption,
libraryOptionValues: *mut *mut ::core::ffi::c_void,
numLibraryOptions: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Load a library with specified file and options
Takes a pointer \p code and loads the corresponding library \p library based on
@@ -2819,15 +2836,15 @@ int textureAlign
::cuModuleLoadData,
::cuModuleLoadDataEx*/
fn cuLibraryLoadFromFile(
- library: *mut cuda_types::CUlibrary,
+ library: *mut cuda_types::cuda::CUlibrary,
fileName: *const ::core::ffi::c_char,
- jitOptions: *mut cuda_types::CUjit_option,
+ jitOptions: *mut cuda_types::cuda::CUjit_option,
jitOptionsValues: *mut *mut ::core::ffi::c_void,
numJitOptions: ::core::ffi::c_uint,
- libraryOptions: *mut cuda_types::CUlibraryOption,
+ libraryOptions: *mut cuda_types::cuda::CUlibraryOption,
libraryOptionValues: *mut *mut ::core::ffi::c_void,
numLibraryOptions: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unloads a library
Unloads the library specified with \p library
@@ -2843,7 +2860,9 @@ int textureAlign
\sa ::cuLibraryLoadData,
::cuLibraryLoadFromFile,
::cuModuleUnload*/
- fn cuLibraryUnload(library: cuda_types::CUlibrary) -> cuda_types::CUresult;
+ fn cuLibraryUnload(
+ library: cuda_types::cuda::CUlibrary,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a kernel handle
Returns in \p pKernel the handle of the kernel with name \p name located in library \p library.
@@ -2868,10 +2887,10 @@ int textureAlign
::cuLibraryGetModule,
::cuModuleGetFunction*/
fn cuLibraryGetKernel(
- pKernel: *mut cuda_types::CUkernel,
- library: cuda_types::CUlibrary,
+ pKernel: *mut cuda_types::cuda::CUkernel,
+ library: cuda_types::cuda::CUlibrary,
name: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the number of kernels within a library
Returns in \p count the number of kernels in \p lib.
@@ -2885,8 +2904,8 @@ int textureAlign
::CUDA_ERROR_INVALID_VALUE*/
fn cuLibraryGetKernelCount(
count: *mut ::core::ffi::c_uint,
- lib: cuda_types::CUlibrary,
- ) -> cuda_types::CUresult;
+ lib: cuda_types::cuda::CUlibrary,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Retrieve the kernel handles within a library.
Returns in \p kernels a maximum number of \p numKernels kernel handles within \p lib.
@@ -2903,10 +2922,10 @@ int textureAlign
\sa ::cuLibraryGetKernelCount*/
fn cuLibraryEnumerateKernels(
- kernels: *mut cuda_types::CUkernel,
+ kernels: *mut cuda_types::cuda::CUkernel,
numKernels: ::core::ffi::c_uint,
- lib: cuda_types::CUlibrary,
- ) -> cuda_types::CUresult;
+ lib: cuda_types::cuda::CUlibrary,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a module handle
Returns in \p pMod the module handle associated with the current context located in
@@ -2930,9 +2949,9 @@ int textureAlign
::cuLibraryUnload,
::cuModuleGetFunction*/
fn cuLibraryGetModule(
- pMod: *mut cuda_types::CUmodule,
- library: cuda_types::CUlibrary,
- ) -> cuda_types::CUresult;
+ pMod: *mut cuda_types::cuda::CUmodule,
+ library: cuda_types::cuda::CUlibrary,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a function handle
Returns in \p pFunc the handle of the function for the requested kernel \p kernel and
@@ -2958,9 +2977,9 @@ int textureAlign
::cuLibraryGetModule,
::cuModuleGetFunction*/
fn cuKernelGetFunction(
- pFunc: *mut cuda_types::CUfunction,
- kernel: cuda_types::CUkernel,
- ) -> cuda_types::CUresult;
+ pFunc: *mut cuda_types::cuda::CUfunction,
+ kernel: cuda_types::cuda::CUkernel,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a global device pointer
Returns in \p *dptr and \p *bytes the base pointer and size of the global with
@@ -2990,11 +3009,11 @@ int textureAlign
::cuLibraryGetModule,
cuModuleGetGlobal*/
fn cuLibraryGetGlobal(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytes: *mut usize,
- library: cuda_types::CUlibrary,
+ library: cuda_types::cuda::CUlibrary,
name: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a pointer to managed memory
Returns in \p *dptr and \p *bytes the base pointer and size of the managed memory with
@@ -3024,11 +3043,11 @@ int textureAlign
::cuLibraryLoadFromFile,
::cuLibraryUnload*/
fn cuLibraryGetManaged(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytes: *mut usize,
- library: cuda_types::CUlibrary,
+ library: cuda_types::cuda::CUlibrary,
name: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a pointer to a unified function
Returns in \p *fptr the function pointer to a unified function denoted by \p symbol.
@@ -3053,9 +3072,9 @@ int textureAlign
::cuLibraryUnload*/
fn cuLibraryGetUnifiedFunction(
fptr: *mut *mut ::core::ffi::c_void,
- library: cuda_types::CUlibrary,
+ library: cuda_types::cuda::CUlibrary,
symbol: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns information about a kernel
Returns in \p *pi the integer value of the attribute \p attrib for the kernel
@@ -3142,10 +3161,10 @@ int textureAlign
::cuFuncGetAttribute*/
fn cuKernelGetAttribute(
pi: *mut ::core::ffi::c_int,
- attrib: cuda_types::CUfunction_attribute,
- kernel: cuda_types::CUkernel,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ attrib: cuda_types::cuda::CUfunction_attribute,
+ kernel: cuda_types::cuda::CUkernel,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets information about a kernel
This call sets the value of a specified attribute \p attrib on the kernel \p kernel
@@ -3221,11 +3240,11 @@ int textureAlign
::cuModuleGetFunction,
::cuFuncSetAttribute*/
fn cuKernelSetAttribute(
- attrib: cuda_types::CUfunction_attribute,
+ attrib: cuda_types::cuda::CUfunction_attribute,
val: ::core::ffi::c_int,
- kernel: cuda_types::CUkernel,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ kernel: cuda_types::cuda::CUkernel,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the preferred cache configuration for a device kernel.
On devices where the L1 cache and shared memory use the same hardware
@@ -3282,10 +3301,10 @@ int textureAlign
::cuCtxSetCacheConfig,
::cuLaunchKernel*/
fn cuKernelSetCacheConfig(
- kernel: cuda_types::CUkernel,
- config: cuda_types::CUfunc_cache,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ kernel: cuda_types::cuda::CUkernel,
+ config: cuda_types::cuda::CUfunc_cache,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the function name for a ::CUkernel handle
Returns in \p **name the function name associated with the kernel handle \p hfunc .
@@ -3305,8 +3324,8 @@ int textureAlign
*/
fn cuKernelGetName(
name: *mut *const ::core::ffi::c_char,
- hfunc: cuda_types::CUkernel,
- ) -> cuda_types::CUresult;
+ hfunc: cuda_types::cuda::CUkernel,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the offset and size of a kernel parameter in the device-side parameter layout
Queries the kernel parameter at \p paramIndex into \p kernel's list of parameters, and returns
@@ -3328,11 +3347,11 @@ int textureAlign
\sa ::cuFuncGetParamInfo*/
fn cuKernelGetParamInfo(
- kernel: cuda_types::CUkernel,
+ kernel: cuda_types::cuda::CUkernel,
paramIndex: usize,
paramOffset: *mut usize,
paramSize: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets free and total memory
Returns in \p *total the total amount of memory available to the the current context.
@@ -3372,7 +3391,10 @@ int textureAlign
::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaMemGetInfo*/
- fn cuMemGetInfo_v2(free: *mut usize, total: *mut usize) -> cuda_types::CUresult;
+ fn cuMemGetInfo_v2(
+ free: *mut usize,
+ total: *mut usize,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allocates device memory
Allocates \p bytesize bytes of linear memory on the device and returns in
@@ -3404,9 +3426,9 @@ int textureAlign
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaMalloc*/
fn cuMemAlloc_v2(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytesize: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allocates pitched device memory
Allocates at least \p WidthInBytes * \p Height bytes of linear memory on
@@ -3466,12 +3488,12 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaMallocPitch*/
fn cuMemAllocPitch_v2(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
pPitch: *mut usize,
WidthInBytes: usize,
Height: usize,
ElementSizeBytes: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Frees device memory
Frees the memory space pointed to by \p dptr, which must have been returned
@@ -3504,7 +3526,7 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaFree*/
- fn cuMemFree_v2(dptr: cuda_types::CUdeviceptr) -> cuda_types::CUresult;
+ fn cuMemFree_v2(dptr: cuda_types::cuda::CUdeviceptr) -> cuda_types::cuda::CUresult;
/** \brief Get information on memory allocations
Returns the base address in \p *pbase and size in \p *psize of the
@@ -3536,10 +3558,10 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32*/
fn cuMemGetAddressRange_v2(
- pbase: *mut cuda_types::CUdeviceptr,
+ pbase: *mut cuda_types::cuda::CUdeviceptr,
psize: *mut usize,
- dptr: cuda_types::CUdeviceptr,
- ) -> cuda_types::CUresult;
+ dptr: cuda_types::cuda::CUdeviceptr,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allocates page-locked host memory
Allocates \p bytesize bytes of host memory that is page-locked and
@@ -3590,7 +3612,7 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
fn cuMemAllocHost_v2(
pp: *mut *mut ::core::ffi::c_void,
bytesize: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Frees page-locked host memory
Frees the memory space pointed to by \p p, which must have been returned by
@@ -3617,7 +3639,7 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaFreeHost*/
- fn cuMemFreeHost(p: *mut ::core::ffi::c_void) -> cuda_types::CUresult;
+ fn cuMemFreeHost(p: *mut ::core::ffi::c_void) -> cuda_types::cuda::CUresult;
/** \brief Allocates page-locked host memory
Allocates \p bytesize bytes of host memory that is page-locked and accessible
@@ -3701,7 +3723,7 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
pp: *mut *mut ::core::ffi::c_void,
bytesize: usize,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Passes back device pointer of mapped pinned memory
Passes back the device pointer \p pdptr corresponding to the mapped, pinned
@@ -3752,10 +3774,10 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaHostGetDevicePointer*/
fn cuMemHostGetDevicePointer_v2(
- pdptr: *mut cuda_types::CUdeviceptr,
+ pdptr: *mut cuda_types::cuda::CUdeviceptr,
p: *mut ::core::ffi::c_void,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Passes back flags that were used for a pinned allocation
Passes back the flags \p pFlags that were specified when allocating
@@ -3782,7 +3804,7 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
fn cuMemHostGetFlags(
pFlags: *mut ::core::ffi::c_uint,
p: *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allocates memory that will be automatically managed by the Unified Memory system
Allocates \p bytesize bytes of managed memory on the device and returns in
@@ -3890,10 +3912,10 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuDeviceGetAttribute, ::cuStreamAttachMemAsync,
::cudaMallocManaged*/
fn cuMemAllocManaged(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytesize: usize,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Registers a callback function to receive async notifications
Registers \p callbackFunc to receive async notifications.
@@ -3928,11 +3950,11 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
\sa
::cuDeviceUnregisterAsyncNotification*/
fn cuDeviceRegisterAsyncNotification(
- device: cuda_types::CUdevice,
- callbackFunc: cuda_types::CUasyncCallback,
+ device: cuda_types::cuda::CUdevice,
+ callbackFunc: cuda_types::cuda::CUasyncCallback,
userData: *mut ::core::ffi::c_void,
- callback: *mut cuda_types::CUasyncCallbackHandle,
- ) -> cuda_types::CUresult;
+ callback: *mut cuda_types::cuda::CUasyncCallbackHandle,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unregisters an async notification callback
Unregisters \p callback so that the corresponding callback function will stop receiving
@@ -3953,9 +3975,9 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
\sa
::cuDeviceRegisterAsyncNotification*/
fn cuDeviceUnregisterAsyncNotification(
- device: cuda_types::CUdevice,
- callback: cuda_types::CUasyncCallbackHandle,
- ) -> cuda_types::CUresult;
+ device: cuda_types::cuda::CUdevice,
+ callback: cuda_types::cuda::CUasyncCallbackHandle,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a handle to a compute device
Returns in \p *device a device handle given a PCI bus ID string.
@@ -3982,9 +4004,9 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuDeviceGetPCIBusId,
::cudaDeviceGetByPCIBusId*/
fn cuDeviceGetByPCIBusId(
- dev: *mut cuda_types::CUdevice,
+ dev: *mut cuda_types::cuda::CUdevice,
pciBusId: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a PCI Bus Id string for the device
Returns an ASCII string identifying the device \p dev in the NULL-terminated
@@ -4016,8 +4038,8 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
fn cuDeviceGetPCIBusId(
pciBusId: *mut ::core::ffi::c_char,
len: ::core::ffi::c_int,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets an interprocess handle for a previously allocated event
Takes as input a previously allocated event. This event must have been
@@ -4062,9 +4084,9 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuIpcCloseMemHandle,
::cudaIpcGetEventHandle*/
fn cuIpcGetEventHandle(
- pHandle: *mut cuda_types::CUipcEventHandle,
- event: cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ pHandle: *mut cuda_types::cuda::CUipcEventHandle,
+ event: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Opens an interprocess event handle for use in the current process
Opens an interprocess event handle exported from another process with
@@ -4104,9 +4126,9 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuIpcCloseMemHandle,
::cudaIpcOpenEventHandle*/
fn cuIpcOpenEventHandle(
- phEvent: *mut cuda_types::CUevent,
- handle: cuda_types::CUipcEventHandle,
- ) -> cuda_types::CUresult;
+ phEvent: *mut cuda_types::cuda::CUevent,
+ handle: cuda_types::cuda::CUipcEventHandle,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets an interprocess memory handle for an existing device memory
allocation
@@ -4147,9 +4169,9 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuIpcCloseMemHandle,
::cudaIpcGetMemHandle*/
fn cuIpcGetMemHandle(
- pHandle: *mut cuda_types::CUipcMemHandle,
- dptr: cuda_types::CUdeviceptr,
- ) -> cuda_types::CUresult;
+ pHandle: *mut cuda_types::cuda::CUipcMemHandle,
+ dptr: cuda_types::cuda::CUdeviceptr,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Opens an interprocess memory handle exported from another process
and returns a device pointer usable in the local process.
@@ -4207,10 +4229,10 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuDeviceCanAccessPeer,
::cudaIpcOpenMemHandle*/
fn cuIpcOpenMemHandle_v2(
- pdptr: *mut cuda_types::CUdeviceptr,
- handle: cuda_types::CUipcMemHandle,
+ pdptr: *mut cuda_types::cuda::CUdeviceptr,
+ handle: cuda_types::cuda::CUipcMemHandle,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Attempts to close memory mapped with ::cuIpcOpenMemHandle
Decrements the reference count of the memory returned by ::cuIpcOpenMemHandle by 1.
@@ -4243,7 +4265,9 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuIpcGetMemHandle,
::cuIpcOpenMemHandle,
::cudaIpcCloseMemHandle*/
- fn cuIpcCloseMemHandle(dptr: cuda_types::CUdeviceptr) -> cuda_types::CUresult;
+ fn cuIpcCloseMemHandle(
+ dptr: cuda_types::cuda::CUdeviceptr,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Registers an existing host memory range for use by CUDA
Page-locks the memory range specified by \p p and \p bytesize and maps it
@@ -4335,7 +4359,7 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
p: *mut ::core::ffi::c_void,
bytesize: usize,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unregisters a memory range that was registered with cuMemHostRegister.
Unmaps the memory range whose base address is specified by \p p, and makes
@@ -4358,7 +4382,7 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
\sa
::cuMemHostRegister,
::cudaHostUnregister*/
- fn cuMemHostUnregister(p: *mut ::core::ffi::c_void) -> cuda_types::CUresult;
+ fn cuMemHostUnregister(p: *mut ::core::ffi::c_void) -> cuda_types::cuda::CUresult;
/** \brief Copies memory
Copies data between two pointers.
@@ -4396,10 +4420,10 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cudaMemcpyToSymbol,
::cudaMemcpyFromSymbol*/
fn cuMemcpy_ptds(
- dst: cuda_types::CUdeviceptr,
- src: cuda_types::CUdeviceptr,
+ dst: cuda_types::cuda::CUdeviceptr,
+ src: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies device memory between two contexts
Copies from device memory in one context to device memory in another
@@ -4427,12 +4451,12 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuMemcpy3DPeerAsync,
::cudaMemcpyPeer*/
fn cuMemcpyPeer_ptds(
- dstDevice: cuda_types::CUdeviceptr,
- dstContext: cuda_types::CUcontext,
- srcDevice: cuda_types::CUdeviceptr,
- srcContext: cuda_types::CUcontext,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ dstContext: cuda_types::cuda::CUcontext,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
+ srcContext: cuda_types::cuda::CUcontext,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Host to Device
Copies from host memory to device memory. \p dstDevice and \p srcHost are
@@ -4466,10 +4490,10 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cudaMemcpy,
::cudaMemcpyToSymbol*/
fn cuMemcpyHtoD_v2_ptds(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Device to Host
Copies from device to host memory. \p dstHost and \p srcDevice specify the
@@ -4504,9 +4528,9 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cudaMemcpyFromSymbol*/
fn cuMemcpyDtoH_v2_ptds(
dstHost: *mut ::core::ffi::c_void,
- srcDevice: cuda_types::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Device to Device
Copies from device memory to device memory. \p dstDevice and \p srcDevice
@@ -4540,10 +4564,10 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cudaMemcpyToSymbol,
::cudaMemcpyFromSymbol*/
fn cuMemcpyDtoD_v2_ptds(
- dstDevice: cuda_types::CUdeviceptr,
- srcDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Device to Array
Copies from device memory to a 1D CUDA array. \p dstArray and \p dstOffset
@@ -4577,11 +4601,11 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaMemcpyToArray*/
fn cuMemcpyDtoA_v2_ptds(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
- srcDevice: cuda_types::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Array to Device
Copies from one 1D CUDA array to device memory. \p dstDevice specifies the
@@ -4617,11 +4641,11 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaMemcpyFromArray*/
fn cuMemcpyAtoD_v2_ptds(
- dstDevice: cuda_types::CUdeviceptr,
- srcArray: cuda_types::CUarray,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Host to Array
Copies from host memory to a 1D CUDA array. \p dstArray and \p dstOffset
@@ -4656,11 +4680,11 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaMemcpyToArray*/
fn cuMemcpyHtoA_v2_ptds(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Array to Host
Copies from one 1D CUDA array to host memory. \p dstHost specifies the base
@@ -4696,10 +4720,10 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cudaMemcpyFromArray*/
fn cuMemcpyAtoH_v2_ptds(
dstHost: *mut ::core::ffi::c_void,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Array to Array
Copies from one 1D CUDA array to another. \p dstArray and \p srcArray
@@ -4737,12 +4761,12 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaMemcpyArrayToArray*/
fn cuMemcpyAtoA_v2_ptds(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
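The hunks above only retarget the type paths to the new cuda_types::cuda module, but together they cover the whole synchronous 1D copy surface. As a hypothetical orientation sketch (not part of this patch): the plain _v2 symbols take the same arguments as the _ptds per-thread-default-stream variants declared here, and could be driven from Rust roughly as below, assuming cuda_types is available as a dependency, a context is already current, and the driver library (or ZLUDA's replacement) resolves the symbols when linked as "cuda". Error handling is elided; every call returns a CUresult that real code should check.

use cuda_types::cuda::*;

#[link(name = "cuda")]
extern "C" {
    // Same signatures as the declarations above, minus the _ptds suffix.
    fn cuMemAlloc_v2(dptr: *mut CUdeviceptr, bytesize: usize) -> CUresult;
    fn cuMemcpyHtoD_v2(
        dstDevice: CUdeviceptr,
        srcHost: *const ::core::ffi::c_void,
        ByteCount: usize,
    ) -> CUresult;
    fn cuMemcpyDtoH_v2(
        dstHost: *mut ::core::ffi::c_void,
        srcDevice: CUdeviceptr,
        ByteCount: usize,
    ) -> CUresult;
    fn cuMemFree_v2(dptr: CUdeviceptr) -> CUresult;
}

// Round-trips a host buffer through device memory; assumes a current context.
unsafe fn roundtrip(host: &mut [u8]) {
    let mut dptr: CUdeviceptr = std::mem::zeroed();
    let _ = cuMemAlloc_v2(&mut dptr, host.len());
    let _ = cuMemcpyHtoD_v2(dptr, host.as_ptr().cast(), host.len());
    let _ = cuMemcpyDtoH_v2(host.as_mut_ptr().cast(), dptr, host.len());
    let _ = cuMemFree_v2(dptr);
}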
/** \brief Copies memory for 2D arrays
Perform a 2D memory copy according to the parameters specified in \p pCopy.
@@ -4904,8 +4928,8 @@ CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes;
::cudaMemcpy2DToArray,
::cudaMemcpy2DFromArray*/
fn cuMemcpy2D_v2_ptds(
- pCopy: *const cuda_types::CUDA_MEMCPY2D,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory for 2D arrays
Perform a 2D memory copy according to the parameters specified in \p pCopy.
@@ -5065,8 +5089,8 @@ CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes;
::cudaMemcpy2DToArray,
::cudaMemcpy2DFromArray*/
fn cuMemcpy2DUnaligned_v2_ptds(
- pCopy: *const cuda_types::CUDA_MEMCPY2D,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory for 3D arrays
Perform a 3D memory copy according to the parameters specified in
@@ -5233,8 +5257,8 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaMemcpy3D*/
fn cuMemcpy3D_v2_ptds(
- pCopy: *const cuda_types::CUDA_MEMCPY3D,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory between contexts
Perform a 3D memory copy according to the parameters specified in
@@ -5256,8 +5280,8 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemcpy3DPeerAsync,
::cudaMemcpy3DPeer*/
fn cuMemcpy3DPeer_ptds(
- pCopy: *const cuda_types::CUDA_MEMCPY3D_PEER,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D_PEER,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory asynchronously
Copies data between two pointers.
@@ -5300,11 +5324,11 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cudaMemcpyToSymbolAsync,
::cudaMemcpyFromSymbolAsync*/
fn cuMemcpyAsync_ptsz(
- dst: cuda_types::CUdeviceptr,
- src: cuda_types::CUdeviceptr,
+ dst: cuda_types::cuda::CUdeviceptr,
+ src: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies device memory between two contexts asynchronously.
Copies from device memory in one context to device memory in another
@@ -5335,13 +5359,13 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemcpy3DPeerAsync,
::cudaMemcpyPeerAsync*/
fn cuMemcpyPeerAsync_ptsz(
- dstDevice: cuda_types::CUdeviceptr,
- dstContext: cuda_types::CUcontext,
- srcDevice: cuda_types::CUdeviceptr,
- srcContext: cuda_types::CUcontext,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ dstContext: cuda_types::cuda::CUcontext,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
+ srcContext: cuda_types::cuda::CUcontext,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Host to Device
Copies from host memory to device memory. \p dstDevice and \p srcHost are
@@ -5380,11 +5404,11 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cudaMemcpyAsync,
::cudaMemcpyToSymbolAsync*/
fn cuMemcpyHtoDAsync_v2_ptsz(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Device to Host
Copies from device to host memory. \p dstHost and \p srcDevice specify the
@@ -5424,10 +5448,10 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cudaMemcpyFromSymbolAsync*/
fn cuMemcpyDtoHAsync_v2_ptsz(
dstHost: *mut ::core::ffi::c_void,
- srcDevice: cuda_types::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Device to Device
Copies from device memory to device memory. \p dstDevice and \p srcDevice
@@ -5466,11 +5490,11 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cudaMemcpyToSymbolAsync,
::cudaMemcpyFromSymbolAsync*/
fn cuMemcpyDtoDAsync_v2_ptsz(
- dstDevice: cuda_types::CUdeviceptr,
- srcDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Host to Array
Copies from host memory to a 1D CUDA array. \p dstArray and \p dstOffset
@@ -5510,12 +5534,12 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemcpyToArrayAsync*/
fn cuMemcpyHtoAAsync_v2_ptsz(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Array to Host
Copies from one 1D CUDA array to host memory. \p dstHost specifies the base
@@ -5556,11 +5580,11 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cudaMemcpyFromArrayAsync*/
fn cuMemcpyAtoHAsync_v2_ptsz(
dstHost: *mut ::core::ffi::c_void,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory for 2D arrays
Perform a 2D memory copy according to the parameters specified in \p pCopy.
@@ -5727,9 +5751,9 @@ CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes;
::cudaMemcpy2DToArrayAsync,
::cudaMemcpy2DFromArrayAsync*/
fn cuMemcpy2DAsync_v2_ptsz(
- pCopy: *const cuda_types::CUDA_MEMCPY2D,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory for 3D arrays
Perform a 3D memory copy according to the parameters specified in
@@ -5901,9 +5925,9 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemcpy3DAsync*/
fn cuMemcpy3DAsync_v2_ptsz(
- pCopy: *const cuda_types::CUDA_MEMCPY3D,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory between contexts asynchronously.
Perform a 3D memory copy according to the parameters specified in
@@ -5927,9 +5951,9 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemcpy3DPeerAsync,
::cudaMemcpy3DPeerAsync*/
fn cuMemcpy3DPeerAsync_ptsz(
- pCopy: *const cuda_types::CUDA_MEMCPY3D_PEER,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D_PEER,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
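The asynchronous variants add a trailing hStream parameter (and the _ptsz suffix); work within a single stream still executes in submission order, so a copy in, dependent work, and the copy back can be queued back to back and waited on once. A minimal sketch against the plain symbol names, under the same assumptions as the earlier sketch; for the copies to be truly asynchronous the host buffer should be page-locked (cuMemHostAlloc).

use cuda_types::cuda::*;

#[link(name = "cuda")]
extern "C" {
    fn cuStreamCreate(phStream: *mut CUstream, Flags: ::core::ffi::c_uint) -> CUresult;
    fn cuMemcpyHtoDAsync_v2(
        dstDevice: CUdeviceptr,
        srcHost: *const ::core::ffi::c_void,
        ByteCount: usize,
        hStream: CUstream,
    ) -> CUresult;
    fn cuMemcpyDtoHAsync_v2(
        dstHost: *mut ::core::ffi::c_void,
        srcDevice: CUdeviceptr,
        ByteCount: usize,
        hStream: CUstream,
    ) -> CUresult;
    fn cuStreamSynchronize(hStream: CUstream) -> CUresult;
    fn cuStreamDestroy_v2(hStream: CUstream) -> CUresult;
}

// Stages a buffer to dptr and back on a private stream, then waits once.
unsafe fn async_roundtrip(dptr: CUdeviceptr, host: &mut [u8]) {
    let mut stream: CUstream = std::mem::zeroed();
    let _ = cuStreamCreate(&mut stream, 0); // 0 == CU_STREAM_DEFAULT
    let _ = cuMemcpyHtoDAsync_v2(dptr, host.as_ptr().cast(), host.len(), stream);
    let _ = cuMemcpyDtoHAsync_v2(host.as_mut_ptr().cast(), dptr, host.len(), stream);
    let _ = cuStreamSynchronize(stream); // both copies were issued in order
    let _ = cuStreamDestroy_v2(stream);
}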
/** \brief Initializes device memory
Sets the memory range of \p N 8-bit values to the specified value
@@ -5962,10 +5986,10 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemset*/
fn cuMemsetD8_v2_ptds(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
uc: ::core::ffi::c_uchar,
N: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Initializes device memory
Sets the memory range of \p N 16-bit values to the specified value
@@ -5998,10 +6022,10 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemset*/
fn cuMemsetD16_v2_ptds(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
us: ::core::ffi::c_ushort,
N: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Initializes device memory
Sets the memory range of \p N 32-bit values to the specified value
@@ -6034,10 +6058,10 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32Async,
::cudaMemset*/
fn cuMemsetD32_v2_ptds(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
ui: ::core::ffi::c_uint,
N: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Initializes device memory
Sets the 2D memory range of \p Width 8-bit values to the specified value
@@ -6075,12 +6099,12 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemset2D*/
fn cuMemsetD2D8_v2_ptds(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
uc: ::core::ffi::c_uchar,
Width: usize,
Height: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Initializes device memory
Sets the 2D memory range of \p Width 16-bit values to the specified value
@@ -6119,12 +6143,12 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemset2D*/
fn cuMemsetD2D16_v2_ptds(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
us: ::core::ffi::c_ushort,
Width: usize,
Height: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Initializes device memory
Sets the 2D memory range of \p Width 32-bit values to the specified value
@@ -6163,12 +6187,12 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemset2D*/
fn cuMemsetD2D32_v2_ptds(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
ui: ::core::ffi::c_uint,
Width: usize,
Height: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets device memory
Sets the memory range of \p N 8-bit values to the specified value
@@ -6203,11 +6227,11 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemsetAsync*/
fn cuMemsetD8Async_ptsz(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
uc: ::core::ffi::c_uchar,
N: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets device memory
Sets the memory range of \p N 16-bit values to the specified value
@@ -6242,11 +6266,11 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemsetAsync*/
fn cuMemsetD16Async_ptsz(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
us: ::core::ffi::c_ushort,
N: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets device memory
Sets the memory range of \p N 32-bit values to the specified value
@@ -6280,11 +6304,11 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, ::cuMemsetD32,
::cudaMemsetAsync*/
fn cuMemsetD32Async_ptsz(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
ui: ::core::ffi::c_uint,
N: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets device memory
Sets the 2D memory range of \p Width 8-bit values to the specified value
@@ -6324,13 +6348,13 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemset2DAsync*/
fn cuMemsetD2D8Async_ptsz(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
uc: ::core::ffi::c_uchar,
Width: usize,
Height: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets device memory
Sets the 2D memory range of \p Width 16-bit values to the specified value
@@ -6371,13 +6395,13 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemset2DAsync*/
fn cuMemsetD2D16Async_ptsz(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
us: ::core::ffi::c_ushort,
Width: usize,
Height: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets device memory
Sets the 2D memory range of \p Width 32-bit values to the specified value
@@ -6418,13 +6442,13 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemset2DAsync*/
fn cuMemsetD2D32Async_ptsz(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
ui: ::core::ffi::c_uint,
Width: usize,
Height: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
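The 2D memset entry points take the pitch returned by the pitched allocator, not the logical row width; mixing the two up is the classic mistake. A small sketch under the same assumptions, with cuMemAllocPitch_v2 redeclared by hand since it is not shown in this excerpt.

use cuda_types::cuda::*;

#[link(name = "cuda")]
extern "C" {
    fn cuMemAllocPitch_v2(
        dptr: *mut CUdeviceptr,
        pPitch: *mut usize,
        WidthInBytes: usize,
        Height: usize,
        ElementSizeBytes: ::core::ffi::c_uint,
    ) -> CUresult;
    fn cuMemsetD2D8_v2(
        dstDevice: CUdeviceptr,
        dstPitch: usize,
        uc: ::core::ffi::c_uchar,
        Width: usize,
        Height: usize,
    ) -> CUresult;
    fn cuMemFree_v2(dptr: CUdeviceptr) -> CUresult;
}

// Clears a pitched 2D allocation; dstPitch is the row stride chosen by the
// allocator, while Width/Height describe the logical region being set.
unsafe fn clear_2d(width_in_bytes: usize, height: usize) {
    let mut dptr: CUdeviceptr = std::mem::zeroed();
    let mut pitch = 0usize;
    let _ = cuMemAllocPitch_v2(&mut dptr, &mut pitch, width_in_bytes, height, 4);
    let _ = cuMemsetD2D8_v2(dptr, pitch, 0, width_in_bytes, height);
    let _ = cuMemFree_v2(dptr);
}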
/** \brief Creates a 1D or 2D CUDA array
Creates a CUDA array according to the ::CUDA_ARRAY_DESCRIPTOR structure
@@ -6526,9 +6550,9 @@ desc.Height = height;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaMallocArray*/
fn cuArrayCreate_v2(
- pHandle: *mut cuda_types::CUarray,
- pAllocateArray: *const cuda_types::CUDA_ARRAY_DESCRIPTOR,
- ) -> cuda_types::CUresult;
+ pHandle: *mut cuda_types::cuda::CUarray,
+ pAllocateArray: *const cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR,
+ ) -> cuda_types::cuda::CUresult;
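cuArrayCreate_v2 consumes a CUDA_ARRAY_DESCRIPTOR (Width, Height, Format, NumChannels, exactly as in cuda.h). A hypothetical sketch of creating and filling a small single-channel float array follows; the CU_AD_FORMAT_FLOAT constant name comes from cuda.h, and its exact Rust path is assumed to be whatever the generated bindings expose.

use cuda_types::cuda::*;

#[link(name = "cuda")]
extern "C" {
    fn cuArrayCreate_v2(
        pHandle: *mut CUarray,
        pAllocateArray: *const CUDA_ARRAY_DESCRIPTOR,
    ) -> CUresult;
    fn cuMemcpyHtoA_v2(
        dstArray: CUarray,
        dstOffset: usize,
        srcHost: *const ::core::ffi::c_void,
        ByteCount: usize,
    ) -> CUresult;
    fn cuArrayDestroy(hArray: CUarray) -> CUresult;
}

// Creates a 64x64 float array and uploads host data into it.
unsafe fn upload_array(pixels: &[f32; 64 * 64]) {
    let desc = CUDA_ARRAY_DESCRIPTOR {
        Width: 64,
        Height: 64,
        // Constant name as in cuda.h; Rust path depends on the bindings.
        Format: CUarray_format::CU_AD_FORMAT_FLOAT,
        NumChannels: 1,
    };
    let mut array: CUarray = std::mem::zeroed();
    let _ = cuArrayCreate_v2(&mut array, &desc);
    let _ = cuMemcpyHtoA_v2(array, 0, pixels.as_ptr().cast(), std::mem::size_of_val(pixels));
    let _ = cuArrayDestroy(array);
}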
/** \brief Get a 1D or 2D CUDA array descriptor
Returns in \p *pArrayDescriptor a descriptor containing information on the
@@ -6560,9 +6584,9 @@ desc.Height = height;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaArrayGetInfo*/
fn cuArrayGetDescriptor_v2(
- pArrayDescriptor: *mut cuda_types::CUDA_ARRAY_DESCRIPTOR,
- hArray: cuda_types::CUarray,
- ) -> cuda_types::CUresult;
+ pArrayDescriptor: *mut cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR,
+ hArray: cuda_types::cuda::CUarray,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the layout properties of a sparse CUDA array
Returns the layout properties of a sparse CUDA array in \p sparseProperties
@@ -6584,9 +6608,9 @@ desc.Height = height;
\param[in] array - CUDA array to get the sparse properties of
\sa ::cuMipmappedArrayGetSparseProperties, ::cuMemMapArrayAsync*/
fn cuArrayGetSparseProperties(
- sparseProperties: *mut cuda_types::CUDA_ARRAY_SPARSE_PROPERTIES,
- array: cuda_types::CUarray,
- ) -> cuda_types::CUresult;
+ sparseProperties: *mut cuda_types::cuda::CUDA_ARRAY_SPARSE_PROPERTIES,
+ array: cuda_types::cuda::CUarray,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the layout properties of a sparse CUDA mipmapped array
Returns the sparse array layout properties in \p sparseProperties
@@ -6609,9 +6633,9 @@ desc.Height = height;
\param[in] mipmap - CUDA mipmapped array to get the sparse properties of
\sa ::cuArrayGetSparseProperties, ::cuMemMapArrayAsync*/
fn cuMipmappedArrayGetSparseProperties(
- sparseProperties: *mut cuda_types::CUDA_ARRAY_SPARSE_PROPERTIES,
- mipmap: cuda_types::CUmipmappedArray,
- ) -> cuda_types::CUresult;
+ sparseProperties: *mut cuda_types::cuda::CUDA_ARRAY_SPARSE_PROPERTIES,
+ mipmap: cuda_types::cuda::CUmipmappedArray,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the memory requirements of a CUDA array
Returns the memory requirements of a CUDA array in \p memoryRequirements
@@ -6632,10 +6656,10 @@ desc.Height = height;
\param[in] device - Device to get the memory requirements for
\sa ::cuMipmappedArrayGetMemoryRequirements, ::cuMemMapArrayAsync*/
fn cuArrayGetMemoryRequirements(
- memoryRequirements: *mut cuda_types::CUDA_ARRAY_MEMORY_REQUIREMENTS,
- array: cuda_types::CUarray,
- device: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ memoryRequirements: *mut cuda_types::cuda::CUDA_ARRAY_MEMORY_REQUIREMENTS,
+ array: cuda_types::cuda::CUarray,
+ device: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the memory requirements of a CUDA mipmapped array
Returns the memory requirements of a CUDA mipmapped array in \p memoryRequirements
@@ -6657,10 +6681,10 @@ desc.Height = height;
\param[in] device - Device to get the memory requirements for
\sa ::cuArrayGetMemoryRequirements, ::cuMemMapArrayAsync*/
fn cuMipmappedArrayGetMemoryRequirements(
- memoryRequirements: *mut cuda_types::CUDA_ARRAY_MEMORY_REQUIREMENTS,
- mipmap: cuda_types::CUmipmappedArray,
- device: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ memoryRequirements: *mut cuda_types::cuda::CUDA_ARRAY_MEMORY_REQUIREMENTS,
+ mipmap: cuda_types::cuda::CUmipmappedArray,
+ device: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets a CUDA array plane from a CUDA array
Returns in \p pPlaneArray a CUDA array that represents a single format plane
@@ -6691,10 +6715,10 @@ desc.Height = height;
::cuArrayCreate,
::cudaArrayGetPlane*/
fn cuArrayGetPlane(
- pPlaneArray: *mut cuda_types::CUarray,
- hArray: cuda_types::CUarray,
+ pPlaneArray: *mut cuda_types::cuda::CUarray,
+ hArray: cuda_types::cuda::CUarray,
planeIdx: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys a CUDA array
Destroys the CUDA array \p hArray.
@@ -6722,7 +6746,7 @@ desc.Height = height;
::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaFreeArray*/
- fn cuArrayDestroy(hArray: cuda_types::CUarray) -> cuda_types::CUresult;
+ fn cuArrayDestroy(hArray: cuda_types::cuda::CUarray) -> cuda_types::cuda::CUresult;
/** \brief Creates a 3D CUDA array
Creates a CUDA array according to the ::CUDA_ARRAY3D_DESCRIPTOR structure
@@ -6900,9 +6924,9 @@ desc.Depth = depth;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaMalloc3DArray*/
fn cuArray3DCreate_v2(
- pHandle: *mut cuda_types::CUarray,
- pAllocateArray: *const cuda_types::CUDA_ARRAY3D_DESCRIPTOR,
- ) -> cuda_types::CUresult;
+ pHandle: *mut cuda_types::cuda::CUarray,
+ pAllocateArray: *const cuda_types::cuda::CUDA_ARRAY3D_DESCRIPTOR,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Get a 3D CUDA array descriptor
Returns in \p *pArrayDescriptor a descriptor containing information on the
@@ -6938,9 +6962,9 @@ desc.Depth = depth;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaArrayGetInfo*/
fn cuArray3DGetDescriptor_v2(
- pArrayDescriptor: *mut cuda_types::CUDA_ARRAY3D_DESCRIPTOR,
- hArray: cuda_types::CUarray,
- ) -> cuda_types::CUresult;
+ pArrayDescriptor: *mut cuda_types::cuda::CUDA_ARRAY3D_DESCRIPTOR,
+ hArray: cuda_types::cuda::CUarray,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a CUDA mipmapped array
Creates a CUDA mipmapped array according to the ::CUDA_ARRAY3D_DESCRIPTOR structure
@@ -7080,10 +7104,10 @@ CU_AD_FORMAT_FLOAT = 0x20
::cuArrayCreate,
::cudaMallocMipmappedArray*/
fn cuMipmappedArrayCreate(
- pHandle: *mut cuda_types::CUmipmappedArray,
- pMipmappedArrayDesc: *const cuda_types::CUDA_ARRAY3D_DESCRIPTOR,
+ pHandle: *mut cuda_types::cuda::CUmipmappedArray,
+ pMipmappedArrayDesc: *const cuda_types::cuda::CUDA_ARRAY3D_DESCRIPTOR,
numMipmapLevels: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets a mipmap level of a CUDA mipmapped array
Returns in \p *pLevelArray a CUDA array that represents a single mipmap level
@@ -7111,10 +7135,10 @@ CU_AD_FORMAT_FLOAT = 0x20
::cuArrayCreate,
::cudaGetMipmappedArrayLevel*/
fn cuMipmappedArrayGetLevel(
- pLevelArray: *mut cuda_types::CUarray,
- hMipmappedArray: cuda_types::CUmipmappedArray,
+ pLevelArray: *mut cuda_types::cuda::CUarray,
+ hMipmappedArray: cuda_types::cuda::CUmipmappedArray,
level: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys a CUDA mipmapped array
Destroys the CUDA mipmapped array \p hMipmappedArray.
@@ -7137,8 +7161,8 @@ CU_AD_FORMAT_FLOAT = 0x20
::cuArrayCreate,
::cudaFreeMipmappedArray*/
fn cuMipmappedArrayDestroy(
- hMipmappedArray: cuda_types::CUmipmappedArray,
- ) -> cuda_types::CUresult;
+ hMipmappedArray: cuda_types::cuda::CUmipmappedArray,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Retrieve handle for an address range
Get a handle of the specified type to an address range. The address range
@@ -7172,11 +7196,11 @@ CU_AD_FORMAT_FLOAT = 0x20
CUDA_ERROR_NOT_SUPPORTED*/
fn cuMemGetHandleForAddressRange(
handle: *mut ::core::ffi::c_void,
- dptr: cuda_types::CUdeviceptr,
+ dptr: cuda_types::cuda::CUdeviceptr,
size: usize,
- handleType: cuda_types::CUmemRangeHandleType,
+ handleType: cuda_types::cuda::CUmemRangeHandleType,
flags: ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allocate an address range reservation.
Reserves a virtual address range based on the given parameters, giving
@@ -7201,12 +7225,12 @@ CU_AD_FORMAT_FLOAT = 0x20
\sa ::cuMemAddressFree*/
fn cuMemAddressReserve(
- ptr: *mut cuda_types::CUdeviceptr,
+ ptr: *mut cuda_types::cuda::CUdeviceptr,
size: usize,
alignment: usize,
- addr: cuda_types::CUdeviceptr,
+ addr: cuda_types::cuda::CUdeviceptr,
flags: ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Free an address range reservation.
Frees a virtual address range reserved by cuMemAddressReserve. The size
@@ -7225,9 +7249,9 @@ CU_AD_FORMAT_FLOAT = 0x20
\sa ::cuMemAddressReserve*/
fn cuMemAddressFree(
- ptr: cuda_types::CUdeviceptr,
+ ptr: cuda_types::cuda::CUdeviceptr,
size: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a CUDA memory handle representing a memory allocation of a given size described by the given properties
This creates a memory allocation on the target device specified through the
@@ -7280,11 +7304,11 @@ CU_AD_FORMAT_FLOAT = 0x20
\sa ::cuMemRelease, ::cuMemExportToShareableHandle, ::cuMemImportFromShareableHandle*/
fn cuMemCreate(
- handle: *mut cuda_types::CUmemGenericAllocationHandle,
+ handle: *mut cuda_types::cuda::CUmemGenericAllocationHandle,
size: usize,
- prop: *const cuda_types::CUmemAllocationProp,
+ prop: *const cuda_types::cuda::CUmemAllocationProp,
flags: ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Release a memory handle representing a memory allocation which was previously allocated through cuMemCreate.
Frees the memory that was allocated on a device through cuMemCreate.
@@ -7309,8 +7333,8 @@ CU_AD_FORMAT_FLOAT = 0x20
\sa ::cuMemCreate*/
fn cuMemRelease(
- handle: cuda_types::CUmemGenericAllocationHandle,
- ) -> cuda_types::CUresult;
+ handle: cuda_types::cuda::CUmemGenericAllocationHandle,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Maps an allocation handle to a reserved virtual address range.
Maps bytes of memory represented by \p handle starting from byte \p offset to
@@ -7358,12 +7382,12 @@ CU_AD_FORMAT_FLOAT = 0x20
\sa ::cuMemUnmap, ::cuMemSetAccess, ::cuMemCreate, ::cuMemAddressReserve, ::cuMemImportFromShareableHandle*/
fn cuMemMap(
- ptr: cuda_types::CUdeviceptr,
+ ptr: cuda_types::cuda::CUdeviceptr,
size: usize,
offset: usize,
- handle: cuda_types::CUmemGenericAllocationHandle,
+ handle: cuda_types::cuda::CUmemGenericAllocationHandle,
flags: ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Maps or unmaps subregions of sparse CUDA arrays and sparse CUDA mipmapped arrays
Performs map or unmap operations on subregions of sparse CUDA arrays and sparse CUDA mipmapped arrays.
@@ -7496,10 +7520,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMipmappedArrayCreate, ::cuArrayCreate, ::cuArray3DCreate, ::cuMemCreate, ::cuArrayGetSparseProperties, ::cuMipmappedArrayGetSparseProperties*/
fn cuMemMapArrayAsync_ptsz(
- mapInfoList: *mut cuda_types::CUarrayMapInfo,
+ mapInfoList: *mut cuda_types::cuda::CUarrayMapInfo,
count: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unmap the backing memory of a given address range.
The range must be the entire contiguous address range that was mapped to. In
@@ -7525,7 +7549,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\note_sync
\sa ::cuMemCreate, ::cuMemAddressReserve*/
- fn cuMemUnmap(ptr: cuda_types::CUdeviceptr, size: usize) -> cuda_types::CUresult;
+ fn cuMemUnmap(
+ ptr: cuda_types::cuda::CUdeviceptr,
+ size: usize,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Set the access flags for each location specified in \p desc for the given virtual address range
Given the virtual address range via \p ptr and \p size, and the locations
@@ -7557,11 +7584,11 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemSetAccess, ::cuMemCreate, :cuMemMap*/
fn cuMemSetAccess(
- ptr: cuda_types::CUdeviceptr,
+ ptr: cuda_types::cuda::CUdeviceptr,
size: usize,
- desc: *const cuda_types::CUmemAccessDesc,
+ desc: *const cuda_types::cuda::CUmemAccessDesc,
count: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Get the access \p flags set for the given \p location and \p ptr
\param[out] flags - Flags set for this location
@@ -7579,9 +7606,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemSetAccess*/
fn cuMemGetAccess(
flags: *mut ::core::ffi::c_ulonglong,
- location: *const cuda_types::CUmemLocation,
- ptr: cuda_types::CUdeviceptr,
- ) -> cuda_types::CUresult;
+ location: *const cuda_types::cuda::CUmemLocation,
+ ptr: cuda_types::cuda::CUdeviceptr,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Exports an allocation to a requested shareable handle type
Given a CUDA memory handle, create a shareable memory
@@ -7613,10 +7640,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemImportFromShareableHandle*/
fn cuMemExportToShareableHandle(
shareableHandle: *mut ::core::ffi::c_void,
- handle: cuda_types::CUmemGenericAllocationHandle,
- handleType: cuda_types::CUmemAllocationHandleType,
+ handle: cuda_types::cuda::CUmemGenericAllocationHandle,
+ handleType: cuda_types::cuda::CUmemAllocationHandleType,
flags: ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Imports an allocation from a requested shareable handle type.
If the current process cannot support the memory described by this shareable
@@ -7645,10 +7672,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemExportToShareableHandle, ::cuMemMap, ::cuMemRelease*/
fn cuMemImportFromShareableHandle(
- handle: *mut cuda_types::CUmemGenericAllocationHandle,
+ handle: *mut cuda_types::cuda::CUmemGenericAllocationHandle,
osHandle: *mut ::core::ffi::c_void,
- shHandleType: cuda_types::CUmemAllocationHandleType,
- ) -> cuda_types::CUresult;
+ shHandleType: cuda_types::cuda::CUmemAllocationHandleType,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Calculates either the minimal or recommended granularity
Calculates either the minimal or recommended granularity
@@ -7669,9 +7696,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemCreate, ::cuMemMap*/
fn cuMemGetAllocationGranularity(
granularity: *mut usize,
- prop: *const cuda_types::CUmemAllocationProp,
- option: cuda_types::CUmemAllocationGranularity_flags,
- ) -> cuda_types::CUresult;
+ prop: *const cuda_types::cuda::CUmemAllocationProp,
+ option: cuda_types::cuda::CUmemAllocationGranularity_flags,
+ ) -> cuda_types::cuda::CUresult;
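The virtual memory management functions above are meant to be composed: query the granularity, create a physical allocation, reserve a virtual range, map the two together, set access, then tear everything down in reverse. A sketch of that flow follows, with the caveat that the Rust spellings (type_ for the reserved word, the enum constant paths) are assumed to match the generated bindings; the C-level field and constant names themselves mirror cuda.h.

use cuda_types::cuda::*;

#[link(name = "cuda")]
extern "C" {
    fn cuMemGetAllocationGranularity(
        granularity: *mut usize,
        prop: *const CUmemAllocationProp,
        option: CUmemAllocationGranularity_flags,
    ) -> CUresult;
    fn cuMemCreate(
        handle: *mut CUmemGenericAllocationHandle,
        size: usize,
        prop: *const CUmemAllocationProp,
        flags: ::core::ffi::c_ulonglong,
    ) -> CUresult;
    fn cuMemAddressReserve(
        ptr: *mut CUdeviceptr,
        size: usize,
        alignment: usize,
        addr: CUdeviceptr,
        flags: ::core::ffi::c_ulonglong,
    ) -> CUresult;
    fn cuMemMap(
        ptr: CUdeviceptr,
        size: usize,
        offset: usize,
        handle: CUmemGenericAllocationHandle,
        flags: ::core::ffi::c_ulonglong,
    ) -> CUresult;
    fn cuMemSetAccess(
        ptr: CUdeviceptr,
        size: usize,
        desc: *const CUmemAccessDesc,
        count: usize,
    ) -> CUresult;
    fn cuMemUnmap(ptr: CUdeviceptr, size: usize) -> CUresult;
    fn cuMemRelease(handle: CUmemGenericAllocationHandle) -> CUresult;
    fn cuMemAddressFree(ptr: CUdeviceptr, size: usize) -> CUresult;
}

// Reserve, create, map, set access, then undo each step in reverse order.
unsafe fn map_one_chunk(device_ordinal: ::core::ffi::c_int) {
    let mut prop: CUmemAllocationProp = std::mem::zeroed();
    prop.type_ = CUmemAllocationType::CU_MEM_ALLOCATION_TYPE_PINNED;
    prop.location.type_ = CUmemLocationType::CU_MEM_LOCATION_TYPE_DEVICE;
    prop.location.id = device_ordinal;

    let mut granularity = 0usize;
    let _ = cuMemGetAllocationGranularity(
        &mut granularity,
        &prop,
        CUmemAllocationGranularity_flags::CU_MEM_ALLOC_GRANULARITY_MINIMUM,
    );

    let mut physical: CUmemGenericAllocationHandle = std::mem::zeroed();
    let _ = cuMemCreate(&mut physical, granularity, &prop, 0);

    let any_address: CUdeviceptr = std::mem::zeroed(); // 0: let the driver pick
    let mut va: CUdeviceptr = std::mem::zeroed();
    let _ = cuMemAddressReserve(&mut va, granularity, 0, any_address, 0);
    let _ = cuMemMap(va, granularity, 0, physical, 0);

    let mut access: CUmemAccessDesc = std::mem::zeroed();
    access.location.type_ = CUmemLocationType::CU_MEM_LOCATION_TYPE_DEVICE;
    access.location.id = device_ordinal;
    access.flags = CUmemAccess_flags::CU_MEM_ACCESS_FLAGS_PROT_READWRITE;
    let _ = cuMemSetAccess(va, granularity, &access, 1);

    // ... use va as ordinary device memory here ...

    let _ = cuMemUnmap(va, granularity);
    let _ = cuMemRelease(physical);
    let _ = cuMemAddressFree(va, granularity);
}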
/** \brief Retrieve the contents of the property structure defining properties for this handle
\param[out] prop - Pointer to a properties structure which will hold the information about this handle
@@ -7686,9 +7713,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemCreate, ::cuMemImportFromShareableHandle*/
fn cuMemGetAllocationPropertiesFromHandle(
- prop: *mut cuda_types::CUmemAllocationProp,
- handle: cuda_types::CUmemGenericAllocationHandle,
- ) -> cuda_types::CUresult;
+ prop: *mut cuda_types::cuda::CUmemAllocationProp,
+ handle: cuda_types::cuda::CUmemGenericAllocationHandle,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Given an address \p addr, returns the allocation handle of the backing memory allocation.
The handle is guaranteed to be the same handle value used to map the memory. If the address
@@ -7710,9 +7737,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemCreate, ::cuMemRelease, ::cuMemMap*/
fn cuMemRetainAllocationHandle(
- handle: *mut cuda_types::CUmemGenericAllocationHandle,
+ handle: *mut cuda_types::cuda::CUmemGenericAllocationHandle,
addr: *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Frees memory with stream ordered semantics
Inserts a free operation into \p hStream.
@@ -7732,9 +7759,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::CUDA_ERROR_INVALID_CONTEXT (default stream specified with no current context),
::CUDA_ERROR_NOT_SUPPORTED*/
fn cuMemFreeAsync_ptsz(
- dptr: cuda_types::CUdeviceptr,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ dptr: cuda_types::cuda::CUdeviceptr,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allocates memory with stream ordered semantics
Inserts an allocation operation into \p hStream.
@@ -7765,10 +7792,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuDeviceGetDefaultMemPool, ::cuDeviceGetMemPool, ::cuMemPoolCreate,
::cuMemPoolSetAccess, ::cuMemPoolSetAttribute*/
fn cuMemAllocAsync_ptsz(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytesize: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
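cuMemAllocAsync and cuMemFreeAsync order the allocation's lifetime by the stream itself, so a scratch buffer can be allocated, used and freed without touching the host until the results are actually needed. A minimal sketch, same assumptions as the earlier ones.

use cuda_types::cuda::*;

#[link(name = "cuda")]
extern "C" {
    fn cuMemAllocAsync(dptr: *mut CUdeviceptr, bytesize: usize, hStream: CUstream) -> CUresult;
    fn cuMemsetD8Async(
        dstDevice: CUdeviceptr,
        uc: ::core::ffi::c_uchar,
        N: usize,
        hStream: CUstream,
    ) -> CUresult;
    fn cuMemFreeAsync(dptr: CUdeviceptr, hStream: CUstream) -> CUresult;
    fn cuStreamSynchronize(hStream: CUstream) -> CUresult;
}

// Allocation, use and free are all ordered by the stream; nothing is visible
// to the host until the final synchronize.
unsafe fn scratch_buffer(stream: CUstream, bytes: usize) {
    let mut dptr: CUdeviceptr = std::mem::zeroed();
    let _ = cuMemAllocAsync(&mut dptr, bytes, stream);
    let _ = cuMemsetD8Async(dptr, 0xff, bytes, stream);
    let _ = cuMemFreeAsync(dptr, stream); // safe: ordered after the memset
    let _ = cuStreamSynchronize(stream);
}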
/** \brief Tries to release memory back to the OS
Releases memory back to the OS until the pool contains fewer than minBytesToKeep
@@ -7792,9 +7819,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool,
::cuDeviceGetMemPool, ::cuMemPoolCreate*/
fn cuMemPoolTrimTo(
- pool: cuda_types::CUmemoryPool,
+ pool: cuda_types::cuda::CUmemoryPool,
minBytesToKeep: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets attributes of a memory pool
Supported attributes are:
@@ -7835,10 +7862,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool,
::cuDeviceGetMemPool, ::cuMemPoolCreate*/
fn cuMemPoolSetAttribute(
- pool: cuda_types::CUmemoryPool,
- attr: cuda_types::CUmemPool_attribute,
+ pool: cuda_types::cuda::CUmemoryPool,
+ attr: cuda_types::cuda::CUmemPool_attribute,
value: *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets attributes of a memory pool
Supported attributes are:
@@ -7883,10 +7910,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool,
::cuDeviceGetMemPool, ::cuMemPoolCreate*/
fn cuMemPoolGetAttribute(
- pool: cuda_types::CUmemoryPool,
- attr: cuda_types::CUmemPool_attribute,
+ pool: cuda_types::cuda::CUmemoryPool,
+ attr: cuda_types::cuda::CUmemPool_attribute,
value: *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Controls visibility of pools between devices
\param[in] pool - The pool being modified
@@ -7901,10 +7928,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool,
::cuDeviceGetMemPool, ::cuMemPoolCreate*/
fn cuMemPoolSetAccess(
- pool: cuda_types::CUmemoryPool,
- map: *const cuda_types::CUmemAccessDesc,
+ pool: cuda_types::cuda::CUmemoryPool,
+ map: *const cuda_types::cuda::CUmemAccessDesc,
count: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the accessibility of a pool from a device
Returns the accessibility of the pool's memory from the specified location.
@@ -7916,10 +7943,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool,
::cuDeviceGetMemPool, ::cuMemPoolCreate*/
fn cuMemPoolGetAccess(
- flags: *mut cuda_types::CUmemAccess_flags,
- memPool: cuda_types::CUmemoryPool,
- location: *mut cuda_types::CUmemLocation,
- ) -> cuda_types::CUresult;
+ flags: *mut cuda_types::cuda::CUmemAccess_flags,
+ memPool: cuda_types::cuda::CUmemoryPool,
+ location: *mut cuda_types::cuda::CUmemLocation,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a memory pool
Creates a CUDA memory pool and returns the handle in \p pool. The \p poolProps determines
@@ -7958,9 +7985,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuDeviceSetMemPool, ::cuDeviceGetMemPool, ::cuDeviceGetDefaultMemPool,
::cuMemAllocFromPoolAsync, ::cuMemPoolExportToShareableHandle*/
fn cuMemPoolCreate(
- pool: *mut cuda_types::CUmemoryPool,
- poolProps: *const cuda_types::CUmemPoolProps,
- ) -> cuda_types::CUresult;
+ pool: *mut cuda_types::cuda::CUmemoryPool,
+ poolProps: *const cuda_types::cuda::CUmemPoolProps,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys the specified memory pool
If any pointers obtained from this pool haven't been freed or
@@ -7980,7 +8007,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemFreeAsync, ::cuDeviceSetMemPool, ::cuDeviceGetMemPool,
::cuDeviceGetDefaultMemPool, ::cuMemPoolCreate*/
- fn cuMemPoolDestroy(pool: cuda_types::CUmemoryPool) -> cuda_types::CUresult;
+ fn cuMemPoolDestroy(
+ pool: cuda_types::cuda::CUmemoryPool,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allocates memory from a specified pool with stream ordered semantics.
Inserts an allocation operation into \p hStream.
@@ -8016,11 +8045,11 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuDeviceGetMemPool, ::cuMemPoolCreate, ::cuMemPoolSetAccess,
::cuMemPoolSetAttribute*/
fn cuMemAllocFromPoolAsync_ptsz(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytesize: usize,
- pool: cuda_types::CUmemoryPool,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ pool: cuda_types::cuda::CUmemoryPool,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Exports a memory pool to the requested handle type.
Given an IPC capable mempool, create an OS handle to share the pool with another process.
@@ -8048,10 +8077,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuMemPoolSetAccess, ::cuMemPoolSetAttribute*/
fn cuMemPoolExportToShareableHandle(
handle_out: *mut ::core::ffi::c_void,
- pool: cuda_types::CUmemoryPool,
- handleType: cuda_types::CUmemAllocationHandleType,
+ pool: cuda_types::cuda::CUmemoryPool,
+ handleType: cuda_types::cuda::CUmemAllocationHandleType,
flags: ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief imports a memory pool from a shared handle.
Specific allocations can be imported from the imported pool with cuMemPoolImportPointer.
@@ -8078,11 +8107,11 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemPoolExportToShareableHandle, ::cuMemPoolExportPointer, ::cuMemPoolImportPointer*/
fn cuMemPoolImportFromShareableHandle(
- pool_out: *mut cuda_types::CUmemoryPool,
+ pool_out: *mut cuda_types::cuda::CUmemoryPool,
handle: *mut ::core::ffi::c_void,
- handleType: cuda_types::CUmemAllocationHandleType,
+ handleType: cuda_types::cuda::CUmemAllocationHandleType,
flags: ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Export data to share a memory pool allocation between processes.
Constructs \p shareData_out for sharing a specific allocation from an already shared memory pool.
@@ -8100,9 +8129,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemPoolExportToShareableHandle, ::cuMemPoolImportFromShareableHandle, ::cuMemPoolImportPointer*/
fn cuMemPoolExportPointer(
- shareData_out: *mut cuda_types::CUmemPoolPtrExportData,
- ptr: cuda_types::CUdeviceptr,
- ) -> cuda_types::CUresult;
+ shareData_out: *mut cuda_types::cuda::CUmemPoolPtrExportData,
+ ptr: cuda_types::cuda::CUdeviceptr,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Import a memory pool allocation from another process.
Returns in \p ptr_out a pointer to the imported memory.
@@ -8129,10 +8158,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemPoolExportToShareableHandle, ::cuMemPoolImportFromShareableHandle, ::cuMemPoolExportPointer*/
fn cuMemPoolImportPointer(
- ptr_out: *mut cuda_types::CUdeviceptr,
- pool: cuda_types::CUmemoryPool,
- shareData: *mut cuda_types::CUmemPoolPtrExportData,
- ) -> cuda_types::CUresult;
+ ptr_out: *mut cuda_types::cuda::CUdeviceptr,
+ pool: cuda_types::cuda::CUmemoryPool,
+ shareData: *mut cuda_types::cuda::CUmemPoolPtrExportData,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a generic allocation handle representing a multicast object described by the given properties.
This creates a multicast object as described by \p prop. The number of
@@ -8173,9 +8202,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMulticastAddDevice, ::cuMulticastBindMem, ::cuMulticastBindAddr, ::cuMulticastUnbind
\sa ::cuMemCreate, ::cuMemRelease, ::cuMemExportToShareableHandle, ::cuMemImportFromShareableHandle*/
fn cuMulticastCreate(
- mcHandle: *mut cuda_types::CUmemGenericAllocationHandle,
- prop: *const cuda_types::CUmulticastObjectProp,
- ) -> cuda_types::CUresult;
+ mcHandle: *mut cuda_types::cuda::CUmemGenericAllocationHandle,
+ prop: *const cuda_types::cuda::CUmulticastObjectProp,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Associate a device to a multicast object.
Associates a device to a multicast object. The added device will be a part of
@@ -8206,9 +8235,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMulticastCreate, ::cuMulticastBindMem, ::cuMulticastBindAddr*/
fn cuMulticastAddDevice(
- mcHandle: cuda_types::CUmemGenericAllocationHandle,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ mcHandle: cuda_types::cuda::CUmemGenericAllocationHandle,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Bind a memory allocation represented by a handle to a multicast object.
Binds a memory allocation specified by \p memHandle and created via
@@ -8255,13 +8284,13 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMulticastCreate, ::cuMulticastAddDevice, ::cuMemCreate*/
fn cuMulticastBindMem(
- mcHandle: cuda_types::CUmemGenericAllocationHandle,
+ mcHandle: cuda_types::cuda::CUmemGenericAllocationHandle,
mcOffset: usize,
- memHandle: cuda_types::CUmemGenericAllocationHandle,
+ memHandle: cuda_types::cuda::CUmemGenericAllocationHandle,
memOffset: usize,
size: usize,
flags: ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Bind a memory allocation represented by a virtual address to a multicast object.
Binds a memory allocation specified by its mapped address \p memptr to a
@@ -8306,12 +8335,12 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMulticastCreate, ::cuMulticastAddDevice, ::cuMemCreate*/
fn cuMulticastBindAddr(
- mcHandle: cuda_types::CUmemGenericAllocationHandle,
+ mcHandle: cuda_types::cuda::CUmemGenericAllocationHandle,
mcOffset: usize,
- memptr: cuda_types::CUdeviceptr,
+ memptr: cuda_types::cuda::CUdeviceptr,
size: usize,
flags: ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unbind any memory allocations bound to a multicast object at a given offset and upto a given size.
Unbinds any memory allocations hosted on \p dev and bound to a multicast
@@ -8343,11 +8372,11 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMulticastBindMem, ::cuMulticastBindAddr*/
fn cuMulticastUnbind(
- mcHandle: cuda_types::CUmemGenericAllocationHandle,
- dev: cuda_types::CUdevice,
+ mcHandle: cuda_types::cuda::CUmemGenericAllocationHandle,
+ dev: cuda_types::cuda::CUdevice,
mcOffset: usize,
size: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Calculates either the minimal or recommended granularity for multicast object
Calculates either the minimal or recommended granularity for a given set of
@@ -8370,9 +8399,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMulticastCreate, ::cuMulticastBindMem, ::cuMulticastBindAddr, ::cuMulticastUnbind*/
fn cuMulticastGetGranularity(
granularity: *mut usize,
- prop: *const cuda_types::CUmulticastObjectProp,
- option: cuda_types::CUmulticastGranularity_flags,
- ) -> cuda_types::CUresult;
+ prop: *const cuda_types::cuda::CUmulticastObjectProp,
+ option: cuda_types::cuda::CUmulticastGranularity_flags,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns information about a pointer
The supported attributes are:
@@ -8561,9 +8590,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cudaPointerGetAttributes*/
fn cuPointerGetAttribute(
data: *mut ::core::ffi::c_void,
- attribute: cuda_types::CUpointer_attribute,
- ptr: cuda_types::CUdeviceptr,
- ) -> cuda_types::CUresult;
+ attribute: cuda_types::cuda::CUpointer_attribute,
+ ptr: cuda_types::cuda::CUdeviceptr,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Prefetches memory to the specified destination device
Note there is a later version of this API, ::cuMemPrefetchAsync_v2. It will
@@ -8632,11 +8661,11 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuMemcpy3DPeerAsync, ::cuMemAdvise, ::cuMemPrefetchAsync
::cudaMemPrefetchAsync_v2*/
fn cuMemPrefetchAsync_ptsz(
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- dstDevice: cuda_types::CUdevice,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ dstDevice: cuda_types::cuda::CUdevice,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Prefetches memory to the specified destination location
Prefetches memory to the specified destination location. \p devPtr is the
@@ -8711,12 +8740,12 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuMemcpy3DPeerAsync, ::cuMemAdvise, ::cuMemPrefetchAsync
::cudaMemPrefetchAsync_v2*/
fn cuMemPrefetchAsync_v2_ptsz(
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- location: cuda_types::CUmemLocation,
+ location: cuda_types::cuda::CUmemLocation,
flags: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Advise about the usage of a given memory range
Note there is a later version of this API, ::cuMemAdvise_v2. It will
@@ -8829,11 +8858,11 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuMemcpy3DPeerAsync, ::cuMemPrefetchAsync, ::cuMemAdvise_v2
::cudaMemAdvise*/
fn cuMemAdvise(
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- advice: cuda_types::CUmem_advise,
- device: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ advice: cuda_types::cuda::CUmem_advise,
+ device: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Advise about the usage of a given memory range
Advise the Unified Memory subsystem about the usage pattern for the memory range
@@ -8952,11 +8981,11 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuMemcpy3DPeerAsync, ::cuMemPrefetchAsync, ::cuMemAdvise
::cudaMemAdvise*/
fn cuMemAdvise_v2(
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- advice: cuda_types::CUmem_advise,
- location: cuda_types::CUmemLocation,
- ) -> cuda_types::CUresult;
+ advice: cuda_types::cuda::CUmem_advise,
+ location: cuda_types::cuda::CUmemLocation,
+ ) -> cuda_types::cuda::CUresult;
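cuMemAdvise and cuMemPrefetchAsync operate on managed (unified) memory; a common pairing is to mark a range read-mostly and prefetch it to the device that is about to use it. A hypothetical sketch follows; CU_MEM_ATTACH_GLOBAL and CU_MEM_ADVISE_SET_READ_MOSTLY are the cuda.h names, and the Rust path of the latter is assumed to follow the generated bindings.

use cuda_types::cuda::*;

#[link(name = "cuda")]
extern "C" {
    fn cuMemAllocManaged(
        dptr: *mut CUdeviceptr,
        bytesize: usize,
        flags: ::core::ffi::c_uint,
    ) -> CUresult;
    fn cuCtxGetDevice(device: *mut CUdevice) -> CUresult;
    fn cuMemAdvise(
        devPtr: CUdeviceptr,
        count: usize,
        advice: CUmem_advise,
        device: CUdevice,
    ) -> CUresult;
    fn cuMemPrefetchAsync(
        devPtr: CUdeviceptr,
        count: usize,
        dstDevice: CUdevice,
        hStream: CUstream,
    ) -> CUresult;
    fn cuMemFree_v2(dptr: CUdeviceptr) -> CUresult;
}

// Marks a managed range read-mostly and prefetches it to the current device.
unsafe fn warm_up(bytes: usize, stream: CUstream) {
    let mut dptr: CUdeviceptr = std::mem::zeroed();
    let _ = cuMemAllocManaged(&mut dptr, bytes, 0x1); // CU_MEM_ATTACH_GLOBAL
    let mut dev: CUdevice = std::mem::zeroed();
    let _ = cuCtxGetDevice(&mut dev);
    // Constant name as in cuda.h; exact Rust path depends on the bindings.
    let _ = cuMemAdvise(dptr, bytes, CUmem_advise::CU_MEM_ADVISE_SET_READ_MOSTLY, dev);
    let _ = cuMemPrefetchAsync(dptr, bytes, dev, stream);
    let _ = cuMemFree_v2(dptr);
}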
/** \brief Query an attribute of a given memory range
Query an attribute about the memory range starting at \p devPtr with a size of \p count bytes. The
@@ -9037,10 +9066,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
fn cuMemRangeGetAttribute(
data: *mut ::core::ffi::c_void,
dataSize: usize,
- attribute: cuda_types::CUmem_range_attribute,
- devPtr: cuda_types::CUdeviceptr,
+ attribute: cuda_types::cuda::CUmem_range_attribute,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Query attributes of a given memory range.
Query attributes of the memory range starting at \p devPtr with a size of \p count bytes. The
@@ -9084,11 +9113,11 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
fn cuMemRangeGetAttributes(
data: *mut *mut ::core::ffi::c_void,
dataSizes: *mut usize,
- attributes: *mut cuda_types::CUmem_range_attribute,
+ attributes: *mut cuda_types::cuda::CUmem_range_attribute,
numAttributes: usize,
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Set attributes on a previously allocated memory region
The supported attributes are:
@@ -9129,9 +9158,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuMemHostUnregister*/
fn cuPointerSetAttribute(
value: *const ::core::ffi::c_void,
- attribute: cuda_types::CUpointer_attribute,
- ptr: cuda_types::CUdeviceptr,
- ) -> cuda_types::CUresult;
+ attribute: cuda_types::cuda::CUpointer_attribute,
+ ptr: cuda_types::cuda::CUdeviceptr,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns information about a pointer.
The supported attributes are (refer to ::cuPointerGetAttribute for attribute descriptions and restrictions):
@@ -9179,10 +9208,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cudaPointerGetAttributes*/
fn cuPointerGetAttributes(
numAttributes: ::core::ffi::c_uint,
- attributes: *mut cuda_types::CUpointer_attribute,
+ attributes: *mut cuda_types::cuda::CUpointer_attribute,
data: *mut *mut ::core::ffi::c_void,
- ptr: cuda_types::CUdeviceptr,
- ) -> cuda_types::CUresult;
+ ptr: cuda_types::cuda::CUdeviceptr,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a stream
Creates a stream and returns a handle in \p phStream. The \p Flags argument
@@ -9217,9 +9246,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cudaStreamCreate,
::cudaStreamCreateWithFlags*/
fn cuStreamCreate(
- phStream: *mut cuda_types::CUstream,
+ phStream: *mut cuda_types::cuda::CUstream,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a stream with the given priority
Creates a stream with the specified priority and returns a handle in \p phStream.
@@ -9268,10 +9297,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuStreamAddCallback,
::cudaStreamCreateWithPriority*/
fn cuStreamCreateWithPriority(
- phStream: *mut cuda_types::CUstream,
+ phStream: *mut cuda_types::cuda::CUstream,
flags: ::core::ffi::c_uint,
priority: ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Query the priority of a given stream
Query the priority of a stream created using ::cuStreamCreate or ::cuStreamCreateWithPriority
@@ -9299,9 +9328,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuStreamGetFlags,
::cudaStreamGetPriority*/
fn cuStreamGetPriority_ptsz(
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
priority: *mut ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Query the flags of a given stream
Query the flags of a stream created using ::cuStreamCreate or ::cuStreamCreateWithPriority
@@ -9327,9 +9356,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuStreamGetPriority,
::cudaStreamGetFlags*/
fn cuStreamGetFlags_ptsz(
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
flags: *mut ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the unique Id associated with the stream handle supplied
Returns in \p streamId the unique Id which is associated with the given stream handle.
@@ -9360,9 +9389,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuStreamGetPriority,
::cudaStreamGetId*/
fn cuStreamGetId_ptsz(
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
streamId: *mut ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Query the context associated with a stream
Returns the CUDA context that the stream is associated with.
@@ -9404,9 +9433,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cudaStreamCreate,
::cudaStreamCreateWithFlags*/
fn cuStreamGetCtx_ptsz(
- hStream: cuda_types::CUstream,
- pctx: *mut cuda_types::CUcontext,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ pctx: *mut cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Make a compute stream wait on an event
Makes all future work submitted to \p hStream wait for all work captured in
@@ -9441,10 +9470,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuStreamDestroy,
::cudaStreamWaitEvent*/
fn cuStreamWaitEvent_ptsz(
- hStream: cuda_types::CUstream,
- hEvent: cuda_types::CUevent,
+ hStream: cuda_types::cuda::CUstream,
+ hEvent: cuda_types::cuda::CUevent,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Add a callback to a compute stream
\note This function is slated for eventual deprecation and removal. If
@@ -9517,11 +9546,11 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuLaunchHostFunc,
::cudaStreamAddCallback*/
fn cuStreamAddCallback_ptsz(
- hStream: cuda_types::CUstream,
- callback: cuda_types::CUstreamCallback,
+ hStream: cuda_types::cuda::CUstream,
+ callback: cuda_types::cuda::CUstreamCallback,
userData: *mut ::core::ffi::c_void,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Begins graph capture on a stream
Begin graph capture on \p hStream. When a stream is in capture mode, all operations
@@ -9557,9 +9586,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuStreamEndCapture,
::cuThreadExchangeStreamCaptureMode*/
fn cuStreamBeginCapture_v2_ptsz(
- hStream: cuda_types::CUstream,
- mode: cuda_types::CUstreamCaptureMode,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ mode: cuda_types::cuda::CUstreamCaptureMode,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Begins graph capture on a stream to an existing graph
Begin graph capture on \p hStream, placing new nodes into an existing graph. When a stream is
@@ -9603,13 +9632,13 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuThreadExchangeStreamCaptureMode,
::cuGraphAddNode,*/
fn cuStreamBeginCaptureToGraph_ptsz(
- hStream: cuda_types::CUstream,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
- dependencyData: *const cuda_types::CUgraphEdgeData,
+ hStream: cuda_types::cuda::CUstream,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
+ dependencyData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
- mode: cuda_types::CUstreamCaptureMode,
- ) -> cuda_types::CUresult;
+ mode: cuda_types::cuda::CUstreamCaptureMode,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Swaps the stream capture interaction mode for a thread
Sets the calling thread's stream capture interaction mode to the value contained
@@ -9660,8 +9689,8 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
\sa
::cuStreamBeginCapture*/
fn cuThreadExchangeStreamCaptureMode(
- mode: *mut cuda_types::CUstreamCaptureMode,
- ) -> cuda_types::CUresult;
+ mode: *mut cuda_types::cuda::CUstreamCaptureMode,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Ends capture on a stream, returning the captured graph
End capture on \p hStream, returning the captured graph via \p phGraph.
@@ -9690,9 +9719,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuStreamIsCapturing,
::cuGraphDestroy*/
fn cuStreamEndCapture_ptsz(
- hStream: cuda_types::CUstream,
- phGraph: *mut cuda_types::CUgraph,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ phGraph: *mut cuda_types::cuda::CUgraph,
+ ) -> cuda_types::cuda::CUresult;
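Between cuStreamBeginCapture and cuStreamEndCapture nothing is executed; the stream records its work into a CUgraph, which can then be instantiated once and replayed cheaply. A sketch of that capture/replay loop, with the graph entry points (which are real driver API functions but outside this excerpt) redeclared by hand under the same assumptions as before.

use cuda_types::cuda::*;

#[link(name = "cuda")]
extern "C" {
    fn cuStreamBeginCapture_v2(hStream: CUstream, mode: CUstreamCaptureMode) -> CUresult;
    fn cuMemsetD8Async(
        dstDevice: CUdeviceptr,
        uc: ::core::ffi::c_uchar,
        N: usize,
        hStream: CUstream,
    ) -> CUresult;
    fn cuStreamEndCapture(hStream: CUstream, phGraph: *mut CUgraph) -> CUresult;
    fn cuGraphInstantiateWithFlags(
        phGraphExec: *mut CUgraphExec,
        hGraph: CUgraph,
        flags: ::core::ffi::c_ulonglong,
    ) -> CUresult;
    fn cuGraphLaunch(hGraphExec: CUgraphExec, hStream: CUstream) -> CUresult;
    fn cuGraphExecDestroy(hGraphExec: CUgraphExec) -> CUresult;
    fn cuGraphDestroy(hGraph: CUgraph) -> CUresult;
}

// Captures one memset into a graph, then replays it without re-recording.
unsafe fn capture_and_replay(stream: CUstream, dptr: CUdeviceptr, bytes: usize) {
    // Zero value == CU_STREAM_CAPTURE_MODE_GLOBAL in cuda.h.
    let mode: CUstreamCaptureMode = std::mem::zeroed();
    let _ = cuStreamBeginCapture_v2(stream, mode);
    let _ = cuMemsetD8Async(dptr, 0, bytes, stream); // recorded, not executed
    let mut graph: CUgraph = std::mem::zeroed();
    let _ = cuStreamEndCapture(stream, &mut graph);

    let mut exec: CUgraphExec = std::mem::zeroed();
    let _ = cuGraphInstantiateWithFlags(&mut exec, graph, 0);
    for _ in 0..3 {
        let _ = cuGraphLaunch(exec, stream); // replay the captured work
    }
    let _ = cuGraphExecDestroy(exec);
    let _ = cuGraphDestroy(graph);
}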
/** \brief Returns a stream's capture status
Return the capture status of \p hStream via \p captureStatus. After a successful
@@ -9730,9 +9759,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuStreamBeginCapture,
::cuStreamEndCapture*/
fn cuStreamIsCapturing_ptsz(
- hStream: cuda_types::CUstream,
- captureStatus: *mut cuda_types::CUstreamCaptureStatus,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Query a stream's capture state
Query stream state related to stream capture.
@@ -9779,13 +9808,13 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuStreamIsCapturing,
::cuStreamUpdateCaptureDependencies*/
fn cuStreamGetCaptureInfo_v2_ptsz(
- hStream: cuda_types::CUstream,
- captureStatus_out: *mut cuda_types::CUstreamCaptureStatus,
- id_out: *mut cuda_types::cuuint64_t,
- graph_out: *mut cuda_types::CUgraph,
- dependencies_out: *mut *const cuda_types::CUgraphNode,
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus_out: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ id_out: *mut cuda_types::cuda::cuuint64_t,
+ graph_out: *mut cuda_types::cuda::CUgraph,
+ dependencies_out: *mut *const cuda_types::cuda::CUgraphNode,
numDependencies_out: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Query a stream's capture state (12.3+)
Query stream state related to stream capture.
@@ -9843,14 +9872,14 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuStreamIsCapturing,
::cuStreamUpdateCaptureDependencies*/
fn cuStreamGetCaptureInfo_v3_ptsz(
- hStream: cuda_types::CUstream,
- captureStatus_out: *mut cuda_types::CUstreamCaptureStatus,
- id_out: *mut cuda_types::cuuint64_t,
- graph_out: *mut cuda_types::CUgraph,
- dependencies_out: *mut *const cuda_types::CUgraphNode,
- edgeData_out: *mut *const cuda_types::CUgraphEdgeData,
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus_out: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ id_out: *mut cuda_types::cuda::cuuint64_t,
+ graph_out: *mut cuda_types::cuda::CUgraph,
+ dependencies_out: *mut *const cuda_types::cuda::CUgraphNode,
+ edgeData_out: *mut *const cuda_types::cuda::CUgraphEdgeData,
numDependencies_out: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Update the set of dependencies in a capturing stream (11.3+)
Modifies the dependency set of a capturing stream. The dependency set is the set
@@ -9884,11 +9913,11 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuStreamBeginCapture,
::cuStreamGetCaptureInfo,*/
fn cuStreamUpdateCaptureDependencies_ptsz(
- hStream: cuda_types::CUstream,
- dependencies: *mut cuda_types::CUgraphNode,
+ hStream: cuda_types::cuda::CUstream,
+ dependencies: *mut cuda_types::cuda::CUgraphNode,
numDependencies: usize,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Update the set of dependencies in a capturing stream (12.3+)
Modifies the dependency set of a capturing stream. The dependency set is the set
@@ -9921,12 +9950,12 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuStreamBeginCapture,
::cuStreamGetCaptureInfo,*/
fn cuStreamUpdateCaptureDependencies_v2_ptsz(
- hStream: cuda_types::CUstream,
- dependencies: *mut cuda_types::CUgraphNode,
- dependencyData: *const cuda_types::CUgraphEdgeData,
+ hStream: cuda_types::cuda::CUstream,
+ dependencies: *mut cuda_types::cuda::CUgraphNode,
+ dependencyData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Attach memory to a stream asynchronously
Enqueues an operation in \p hStream to specify stream association of
@@ -10012,11 +10041,11 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuMemAllocManaged,
::cudaStreamAttachMemAsync*/
fn cuStreamAttachMemAsync_ptsz(
- hStream: cuda_types::CUstream,
- dptr: cuda_types::CUdeviceptr,
+ hStream: cuda_types::cuda::CUstream,
+ dptr: cuda_types::cuda::CUdeviceptr,
length: usize,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Determine status of a compute stream
Returns ::CUDA_SUCCESS if all operations in the stream specified by
@@ -10043,7 +10072,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuStreamSynchronize,
::cuStreamAddCallback,
::cudaStreamQuery*/
- fn cuStreamQuery_ptsz(hStream: cuda_types::CUstream) -> cuda_types::CUresult;
+ fn cuStreamQuery_ptsz(
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Wait until a stream's tasks are completed
Waits until the device has completed all operations in the stream specified
@@ -10069,7 +10100,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuStreamQuery,
::cuStreamAddCallback,
::cudaStreamSynchronize*/
- fn cuStreamSynchronize_ptsz(hStream: cuda_types::CUstream) -> cuda_types::CUresult;
+ fn cuStreamSynchronize_ptsz(
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys a stream
Destroys the stream specified by \p hStream.
@@ -10096,7 +10129,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuStreamSynchronize,
::cuStreamAddCallback,
::cudaStreamDestroy*/
- fn cuStreamDestroy_v2(hStream: cuda_types::CUstream) -> cuda_types::CUresult;
+ fn cuStreamDestroy_v2(
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies attributes from source stream to destination stream.
Copies attributes from source stream \p src to destination stream \p dst.
@@ -10114,9 +10149,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
\sa
::CUaccessPolicyWindow*/
fn cuStreamCopyAttributes_ptsz(
- dst: cuda_types::CUstream,
- src: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ dst: cuda_types::cuda::CUstream,
+ src: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Queries stream attribute.
Queries attribute \p attr from \p hStream and stores it in corresponding
@@ -10135,10 +10170,10 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
\sa
::CUaccessPolicyWindow*/
fn cuStreamGetAttribute_ptsz(
- hStream: cuda_types::CUstream,
- attr: cuda_types::CUstreamAttrID,
- value_out: *mut cuda_types::CUstreamAttrValue,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ attr: cuda_types::cuda::CUstreamAttrID,
+ value_out: *mut cuda_types::cuda::CUstreamAttrValue,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets stream attribute.
Sets attribute \p attr on \p hStream from corresponding attribute of
@@ -10158,10 +10193,10 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
\sa
::CUaccessPolicyWindow*/
fn cuStreamSetAttribute_ptsz(
- hStream: cuda_types::CUstream,
- attr: cuda_types::CUstreamAttrID,
- value: *const cuda_types::CUstreamAttrValue,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ attr: cuda_types::cuda::CUstreamAttrID,
+ value: *const cuda_types::cuda::CUstreamAttrValue,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates an event
Creates an event *phEvent for the current context with the flags specified via
@@ -10200,9 +10235,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cudaEventCreate,
::cudaEventCreateWithFlags*/
fn cuEventCreate(
- phEvent: *mut cuda_types::CUevent,
+ phEvent: *mut cuda_types::cuda::CUevent,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Records an event
Captures in \p hEvent the contents of \p hStream at the time of this call.
@@ -10242,9 +10277,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cudaEventRecord,
::cuEventRecordWithFlags*/
fn cuEventRecord_ptsz(
- hEvent: cuda_types::CUevent,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hEvent: cuda_types::cuda::CUevent,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Records an event
Captures in \p hEvent the contents of \p hStream at the time of this call.
@@ -10291,10 +10326,10 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuEventRecord,
::cudaEventRecord*/
fn cuEventRecordWithFlags_ptsz(
- hEvent: cuda_types::CUevent,
- hStream: cuda_types::CUstream,
+ hEvent: cuda_types::cuda::CUevent,
+ hStream: cuda_types::cuda::CUstream,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Queries an event's status
Queries the status of all work currently captured by \p hEvent. See
@@ -10323,7 +10358,7 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuEventDestroy,
::cuEventElapsedTime,
::cudaEventQuery*/
- fn cuEventQuery(hEvent: cuda_types::CUevent) -> cuda_types::CUresult;
+ fn cuEventQuery(hEvent: cuda_types::cuda::CUevent) -> cuda_types::cuda::CUresult;
/** \brief Waits for an event to complete
Waits until the completion of all work currently captured in \p hEvent.
@@ -10351,7 +10386,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuEventDestroy,
::cuEventElapsedTime,
::cudaEventSynchronize*/
- fn cuEventSynchronize(hEvent: cuda_types::CUevent) -> cuda_types::CUresult;
+ fn cuEventSynchronize(
+ hEvent: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys an event
Destroys the event specified by \p hEvent.
@@ -10377,7 +10414,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuEventSynchronize,
::cuEventElapsedTime,
::cudaEventDestroy*/
- fn cuEventDestroy_v2(hEvent: cuda_types::CUevent) -> cuda_types::CUresult;
+ fn cuEventDestroy_v2(
+ hEvent: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Computes the elapsed time between two events
Computes the elapsed time between two events (in milliseconds with a
@@ -10421,9 +10460,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cudaEventElapsedTime*/
fn cuEventElapsedTime(
pMilliseconds: *mut f32,
- hStart: cuda_types::CUevent,
- hEnd: cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ hStart: cuda_types::cuda::CUevent,
+ hEnd: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
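// A minimal usage sketch for the event entry points declared above
// (cuEventCreate, cuEventRecord_ptsz, cuEventSynchronize, cuEventElapsedTime,
// cuEventDestroy_v2) under the new paths. Assumptions not taken from this diff:
// the functions are reachable as ordinary foreign functions with "C" linkage,
// the handle types are plain Copy values, a current context exists, and error
// handling is reduced to ignoring the returned CUresult.

use cuda_types::cuda::{CUevent, CUresult, CUstream};

extern "C" {
    fn cuEventCreate(phEvent: *mut CUevent, Flags: ::core::ffi::c_uint) -> CUresult;
    fn cuEventRecord_ptsz(hEvent: CUevent, hStream: CUstream) -> CUresult;
    fn cuEventSynchronize(hEvent: CUevent) -> CUresult;
    fn cuEventElapsedTime(pMilliseconds: *mut f32, hStart: CUevent, hEnd: CUevent) -> CUresult;
    fn cuEventDestroy_v2(hEvent: CUevent) -> CUresult;
}

// Times whatever gets enqueued on `stream` between the two records.
unsafe fn time_span_ms(stream: CUstream) -> f32 {
    let mut start: CUevent = std::mem::zeroed();
    let mut stop: CUevent = std::mem::zeroed();
    let _ = cuEventCreate(&mut start, 0);
    let _ = cuEventCreate(&mut stop, 0);
    let _ = cuEventRecord_ptsz(start, stream);
    // ... enqueue the work to be timed here ...
    let _ = cuEventRecord_ptsz(stop, stream);
    let _ = cuEventSynchronize(stop);
    let mut ms = 0.0f32;
    let _ = cuEventElapsedTime(&mut ms, start, stop);
    let _ = cuEventDestroy_v2(start);
    let _ = cuEventDestroy_v2(stop);
    ms
}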
/** \brief Imports an external memory object
Imports an externally allocated memory object and returns
@@ -10586,9 +10625,9 @@ CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF = 8
::cuExternalMemoryGetMappedBuffer,
::cuExternalMemoryGetMappedMipmappedArray*/
fn cuImportExternalMemory(
- extMem_out: *mut cuda_types::CUexternalMemory,
- memHandleDesc: *const cuda_types::CUDA_EXTERNAL_MEMORY_HANDLE_DESC,
- ) -> cuda_types::CUresult;
+ extMem_out: *mut cuda_types::cuda::CUexternalMemory,
+ memHandleDesc: *const cuda_types::cuda::CUDA_EXTERNAL_MEMORY_HANDLE_DESC,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Maps a buffer onto an imported memory object
Maps a buffer onto an imported memory object and returns a device
@@ -10640,10 +10679,10 @@ unsigned int flags;
::cuDestroyExternalMemory,
::cuExternalMemoryGetMappedMipmappedArray*/
fn cuExternalMemoryGetMappedBuffer(
- devPtr: *mut cuda_types::CUdeviceptr,
- extMem: cuda_types::CUexternalMemory,
- bufferDesc: *const cuda_types::CUDA_EXTERNAL_MEMORY_BUFFER_DESC,
- ) -> cuda_types::CUresult;
+ devPtr: *mut cuda_types::cuda::CUdeviceptr,
+ extMem: cuda_types::cuda::CUexternalMemory,
+ bufferDesc: *const cuda_types::cuda::CUDA_EXTERNAL_MEMORY_BUFFER_DESC,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Maps a CUDA mipmapped array onto an external memory object
Maps a CUDA mipmapped array onto an external object and returns a
@@ -10694,10 +10733,10 @@ unsigned int numLevels;
::cuDestroyExternalMemory,
::cuExternalMemoryGetMappedBuffer*/
fn cuExternalMemoryGetMappedMipmappedArray(
- mipmap: *mut cuda_types::CUmipmappedArray,
- extMem: cuda_types::CUexternalMemory,
- mipmapDesc: *const cuda_types::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC,
- ) -> cuda_types::CUresult;
+ mipmap: *mut cuda_types::cuda::CUmipmappedArray,
+ extMem: cuda_types::cuda::CUexternalMemory,
+ mipmapDesc: *const cuda_types::cuda::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys an external memory object.
Destroys the specified external memory object. Any existing buffers
@@ -10717,8 +10756,8 @@ unsigned int numLevels;
::cuExternalMemoryGetMappedBuffer,
::cuExternalMemoryGetMappedMipmappedArray*/
fn cuDestroyExternalMemory(
- extMem: cuda_types::CUexternalMemory,
- ) -> cuda_types::CUresult;
+ extMem: cuda_types::cuda::CUexternalMemory,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Imports an external semaphore
Imports an externally allocated synchronization object and returns
@@ -10874,9 +10913,9 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cuSignalExternalSemaphoresAsync,
::cuWaitExternalSemaphoresAsync*/
fn cuImportExternalSemaphore(
- extSem_out: *mut cuda_types::CUexternalSemaphore,
- semHandleDesc: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC,
- ) -> cuda_types::CUresult;
+ extSem_out: *mut cuda_types::cuda::CUexternalSemaphore,
+ semHandleDesc: *const cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Signals a set of external semaphore objects
Enqueues a signal operation on a set of externally allocated
@@ -10956,11 +10995,11 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cuDestroyExternalSemaphore,
::cuWaitExternalSemaphoresAsync*/
fn cuSignalExternalSemaphoresAsync_ptsz(
- extSemArray: *const cuda_types::CUexternalSemaphore,
- paramsArray: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,
+ extSemArray: *const cuda_types::cuda::CUexternalSemaphore,
+ paramsArray: *const cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,
numExtSems: ::core::ffi::c_uint,
- stream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ stream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Waits on a set of external semaphore objects
Enqueues a wait operation on a set of externally allocated
@@ -11034,11 +11073,11 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cuDestroyExternalSemaphore,
::cuSignalExternalSemaphoresAsync*/
fn cuWaitExternalSemaphoresAsync_ptsz(
- extSemArray: *const cuda_types::CUexternalSemaphore,
- paramsArray: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,
+ extSemArray: *const cuda_types::cuda::CUexternalSemaphore,
+ paramsArray: *const cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,
numExtSems: ::core::ffi::c_uint,
- stream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ stream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys an external semaphore
Destroys an external semaphore object and releases any references
@@ -11057,8 +11096,8 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cuSignalExternalSemaphoresAsync,
::cuWaitExternalSemaphoresAsync*/
fn cuDestroyExternalSemaphore(
- extSem: cuda_types::CUexternalSemaphore,
- ) -> cuda_types::CUresult;
+ extSem: cuda_types::cuda::CUexternalSemaphore,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Wait on a memory location
Enqueues a synchronization of the stream on the given memory location. Work
@@ -11100,11 +11139,11 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cuMemHostRegister,
::cuStreamWaitEvent*/
fn cuStreamWaitValue32_v2_ptsz(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Wait on a memory location
Enqueues a synchronization of the stream on the given memory location. Work
@@ -11145,11 +11184,11 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cuMemHostRegister,
::cuStreamWaitEvent*/
fn cuStreamWaitValue64_v2_ptsz(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Write a value to memory
Write a value to memory.
@@ -11176,11 +11215,11 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cuMemHostRegister,
::cuEventRecord*/
fn cuStreamWriteValue32_v2_ptsz(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Write a value to memory
Write a value to memory.
@@ -11209,11 +11248,11 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cuMemHostRegister,
::cuEventRecord*/
fn cuStreamWriteValue64_v2_ptsz(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
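// The wait-value/write-value entry points above implement lightweight
// cross-stream signalling: one stream writes a value to a device-visible
// location, another blocks until a condition on that location holds. A hedged
// sketch using the 32-bit variants, assuming cuuint32_t is a plain integer
// alias, the handles are Copy, and flag value 0 selects the default
// compare/write behaviour.

use cuda_types::cuda::{cuuint32_t, CUdeviceptr, CUresult, CUstream};

extern "C" {
    fn cuStreamWriteValue32_v2_ptsz(
        stream: CUstream,
        addr: CUdeviceptr,
        value: cuuint32_t,
        flags: ::core::ffi::c_uint,
    ) -> CUresult;
    fn cuStreamWaitValue32_v2_ptsz(
        stream: CUstream,
        addr: CUdeviceptr,
        value: cuuint32_t,
        flags: ::core::ffi::c_uint,
    ) -> CUresult;
}

// `producer` releases `consumer` by writing 1 to `flag`, a device-visible
// allocation obtained elsewhere; both calls only enqueue work.
unsafe fn signal_and_wait(producer: CUstream, consumer: CUstream, flag: CUdeviceptr) {
    let _ = cuStreamWaitValue32_v2_ptsz(consumer, flag, 1, 0); // 0: default compare
    let _ = cuStreamWriteValue32_v2_ptsz(producer, flag, 1, 0); // 0: default write
}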
/** \brief Batch operations to synchronize the stream via memory operations
This is a batch version of ::cuStreamWaitValue32() and ::cuStreamWriteValue32().
@@ -11254,11 +11293,11 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cuStreamWriteValue64,
::cuMemHostRegister*/
fn cuStreamBatchMemOp_v2_ptsz(
- stream: cuda_types::CUstream,
+ stream: cuda_types::cuda::CUstream,
count: ::core::ffi::c_uint,
- paramArray: *mut cuda_types::CUstreamBatchMemOpParams,
+ paramArray: *mut cuda_types::cuda::CUstreamBatchMemOpParams,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns information about a function
Returns in \p *pi the integer value of the attribute \p attrib on the kernel
@@ -11353,9 +11392,9 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cuKernelGetAttribute*/
fn cuFuncGetAttribute(
pi: *mut ::core::ffi::c_int,
- attrib: cuda_types::CUfunction_attribute,
- hfunc: cuda_types::CUfunction,
- ) -> cuda_types::CUresult;
+ attrib: cuda_types::cuda::CUfunction_attribute,
+ hfunc: cuda_types::cuda::CUfunction,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets information about a function
This call sets the value of a specified attribute \p attrib on the kernel given
@@ -11417,10 +11456,10 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cudaFuncSetAttribute,
::cuKernelSetAttribute*/
fn cuFuncSetAttribute(
- hfunc: cuda_types::CUfunction,
- attrib: cuda_types::CUfunction_attribute,
+ hfunc: cuda_types::cuda::CUfunction,
+ attrib: cuda_types::cuda::CUfunction_attribute,
value: ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the preferred cache configuration for a device function
On devices where the L1 cache and shared memory use the same hardware
@@ -11463,9 +11502,9 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cudaFuncSetCacheConfig,
::cuKernelSetCacheConfig*/
fn cuFuncSetCacheConfig(
- hfunc: cuda_types::CUfunction,
- config: cuda_types::CUfunc_cache,
- ) -> cuda_types::CUresult;
+ hfunc: cuda_types::cuda::CUfunction,
+ config: cuda_types::cuda::CUfunc_cache,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a module handle
Returns in \p *hmod the handle of the module that function \p hfunc
@@ -11489,9 +11528,9 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
\notefnerr
*/
fn cuFuncGetModule(
- hmod: *mut cuda_types::CUmodule,
- hfunc: cuda_types::CUfunction,
- ) -> cuda_types::CUresult;
+ hmod: *mut cuda_types::cuda::CUmodule,
+ hfunc: cuda_types::cuda::CUfunction,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the function name for a ::CUfunction handle
Returns in \p **name the function name associated with the function handle \p hfunc .
@@ -11511,8 +11550,8 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
*/
fn cuFuncGetName(
name: *mut *const ::core::ffi::c_char,
- hfunc: cuda_types::CUfunction,
- ) -> cuda_types::CUresult;
+ hfunc: cuda_types::cuda::CUfunction,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the offset and size of a kernel parameter in the device-side parameter layout
Queries the kernel parameter at \p paramIndex into \p func's list of parameters, and returns
@@ -11534,11 +11573,11 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
\sa ::cuKernelGetParamInfo*/
fn cuFuncGetParamInfo(
- func: cuda_types::CUfunction,
+ func: cuda_types::cuda::CUfunction,
paramIndex: usize,
paramOffset: *mut usize,
paramSize: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns if the function is loaded
Returns in \p state the loading state of \p function.
@@ -11554,9 +11593,9 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
\sa ::cuFuncLoad,
::cuModuleEnumerateFunctions*/
fn cuFuncIsLoaded(
- state: *mut cuda_types::CUfunctionLoadingState,
- function: cuda_types::CUfunction,
- ) -> cuda_types::CUresult;
+ state: *mut cuda_types::cuda::CUfunctionLoadingState,
+ function: cuda_types::cuda::CUfunction,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Loads a function
Finalizes function loading for \p function. Calling this API with a
@@ -11571,7 +11610,7 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
\sa ::cuModuleEnumerateFunctions,
::cuFuncIsLoaded*/
- fn cuFuncLoad(function: cuda_types::CUfunction) -> cuda_types::CUresult;
+ fn cuFuncLoad(function: cuda_types::cuda::CUfunction) -> cuda_types::cuda::CUresult;
/** \brief Launches a CUDA function ::CUfunction or a CUDA kernel ::CUkernel
Invokes the function ::CUfunction or the kernel ::CUkernel \p f
@@ -11690,7 +11729,7 @@ status = cuLaunchKernel(f, gx, gy, gz, bx, by, bz, sh, s, NULL, config);
::cuKernelGetAttribute,
::cuKernelSetAttribute*/
fn cuLaunchKernel_ptsz(
- f: cuda_types::CUfunction,
+ f: cuda_types::cuda::CUfunction,
gridDimX: ::core::ffi::c_uint,
gridDimY: ::core::ffi::c_uint,
gridDimZ: ::core::ffi::c_uint,
@@ -11698,10 +11737,10 @@ status = cuLaunchKernel(f, gx, gy, gz, bx, by, bz, sh, s, NULL, config);
blockDimY: ::core::ffi::c_uint,
blockDimZ: ::core::ffi::c_uint,
sharedMemBytes: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
kernelParams: *mut *mut ::core::ffi::c_void,
extra: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
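// cuLaunchKernel_ptsz takes the grid and block geometry explicitly and receives
// kernel arguments as an array of pointers to the argument values
// (`kernelParams`), with `extra` left null in the common case. A hedged launch
// sketch; the extern block and "C" linkage stand in for the bindings' real
// plumbing, and the kernel is assumed to take a raw device address and a count.

use cuda_types::cuda::{CUfunction, CUresult, CUstream};
use std::ptr;

extern "C" {
    fn cuLaunchKernel_ptsz(
        f: CUfunction,
        gridDimX: ::core::ffi::c_uint,
        gridDimY: ::core::ffi::c_uint,
        gridDimZ: ::core::ffi::c_uint,
        blockDimX: ::core::ffi::c_uint,
        blockDimY: ::core::ffi::c_uint,
        blockDimZ: ::core::ffi::c_uint,
        sharedMemBytes: ::core::ffi::c_uint,
        hStream: CUstream,
        kernelParams: *mut *mut ::core::ffi::c_void,
        extra: *mut *mut ::core::ffi::c_void,
    ) -> CUresult;
}

// Launches `f` over `n` elements with a 1-D grid; each kernel parameter is
// passed by address through `kernelParams`.
unsafe fn launch_1d(f: CUfunction, stream: CUstream, mut dptr: u64, mut n: u32) -> CUresult {
    let mut params = [
        &mut dptr as *mut u64 as *mut ::core::ffi::c_void,
        &mut n as *mut u32 as *mut ::core::ffi::c_void,
    ];
    let block = 256u32;
    let grid = (n + block - 1) / block;
    cuLaunchKernel_ptsz(
        f, grid, 1, 1, block, 1, 1,
        0,               // no dynamic shared memory
        stream,
        params.as_mut_ptr(),
        ptr::null_mut(), // `extra` unused when kernelParams is given
    )
}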
/** \brief Launches a CUDA function ::CUfunction or a CUDA kernel ::CUkernel with launch-time configuration
Invokes the function ::CUfunction or the kernel ::CUkernel \p f with the specified launch-time configuration
@@ -11936,11 +11975,11 @@ status = cuLaunchKernel(f, gx, gy, gz, bx, by, bz, sh, s, NULL, config);
::cuKernelGetAttribute,
::cuKernelSetAttribute*/
fn cuLaunchKernelEx_ptsz(
- config: *const cuda_types::CUlaunchConfig,
- f: cuda_types::CUfunction,
+ config: *const cuda_types::cuda::CUlaunchConfig,
+ f: cuda_types::cuda::CUfunction,
kernelParams: *mut *mut ::core::ffi::c_void,
extra: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Launches a CUDA function ::CUfunction or a CUDA kernel ::CUkernel where thread blocks
can cooperate and synchronize as they execute
@@ -12034,7 +12073,7 @@ status = cuLaunchKernel(f, gx, gy, gz, bx, by, bz, sh, s, NULL, config);
::cuKernelGetAttribute,
::cuKernelSetAttribute*/
fn cuLaunchCooperativeKernel_ptsz(
- f: cuda_types::CUfunction,
+ f: cuda_types::cuda::CUfunction,
gridDimX: ::core::ffi::c_uint,
gridDimY: ::core::ffi::c_uint,
gridDimZ: ::core::ffi::c_uint,
@@ -12042,9 +12081,9 @@ status = cuLaunchKernel(f, gx, gy, gz, bx, by, bz, sh, s, NULL, config);
blockDimY: ::core::ffi::c_uint,
blockDimZ: ::core::ffi::c_uint,
sharedMemBytes: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
kernelParams: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Launches CUDA functions on multiple devices where thread blocks can cooperate and synchronize as they execute
\deprecated This function is deprecated as of CUDA 11.3.
@@ -12182,10 +12221,10 @@ void **kernelParams;
::cuLaunchCooperativeKernel,
::cudaLaunchCooperativeKernelMultiDevice*/
fn cuLaunchCooperativeKernelMultiDevice(
- launchParamsList: *mut cuda_types::CUDA_LAUNCH_PARAMS,
+ launchParamsList: *mut cuda_types::cuda::CUDA_LAUNCH_PARAMS,
numDevices: ::core::ffi::c_uint,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Enqueues a host function call in a stream
Enqueues a host function to run in a stream. The function will be called
@@ -12248,10 +12287,10 @@ void **kernelParams;
::cuStreamAttachMemAsync,
::cuStreamAddCallback*/
fn cuLaunchHostFunc_ptsz(
- hStream: cuda_types::CUstream,
- fn_: cuda_types::CUhostFn,
+ hStream: cuda_types::cuda::CUstream,
+ fn_: cuda_types::cuda::CUhostFn,
userData: *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the block-dimensions for the function
\deprecated
@@ -12285,11 +12324,11 @@ void **kernelParams;
::cuLaunchGridAsync,
::cuLaunchKernel*/
fn cuFuncSetBlockShape(
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
x: ::core::ffi::c_int,
y: ::core::ffi::c_int,
z: ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the dynamic shared-memory size for the function
\deprecated
@@ -12321,9 +12360,9 @@ void **kernelParams;
::cuLaunchGridAsync,
::cuLaunchKernel*/
fn cuFuncSetSharedSize(
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
bytes: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the parameter size for the function
\deprecated
@@ -12353,9 +12392,9 @@ void **kernelParams;
::cuLaunchGridAsync,
::cuLaunchKernel*/
fn cuParamSetSize(
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
numbytes: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Adds an integer parameter to the function's argument list
\deprecated
@@ -12386,10 +12425,10 @@ void **kernelParams;
::cuLaunchGridAsync,
::cuLaunchKernel*/
fn cuParamSeti(
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
offset: ::core::ffi::c_int,
value: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Adds a floating-point parameter to the function's argument list
\deprecated
@@ -12420,10 +12459,10 @@ void **kernelParams;
::cuLaunchGridAsync,
::cuLaunchKernel*/
fn cuParamSetf(
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
offset: ::core::ffi::c_int,
value: f32,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Adds arbitrary data to the function's argument list
\deprecated
@@ -12456,11 +12495,11 @@ void **kernelParams;
::cuLaunchGridAsync,
::cuLaunchKernel*/
fn cuParamSetv(
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
offset: ::core::ffi::c_int,
ptr: *mut ::core::ffi::c_void,
numbytes: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Launches a CUDA function
\deprecated
@@ -12509,7 +12548,7 @@ void **kernelParams;
::cuLaunchGrid,
::cuLaunchGridAsync,
::cuLaunchKernel*/
- fn cuLaunch(f: cuda_types::CUfunction) -> cuda_types::CUresult;
+ fn cuLaunch(f: cuda_types::cuda::CUfunction) -> cuda_types::cuda::CUresult;
/** \brief Launches a CUDA function
\deprecated
@@ -12561,10 +12600,10 @@ void **kernelParams;
::cuLaunchGridAsync,
::cuLaunchKernel*/
fn cuLaunchGrid(
- f: cuda_types::CUfunction,
+ f: cuda_types::cuda::CUfunction,
grid_width: ::core::ffi::c_int,
grid_height: ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Launches a CUDA function
\deprecated
@@ -12624,11 +12663,11 @@ void **kernelParams;
::cuLaunchGrid,
::cuLaunchKernel*/
fn cuLaunchGridAsync(
- f: cuda_types::CUfunction,
+ f: cuda_types::cuda::CUfunction,
grid_width: ::core::ffi::c_int,
grid_height: ::core::ffi::c_int,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Adds a texture-reference to the function's argument list
\deprecated
@@ -12650,10 +12689,10 @@ void **kernelParams;
::CUDA_ERROR_INVALID_VALUE
\notefnerr*/
fn cuParamSetTexRef(
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
texunit: ::core::ffi::c_int,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the shared memory configuration for a device function.
\deprecated
@@ -12705,9 +12744,9 @@ void **kernelParams;
::cuLaunchKernel,
::cudaFuncSetSharedMemConfig*/
fn cuFuncSetSharedMemConfig(
- hfunc: cuda_types::CUfunction,
- config: cuda_types::CUsharedconfig,
- ) -> cuda_types::CUresult;
+ hfunc: cuda_types::cuda::CUfunction,
+ config: cuda_types::cuda::CUsharedconfig,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a graph
Creates an empty graph, which is returned via \p phGraph.
@@ -12738,9 +12777,9 @@ void **kernelParams;
::cuGraphGetEdges,
::cuGraphClone*/
fn cuGraphCreate(
- phGraph: *mut cuda_types::CUgraph,
+ phGraph: *mut cuda_types::cuda::CUgraph,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a kernel execution node and adds it to a graph
Creates a new kernel execution node and adds it to \p hGraph with \p numDependencies
@@ -12841,12 +12880,12 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddKernelNode_v2(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ nodeParams: *const cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a kernel node's parameters
Returns the parameters of kernel node \p hNode in \p nodeParams.
@@ -12876,9 +12915,9 @@ void **kernelParams;
::cuGraphAddKernelNode,
::cuGraphKernelNodeSetParams*/
fn cuGraphKernelNodeGetParams_v2(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUDA_KERNEL_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets a kernel node's parameters
Sets the parameters of kernel node \p hNode to \p nodeParams.
@@ -12900,9 +12939,9 @@ void **kernelParams;
::cuGraphAddKernelNode,
::cuGraphKernelNodeGetParams*/
fn cuGraphKernelNodeSetParams_v2(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a memcpy node and adds it to a graph
Creates a new memcpy node and adds it to \p hGraph with \p numDependencies
@@ -12949,13 +12988,13 @@ void **kernelParams;
::cuGraphAddHostNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddMemcpyNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- copyParams: *const cuda_types::CUDA_MEMCPY3D,
- ctx: cuda_types::CUcontext,
- ) -> cuda_types::CUresult;
+ copyParams: *const cuda_types::cuda::CUDA_MEMCPY3D,
+ ctx: cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a memcpy node's parameters
Returns the parameters of memcpy node \p hNode in \p nodeParams.
@@ -12976,9 +13015,9 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphMemcpyNodeSetParams*/
fn cuGraphMemcpyNodeGetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUDA_MEMCPY3D,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUDA_MEMCPY3D,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets a memcpy node's parameters
Sets the parameters of memcpy node \p hNode to \p nodeParams.
@@ -13000,9 +13039,9 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphMemcpyNodeGetParams*/
fn cuGraphMemcpyNodeSetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_MEMCPY3D,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_MEMCPY3D,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a memset node and adds it to a graph
Creates a new memset node and adds it to \p hGraph with \p numDependencies
@@ -13043,13 +13082,13 @@ void **kernelParams;
::cuGraphAddHostNode,
::cuGraphAddMemcpyNode*/
fn cuGraphAddMemsetNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- memsetParams: *const cuda_types::CUDA_MEMSET_NODE_PARAMS,
- ctx: cuda_types::CUcontext,
- ) -> cuda_types::CUresult;
+ memsetParams: *const cuda_types::cuda::CUDA_MEMSET_NODE_PARAMS,
+ ctx: cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a memset node's parameters
Returns the parameters of memset node \p hNode in \p nodeParams.
@@ -13070,9 +13109,9 @@ void **kernelParams;
::cuGraphAddMemsetNode,
::cuGraphMemsetNodeSetParams*/
fn cuGraphMemsetNodeGetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUDA_MEMSET_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUDA_MEMSET_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets a memset node's parameters
Sets the parameters of memset node \p hNode to \p nodeParams.
@@ -13094,9 +13133,9 @@ void **kernelParams;
::cuGraphAddMemsetNode,
::cuGraphMemsetNodeGetParams*/
fn cuGraphMemsetNodeSetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_MEMSET_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_MEMSET_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a host execution node and adds it to a graph
Creates a new CPU execution node and adds it to \p hGraph with \p numDependencies
@@ -13136,12 +13175,12 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddHostNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *const cuda_types::CUDA_HOST_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ nodeParams: *const cuda_types::cuda::CUDA_HOST_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a host node's parameters
Returns the parameters of host node \p hNode in \p nodeParams.
@@ -13162,9 +13201,9 @@ void **kernelParams;
::cuGraphAddHostNode,
::cuGraphHostNodeSetParams*/
fn cuGraphHostNodeGetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUDA_HOST_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUDA_HOST_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets a host node's parameters
Sets the parameters of host node \p hNode to \p nodeParams.
@@ -13186,9 +13225,9 @@ void **kernelParams;
::cuGraphAddHostNode,
::cuGraphHostNodeGetParams*/
fn cuGraphHostNodeSetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_HOST_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_HOST_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a child graph node and adds it to a graph
Creates a new node which executes an embedded graph, and adds it to \p hGraph with
@@ -13227,12 +13266,12 @@ void **kernelParams;
::cuGraphAddMemsetNode,
::cuGraphClone*/
fn cuGraphAddChildGraphNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- childGraph: cuda_types::CUgraph,
- ) -> cuda_types::CUresult;
+ childGraph: cuda_types::cuda::CUgraph,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets a handle to the embedded graph of a child graph node
Gets a handle to the embedded graph in a child graph node. This call
@@ -13257,9 +13296,9 @@ void **kernelParams;
::cuGraphAddChildGraphNode,
::cuGraphNodeFindInClone*/
fn cuGraphChildGraphNodeGetGraph(
- hNode: cuda_types::CUgraphNode,
- phGraph: *mut cuda_types::CUgraph,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ phGraph: *mut cuda_types::cuda::CUgraph,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates an empty node and adds it to a graph
Creates a new node which performs no operation, and adds it to \p hGraph with
@@ -13296,11 +13335,11 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddEmptyNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates an event record node and adds it to a graph
Creates a new event record node and adds it to \p hGraph with \p numDependencies
@@ -13340,12 +13379,12 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddEventRecordNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- event: cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ event: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the event associated with an event record node
Returns the event of event record node \p hNode in \p event_out.
@@ -13368,9 +13407,9 @@ void **kernelParams;
::cuEventRecordWithFlags,
::cuStreamWaitEvent*/
fn cuGraphEventRecordNodeGetEvent(
- hNode: cuda_types::CUgraphNode,
- event_out: *mut cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ event_out: *mut cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets an event record node's event
Sets the event of event record node \p hNode to \p event.
@@ -13394,9 +13433,9 @@ void **kernelParams;
::cuEventRecordWithFlags,
::cuStreamWaitEvent*/
fn cuGraphEventRecordNodeSetEvent(
- hNode: cuda_types::CUgraphNode,
- event: cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ event: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates an event wait node and adds it to a graph
Creates a new event wait node and adds it to \p hGraph with \p numDependencies
@@ -13437,12 +13476,12 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddEventWaitNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- event: cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ event: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the event associated with an event wait node
Returns the event of event wait node \p hNode in \p event_out.
@@ -13465,9 +13504,9 @@ void **kernelParams;
::cuEventRecordWithFlags,
::cuStreamWaitEvent*/
fn cuGraphEventWaitNodeGetEvent(
- hNode: cuda_types::CUgraphNode,
- event_out: *mut cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ event_out: *mut cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets an event wait node's event
Sets the event of event wait node \p hNode to \p event.
@@ -13491,9 +13530,9 @@ void **kernelParams;
::cuEventRecordWithFlags,
::cuStreamWaitEvent*/
fn cuGraphEventWaitNodeSetEvent(
- hNode: cuda_types::CUgraphNode,
- event: cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ event: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates an external semaphore signal node and adds it to a graph
Creates a new external semaphore signal node and adds it to \p hGraph with \p
@@ -13540,12 +13579,12 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddExternalSemaphoresSignalNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *const cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ nodeParams: *const cuda_types::cuda::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns an external semaphore signal node's parameters
Returns the parameters of an external semaphore signal node \p hNode in \p params_out.
@@ -13574,9 +13613,9 @@ void **kernelParams;
::cuSignalExternalSemaphoresAsync,
::cuWaitExternalSemaphoresAsync*/
fn cuGraphExternalSemaphoresSignalNodeGetParams(
- hNode: cuda_types::CUgraphNode,
- params_out: *mut cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ params_out: *mut cuda_types::cuda::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets an external semaphore signal node's parameters
Sets the parameters of an external semaphore signal node \p hNode to \p nodeParams.
@@ -13600,9 +13639,9 @@ void **kernelParams;
::cuSignalExternalSemaphoresAsync,
::cuWaitExternalSemaphoresAsync*/
fn cuGraphExternalSemaphoresSignalNodeSetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates an external semaphore wait node and adds it to a graph
Creates a new external semaphore wait node and adds it to \p hGraph with \p numDependencies
@@ -13649,12 +13688,12 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddExternalSemaphoresWaitNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *const cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ nodeParams: *const cuda_types::cuda::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns an external semaphore wait node's parameters
Returns the parameters of an external semaphore wait node \p hNode in \p params_out.
@@ -13683,9 +13722,9 @@ void **kernelParams;
::cuSignalExternalSemaphoresAsync,
::cuWaitExternalSemaphoresAsync*/
fn cuGraphExternalSemaphoresWaitNodeGetParams(
- hNode: cuda_types::CUgraphNode,
- params_out: *mut cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ params_out: *mut cuda_types::cuda::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets an external semaphore wait node's parameters
Sets the parameters of an external semaphore wait node \p hNode to \p nodeParams.
@@ -13709,9 +13748,9 @@ void **kernelParams;
::cuSignalExternalSemaphoresAsync,
::cuWaitExternalSemaphoresAsync*/
fn cuGraphExternalSemaphoresWaitNodeSetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a batch memory operation node and adds it to a graph
Creates a new batch memory operation node and adds it to \p hGraph with \p
@@ -13765,12 +13804,12 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddBatchMemOpNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *const cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ nodeParams: *const cuda_types::cuda::CUDA_BATCH_MEM_OP_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a batch mem op node's parameters
Returns the parameters of batch mem op node \p hNode in \p nodeParams_out.
@@ -13796,9 +13835,9 @@ void **kernelParams;
::cuGraphAddBatchMemOpNode,
::cuGraphBatchMemOpNodeSetParams*/
fn cuGraphBatchMemOpNodeGetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams_out: *mut cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams_out: *mut cuda_types::cuda::CUDA_BATCH_MEM_OP_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets a batch mem op node's parameters
Sets the parameters of batch mem op node \p hNode to \p nodeParams.
@@ -13823,9 +13862,9 @@ void **kernelParams;
::cuGraphAddBatchMemOpNode,
::cuGraphBatchMemOpNodeGetParams*/
fn cuGraphBatchMemOpNodeSetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_BATCH_MEM_OP_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the parameters for a batch mem op node in the given graphExec
Sets the parameters of a batch mem op node in an executable graph \p hGraphExec.
@@ -13870,10 +13909,10 @@ void **kernelParams;
::cuGraphBatchMemOpNodeSetParams,
::cuGraphInstantiate*/
fn cuGraphExecBatchMemOpNodeSetParams(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_BATCH_MEM_OP_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates an allocation node and adds it to a graph
Creates a new allocation node and adds it to \p hGraph with \p numDependencies
@@ -13946,12 +13985,12 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddMemAllocNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *mut cuda_types::CUDA_MEM_ALLOC_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ nodeParams: *mut cuda_types::cuda::CUDA_MEM_ALLOC_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a memory alloc node's parameters
Returns the parameters of a memory alloc node \p hNode in \p params_out.
@@ -13974,9 +14013,9 @@ void **kernelParams;
::cuGraphAddMemAllocNode,
::cuGraphMemFreeNodeGetParams*/
fn cuGraphMemAllocNodeGetParams(
- hNode: cuda_types::CUgraphNode,
- params_out: *mut cuda_types::CUDA_MEM_ALLOC_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ params_out: *mut cuda_types::cuda::CUDA_MEM_ALLOC_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a memory free node and adds it to a graph
Creates a new memory free node and adds it to \p hGraph with \p numDependencies
@@ -14032,12 +14071,12 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddMemFreeNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- dptr: cuda_types::CUdeviceptr,
- ) -> cuda_types::CUresult;
+ dptr: cuda_types::cuda::CUdeviceptr,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a memory free node's parameters
Returns the address of a memory free node \p hNode in \p dptr_out.
@@ -14057,9 +14096,9 @@ void **kernelParams;
::cuGraphAddMemFreeNode,
::cuGraphMemAllocNodeGetParams*/
fn cuGraphMemFreeNodeGetParams(
- hNode: cuda_types::CUgraphNode,
- dptr_out: *mut cuda_types::CUdeviceptr,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ dptr_out: *mut cuda_types::cuda::CUdeviceptr,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Free unused memory that was cached on the specified device for use with graphs back to the OS.
Blocks which are not in use by a graph that is either currently executing or scheduled to execute are
@@ -14076,7 +14115,9 @@ void **kernelParams;
::cuGraphAddMemFreeNode,
::cuDeviceSetGraphMemAttribute,
::cuDeviceGetGraphMemAttribute*/
- fn cuDeviceGraphMemTrim(device: cuda_types::CUdevice) -> cuda_types::CUresult;
+ fn cuDeviceGraphMemTrim(
+ device: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Query asynchronous allocation attributes related to graphs
Valid attributes are:
@@ -14102,10 +14143,10 @@ void **kernelParams;
::cuGraphAddMemAllocNode,
::cuGraphAddMemFreeNode*/
fn cuDeviceGetGraphMemAttribute(
- device: cuda_types::CUdevice,
- attr: cuda_types::CUgraphMem_attribute,
+ device: cuda_types::cuda::CUdevice,
+ attr: cuda_types::cuda::CUgraphMem_attribute,
value: *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Set asynchronous allocation attributes related to graphs
Valid attributes are:
@@ -14128,10 +14169,10 @@ void **kernelParams;
::cuGraphAddMemAllocNode,
::cuGraphAddMemFreeNode*/
fn cuDeviceSetGraphMemAttribute(
- device: cuda_types::CUdevice,
- attr: cuda_types::CUgraphMem_attribute,
+ device: cuda_types::cuda::CUdevice,
+ attr: cuda_types::cuda::CUgraphMem_attribute,
value: *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Clones a graph
This function creates a copy of \p originalGraph and returns it in \p phGraphClone.
@@ -14154,9 +14195,9 @@ void **kernelParams;
::cuGraphCreate,
::cuGraphNodeFindInClone*/
fn cuGraphClone(
- phGraphClone: *mut cuda_types::CUgraph,
- originalGraph: cuda_types::CUgraph,
- ) -> cuda_types::CUresult;
+ phGraphClone: *mut cuda_types::cuda::CUgraph,
+ originalGraph: cuda_types::cuda::CUgraph,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Finds a cloned version of a node
This function returns the node in \p hClonedGraph corresponding to \p hOriginalNode
@@ -14180,10 +14221,10 @@ void **kernelParams;
\sa
::cuGraphClone*/
fn cuGraphNodeFindInClone(
- phNode: *mut cuda_types::CUgraphNode,
- hOriginalNode: cuda_types::CUgraphNode,
- hClonedGraph: cuda_types::CUgraph,
- ) -> cuda_types::CUresult;
+ phNode: *mut cuda_types::cuda::CUgraphNode,
+ hOriginalNode: cuda_types::cuda::CUgraphNode,
+ hClonedGraph: cuda_types::cuda::CUgraph,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a node's type
Returns the node type of \p hNode in \p type.
@@ -14212,9 +14253,9 @@ void **kernelParams;
::cuGraphMemsetNodeGetParams,
::cuGraphMemsetNodeSetParams*/
fn cuGraphNodeGetType(
- hNode: cuda_types::CUgraphNode,
- type_: *mut cuda_types::CUgraphNodeType,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ type_: *mut cuda_types::cuda::CUgraphNodeType,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a graph's nodes
Returns a list of \p hGraph's nodes. \p nodes may be NULL, in which case this
@@ -14243,10 +14284,10 @@ void **kernelParams;
::cuGraphNodeGetDependencies,
::cuGraphNodeGetDependentNodes*/
fn cuGraphGetNodes(
- hGraph: cuda_types::CUgraph,
- nodes: *mut cuda_types::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ nodes: *mut cuda_types::cuda::CUgraphNode,
numNodes: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
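// As the comment above notes, `nodes` may be NULL, which gives the usual
// two-call size-query pattern: ask for the count first, then size a buffer and
// fetch the handles. A hedged sketch, assuming CUgraphNode is a plain handle
// type and ignoring the returned CUresult.

use cuda_types::cuda::{CUgraph, CUgraphNode, CUresult};
use std::ptr;

extern "C" {
    fn cuGraphGetNodes(
        hGraph: CUgraph,
        nodes: *mut CUgraphNode,
        numNodes: *mut usize,
    ) -> CUresult;
}

unsafe fn collect_nodes(graph: CUgraph) -> Vec<CUgraphNode> {
    let mut count = 0usize;
    let _ = cuGraphGetNodes(graph, ptr::null_mut(), &mut count); // query count only
    let mut nodes: Vec<CUgraphNode> = Vec::with_capacity(count);
    let _ = cuGraphGetNodes(graph, nodes.as_mut_ptr(), &mut count); // fill buffer
    nodes.set_len(count);
    nodes
}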
/** \brief Returns a graph's root nodes
Returns a list of \p hGraph's root nodes. \p rootNodes may be NULL, in which case this
@@ -14275,10 +14316,10 @@ void **kernelParams;
::cuGraphNodeGetDependencies,
::cuGraphNodeGetDependentNodes*/
fn cuGraphGetRootNodes(
- hGraph: cuda_types::CUgraph,
- rootNodes: *mut cuda_types::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ rootNodes: *mut cuda_types::cuda::CUgraphNode,
numRootNodes: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a graph's dependency edges
Returns a list of \p hGraph's dependency edges. Edges are returned via corresponding
@@ -14310,11 +14351,11 @@ void **kernelParams;
::cuGraphNodeGetDependencies,
::cuGraphNodeGetDependentNodes*/
fn cuGraphGetEdges(
- hGraph: cuda_types::CUgraph,
- from: *mut cuda_types::CUgraphNode,
- to: *mut cuda_types::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ from: *mut cuda_types::cuda::CUgraphNode,
+ to: *mut cuda_types::cuda::CUgraphNode,
numEdges: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a graph's dependency edges (12.3+)
Returns a list of \p hGraph's dependency edges. Edges are returned via corresponding
@@ -14352,12 +14393,12 @@ void **kernelParams;
::cuGraphNodeGetDependencies,
::cuGraphNodeGetDependentNodes*/
fn cuGraphGetEdges_v2(
- hGraph: cuda_types::CUgraph,
- from: *mut cuda_types::CUgraphNode,
- to: *mut cuda_types::CUgraphNode,
- edgeData: *mut cuda_types::CUgraphEdgeData,
+ hGraph: cuda_types::cuda::CUgraph,
+ from: *mut cuda_types::cuda::CUgraphNode,
+ to: *mut cuda_types::cuda::CUgraphNode,
+ edgeData: *mut cuda_types::cuda::CUgraphEdgeData,
numEdges: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a node's dependencies
Returns a list of \p node's dependencies. \p dependencies may be NULL, in which case this
@@ -14386,10 +14427,10 @@ void **kernelParams;
::cuGraphAddDependencies,
::cuGraphRemoveDependencies*/
fn cuGraphNodeGetDependencies(
- hNode: cuda_types::CUgraphNode,
- dependencies: *mut cuda_types::CUgraphNode,
+ hNode: cuda_types::cuda::CUgraphNode,
+ dependencies: *mut cuda_types::cuda::CUgraphNode,
numDependencies: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a node's dependencies (12.3+)
Returns a list of \p node's dependencies. \p dependencies may be NULL, in which case this
@@ -14424,11 +14465,11 @@ void **kernelParams;
::cuGraphAddDependencies,
::cuGraphRemoveDependencies*/
fn cuGraphNodeGetDependencies_v2(
- hNode: cuda_types::CUgraphNode,
- dependencies: *mut cuda_types::CUgraphNode,
- edgeData: *mut cuda_types::CUgraphEdgeData,
+ hNode: cuda_types::cuda::CUgraphNode,
+ dependencies: *mut cuda_types::cuda::CUgraphNode,
+ edgeData: *mut cuda_types::cuda::CUgraphEdgeData,
numDependencies: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a node's dependent nodes
Returns a list of \p node's dependent nodes. \p dependentNodes may be NULL, in which
@@ -14458,10 +14499,10 @@ void **kernelParams;
::cuGraphAddDependencies,
::cuGraphRemoveDependencies*/
fn cuGraphNodeGetDependentNodes(
- hNode: cuda_types::CUgraphNode,
- dependentNodes: *mut cuda_types::CUgraphNode,
+ hNode: cuda_types::cuda::CUgraphNode,
+ dependentNodes: *mut cuda_types::cuda::CUgraphNode,
numDependentNodes: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a node's dependent nodes (12.3+)
Returns a list of \p node's dependent nodes. \p dependentNodes may be NULL, in which
@@ -14497,11 +14538,11 @@ void **kernelParams;
::cuGraphAddDependencies,
::cuGraphRemoveDependencies*/
fn cuGraphNodeGetDependentNodes_v2(
- hNode: cuda_types::CUgraphNode,
- dependentNodes: *mut cuda_types::CUgraphNode,
- edgeData: *mut cuda_types::CUgraphEdgeData,
+ hNode: cuda_types::cuda::CUgraphNode,
+ dependentNodes: *mut cuda_types::cuda::CUgraphNode,
+ edgeData: *mut cuda_types::cuda::CUgraphEdgeData,
numDependentNodes: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Adds dependency edges to a graph
The number of dependencies to be added is defined by \p numDependencies
@@ -14528,11 +14569,11 @@ void **kernelParams;
::cuGraphNodeGetDependencies,
::cuGraphNodeGetDependentNodes*/
fn cuGraphAddDependencies(
- hGraph: cuda_types::CUgraph,
- from: *const cuda_types::CUgraphNode,
- to: *const cuda_types::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ from: *const cuda_types::cuda::CUgraphNode,
+ to: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Adds dependency edges to a graph (12.3+)
The number of dependencies to be added is defined by \p numDependencies
@@ -14560,12 +14601,12 @@ void **kernelParams;
::cuGraphNodeGetDependencies,
::cuGraphNodeGetDependentNodes*/
fn cuGraphAddDependencies_v2(
- hGraph: cuda_types::CUgraph,
- from: *const cuda_types::CUgraphNode,
- to: *const cuda_types::CUgraphNode,
- edgeData: *const cuda_types::CUgraphEdgeData,
+ hGraph: cuda_types::cuda::CUgraph,
+ from: *const cuda_types::cuda::CUgraphNode,
+ to: *const cuda_types::cuda::CUgraphNode,
+ edgeData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Removes dependency edges from a graph
The number of \p dependencies to be removed is defined by \p numDependencies.
@@ -14595,11 +14636,11 @@ void **kernelParams;
::cuGraphNodeGetDependencies,
::cuGraphNodeGetDependentNodes*/
fn cuGraphRemoveDependencies(
- hGraph: cuda_types::CUgraph,
- from: *const cuda_types::CUgraphNode,
- to: *const cuda_types::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ from: *const cuda_types::cuda::CUgraphNode,
+ to: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Removes dependency edges from a graph (12.3+)
The number of \p dependencies to be removed is defined by \p numDependencies.
@@ -14633,12 +14674,12 @@ void **kernelParams;
::cuGraphNodeGetDependencies,
::cuGraphNodeGetDependentNodes*/
fn cuGraphRemoveDependencies_v2(
- hGraph: cuda_types::CUgraph,
- from: *const cuda_types::CUgraphNode,
- to: *const cuda_types::CUgraphNode,
- edgeData: *const cuda_types::CUgraphEdgeData,
+ hGraph: cuda_types::cuda::CUgraph,
+ from: *const cuda_types::cuda::CUgraphNode,
+ to: *const cuda_types::cuda::CUgraphNode,
+ edgeData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Remove a node from the graph
Removes \p hNode from its graph. This operation also severs any dependencies of other nodes
@@ -14662,7 +14703,9 @@ void **kernelParams;
::cuGraphAddHostNode,
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
- fn cuGraphDestroyNode(hNode: cuda_types::CUgraphNode) -> cuda_types::CUresult;
+ fn cuGraphDestroyNode(
+ hNode: cuda_types::cuda::CUgraphNode,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates an executable graph from a graph
Instantiates \p hGraph as an executable graph. The graph is validated for any
@@ -14732,10 +14775,10 @@ void **kernelParams;
::cuGraphLaunch,
::cuGraphExecDestroy*/
fn cuGraphInstantiateWithFlags(
- phGraphExec: *mut cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
+ phGraphExec: *mut cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
flags: ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
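As a hedged sketch of how the instantiation entry point above is typically driven (again not part of this patch: simplified handle aliases, direct libcuda linkage, and the helper name instantiate is illustrative):

use std::ffi::c_void;
use std::ptr;

type CUresult = u32;          // 0 == CUDA_SUCCESS
type CUgraph = *mut c_void;
type CUgraphExec = *mut c_void;

#[link(name = "cuda")]
extern "C" {
    fn cuGraphInstantiateWithFlags(
        phGraphExec: *mut CUgraphExec,
        hGraph: CUgraph,
        flags: u64,
    ) -> CUresult;
}

/// Instantiates `graph` into an executable graph with no special flags.
unsafe fn instantiate(graph: CUgraph) -> Result<CUgraphExec, CUresult> {
    let mut exec: CUgraphExec = ptr::null_mut();
    let status = cuGraphInstantiateWithFlags(&mut exec, graph, 0);
    if status == 0 { Ok(exec) } else { Err(status) }
}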
/** \brief Creates an executable graph from a graph
Instantiates \p hGraph as an executable graph according to the \p instantiateParams structure.
@@ -14836,10 +14879,10 @@ CUgraphInstantiateResult result_out;
::cuGraphInstantiate,
::cuGraphExecDestroy*/
fn cuGraphInstantiateWithParams_ptsz(
- phGraphExec: *mut cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
- instantiateParams: *mut cuda_types::CUDA_GRAPH_INSTANTIATE_PARAMS,
- ) -> cuda_types::CUresult;
+ phGraphExec: *mut cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
+ instantiateParams: *mut cuda_types::cuda::CUDA_GRAPH_INSTANTIATE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Query the instantiation flags of an executable graph
Returns the flags that were passed to instantiation for the given executable graph.
@@ -14859,9 +14902,9 @@ CUgraphInstantiateResult result_out;
::cuGraphInstantiate,
::cuGraphInstantiateWithParams*/
fn cuGraphExecGetFlags(
- hGraphExec: cuda_types::CUgraphExec,
- flags: *mut cuda_types::cuuint64_t,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ flags: *mut cuda_types::cuda::cuuint64_t,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the parameters for a kernel node in the given graphExec
Sets the parameters of a kernel node in an executable graph \p hGraphExec.
@@ -14916,10 +14959,10 @@ CUgraphInstantiateResult result_out;
::cuGraphExecUpdate,
::cuGraphInstantiate*/
fn cuGraphExecKernelNodeSetParams_v2(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the parameters for a memcpy node in the given graphExec.
Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had
@@ -14964,11 +15007,11 @@ CUgraphInstantiateResult result_out;
::cuGraphExecUpdate,
::cuGraphInstantiate*/
fn cuGraphExecMemcpyNodeSetParams(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- copyParams: *const cuda_types::CUDA_MEMCPY3D,
- ctx: cuda_types::CUcontext,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ copyParams: *const cuda_types::cuda::CUDA_MEMCPY3D,
+ ctx: cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the parameters for a memset node in the given graphExec.
Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had
@@ -15013,11 +15056,11 @@ CUgraphInstantiateResult result_out;
::cuGraphExecUpdate,
::cuGraphInstantiate*/
fn cuGraphExecMemsetNodeSetParams(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- memsetParams: *const cuda_types::CUDA_MEMSET_NODE_PARAMS,
- ctx: cuda_types::CUcontext,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ memsetParams: *const cuda_types::cuda::CUDA_MEMSET_NODE_PARAMS,
+ ctx: cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the parameters for a host node in the given graphExec.
Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had
@@ -15053,10 +15096,10 @@ CUgraphInstantiateResult result_out;
::cuGraphExecUpdate,
::cuGraphInstantiate*/
fn cuGraphExecHostNodeSetParams(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_HOST_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_HOST_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Updates node parameters in the child graph node in the given graphExec.
Updates the work represented by \p hNode in \p hGraphExec as though the nodes contained
@@ -15098,10 +15141,10 @@ CUgraphInstantiateResult result_out;
::cuGraphExecUpdate,
::cuGraphInstantiate*/
fn cuGraphExecChildGraphNodeSetParams(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- childGraph: cuda_types::CUgraph,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ childGraph: cuda_types::cuda::CUgraph,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the event for an event record node in the given graphExec
Sets the event of an event record node in an executable graph \p hGraphExec.
@@ -15140,10 +15183,10 @@ CUgraphInstantiateResult result_out;
::cuGraphExecUpdate,
::cuGraphInstantiate*/
fn cuGraphExecEventRecordNodeSetEvent(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- event: cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ event: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the event for an event wait node in the given graphExec
Sets the event of an event wait node in an executable graph \p hGraphExec.
@@ -15182,10 +15225,10 @@ CUgraphInstantiateResult result_out;
::cuGraphExecUpdate,
::cuGraphInstantiate*/
fn cuGraphExecEventWaitNodeSetEvent(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- event: cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ event: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the parameters for an external semaphore signal node in the given graphExec
Sets the parameters of an external semaphore signal node in an executable graph \p hGraphExec.
@@ -15227,10 +15270,10 @@ CUgraphInstantiateResult result_out;
::cuGraphExecUpdate,
::cuGraphInstantiate*/
fn cuGraphExecExternalSemaphoresSignalNodeSetParams(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the parameters for an external semaphore wait node in the given graphExec
Sets the parameters of an external semaphore wait node in an executable graph \p hGraphExec.
@@ -15272,10 +15315,10 @@ CUgraphInstantiateResult result_out;
::cuGraphExecUpdate,
::cuGraphInstantiate*/
fn cuGraphExecExternalSemaphoresWaitNodeSetParams(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Enables or disables the specified node in the given graphExec
Sets \p hNode to be either enabled or disabled. Disabled nodes are functionally equivalent
@@ -15314,10 +15357,10 @@ CUgraphInstantiateResult result_out;
::cuGraphInstantiate
::cuGraphLaunch*/
fn cuGraphNodeSetEnabled(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
isEnabled: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Query whether a node in the given graphExec is enabled
Sets isEnabled to 1 if \p hNode is enabled, or 0 if \p hNode is disabled.
@@ -15346,10 +15389,10 @@ CUgraphInstantiateResult result_out;
::cuGraphInstantiate
::cuGraphLaunch*/
fn cuGraphNodeGetEnabled(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
isEnabled: *mut ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
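A small illustrative sketch of the enable/disable pair above, under the same assumptions (raw-pointer stand-ins for the handles, direct libcuda linkage, invented helper name):

use std::ffi::c_void;

type CUresult = u32;
type CUgraphExec = *mut c_void;
type CUgraphNode = *mut c_void;

#[link(name = "cuda")]
extern "C" {
    fn cuGraphNodeSetEnabled(hGraphExec: CUgraphExec, hNode: CUgraphNode, isEnabled: u32) -> CUresult;
    fn cuGraphNodeGetEnabled(hGraphExec: CUgraphExec, hNode: CUgraphNode, isEnabled: *mut u32) -> CUresult;
}

/// Disables `node` in `exec` (it becomes a no-op on the next launch)
/// and reads the flag back to confirm.
unsafe fn disable_and_check(exec: CUgraphExec, node: CUgraphNode) -> Result<bool, CUresult> {
    let status = cuGraphNodeSetEnabled(exec, node, 0);
    if status != 0 {
        return Err(status);
    }
    let mut flag = 1u32;
    let status = cuGraphNodeGetEnabled(exec, node, &mut flag);
    if status != 0 {
        return Err(status);
    }
    Ok(flag == 0)
}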
/** \brief Uploads an executable graph in a stream
Uploads \p hGraphExec to the device in \p hStream without executing it. Uploads of
@@ -15373,9 +15416,9 @@ CUgraphInstantiateResult result_out;
::cuGraphLaunch,
::cuGraphExecDestroy*/
fn cuGraphUpload_ptsz(
- hGraphExec: cuda_types::CUgraphExec,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Launches an executable graph in a stream
Executes \p hGraphExec in \p hStream. Only one instance of \p hGraphExec may be executing
@@ -15403,9 +15446,9 @@ CUgraphInstantiateResult result_out;
::cuGraphUpload,
::cuGraphExecDestroy*/
fn cuGraphLaunch_ptsz(
- hGraphExec: cuda_types::CUgraphExec,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
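The _ptsz suffix above denotes the per-thread-default-stream variants; application code normally calls the un-suffixed exported names, which is what the sketch below assumes (not part of this patch, simplified handle aliases, invented helper name):

use std::ffi::c_void;

type CUresult = u32;
type CUgraphExec = *mut c_void;
type CUstream = *mut c_void;

#[link(name = "cuda")]
extern "C" {
    fn cuGraphUpload(hGraphExec: CUgraphExec, hStream: CUstream) -> CUresult;
    fn cuGraphLaunch(hGraphExec: CUgraphExec, hStream: CUstream) -> CUresult;
}

/// Pre-uploads `exec` to the device, then launches it on `stream`.
unsafe fn upload_and_launch(exec: CUgraphExec, stream: CUstream) -> CUresult {
    let status = cuGraphUpload(exec, stream);
    if status != 0 {
        return status;
    }
    cuGraphLaunch(exec, stream)
}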
/** \brief Destroys an executable graph
Destroys the executable graph specified by \p hGraphExec, as well
@@ -15427,7 +15470,9 @@ CUgraphInstantiateResult result_out;
::cuGraphInstantiate,
::cuGraphUpload,
::cuGraphLaunch*/
- fn cuGraphExecDestroy(hGraphExec: cuda_types::CUgraphExec) -> cuda_types::CUresult;
+ fn cuGraphExecDestroy(
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys a graph
Destroys the graph specified by \p hGraph, as well as all of its nodes.
@@ -15444,7 +15489,7 @@ CUgraphInstantiateResult result_out;
\sa
::cuGraphCreate*/
- fn cuGraphDestroy(hGraph: cuda_types::CUgraph) -> cuda_types::CUresult;
+ fn cuGraphDestroy(hGraph: cuda_types::cuda::CUgraph) -> cuda_types::cuda::CUresult;
/** \brief Check whether an executable graph can be updated with a graph and perform the update if possible
Updates the node parameters in the instantiated graph specified by \p hGraphExec with the
@@ -15532,10 +15577,10 @@ CUgraphInstantiateResult result_out;
\sa
::cuGraphInstantiate*/
fn cuGraphExecUpdate_v2(
- hGraphExec: cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
- resultInfo: *mut cuda_types::CUgraphExecUpdateResultInfo,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
+ resultInfo: *mut cuda_types::cuda::CUgraphExecUpdateResultInfo,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies attributes from source node to destination node.
Copies attributes from source node \p src to destination node \p dst.
@@ -15553,9 +15598,9 @@ CUgraphInstantiateResult result_out;
\sa
::CUaccessPolicyWindow*/
fn cuGraphKernelNodeCopyAttributes(
- dst: cuda_types::CUgraphNode,
- src: cuda_types::CUgraphNode,
- ) -> cuda_types::CUresult;
+ dst: cuda_types::cuda::CUgraphNode,
+ src: cuda_types::cuda::CUgraphNode,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Queries node attribute.
Queries attribute \p attr from node \p hNode and stores it in corresponding
@@ -15574,10 +15619,10 @@ CUgraphInstantiateResult result_out;
\sa
::CUaccessPolicyWindow*/
fn cuGraphKernelNodeGetAttribute(
- hNode: cuda_types::CUgraphNode,
- attr: cuda_types::CUkernelNodeAttrID,
- value_out: *mut cuda_types::CUkernelNodeAttrValue,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ attr: cuda_types::cuda::CUkernelNodeAttrID,
+ value_out: *mut cuda_types::cuda::CUkernelNodeAttrValue,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets node attribute.
Sets attribute \p attr on node \p hNode from corresponding attribute of
@@ -15596,10 +15641,10 @@ CUgraphInstantiateResult result_out;
\sa
::CUaccessPolicyWindow*/
fn cuGraphKernelNodeSetAttribute(
- hNode: cuda_types::CUgraphNode,
- attr: cuda_types::CUkernelNodeAttrID,
- value: *const cuda_types::CUkernelNodeAttrValue,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ attr: cuda_types::cuda::CUkernelNodeAttrID,
+ value: *const cuda_types::cuda::CUkernelNodeAttrValue,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Write a DOT file describing graph structure
Using the provided \p hGraph, write to \p path a DOT formatted description of the graph.
@@ -15616,10 +15661,10 @@ CUgraphInstantiateResult result_out;
::CUDA_ERROR_INVALID_VALUE,
::CUDA_ERROR_OPERATING_SYSTEM*/
fn cuGraphDebugDotPrint(
- hGraph: cuda_types::CUgraph,
+ hGraph: cuda_types::cuda::CUgraph,
path: *const ::core::ffi::c_char,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
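A short sketch of dumping a graph to a DOT file with the function above (not part of this patch; CString handling shown explicitly, the helper name dump_graph is illustrative, flags 0 requests the default verbosity):

use std::ffi::{c_char, c_void, CString};

type CUresult = u32;
type CUgraph = *mut c_void;

#[link(name = "cuda")]
extern "C" {
    fn cuGraphDebugDotPrint(hGraph: CUgraph, path: *const c_char, flags: u32) -> CUresult;
}

/// Writes a DOT description of `graph` to `path` with default verbosity.
unsafe fn dump_graph(graph: CUgraph, path: &str) -> CUresult {
    let path = CString::new(path).expect("path must not contain NUL bytes");
    cuGraphDebugDotPrint(graph, path.as_ptr(), 0)
}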
/** \brief Create a user object
Create a user object with the specified destructor callback and initial reference count. The
@@ -15653,12 +15698,12 @@ CUgraphInstantiateResult result_out;
::cuGraphReleaseUserObject,
::cuGraphCreate*/
fn cuUserObjectCreate(
- object_out: *mut cuda_types::CUuserObject,
+ object_out: *mut cuda_types::cuda::CUuserObject,
ptr: *mut ::core::ffi::c_void,
- destroy: cuda_types::CUhostFn,
+ destroy: cuda_types::cuda::CUhostFn,
initialRefcount: ::core::ffi::c_uint,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Retain a reference to a user object
Retains new references to a user object. The new references are owned by the caller.
@@ -15680,9 +15725,9 @@ CUgraphInstantiateResult result_out;
::cuGraphReleaseUserObject,
::cuGraphCreate*/
fn cuUserObjectRetain(
- object: cuda_types::CUuserObject,
+ object: cuda_types::cuda::CUuserObject,
count: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Release a reference to a user object
Releases user object references owned by the caller. The object's destructor is invoked if
@@ -15708,9 +15753,9 @@ CUgraphInstantiateResult result_out;
::cuGraphReleaseUserObject,
::cuGraphCreate*/
fn cuUserObjectRelease(
- object: cuda_types::CUuserObject,
+ object: cuda_types::cuda::CUuserObject,
count: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Retain a reference to a user object from a graph
Creates or moves user object references that will be owned by a CUDA graph.
@@ -15736,11 +15781,11 @@ CUgraphInstantiateResult result_out;
::cuGraphReleaseUserObject,
::cuGraphCreate*/
fn cuGraphRetainUserObject(
- graph: cuda_types::CUgraph,
- object: cuda_types::CUuserObject,
+ graph: cuda_types::cuda::CUgraph,
+ object: cuda_types::cuda::CUuserObject,
count: ::core::ffi::c_uint,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Release a user object reference from a graph
Releases user object references owned by a graph.
@@ -15763,10 +15808,10 @@ CUgraphInstantiateResult result_out;
::cuGraphRetainUserObject,
::cuGraphCreate*/
fn cuGraphReleaseUserObject(
- graph: cuda_types::CUgraph,
- object: cuda_types::CUuserObject,
+ graph: cuda_types::cuda::CUgraph,
+ object: cuda_types::cuda::CUuserObject,
count: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Adds a node of arbitrary type to a graph
Creates a new node in \p hGraph described by \p nodeParams with \p numDependencies
@@ -15804,12 +15849,12 @@ CUgraphInstantiateResult result_out;
::cuGraphNodeSetParams,
::cuGraphExecNodeSetParams*/
fn cuGraphAddNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *mut cuda_types::CUgraphNodeParams,
- ) -> cuda_types::CUresult;
+ nodeParams: *mut cuda_types::cuda::CUgraphNodeParams,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Adds a node of arbitrary type to a graph (12.3+)
Creates a new node in \p hGraph described by \p nodeParams with \p numDependencies
@@ -15849,13 +15894,13 @@ CUgraphInstantiateResult result_out;
::cuGraphNodeSetParams,
::cuGraphExecNodeSetParams*/
fn cuGraphAddNode_v2(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
- dependencyData: *const cuda_types::CUgraphEdgeData,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
+ dependencyData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
- nodeParams: *mut cuda_types::CUgraphNodeParams,
- ) -> cuda_types::CUresult;
+ nodeParams: *mut cuda_types::cuda::CUgraphNodeParams,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Updates a graph node's parameters

Sets the parameters of graph node \p hNode to \p nodeParams. The node type specified by
@@ -15879,9 +15924,9 @@ CUgraphInstantiateResult result_out;
::cuGraphAddNode,
::cuGraphExecNodeSetParams*/
fn cuGraphNodeSetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUgraphNodeParams,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUgraphNodeParams,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Updates a graph node's parameters in an instantiated graph
Sets the parameters of a node in an executable graph \p hGraphExec. The node is identified
@@ -15926,10 +15971,10 @@ CUgraphInstantiateResult result_out;
::cuGraphExecUpdate,
::cuGraphInstantiate*/
fn cuGraphExecNodeSetParams(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUgraphNodeParams,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUgraphNodeParams,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a conditional handle
Creates a conditional handle associated with \p hGraph.
@@ -15956,12 +16001,12 @@ CUgraphInstantiateResult result_out;
\sa
::cuGraphAddNode*/
fn cuGraphConditionalHandleCreate(
- pHandle_out: *mut cuda_types::CUgraphConditionalHandle,
- hGraph: cuda_types::CUgraph,
- ctx: cuda_types::CUcontext,
+ pHandle_out: *mut cuda_types::cuda::CUgraphConditionalHandle,
+ hGraph: cuda_types::cuda::CUgraph,
+ ctx: cuda_types::cuda::CUcontext,
defaultLaunchValue: ::core::ffi::c_uint,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns occupancy of a function
Returns in \p *numBlocks the number of the maximum active blocks per
@@ -15985,10 +16030,10 @@ CUgraphInstantiateResult result_out;
::cudaOccupancyMaxActiveBlocksPerMultiprocessor*/
fn cuOccupancyMaxActiveBlocksPerMultiprocessor(
numBlocks: *mut ::core::ffi::c_int,
- func: cuda_types::CUfunction,
+ func: cuda_types::cuda::CUfunction,
blockSize: ::core::ffi::c_int,
dynamicSMemSize: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
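An illustrative occupancy query using the declaration above (same caveats: not part of this patch, simplified handle aliases, direct libcuda linkage, invented helper name):

use std::ffi::c_void;

type CUresult = u32;
type CUfunction = *mut c_void;

#[link(name = "cuda")]
extern "C" {
    fn cuOccupancyMaxActiveBlocksPerMultiprocessor(
        numBlocks: *mut i32,
        func: CUfunction,
        blockSize: i32,
        dynamicSMemSize: usize,
    ) -> CUresult;
}

/// Returns how many blocks of `block_size` threads, each using `dyn_smem`
/// bytes of dynamic shared memory, can be resident on one SM for `func`.
unsafe fn max_active_blocks(func: CUfunction, block_size: i32, dyn_smem: usize) -> Result<i32, CUresult> {
    let mut num_blocks = 0i32;
    let status = cuOccupancyMaxActiveBlocksPerMultiprocessor(&mut num_blocks, func, block_size, dyn_smem);
    if status == 0 { Ok(num_blocks) } else { Err(status) }
}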
/** \brief Returns occupancy of a function
Returns in \p *numBlocks the number of the maximum active blocks per
@@ -16029,11 +16074,11 @@ CUgraphInstantiateResult result_out;
::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags*/
fn cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(
numBlocks: *mut ::core::ffi::c_int,
- func: cuda_types::CUfunction,
+ func: cuda_types::cuda::CUfunction,
blockSize: ::core::ffi::c_int,
dynamicSMemSize: usize,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Suggest a launch configuration with reasonable occupancy
Returns in \p *blockSize a reasonable block size that can achieve
@@ -16085,11 +16130,11 @@ CUgraphInstantiateResult result_out;
fn cuOccupancyMaxPotentialBlockSize(
minGridSize: *mut ::core::ffi::c_int,
blockSize: *mut ::core::ffi::c_int,
- func: cuda_types::CUfunction,
- blockSizeToDynamicSMemSize: cuda_types::CUoccupancyB2DSize,
+ func: cuda_types::cuda::CUfunction,
+ blockSizeToDynamicSMemSize: cuda_types::cuda::CUoccupancyB2DSize,
dynamicSMemSize: usize,
blockSizeLimit: ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Suggest a launch configuration with reasonable occupancy
An extended version of ::cuOccupancyMaxPotentialBlockSize. In
@@ -16135,12 +16180,12 @@ CUgraphInstantiateResult result_out;
fn cuOccupancyMaxPotentialBlockSizeWithFlags(
minGridSize: *mut ::core::ffi::c_int,
blockSize: *mut ::core::ffi::c_int,
- func: cuda_types::CUfunction,
- blockSizeToDynamicSMemSize: cuda_types::CUoccupancyB2DSize,
+ func: cuda_types::cuda::CUfunction,
+ blockSizeToDynamicSMemSize: cuda_types::cuda::CUoccupancyB2DSize,
dynamicSMemSize: usize,
blockSizeLimit: ::core::ffi::c_int,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns dynamic shared memory available per block when launching \p numBlocks blocks on SM
Returns in \p *dynamicSmemSize the maximum size of dynamic shared memory to allow \p numBlocks blocks per SM.
@@ -16160,10 +16205,10 @@ CUgraphInstantiateResult result_out;
\notefnerr*/
fn cuOccupancyAvailableDynamicSMemPerBlock(
dynamicSmemSize: *mut usize,
- func: cuda_types::CUfunction,
+ func: cuda_types::cuda::CUfunction,
numBlocks: ::core::ffi::c_int,
blockSize: ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Given the kernel function (\p func) and launch configuration
(\p config), return the maximum cluster size in \p *clusterSize.
@@ -16197,9 +16242,9 @@ CUgraphInstantiateResult result_out;
::cuFuncGetAttribute*/
fn cuOccupancyMaxPotentialClusterSize(
clusterSize: *mut ::core::ffi::c_int,
- func: cuda_types::CUfunction,
- config: *const cuda_types::CUlaunchConfig,
- ) -> cuda_types::CUresult;
+ func: cuda_types::cuda::CUfunction,
+ config: *const cuda_types::cuda::CUlaunchConfig,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Given the kernel function (\p func) and launch configuration
(\p config), return the maximum number of clusters that could co-exist
on the target device in \p *numClusters.
@@ -16235,9 +16280,9 @@ CUgraphInstantiateResult result_out;
::cuFuncGetAttribute*/
fn cuOccupancyMaxActiveClusters(
numClusters: *mut ::core::ffi::c_int,
- func: cuda_types::CUfunction,
- config: *const cuda_types::CUlaunchConfig,
- ) -> cuda_types::CUresult;
+ func: cuda_types::cuda::CUfunction,
+ config: *const cuda_types::cuda::CUlaunchConfig,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Binds an array as a texture reference
\deprecated
@@ -16266,10 +16311,10 @@ CUgraphInstantiateResult result_out;
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetArray(
- hTexRef: cuda_types::CUtexref,
- hArray: cuda_types::CUarray,
+ hTexRef: cuda_types::cuda::CUtexref,
+ hArray: cuda_types::cuda::CUarray,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Binds a mipmapped array to a texture reference
\deprecated
@@ -16297,10 +16342,10 @@ CUgraphInstantiateResult result_out;
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetMipmappedArray(
- hTexRef: cuda_types::CUtexref,
- hMipmappedArray: cuda_types::CUmipmappedArray,
+ hTexRef: cuda_types::cuda::CUtexref,
+ hMipmappedArray: cuda_types::cuda::CUmipmappedArray,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Binds an address as a texture reference
\deprecated
@@ -16345,10 +16390,10 @@ CUgraphInstantiateResult result_out;
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetAddress_v2(
ByteOffset: *mut usize,
- hTexRef: cuda_types::CUtexref,
- dptr: cuda_types::CUdeviceptr,
+ hTexRef: cuda_types::cuda::CUtexref,
+ dptr: cuda_types::cuda::CUdeviceptr,
bytes: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Binds an address as a 2D texture reference
\deprecated
@@ -16401,11 +16446,11 @@ CUgraphInstantiateResult result_out;
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetAddress2D_v3(
- hTexRef: cuda_types::CUtexref,
- desc: *const cuda_types::CUDA_ARRAY_DESCRIPTOR,
- dptr: cuda_types::CUdeviceptr,
+ hTexRef: cuda_types::cuda::CUtexref,
+ desc: *const cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR,
+ dptr: cuda_types::cuda::CUdeviceptr,
Pitch: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the format for a texture reference
\deprecated
@@ -16435,10 +16480,10 @@ CUgraphInstantiateResult result_out;
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,
::cudaCreateChannelDesc*/
fn cuTexRefSetFormat(
- hTexRef: cuda_types::CUtexref,
- fmt: cuda_types::CUarray_format,
+ hTexRef: cuda_types::cuda::CUtexref,
+ fmt: cuda_types::cuda::CUarray_format,
NumPackedComponents: ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the addressing mode for a texture reference
\deprecated
@@ -16479,10 +16524,10 @@ CU_TR_ADDRESS_MODE_BORDER = 3
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetAddressMode(
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
dim: ::core::ffi::c_int,
- am: cuda_types::CUaddress_mode,
- ) -> cuda_types::CUresult;
+ am: cuda_types::cuda::CUaddress_mode,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the filtering mode for a texture reference
\deprecated
@@ -16516,9 +16561,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetFilterMode(
- hTexRef: cuda_types::CUtexref,
- fm: cuda_types::CUfilter_mode,
- ) -> cuda_types::CUresult;
+ hTexRef: cuda_types::cuda::CUtexref,
+ fm: cuda_types::cuda::CUfilter_mode,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the mipmap filtering mode for a texture reference
\deprecated
@@ -16552,9 +16597,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetMipmapFilterMode(
- hTexRef: cuda_types::CUtexref,
- fm: cuda_types::CUfilter_mode,
- ) -> cuda_types::CUresult;
+ hTexRef: cuda_types::cuda::CUtexref,
+ fm: cuda_types::cuda::CUfilter_mode,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the mipmap level bias for a texture reference
\deprecated
@@ -16581,9 +16626,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetMipmapLevelBias(
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
bias: f32,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the mipmap min/max mipmap level clamps for a texture reference
\deprecated
@@ -16612,10 +16657,10 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetMipmapLevelClamp(
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
minMipmapLevelClamp: f32,
maxMipmapLevelClamp: f32,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the maximum anisotropy for a texture reference
\deprecated
@@ -16642,9 +16687,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetMaxAnisotropy(
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
maxAniso: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the border color for a texture reference
\deprecated
@@ -16675,9 +16720,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefSetAddressMode,
::cuTexRefGetAddressMode, ::cuTexRefGetBorderColor*/
fn cuTexRefSetBorderColor(
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
pBorderColor: *mut f32,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the flags for a texture reference
\deprecated
@@ -16717,9 +16762,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetFlags(
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the address associated with a texture reference
\deprecated
@@ -16744,9 +16789,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefGetAddress_v2(
- pdptr: *mut cuda_types::CUdeviceptr,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ pdptr: *mut cuda_types::cuda::CUdeviceptr,
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the array bound to a texture reference
\deprecated
@@ -16771,9 +16816,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefGetArray(
- phArray: *mut cuda_types::CUarray,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ phArray: *mut cuda_types::cuda::CUarray,
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the mipmapped array bound to a texture reference
\deprecated
@@ -16798,9 +16843,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefGetMipmappedArray(
- phMipmappedArray: *mut cuda_types::CUmipmappedArray,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ phMipmappedArray: *mut cuda_types::cuda::CUmipmappedArray,
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the addressing mode used by a texture reference
\deprecated
@@ -16826,10 +16871,10 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefGetAddressMode(
- pam: *mut cuda_types::CUaddress_mode,
- hTexRef: cuda_types::CUtexref,
+ pam: *mut cuda_types::cuda::CUaddress_mode,
+ hTexRef: cuda_types::cuda::CUtexref,
dim: ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the filter-mode used by a texture reference
\deprecated
@@ -16853,9 +16898,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefGetFilterMode(
- pfm: *mut cuda_types::CUfilter_mode,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ pfm: *mut cuda_types::cuda::CUfilter_mode,
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the format used by a texture reference
\deprecated
@@ -16881,10 +16926,10 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags*/
fn cuTexRefGetFormat(
- pFormat: *mut cuda_types::CUarray_format,
+ pFormat: *mut cuda_types::cuda::CUarray_format,
pNumChannels: *mut ::core::ffi::c_int,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the mipmap filtering mode for a texture reference
\deprecated
@@ -16908,9 +16953,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefGetMipmapFilterMode(
- pfm: *mut cuda_types::CUfilter_mode,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ pfm: *mut cuda_types::cuda::CUfilter_mode,
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the mipmap level bias for a texture reference
\deprecated
@@ -16935,8 +16980,8 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefGetMipmapLevelBias(
pbias: *mut f32,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the min/max mipmap level clamps for a texture reference
\deprecated
@@ -16963,8 +17008,8 @@ CU_TR_FILTER_MODE_LINEAR = 1
fn cuTexRefGetMipmapLevelClamp(
pminMipmapLevelClamp: *mut f32,
pmaxMipmapLevelClamp: *mut f32,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the maximum anisotropy for a texture reference
\deprecated
@@ -16989,8 +17034,8 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefGetMaxAnisotropy(
pmaxAniso: *mut ::core::ffi::c_int,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the border color used by a texture reference
\deprecated
@@ -17018,8 +17063,8 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefSetAddressMode, ::cuTexRefSetBorderColor*/
fn cuTexRefGetBorderColor(
pBorderColor: *mut f32,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the flags used by a texture reference
\deprecated
@@ -17043,8 +17088,8 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetFilterMode, ::cuTexRefGetFormat*/
fn cuTexRefGetFlags(
pFlags: *mut ::core::ffi::c_uint,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a texture reference
\deprecated
@@ -17066,7 +17111,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::CUDA_ERROR_INVALID_VALUE
\sa ::cuTexRefDestroy*/
- fn cuTexRefCreate(pTexRef: *mut cuda_types::CUtexref) -> cuda_types::CUresult;
+ fn cuTexRefCreate(
+ pTexRef: *mut cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys a texture reference
\deprecated
@@ -17083,7 +17130,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::CUDA_ERROR_INVALID_VALUE
\sa ::cuTexRefCreate*/
- fn cuTexRefDestroy(hTexRef: cuda_types::CUtexref) -> cuda_types::CUresult;
+ fn cuTexRefDestroy(
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the CUDA array for a surface reference.
\deprecated
@@ -17109,10 +17158,10 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuModuleGetSurfRef,
::cuSurfRefGetArray*/
fn cuSurfRefSetArray(
- hSurfRef: cuda_types::CUsurfref,
- hArray: cuda_types::CUarray,
+ hSurfRef: cuda_types::cuda::CUsurfref,
+ hArray: cuda_types::cuda::CUarray,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Passes back the CUDA array bound to a surface reference.
\deprecated
@@ -17133,9 +17182,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
\sa ::cuModuleGetSurfRef, ::cuSurfRefSetArray*/
fn cuSurfRefGetArray(
- phArray: *mut cuda_types::CUarray,
- hSurfRef: cuda_types::CUsurfref,
- ) -> cuda_types::CUresult;
+ phArray: *mut cuda_types::cuda::CUarray,
+ hSurfRef: cuda_types::cuda::CUsurfref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a texture object
Creates a texture object and returns it in \p pTexObject. \p pResDesc describes
@@ -17358,11 +17407,11 @@ unsigned int lastLayer;
::cuTexObjectDestroy,
::cudaCreateTextureObject*/
fn cuTexObjectCreate(
- pTexObject: *mut cuda_types::CUtexObject,
- pResDesc: *const cuda_types::CUDA_RESOURCE_DESC,
- pTexDesc: *const cuda_types::CUDA_TEXTURE_DESC,
- pResViewDesc: *const cuda_types::CUDA_RESOURCE_VIEW_DESC,
- ) -> cuda_types::CUresult;
+ pTexObject: *mut cuda_types::cuda::CUtexObject,
+ pResDesc: *const cuda_types::cuda::CUDA_RESOURCE_DESC,
+ pTexDesc: *const cuda_types::cuda::CUDA_TEXTURE_DESC,
+ pResViewDesc: *const cuda_types::cuda::CUDA_RESOURCE_VIEW_DESC,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys a texture object
Destroys the texture object specified by \p texObject.
@@ -17379,7 +17428,9 @@ unsigned int lastLayer;
\sa
::cuTexObjectCreate,
::cudaDestroyTextureObject*/
- fn cuTexObjectDestroy(texObject: cuda_types::CUtexObject) -> cuda_types::CUresult;
+ fn cuTexObjectDestroy(
+ texObject: cuda_types::cuda::CUtexObject,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a texture object's resource descriptor
Returns the resource descriptor for the texture object specified by \p texObject.
@@ -17398,9 +17449,9 @@ unsigned int lastLayer;
::cuTexObjectCreate,
::cudaGetTextureObjectResourceDesc,*/
fn cuTexObjectGetResourceDesc(
- pResDesc: *mut cuda_types::CUDA_RESOURCE_DESC,
- texObject: cuda_types::CUtexObject,
- ) -> cuda_types::CUresult;
+ pResDesc: *mut cuda_types::cuda::CUDA_RESOURCE_DESC,
+ texObject: cuda_types::cuda::CUtexObject,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a texture object's texture descriptor
Returns the texture descriptor for the texture object specified by \p texObject.
@@ -17419,9 +17470,9 @@ unsigned int lastLayer;
::cuTexObjectCreate,
::cudaGetTextureObjectTextureDesc*/
fn cuTexObjectGetTextureDesc(
- pTexDesc: *mut cuda_types::CUDA_TEXTURE_DESC,
- texObject: cuda_types::CUtexObject,
- ) -> cuda_types::CUresult;
+ pTexDesc: *mut cuda_types::cuda::CUDA_TEXTURE_DESC,
+ texObject: cuda_types::cuda::CUtexObject,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a texture object's resource view descriptor
Returns the resource view descriptor for the texture object specified by \p texObject.
@@ -17441,9 +17492,9 @@ unsigned int lastLayer;
::cuTexObjectCreate,
::cudaGetTextureObjectResourceViewDesc*/
fn cuTexObjectGetResourceViewDesc(
- pResViewDesc: *mut cuda_types::CUDA_RESOURCE_VIEW_DESC,
- texObject: cuda_types::CUtexObject,
- ) -> cuda_types::CUresult;
+ pResViewDesc: *mut cuda_types::cuda::CUDA_RESOURCE_VIEW_DESC,
+ texObject: cuda_types::cuda::CUtexObject,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a surface object
Creates a surface object and returns it in \p pSurfObject. \p pResDesc describes
@@ -17469,9 +17520,9 @@ unsigned int lastLayer;
::cuSurfObjectDestroy,
::cudaCreateSurfaceObject*/
fn cuSurfObjectCreate(
- pSurfObject: *mut cuda_types::CUsurfObject,
- pResDesc: *const cuda_types::CUDA_RESOURCE_DESC,
- ) -> cuda_types::CUresult;
+ pSurfObject: *mut cuda_types::cuda::CUsurfObject,
+ pResDesc: *const cuda_types::cuda::CUDA_RESOURCE_DESC,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys a surface object
Destroys the surface object specified by \p surfObject.
@@ -17488,7 +17539,9 @@ unsigned int lastLayer;
\sa
::cuSurfObjectCreate,
::cudaDestroySurfaceObject*/
- fn cuSurfObjectDestroy(surfObject: cuda_types::CUsurfObject) -> cuda_types::CUresult;
+ fn cuSurfObjectDestroy(
+ surfObject: cuda_types::cuda::CUsurfObject,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a surface object's resource descriptor
Returns the resource descriptor for the surface object specified by \p surfObject.
@@ -17507,9 +17560,9 @@ unsigned int lastLayer;
::cuSurfObjectCreate,
::cudaGetSurfaceObjectResourceDesc*/
fn cuSurfObjectGetResourceDesc(
- pResDesc: *mut cuda_types::CUDA_RESOURCE_DESC,
- surfObject: cuda_types::CUsurfObject,
- ) -> cuda_types::CUresult;
+ pResDesc: *mut cuda_types::cuda::CUDA_RESOURCE_DESC,
+ surfObject: cuda_types::cuda::CUsurfObject,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a tensor map descriptor object representing tiled memory region
Creates a descriptor for Tensor Memory Access (TMA) object specified
@@ -17649,19 +17702,19 @@ CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA
::cuTensorMapEncodeIm2col,
::cuTensorMapReplaceAddress*/
fn cuTensorMapEncodeTiled(
- tensorMap: *mut cuda_types::CUtensorMap,
- tensorDataType: cuda_types::CUtensorMapDataType,
- tensorRank: cuda_types::cuuint32_t,
+ tensorMap: *mut cuda_types::cuda::CUtensorMap,
+ tensorDataType: cuda_types::cuda::CUtensorMapDataType,
+ tensorRank: cuda_types::cuda::cuuint32_t,
globalAddress: *mut ::core::ffi::c_void,
- globalDim: *const cuda_types::cuuint64_t,
- globalStrides: *const cuda_types::cuuint64_t,
- boxDim: *const cuda_types::cuuint32_t,
- elementStrides: *const cuda_types::cuuint32_t,
- interleave: cuda_types::CUtensorMapInterleave,
- swizzle: cuda_types::CUtensorMapSwizzle,
- l2Promotion: cuda_types::CUtensorMapL2promotion,
- oobFill: cuda_types::CUtensorMapFloatOOBfill,
- ) -> cuda_types::CUresult;
+ globalDim: *const cuda_types::cuda::cuuint64_t,
+ globalStrides: *const cuda_types::cuda::cuuint64_t,
+ boxDim: *const cuda_types::cuda::cuuint32_t,
+ elementStrides: *const cuda_types::cuda::cuuint32_t,
+ interleave: cuda_types::cuda::CUtensorMapInterleave,
+ swizzle: cuda_types::cuda::CUtensorMapSwizzle,
+ l2Promotion: cuda_types::cuda::CUtensorMapL2promotion,
+ oobFill: cuda_types::cuda::CUtensorMapFloatOOBfill,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a tensor map descriptor object representing im2col memory region
Creates a descriptor for Tensor Memory Access (TMA) object specified
@@ -17816,22 +17869,22 @@ CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA
::cuTensorMapEncodeTiled,
::cuTensorMapReplaceAddress*/
fn cuTensorMapEncodeIm2col(
- tensorMap: *mut cuda_types::CUtensorMap,
- tensorDataType: cuda_types::CUtensorMapDataType,
- tensorRank: cuda_types::cuuint32_t,
+ tensorMap: *mut cuda_types::cuda::CUtensorMap,
+ tensorDataType: cuda_types::cuda::CUtensorMapDataType,
+ tensorRank: cuda_types::cuda::cuuint32_t,
globalAddress: *mut ::core::ffi::c_void,
- globalDim: *const cuda_types::cuuint64_t,
- globalStrides: *const cuda_types::cuuint64_t,
+ globalDim: *const cuda_types::cuda::cuuint64_t,
+ globalStrides: *const cuda_types::cuda::cuuint64_t,
pixelBoxLowerCorner: *const ::core::ffi::c_int,
pixelBoxUpperCorner: *const ::core::ffi::c_int,
- channelsPerPixel: cuda_types::cuuint32_t,
- pixelsPerColumn: cuda_types::cuuint32_t,
- elementStrides: *const cuda_types::cuuint32_t,
- interleave: cuda_types::CUtensorMapInterleave,
- swizzle: cuda_types::CUtensorMapSwizzle,
- l2Promotion: cuda_types::CUtensorMapL2promotion,
- oobFill: cuda_types::CUtensorMapFloatOOBfill,
- ) -> cuda_types::CUresult;
+ channelsPerPixel: cuda_types::cuda::cuuint32_t,
+ pixelsPerColumn: cuda_types::cuda::cuuint32_t,
+ elementStrides: *const cuda_types::cuda::cuuint32_t,
+ interleave: cuda_types::cuda::CUtensorMapInterleave,
+ swizzle: cuda_types::cuda::CUtensorMapSwizzle,
+ l2Promotion: cuda_types::cuda::CUtensorMapL2promotion,
+ oobFill: cuda_types::cuda::CUtensorMapFloatOOBfill,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Modify an existing tensor map descriptor with an updated global address
Modifies the descriptor for Tensor Memory Access (TMA) object passed in \p tensorMap with
@@ -17855,9 +17908,9 @@ CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA
::cuTensorMapEncodeTiled,
::cuTensorMapEncodeIm2col*/
fn cuTensorMapReplaceAddress(
- tensorMap: *mut cuda_types::CUtensorMap,
+ tensorMap: *mut cuda_types::cuda::CUtensorMap,
globalAddress: *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Queries if a device may directly access a peer device's memory.
Returns in \p *canAccessPeer a value of 1 if contexts on \p dev are capable of
@@ -17884,9 +17937,9 @@ CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA
::cudaDeviceCanAccessPeer*/
fn cuDeviceCanAccessPeer(
canAccessPeer: *mut ::core::ffi::c_int,
- dev: cuda_types::CUdevice,
- peerDev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ peerDev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Enables direct access to memory allocations in a peer context.
If both the current context and \p peerContext are on devices which support unified
@@ -17937,9 +17990,9 @@ CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA
::cuCtxDisablePeerAccess,
::cudaDeviceEnablePeerAccess*/
fn cuCtxEnablePeerAccess(
- peerContext: cuda_types::CUcontext,
+ peerContext: cuda_types::cuda::CUcontext,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
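A sketch combining the peer-access query and enable calls above (not part of this patch; CUdevice is modeled as a plain integer ordinal, the context handle as a raw pointer, and the helper name is invented):

use std::ffi::c_void;

type CUresult = u32;
type CUdevice = i32;          // device ordinal, not a pointer
type CUcontext = *mut c_void;

#[link(name = "cuda")]
extern "C" {
    fn cuDeviceCanAccessPeer(canAccessPeer: *mut i32, dev: CUdevice, peerDev: CUdevice) -> CUresult;
    fn cuCtxEnablePeerAccess(peerContext: CUcontext, Flags: u32) -> CUresult;
}

/// Checks whether `dev` can access `peer_dev` and, if so, enables access from
/// the current context to `peer_ctx`. Returns Ok(false) when peer access is
/// not supported between the two devices.
unsafe fn enable_peer(dev: CUdevice, peer_dev: CUdevice, peer_ctx: CUcontext) -> Result<bool, CUresult> {
    let mut can_access = 0i32;
    let status = cuDeviceCanAccessPeer(&mut can_access, dev, peer_dev);
    if status != 0 {
        return Err(status);
    }
    if can_access == 0 {
        return Ok(false);
    }
    let status = cuCtxEnablePeerAccess(peer_ctx, 0); // Flags must currently be 0
    if status == 0 { Ok(true) } else { Err(status) }
}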
/** \brief Disables direct access to memory allocations in a peer context and
unregisters any registered allocations.
@@ -17964,8 +18017,8 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuCtxEnablePeerAccess,
::cudaDeviceDisablePeerAccess*/
fn cuCtxDisablePeerAccess(
- peerContext: cuda_types::CUcontext,
- ) -> cuda_types::CUresult;
+ peerContext: cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Queries attributes of the link between two devices.
Returns in \p *value the value of the requested attribute \p attrib of the
@@ -18004,10 +18057,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cudaDeviceGetP2PAttribute*/
fn cuDeviceGetP2PAttribute(
value: *mut ::core::ffi::c_int,
- attrib: cuda_types::CUdevice_P2PAttribute,
- srcDevice: cuda_types::CUdevice,
- dstDevice: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ attrib: cuda_types::cuda::CUdevice_P2PAttribute,
+ srcDevice: cuda_types::cuda::CUdevice,
+ dstDevice: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
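A hedged sketch of the attribute query above (not part of this patch; the attribute is passed through as a plain u32 rather than the CUdevice_P2PAttribute enum, so callers supply a value from cuda.h, and the helper name is invented):

type CUresult = u32;
type CUdevice = i32;

#[link(name = "cuda")]
extern "C" {
    fn cuDeviceGetP2PAttribute(
        value: *mut i32,
        attrib: u32,
        srcDevice: CUdevice,
        dstDevice: CUdevice,
    ) -> CUresult;
}

/// Queries one CUdevice_P2PAttribute value for the src -> dst link and
/// returns the raw integer result.
unsafe fn p2p_attribute(attrib: u32, src: CUdevice, dst: CUdevice) -> Result<i32, CUresult> {
    let mut value = 0i32;
    let status = cuDeviceGetP2PAttribute(&mut value, attrib, src, dst);
    if status == 0 { Ok(value) } else { Err(status) }
}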
/** \brief Unregisters a graphics resource for access by CUDA
Unregisters the graphics resource \p resource so it is not accessible by
@@ -18035,8 +18088,8 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsGLRegisterImage,
::cudaGraphicsUnregisterResource*/
fn cuGraphicsUnregisterResource(
- resource: cuda_types::CUgraphicsResource,
- ) -> cuda_types::CUresult;
+ resource: cuda_types::cuda::CUgraphicsResource,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Get an array through which to access a subresource of a mapped graphics resource.
Returns in \p *pArray an array through which the subresource of the mapped
@@ -18074,11 +18127,11 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsResourceGetMappedPointer,
::cudaGraphicsSubResourceGetMappedArray*/
fn cuGraphicsSubResourceGetMappedArray(
- pArray: *mut cuda_types::CUarray,
- resource: cuda_types::CUgraphicsResource,
+ pArray: *mut cuda_types::cuda::CUarray,
+ resource: cuda_types::cuda::CUgraphicsResource,
arrayIndex: ::core::ffi::c_uint,
mipLevel: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Get a mipmapped array through which to access a mapped graphics resource.
Returns in \p *pMipmappedArray a mipmapped array through which the mapped graphics
@@ -18107,9 +18160,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsResourceGetMappedPointer,
::cudaGraphicsResourceGetMappedMipmappedArray*/
fn cuGraphicsResourceGetMappedMipmappedArray(
- pMipmappedArray: *mut cuda_types::CUmipmappedArray,
- resource: cuda_types::CUgraphicsResource,
- ) -> cuda_types::CUresult;
+ pMipmappedArray: *mut cuda_types::cuda::CUmipmappedArray,
+ resource: cuda_types::cuda::CUgraphicsResource,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Get a device pointer through which to access a mapped graphics resource.
Returns in \p *pDevPtr a pointer through which the mapped graphics resource
@@ -18141,10 +18194,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsSubResourceGetMappedArray,
::cudaGraphicsResourceGetMappedPointer*/
fn cuGraphicsResourceGetMappedPointer_v2(
- pDevPtr: *mut cuda_types::CUdeviceptr,
+ pDevPtr: *mut cuda_types::cuda::CUdeviceptr,
pSize: *mut usize,
- resource: cuda_types::CUgraphicsResource,
- ) -> cuda_types::CUresult;
+ resource: cuda_types::cuda::CUgraphicsResource,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Set usage flags for mapping a graphics resource
Set \p flags for mapping the graphics resource \p resource.
@@ -18183,9 +18236,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsMapResources,
::cudaGraphicsResourceSetMapFlags*/
fn cuGraphicsResourceSetMapFlags_v2(
- resource: cuda_types::CUgraphicsResource,
+ resource: cuda_types::cuda::CUgraphicsResource,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Map graphics resources for access by CUDA
Maps the \p count graphics resources in \p resources for access by CUDA.
@@ -18224,9 +18277,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cudaGraphicsMapResources*/
fn cuGraphicsMapResources_ptsz(
count: ::core::ffi::c_uint,
- resources: *mut cuda_types::CUgraphicsResource,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ resources: *mut cuda_types::cuda::CUgraphicsResource,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unmap graphics resources.
Unmaps the \p count graphics resources in \p resources.
@@ -18262,9 +18315,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cudaGraphicsUnmapResources*/
fn cuGraphicsUnmapResources_ptsz(
count: ::core::ffi::c_uint,
- resources: *mut cuda_types::CUgraphicsResource,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ resources: *mut cuda_types::cuda::CUgraphicsResource,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the requested driver API function pointer
Returns in \p **pfn the address of the CUDA driver function for the requested
@@ -18328,9 +18381,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
symbol: *const ::core::ffi::c_char,
pfn: *mut *mut ::core::ffi::c_void,
cudaVersion: ::core::ffi::c_int,
- flags: cuda_types::cuuint64_t,
- symbolStatus: *mut cuda_types::CUdriverProcAddressQueryResult,
- ) -> cuda_types::CUresult;
+ flags: cuda_types::cuda::cuuint64_t,
+ symbolStatus: *mut cuda_types::cuda::CUdriverProcAddressQueryResult,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allows caller to fetch a coredump attribute value for the current context
Returns in \p *value the requested value specified by \p attrib. It is up to the caller
@@ -18380,10 +18433,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuCoredumpSetAttribute,
::cuCoredumpSetAttributeGlobal*/
fn cuCoredumpGetAttribute(
- attrib: cuda_types::CUcoredumpSettings,
+ attrib: cuda_types::cuda::CUcoredumpSettings,
value: *mut ::core::ffi::c_void,
size: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allows caller to fetch a coredump attribute value for the entire application
Returns in \p *value the requested value specified by \p attrib. It is up to the caller
@@ -18426,10 +18479,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuCoredumpSetAttribute,
::cuCoredumpSetAttributeGlobal*/
fn cuCoredumpGetAttributeGlobal(
- attrib: cuda_types::CUcoredumpSettings,
+ attrib: cuda_types::cuda::CUcoredumpSettings,
value: *mut ::core::ffi::c_void,
size: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allows caller to set a coredump attribute value for the current context
This function should be considered an alternate interface to the CUDA-GDB environment
@@ -18485,10 +18538,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuCoredumpGetAttribute,
::cuCoredumpSetAttributeGlobal*/
fn cuCoredumpSetAttribute(
- attrib: cuda_types::CUcoredumpSettings,
+ attrib: cuda_types::cuda::CUcoredumpSettings,
value: *mut ::core::ffi::c_void,
size: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allows caller to set a coredump attribute value globally
This function should be considered an alternate interface to the CUDA-GDB environment
@@ -18541,15 +18594,15 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuCoredumpGetAttributeGlobal,
::cuCoredumpSetAttribute*/
fn cuCoredumpSetAttributeGlobal(
- attrib: cuda_types::CUcoredumpSettings,
+ attrib: cuda_types::cuda::CUcoredumpSettings,
value: *mut ::core::ffi::c_void,
size: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/// @}
fn cuGetExportTable(
ppExportTable: *mut *const ::core::ffi::c_void,
- pExportTableId: *const cuda_types::CUuuid,
- ) -> cuda_types::CUresult;
+ pExportTableId: *const cuda_types::cuda::CUuuid,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a green context with a specified set of resources.
This API creates a green context with the resources specified in the descriptor \p desc and
@@ -18593,11 +18646,11 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuCtxCreate,
::cuCtxCreate_v3*/
fn cuGreenCtxCreate(
- phCtx: *mut cuda_types::CUgreenCtx,
- desc: cuda_types::CUdevResourceDesc,
- dev: cuda_types::CUdevice,
+ phCtx: *mut cuda_types::cuda::CUgreenCtx,
+ desc: cuda_types::cuda::CUdevResourceDesc,
+ dev: cuda_types::cuda::CUdevice,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys a green context
Destroys the green context, releasing the primary context of the device that this green context was created for.
@@ -18615,7 +18668,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\sa
::cuGreenCtxCreate,
::cuCtxDestroy*/
- fn cuGreenCtxDestroy(hCtx: cuda_types::CUgreenCtx) -> cuda_types::CUresult;
+ fn cuGreenCtxDestroy(
+ hCtx: cuda_types::cuda::CUgreenCtx,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Converts a green context into the primary context
The API converts a green context into the primary context returned in \p pContext. It is important
@@ -18640,9 +18695,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\sa
::cuGreenCtxCreate*/
fn cuCtxFromGreenCtx(
- pContext: *mut cuda_types::CUcontext,
- hCtx: cuda_types::CUgreenCtx,
- ) -> cuda_types::CUresult;
+ pContext: *mut cuda_types::cuda::CUcontext,
+ hCtx: cuda_types::cuda::CUgreenCtx,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Get device resources
Get the \p type resources available to the \p device.
@@ -18665,10 +18720,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\sa
::cuDevResourceGenerateDesc*/
fn cuDeviceGetDevResource(
- device: cuda_types::CUdevice,
- resource: *mut cuda_types::CUdevResource,
- type_: cuda_types::CUdevResourceType,
- ) -> cuda_types::CUresult;
+ device: cuda_types::cuda::CUdevice,
+ resource: *mut cuda_types::cuda::CUdevResource,
+ type_: cuda_types::cuda::CUdevResourceType,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Get context resources
Get the \p type resources available to the context represented by \p hCtx
@@ -18690,10 +18745,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\sa
::cuDevResourceGenerateDesc*/
fn cuCtxGetDevResource(
- hCtx: cuda_types::CUcontext,
- resource: *mut cuda_types::CUdevResource,
- type_: cuda_types::CUdevResourceType,
- ) -> cuda_types::CUresult;
+ hCtx: cuda_types::cuda::CUcontext,
+ resource: *mut cuda_types::cuda::CUdevResource,
+ type_: cuda_types::cuda::CUdevResourceType,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Get green context resources
Get the \p type resources available to the green context represented by \p hCtx
@@ -18712,10 +18767,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\sa
::cuDevResourceGenerateDesc*/
fn cuGreenCtxGetDevResource(
- hCtx: cuda_types::CUgreenCtx,
- resource: *mut cuda_types::CUdevResource,
- type_: cuda_types::CUdevResourceType,
- ) -> cuda_types::CUresult;
+ hCtx: cuda_types::cuda::CUgreenCtx,
+ resource: *mut cuda_types::cuda::CUdevResource,
+ type_: cuda_types::cuda::CUdevResourceType,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Splits \p CU_DEV_RESOURCE_TYPE_SM resources.
Splits \p CU_DEV_RESOURCE_TYPE_SM resources into \p nbGroups, adhering to the minimum SM count specified in \p minCount
@@ -18768,13 +18823,13 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuCtxGetDevResource,
::cuDeviceGetDevResource*/
fn cuDevSmResourceSplitByCount(
- result: *mut cuda_types::CUdevResource,
+ result: *mut cuda_types::cuda::CUdevResource,
nbGroups: *mut ::core::ffi::c_uint,
- input: *const cuda_types::CUdevResource,
- remaining: *mut cuda_types::CUdevResource,
+ input: *const cuda_types::cuda::CUdevResource,
+ remaining: *mut cuda_types::cuda::CUdevResource,
useFlags: ::core::ffi::c_uint,
minCount: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Generate a resource descriptor
Generates a resource descriptor with the set of resources specified in \p resources.
@@ -18799,10 +18854,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\sa
::cuDevSmResourceSplitByCount*/
fn cuDevResourceGenerateDesc(
- phDesc: *mut cuda_types::CUdevResourceDesc,
- resources: *mut cuda_types::CUdevResource,
+ phDesc: *mut cuda_types::cuda::CUdevResourceDesc,
+ resources: *mut cuda_types::cuda::CUdevResource,
nbResources: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
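
Taken together, the green-context entry points above form a small pipeline: query a device's SM resources, optionally split them, wrap the result in a descriptor, and create the green context from that descriptor. The following is an illustrative hand-written sketch of that flow rather than code from this patch; it assumes these declarations are expanded into a real extern "C" block that resolves against a CUDA driver, that the CU_DEV_RESOURCE_TYPE_SM variant is spelled that way in cuda_types::cuda, and that flag 1 is CU_GREEN_CTX_DEFAULT_STREAM. The helper name is made up and error handling is elided.

use std::mem::MaybeUninit;
use cuda_types::cuda::*;

// Illustrative only: build a green context that owns every SM resource of `dev`.
unsafe fn green_ctx_from_all_sms(dev: CUdevice) -> CUgreenCtx {
    // 1. Ask the device for its SM resource description.
    let mut resource = MaybeUninit::<CUdevResource>::uninit();
    let _ = cuDeviceGetDevResource(
        dev,
        resource.as_mut_ptr(),
        CUdevResourceType::CU_DEV_RESOURCE_TYPE_SM, // assumed variant spelling
    );
    let mut resource = resource.assume_init();
    // 2. Wrap that single resource in a descriptor (no split).
    let mut desc = MaybeUninit::<CUdevResourceDesc>::uninit();
    let _ = cuDevResourceGenerateDesc(desc.as_mut_ptr(), &mut resource, 1);
    let desc = desc.assume_init();
    // 3. Create the green context; 1 = CU_GREEN_CTX_DEFAULT_STREAM (assumed value).
    let mut green_ctx = MaybeUninit::<CUgreenCtx>::uninit();
    let _ = cuGreenCtxCreate(green_ctx.as_mut_ptr(), desc, dev, 1);
    green_ctx.assume_init()
}
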
/** \brief Records an event.
Captures in \p hEvent all the activities of the green context of \p hCtx
@@ -18829,9 +18884,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGreenCtxWaitEvent,
::cuEventRecord*/
fn cuGreenCtxRecordEvent(
- hCtx: cuda_types::CUgreenCtx,
- hEvent: cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ hCtx: cuda_types::cuda::CUgreenCtx,
+ hEvent: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Make a green context wait on an event
Makes all future work submitted to green context \p hCtx wait for all work
@@ -18856,9 +18911,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGreenCtxRecordEvent,
::cuStreamWaitEvent*/
fn cuGreenCtxWaitEvent(
- hCtx: cuda_types::CUgreenCtx,
- hEvent: cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ hCtx: cuda_types::cuda::CUgreenCtx,
+ hEvent: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
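
Synchronization between two green contexts goes through ordinary CUDA events: record on one, wait on the other. A sketch under the same assumptions as above (declarations in scope, helper name illustrative, errors ignored):

use cuda_types::cuda::*;

// Illustrative only: make `consumer` wait for everything already submitted to `producer`.
unsafe fn order_green_contexts(producer: CUgreenCtx, consumer: CUgreenCtx, event: CUevent) {
    // Capture the producer's currently submitted work in the event...
    let _ = cuGreenCtxRecordEvent(producer, event);
    // ...and gate all future work on the consumer behind it.
    let _ = cuGreenCtxWaitEvent(consumer, event);
}
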
/** \brief Query the green context associated with a stream
Returns the CUDA green context that the stream is associated with, or NULL if the stream
@@ -18903,516 +18958,534 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cudaStreamCreate,
::cudaStreamCreateWithFlags*/
fn cuStreamGetGreenCtx(
- hStream: cuda_types::CUstream,
- phCtx: *mut cuda_types::CUgreenCtx,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ phCtx: *mut cuda_types::cuda::CUgreenCtx,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemHostRegister(
p: *mut ::core::ffi::c_void,
bytesize: usize,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphicsResourceSetMapFlags(
- resource: cuda_types::CUgraphicsResource,
+ resource: cuda_types::cuda::CUgraphicsResource,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuLinkCreate(
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
- stateOut: *mut cuda_types::CUlinkState,
- ) -> cuda_types::CUresult;
+ stateOut: *mut cuda_types::cuda::CUlinkState,
+ ) -> cuda_types::cuda::CUresult;
fn cuLinkAddData(
- state: cuda_types::CUlinkState,
- type_: cuda_types::CUjitInputType,
+ state: cuda_types::cuda::CUlinkState,
+ type_: cuda_types::cuda::CUjitInputType,
data: *mut ::core::ffi::c_void,
size: usize,
name: *const ::core::ffi::c_char,
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuLinkAddFile(
- state: cuda_types::CUlinkState,
- type_: cuda_types::CUjitInputType,
+ state: cuda_types::cuda::CUlinkState,
+ type_: cuda_types::cuda::CUjitInputType,
path: *const ::core::ffi::c_char,
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuTexRefSetAddress2D_v2(
- hTexRef: cuda_types::CUtexref,
- desc: *const cuda_types::CUDA_ARRAY_DESCRIPTOR,
- dptr: cuda_types::CUdeviceptr,
+ hTexRef: cuda_types::cuda::CUtexref,
+ desc: *const cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR,
+ dptr: cuda_types::cuda::CUdeviceptr,
Pitch: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuDeviceTotalMem(
bytes: *mut ::core::ffi::c_uint,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
fn cuCtxCreate(
- pctx: *mut cuda_types::CUcontext,
+ pctx: *mut cuda_types::cuda::CUcontext,
flags: ::core::ffi::c_uint,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
fn cuModuleGetGlobal(
- dptr: *mut cuda_types::CUdeviceptr_v1,
+ dptr: *mut cuda_types::cuda::CUdeviceptr_v1,
bytes: *mut ::core::ffi::c_uint,
- hmod: cuda_types::CUmodule,
+ hmod: cuda_types::cuda::CUmodule,
name: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemGetInfo(
free: *mut ::core::ffi::c_uint,
total: *mut ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemAlloc(
- dptr: *mut cuda_types::CUdeviceptr_v1,
+ dptr: *mut cuda_types::cuda::CUdeviceptr_v1,
bytesize: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemAllocPitch(
- dptr: *mut cuda_types::CUdeviceptr_v1,
+ dptr: *mut cuda_types::cuda::CUdeviceptr_v1,
pPitch: *mut ::core::ffi::c_uint,
WidthInBytes: ::core::ffi::c_uint,
Height: ::core::ffi::c_uint,
ElementSizeBytes: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
- fn cuMemFree(dptr: cuda_types::CUdeviceptr_v1) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
+ fn cuMemFree(dptr: cuda_types::cuda::CUdeviceptr_v1) -> cuda_types::cuda::CUresult;
fn cuMemGetAddressRange(
- pbase: *mut cuda_types::CUdeviceptr_v1,
+ pbase: *mut cuda_types::cuda::CUdeviceptr_v1,
psize: *mut ::core::ffi::c_uint,
- dptr: cuda_types::CUdeviceptr_v1,
- ) -> cuda_types::CUresult;
+ dptr: cuda_types::cuda::CUdeviceptr_v1,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemAllocHost(
pp: *mut *mut ::core::ffi::c_void,
bytesize: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemHostGetDevicePointer(
- pdptr: *mut cuda_types::CUdeviceptr_v1,
+ pdptr: *mut cuda_types::cuda::CUdeviceptr_v1,
p: *mut ::core::ffi::c_void,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyHtoD(
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
srcHost: *const ::core::ffi::c_void,
ByteCount: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyDtoH(
dstHost: *mut ::core::ffi::c_void,
- srcDevice: cuda_types::CUdeviceptr_v1,
+ srcDevice: cuda_types::cuda::CUdeviceptr_v1,
ByteCount: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyDtoD(
- dstDevice: cuda_types::CUdeviceptr_v1,
- srcDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
+ srcDevice: cuda_types::cuda::CUdeviceptr_v1,
ByteCount: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyDtoA(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: ::core::ffi::c_uint,
- srcDevice: cuda_types::CUdeviceptr_v1,
+ srcDevice: cuda_types::cuda::CUdeviceptr_v1,
ByteCount: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyAtoD(
- dstDevice: cuda_types::CUdeviceptr_v1,
- srcArray: cuda_types::CUarray,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: ::core::ffi::c_uint,
ByteCount: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyHtoA(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: ::core::ffi::c_uint,
srcHost: *const ::core::ffi::c_void,
ByteCount: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyAtoH(
dstHost: *mut ::core::ffi::c_void,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: ::core::ffi::c_uint,
ByteCount: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyAtoA(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: ::core::ffi::c_uint,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: ::core::ffi::c_uint,
ByteCount: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyHtoAAsync(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: ::core::ffi::c_uint,
srcHost: *const ::core::ffi::c_void,
ByteCount: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyAtoHAsync(
dstHost: *mut ::core::ffi::c_void,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: ::core::ffi::c_uint,
ByteCount: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
- fn cuMemcpy2D(pCopy: *const cuda_types::CUDA_MEMCPY2D_v1) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
+ fn cuMemcpy2D(
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D_v1,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpy2DUnaligned(
- pCopy: *const cuda_types::CUDA_MEMCPY2D_v1,
- ) -> cuda_types::CUresult;
- fn cuMemcpy3D(pCopy: *const cuda_types::CUDA_MEMCPY3D_v1) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D_v1,
+ ) -> cuda_types::cuda::CUresult;
+ fn cuMemcpy3D(
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D_v1,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyHtoDAsync(
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
srcHost: *const ::core::ffi::c_void,
ByteCount: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyDtoHAsync(
dstHost: *mut ::core::ffi::c_void,
- srcDevice: cuda_types::CUdeviceptr_v1,
+ srcDevice: cuda_types::cuda::CUdeviceptr_v1,
ByteCount: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyDtoDAsync(
- dstDevice: cuda_types::CUdeviceptr_v1,
- srcDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
+ srcDevice: cuda_types::cuda::CUdeviceptr_v1,
ByteCount: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpy2DAsync(
- pCopy: *const cuda_types::CUDA_MEMCPY2D_v1,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D_v1,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpy3DAsync(
- pCopy: *const cuda_types::CUDA_MEMCPY3D_v1,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D_v1,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD8(
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
uc: ::core::ffi::c_uchar,
N: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD16(
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
us: ::core::ffi::c_ushort,
N: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD32(
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
ui: ::core::ffi::c_uint,
N: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD2D8(
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
dstPitch: ::core::ffi::c_uint,
uc: ::core::ffi::c_uchar,
Width: ::core::ffi::c_uint,
Height: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD2D16(
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
dstPitch: ::core::ffi::c_uint,
us: ::core::ffi::c_ushort,
Width: ::core::ffi::c_uint,
Height: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD2D32(
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
dstPitch: ::core::ffi::c_uint,
ui: ::core::ffi::c_uint,
Width: ::core::ffi::c_uint,
Height: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuArrayCreate(
- pHandle: *mut cuda_types::CUarray,
- pAllocateArray: *const cuda_types::CUDA_ARRAY_DESCRIPTOR_v1,
- ) -> cuda_types::CUresult;
+ pHandle: *mut cuda_types::cuda::CUarray,
+ pAllocateArray: *const cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR_v1,
+ ) -> cuda_types::cuda::CUresult;
fn cuArrayGetDescriptor(
- pArrayDescriptor: *mut cuda_types::CUDA_ARRAY_DESCRIPTOR_v1,
- hArray: cuda_types::CUarray,
- ) -> cuda_types::CUresult;
+ pArrayDescriptor: *mut cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR_v1,
+ hArray: cuda_types::cuda::CUarray,
+ ) -> cuda_types::cuda::CUresult;
fn cuArray3DCreate(
- pHandle: *mut cuda_types::CUarray,
- pAllocateArray: *const cuda_types::CUDA_ARRAY3D_DESCRIPTOR_v1,
- ) -> cuda_types::CUresult;
+ pHandle: *mut cuda_types::cuda::CUarray,
+ pAllocateArray: *const cuda_types::cuda::CUDA_ARRAY3D_DESCRIPTOR_v1,
+ ) -> cuda_types::cuda::CUresult;
fn cuArray3DGetDescriptor(
- pArrayDescriptor: *mut cuda_types::CUDA_ARRAY3D_DESCRIPTOR_v1,
- hArray: cuda_types::CUarray,
- ) -> cuda_types::CUresult;
+ pArrayDescriptor: *mut cuda_types::cuda::CUDA_ARRAY3D_DESCRIPTOR_v1,
+ hArray: cuda_types::cuda::CUarray,
+ ) -> cuda_types::cuda::CUresult;
fn cuTexRefSetAddress(
ByteOffset: *mut ::core::ffi::c_uint,
- hTexRef: cuda_types::CUtexref,
- dptr: cuda_types::CUdeviceptr_v1,
+ hTexRef: cuda_types::cuda::CUtexref,
+ dptr: cuda_types::cuda::CUdeviceptr_v1,
bytes: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuTexRefSetAddress2D(
- hTexRef: cuda_types::CUtexref,
- desc: *const cuda_types::CUDA_ARRAY_DESCRIPTOR_v1,
- dptr: cuda_types::CUdeviceptr_v1,
+ hTexRef: cuda_types::cuda::CUtexref,
+ desc: *const cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR_v1,
+ dptr: cuda_types::cuda::CUdeviceptr_v1,
Pitch: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuTexRefGetAddress(
- pdptr: *mut cuda_types::CUdeviceptr_v1,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ pdptr: *mut cuda_types::cuda::CUdeviceptr_v1,
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphicsResourceGetMappedPointer(
- pDevPtr: *mut cuda_types::CUdeviceptr_v1,
+ pDevPtr: *mut cuda_types::cuda::CUdeviceptr_v1,
pSize: *mut ::core::ffi::c_uint,
- resource: cuda_types::CUgraphicsResource,
- ) -> cuda_types::CUresult;
- fn cuCtxDestroy(ctx: cuda_types::CUcontext) -> cuda_types::CUresult;
- fn cuCtxPopCurrent(pctx: *mut cuda_types::CUcontext) -> cuda_types::CUresult;
- fn cuCtxPushCurrent(ctx: cuda_types::CUcontext) -> cuda_types::CUresult;
- fn cuStreamDestroy(hStream: cuda_types::CUstream) -> cuda_types::CUresult;
- fn cuEventDestroy(hEvent: cuda_types::CUevent) -> cuda_types::CUresult;
- fn cuDevicePrimaryCtxRelease(dev: cuda_types::CUdevice) -> cuda_types::CUresult;
- fn cuDevicePrimaryCtxReset(dev: cuda_types::CUdevice) -> cuda_types::CUresult;
+ resource: cuda_types::cuda::CUgraphicsResource,
+ ) -> cuda_types::cuda::CUresult;
+ fn cuCtxDestroy(ctx: cuda_types::cuda::CUcontext) -> cuda_types::cuda::CUresult;
+ fn cuCtxPopCurrent(
+ pctx: *mut cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
+ fn cuCtxPushCurrent(ctx: cuda_types::cuda::CUcontext) -> cuda_types::cuda::CUresult;
+ fn cuStreamDestroy(
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
+ fn cuEventDestroy(hEvent: cuda_types::cuda::CUevent) -> cuda_types::cuda::CUresult;
+ fn cuDevicePrimaryCtxRelease(
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
+ fn cuDevicePrimaryCtxReset(
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
fn cuDevicePrimaryCtxSetFlags(
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyHtoD_v2(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyDtoH_v2(
dstHost: *mut ::core::ffi::c_void,
- srcDevice: cuda_types::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
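
The _v2 copy entry points take usize byte counts and the 64-bit CUdeviceptr, so a host-to-device round trip is just two calls. An illustrative sketch, assuming a valid device allocation obtained elsewhere and the declarations above in scope; the helper name is invented:

use cuda_types::cuda::*;

// Illustrative only: copy a host buffer into an existing device allocation and back.
unsafe fn roundtrip(dptr: CUdeviceptr, host: &mut [u8]) {
    let bytes = host.len();
    let _ = cuMemcpyHtoD_v2(dptr, host.as_ptr().cast(), bytes);
    // ... kernels would normally run here ...
    let _ = cuMemcpyDtoH_v2(host.as_mut_ptr().cast(), dptr, bytes);
}
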
fn cuMemcpyDtoD_v2(
- dstDevice: cuda_types::CUdeviceptr,
- srcDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyDtoA_v2(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
- srcDevice: cuda_types::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyAtoD_v2(
- dstDevice: cuda_types::CUdeviceptr,
- srcArray: cuda_types::CUarray,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyHtoA_v2(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyAtoH_v2(
dstHost: *mut ::core::ffi::c_void,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyAtoA_v2(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyHtoAAsync_v2(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyAtoHAsync_v2(
dstHost: *mut ::core::ffi::c_void,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
- fn cuMemcpy2D_v2(pCopy: *const cuda_types::CUDA_MEMCPY2D) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
+ fn cuMemcpy2D_v2(
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpy2DUnaligned_v2(
- pCopy: *const cuda_types::CUDA_MEMCPY2D,
- ) -> cuda_types::CUresult;
- fn cuMemcpy3D_v2(pCopy: *const cuda_types::CUDA_MEMCPY3D) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D,
+ ) -> cuda_types::cuda::CUresult;
+ fn cuMemcpy3D_v2(
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyHtoDAsync_v2(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyDtoHAsync_v2(
dstHost: *mut ::core::ffi::c_void,
- srcDevice: cuda_types::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyDtoDAsync_v2(
- dstDevice: cuda_types::CUdeviceptr,
- srcDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpy2DAsync_v2(
- pCopy: *const cuda_types::CUDA_MEMCPY2D,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpy3DAsync_v2(
- pCopy: *const cuda_types::CUDA_MEMCPY3D,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD8_v2(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
uc: ::core::ffi::c_uchar,
N: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD16_v2(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
us: ::core::ffi::c_ushort,
N: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD32_v2(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
ui: ::core::ffi::c_uint,
N: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD2D8_v2(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
uc: ::core::ffi::c_uchar,
Width: usize,
Height: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD2D16_v2(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
us: ::core::ffi::c_ushort,
Width: usize,
Height: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD2D32_v2(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
ui: ::core::ffi::c_uint,
Width: usize,
Height: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpy(
- dst: cuda_types::CUdeviceptr,
- src: cuda_types::CUdeviceptr,
+ dst: cuda_types::cuda::CUdeviceptr,
+ src: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyAsync(
- dst: cuda_types::CUdeviceptr,
- src: cuda_types::CUdeviceptr,
+ dst: cuda_types::cuda::CUdeviceptr,
+ src: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyPeer(
- dstDevice: cuda_types::CUdeviceptr,
- dstContext: cuda_types::CUcontext,
- srcDevice: cuda_types::CUdeviceptr,
- srcContext: cuda_types::CUcontext,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ dstContext: cuda_types::cuda::CUcontext,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
+ srcContext: cuda_types::cuda::CUcontext,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyPeerAsync(
- dstDevice: cuda_types::CUdeviceptr,
- dstContext: cuda_types::CUcontext,
- srcDevice: cuda_types::CUdeviceptr,
- srcContext: cuda_types::CUcontext,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ dstContext: cuda_types::cuda::CUcontext,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
+ srcContext: cuda_types::cuda::CUcontext,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpy3DPeer(
- pCopy: *const cuda_types::CUDA_MEMCPY3D_PEER,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D_PEER,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpy3DPeerAsync(
- pCopy: *const cuda_types::CUDA_MEMCPY3D_PEER,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D_PEER,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD8Async(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
uc: ::core::ffi::c_uchar,
N: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD16Async(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
us: ::core::ffi::c_ushort,
N: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD32Async(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
ui: ::core::ffi::c_uint,
N: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD2D8Async(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
uc: ::core::ffi::c_uchar,
Width: usize,
Height: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD2D16Async(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
us: ::core::ffi::c_ushort,
Width: usize,
Height: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD2D32Async(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
ui: ::core::ffi::c_uint,
Width: usize,
Height: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamGetPriority(
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
priority: *mut ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamGetId(
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
streamId: *mut ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamGetFlags(
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
flags: *mut ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamGetCtx(
- hStream: cuda_types::CUstream,
- pctx: *mut cuda_types::CUcontext,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ pctx: *mut cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWaitEvent(
- hStream: cuda_types::CUstream,
- hEvent: cuda_types::CUevent,
+ hStream: cuda_types::cuda::CUstream,
+ hEvent: cuda_types::cuda::CUevent,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamAddCallback(
- hStream: cuda_types::CUstream,
- callback: cuda_types::CUstreamCallback,
+ hStream: cuda_types::cuda::CUstream,
+ callback: cuda_types::cuda::CUstreamCallback,
userData: *mut ::core::ffi::c_void,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamAttachMemAsync(
- hStream: cuda_types::CUstream,
- dptr: cuda_types::CUdeviceptr,
+ hStream: cuda_types::cuda::CUstream,
+ dptr: cuda_types::cuda::CUdeviceptr,
length: usize,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
- fn cuStreamQuery(hStream: cuda_types::CUstream) -> cuda_types::CUresult;
- fn cuStreamSynchronize(hStream: cuda_types::CUstream) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
+ fn cuStreamQuery(hStream: cuda_types::cuda::CUstream) -> cuda_types::cuda::CUresult;
+ fn cuStreamSynchronize(
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuEventRecord(
- hEvent: cuda_types::CUevent,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hEvent: cuda_types::cuda::CUevent,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuEventRecordWithFlags(
- hEvent: cuda_types::CUevent,
- hStream: cuda_types::CUstream,
+ hEvent: cuda_types::cuda::CUevent,
+ hStream: cuda_types::cuda::CUstream,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuLaunchKernel(
- f: cuda_types::CUfunction,
+ f: cuda_types::cuda::CUfunction,
gridDimX: ::core::ffi::c_uint,
gridDimY: ::core::ffi::c_uint,
gridDimZ: ::core::ffi::c_uint,
@@ -19420,136 +19493,136 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
blockDimY: ::core::ffi::c_uint,
blockDimZ: ::core::ffi::c_uint,
sharedMemBytes: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
kernelParams: *mut *mut ::core::ffi::c_void,
extra: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
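
cuLaunchKernel receives its kernel arguments as an array of untyped pointers, one per parameter in declaration order, plus grid/block dimensions and dynamic shared memory. A sketch of packing two parameters for a hypothetical 1-D kernel; the helper and kernel signature are invented for illustration and errors are ignored:

use std::ffi::c_void;
use cuda_types::cuda::*;

// Illustrative only: launch a hypothetical `kernel(out: *mut f32, n: u32)` over a 1-D grid.
unsafe fn launch(f: CUfunction, h_stream: CUstream, mut out: CUdeviceptr, mut n: u32) {
    // kernelParams is an array of pointers, one per argument, in declaration order.
    let mut params = [
        &mut out as *mut CUdeviceptr as *mut c_void,
        &mut n as *mut u32 as *mut c_void,
    ];
    let _ = cuLaunchKernel(
        f,
        n.div_ceil(256), 1, 1, // grid dimensions
        256, 1, 1,             // block dimensions
        0,                     // dynamic shared memory in bytes
        h_stream,
        params.as_mut_ptr(),
        std::ptr::null_mut(),  // `extra` is unused when kernelParams is supplied
    );
}
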
fn cuLaunchKernelEx(
- config: *const cuda_types::CUlaunchConfig,
- f: cuda_types::CUfunction,
+ config: *const cuda_types::cuda::CUlaunchConfig,
+ f: cuda_types::cuda::CUfunction,
kernelParams: *mut *mut ::core::ffi::c_void,
extra: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuLaunchHostFunc(
- hStream: cuda_types::CUstream,
- fn_: cuda_types::CUhostFn,
+ hStream: cuda_types::cuda::CUstream,
+ fn_: cuda_types::cuda::CUhostFn,
userData: *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphicsMapResources(
count: ::core::ffi::c_uint,
- resources: *mut cuda_types::CUgraphicsResource,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ resources: *mut cuda_types::cuda::CUgraphicsResource,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphicsUnmapResources(
count: ::core::ffi::c_uint,
- resources: *mut cuda_types::CUgraphicsResource,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ resources: *mut cuda_types::cuda::CUgraphicsResource,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWriteValue32(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWaitValue32(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWriteValue64(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWaitValue64(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamBatchMemOp(
- stream: cuda_types::CUstream,
+ stream: cuda_types::cuda::CUstream,
count: ::core::ffi::c_uint,
- paramArray: *mut cuda_types::CUstreamBatchMemOpParams,
+ paramArray: *mut cuda_types::cuda::CUstreamBatchMemOpParams,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
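
The write-value/wait-value pair lets one stream publish a 32-bit flag in device-visible memory and another stream block until it appears, without a host round trip. An illustrative sketch; the raw flag values (0 for CU_STREAM_WRITE_VALUE_DEFAULT and CU_STREAM_WAIT_VALUE_GEQ) are assumptions:

use cuda_types::cuda::*;

// Illustrative only: one stream publishes a flag, another blocks until it appears.
unsafe fn signal_and_wait(producer: CUstream, consumer: CUstream, flag_addr: CUdeviceptr) {
    // 0 = CU_STREAM_WRITE_VALUE_DEFAULT (assumed numeric value).
    let _ = cuStreamWriteValue32(producer, flag_addr, 1, 0);
    // 0 = CU_STREAM_WAIT_VALUE_GEQ (assumed numeric value): resume once *flag_addr >= 1.
    let _ = cuStreamWaitValue32(consumer, flag_addr, 1, 0);
}
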
fn cuStreamWriteValue32_ptsz(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWaitValue32_ptsz(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWriteValue64_ptsz(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWaitValue64_ptsz(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamBatchMemOp_ptsz(
- stream: cuda_types::CUstream,
+ stream: cuda_types::cuda::CUstream,
count: ::core::ffi::c_uint,
- paramArray: *mut cuda_types::CUstreamBatchMemOpParams,
+ paramArray: *mut cuda_types::cuda::CUstreamBatchMemOpParams,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWriteValue32_v2(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWaitValue32_v2(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWriteValue64_v2(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWaitValue64_v2(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamBatchMemOp_v2(
- stream: cuda_types::CUstream,
+ stream: cuda_types::cuda::CUstream,
count: ::core::ffi::c_uint,
- paramArray: *mut cuda_types::CUstreamBatchMemOpParams,
+ paramArray: *mut cuda_types::cuda::CUstreamBatchMemOpParams,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemPrefetchAsync(
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- dstDevice: cuda_types::CUdevice,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ dstDevice: cuda_types::cuda::CUdevice,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemPrefetchAsync_v2(
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- location: cuda_types::CUmemLocation,
+ location: cuda_types::cuda::CUmemLocation,
flags: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuLaunchCooperativeKernel(
- f: cuda_types::CUfunction,
+ f: cuda_types::cuda::CUfunction,
gridDimX: ::core::ffi::c_uint,
gridDimY: ::core::ffi::c_uint,
gridDimZ: ::core::ffi::c_uint,
@@ -19557,181 +19630,185 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
blockDimY: ::core::ffi::c_uint,
blockDimZ: ::core::ffi::c_uint,
sharedMemBytes: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
kernelParams: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuSignalExternalSemaphoresAsync(
- extSemArray: *const cuda_types::CUexternalSemaphore,
- paramsArray: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,
+ extSemArray: *const cuda_types::cuda::CUexternalSemaphore,
+ paramsArray: *const cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,
numExtSems: ::core::ffi::c_uint,
- stream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ stream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuWaitExternalSemaphoresAsync(
- extSemArray: *const cuda_types::CUexternalSemaphore,
- paramsArray: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,
+ extSemArray: *const cuda_types::cuda::CUexternalSemaphore,
+ paramsArray: *const cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,
numExtSems: ::core::ffi::c_uint,
- stream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
- fn cuStreamBeginCapture(hStream: cuda_types::CUstream) -> cuda_types::CUresult;
- fn cuStreamBeginCapture_ptsz(hStream: cuda_types::CUstream) -> cuda_types::CUresult;
+ stream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
+ fn cuStreamBeginCapture(
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
+ fn cuStreamBeginCapture_ptsz(
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamBeginCapture_v2(
- hStream: cuda_types::CUstream,
- mode: cuda_types::CUstreamCaptureMode,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ mode: cuda_types::cuda::CUstreamCaptureMode,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamBeginCaptureToGraph(
- hStream: cuda_types::CUstream,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
- dependencyData: *const cuda_types::CUgraphEdgeData,
+ hStream: cuda_types::cuda::CUstream,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
+ dependencyData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
- mode: cuda_types::CUstreamCaptureMode,
- ) -> cuda_types::CUresult;
+ mode: cuda_types::cuda::CUstreamCaptureMode,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamEndCapture(
- hStream: cuda_types::CUstream,
- phGraph: *mut cuda_types::CUgraph,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ phGraph: *mut cuda_types::cuda::CUgraph,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamIsCapturing(
- hStream: cuda_types::CUstream,
- captureStatus: *mut cuda_types::CUstreamCaptureStatus,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamGetCaptureInfo(
- hStream: cuda_types::CUstream,
- captureStatus_out: *mut cuda_types::CUstreamCaptureStatus,
- id_out: *mut cuda_types::cuuint64_t,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus_out: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ id_out: *mut cuda_types::cuda::cuuint64_t,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamGetCaptureInfo_ptsz(
- hStream: cuda_types::CUstream,
- captureStatus_out: *mut cuda_types::CUstreamCaptureStatus,
- id_out: *mut cuda_types::cuuint64_t,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus_out: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ id_out: *mut cuda_types::cuda::cuuint64_t,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamGetCaptureInfo_v2(
- hStream: cuda_types::CUstream,
- captureStatus_out: *mut cuda_types::CUstreamCaptureStatus,
- id_out: *mut cuda_types::cuuint64_t,
- graph_out: *mut cuda_types::CUgraph,
- dependencies_out: *mut *const cuda_types::CUgraphNode,
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus_out: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ id_out: *mut cuda_types::cuda::cuuint64_t,
+ graph_out: *mut cuda_types::cuda::CUgraph,
+ dependencies_out: *mut *const cuda_types::cuda::CUgraphNode,
numDependencies_out: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamGetCaptureInfo_v3(
- hStream: cuda_types::CUstream,
- captureStatus_out: *mut cuda_types::CUstreamCaptureStatus,
- id_out: *mut cuda_types::cuuint64_t,
- graph_out: *mut cuda_types::CUgraph,
- dependencies_out: *mut *const cuda_types::CUgraphNode,
- edgeData_out: *mut *const cuda_types::CUgraphEdgeData,
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus_out: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ id_out: *mut cuda_types::cuda::cuuint64_t,
+ graph_out: *mut cuda_types::cuda::CUgraph,
+ dependencies_out: *mut *const cuda_types::cuda::CUgraphNode,
+ edgeData_out: *mut *const cuda_types::cuda::CUgraphEdgeData,
numDependencies_out: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphAddKernelNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS_v1,
- ) -> cuda_types::CUresult;
+ nodeParams: *const cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS_v1,
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphKernelNodeGetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUDA_KERNEL_NODE_PARAMS_v1,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS_v1,
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphKernelNodeSetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS_v1,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS_v1,
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphExecKernelNodeSetParams(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS_v1,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS_v1,
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphInstantiateWithParams(
- phGraphExec: *mut cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
- instantiateParams: *mut cuda_types::CUDA_GRAPH_INSTANTIATE_PARAMS,
- ) -> cuda_types::CUresult;
+ phGraphExec: *mut cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
+ instantiateParams: *mut cuda_types::cuda::CUDA_GRAPH_INSTANTIATE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphExecUpdate(
- hGraphExec: cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
- hErrorNode_out: *mut cuda_types::CUgraphNode,
- updateResult_out: *mut cuda_types::CUgraphExecUpdateResult,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
+ hErrorNode_out: *mut cuda_types::cuda::CUgraphNode,
+ updateResult_out: *mut cuda_types::cuda::CUgraphExecUpdateResult,
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphUpload(
- hGraph: cuda_types::CUgraphExec,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hGraph: cuda_types::cuda::CUgraphExec,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphLaunch(
- hGraph: cuda_types::CUgraphExec,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hGraph: cuda_types::cuda::CUgraphExec,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
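
Stream capture turns a sequence of asynchronous calls into a reusable graph: begin capture, enqueue work, end capture, instantiate, launch. A sketch using the declarations above; the CU_STREAM_CAPTURE_MODE_GLOBAL spelling is an assumed binding name and errors are ignored:

use std::mem::MaybeUninit;
use std::ptr;
use cuda_types::cuda::*;

// Illustrative only: capture whatever `record_work` enqueues on `stream` into a graph,
// then instantiate and replay it once.
unsafe fn capture_and_replay(stream: CUstream, record_work: impl FnOnce(CUstream)) {
    let _ = cuStreamBeginCapture_v2(
        stream,
        CUstreamCaptureMode::CU_STREAM_CAPTURE_MODE_GLOBAL, // assumed variant spelling
    );
    record_work(stream); // enqueue kernels/copies; nothing executes yet
    let mut graph = MaybeUninit::<CUgraph>::uninit();
    let _ = cuStreamEndCapture(stream, graph.as_mut_ptr());
    let graph = graph.assume_init();
    let mut exec = MaybeUninit::<CUgraphExec>::uninit();
    let _ = cuGraphInstantiate_v2(exec.as_mut_ptr(), graph, ptr::null_mut(), ptr::null_mut(), 0);
    let exec = exec.assume_init();
    // Replaying the captured work is now a single call per iteration.
    let _ = cuGraphLaunch(exec, stream);
}
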
fn cuStreamCopyAttributes(
- dstStream: cuda_types::CUstream,
- srcStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ dstStream: cuda_types::cuda::CUstream,
+ srcStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamGetAttribute(
- hStream: cuda_types::CUstream,
- attr: cuda_types::CUstreamAttrID,
- value: *mut cuda_types::CUstreamAttrValue,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ attr: cuda_types::cuda::CUstreamAttrID,
+ value: *mut cuda_types::cuda::CUstreamAttrValue,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamSetAttribute(
- hStream: cuda_types::CUstream,
- attr: cuda_types::CUstreamAttrID,
- param: *const cuda_types::CUstreamAttrValue,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ attr: cuda_types::cuda::CUstreamAttrID,
+ param: *const cuda_types::cuda::CUstreamAttrValue,
+ ) -> cuda_types::cuda::CUresult;
fn cuIpcOpenMemHandle(
- pdptr: *mut cuda_types::CUdeviceptr,
- handle: cuda_types::CUipcMemHandle,
+ pdptr: *mut cuda_types::cuda::CUdeviceptr,
+ handle: cuda_types::cuda::CUipcMemHandle,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphInstantiate(
- phGraphExec: *mut cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
- phErrorNode: *mut cuda_types::CUgraphNode,
+ phGraphExec: *mut cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
+ phErrorNode: *mut cuda_types::cuda::CUgraphNode,
logBuffer: *mut ::core::ffi::c_char,
bufferSize: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphInstantiate_v2(
- phGraphExec: *mut cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
- phErrorNode: *mut cuda_types::CUgraphNode,
+ phGraphExec: *mut cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
+ phErrorNode: *mut cuda_types::cuda::CUgraphNode,
logBuffer: *mut ::core::ffi::c_char,
bufferSize: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemMapArrayAsync(
- mapInfoList: *mut cuda_types::CUarrayMapInfo,
+ mapInfoList: *mut cuda_types::cuda::CUarrayMapInfo,
count: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemFreeAsync(
- dptr: cuda_types::CUdeviceptr,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ dptr: cuda_types::cuda::CUdeviceptr,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemAllocAsync(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytesize: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemAllocFromPoolAsync(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytesize: usize,
- pool: cuda_types::CUmemoryPool,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ pool: cuda_types::cuda::CUmemoryPool,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
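
cuMemAllocAsync and cuMemFreeAsync make allocation itself stream-ordered, so an upload pipeline needs only one synchronization at the very end. An illustrative sketch under the same assumptions as the earlier ones:

use std::mem::MaybeUninit;
use cuda_types::cuda::*;

// Illustrative only: stream-ordered allocate, upload, free; one sync at the very end.
unsafe fn upload_stream_ordered(stream: CUstream, host: &[u8]) {
    let mut dptr = MaybeUninit::<CUdeviceptr>::uninit();
    let _ = cuMemAllocAsync(dptr.as_mut_ptr(), host.len(), stream);
    let dptr = dptr.assume_init();
    let _ = cuMemcpyHtoDAsync_v2(dptr, host.as_ptr().cast(), host.len(), stream);
    // ... kernels consuming `dptr` would be enqueued on the same stream here ...
    let _ = cuMemFreeAsync(dptr, stream);
    // A single synchronization drains the whole pipeline.
    let _ = cuStreamSynchronize(stream);
}
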
fn cuStreamUpdateCaptureDependencies(
- hStream: cuda_types::CUstream,
- dependencies: *mut cuda_types::CUgraphNode,
+ hStream: cuda_types::cuda::CUstream,
+ dependencies: *mut cuda_types::cuda::CUgraphNode,
numDependencies: usize,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamUpdateCaptureDependencies_v2(
- hStream: cuda_types::CUstream,
- dependencies: *mut cuda_types::CUgraphNode,
- dependencyData: *const cuda_types::CUgraphEdgeData,
+ hStream: cuda_types::cuda::CUstream,
+ dependencies: *mut cuda_types::cuda::CUgraphNode,
+ dependencyData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuGetProcAddress(
symbol: *const ::core::ffi::c_char,
pfn: *mut *mut ::core::ffi::c_void,
cudaVersion: ::core::ffi::c_int,
- flags: cuda_types::cuuint64_t,
- ) -> cuda_types::CUresult;
+ flags: cuda_types::cuda::cuuint64_t,
+ ) -> cuda_types::cuda::CUresult;
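
cuGetProcAddress is how runtime layers resolve driver entry points by name and requested CUDA version, a path a driver shim also has to service. A sketch with an illustrative version number (11030) and an assumed flags value (0 for CU_GET_PROC_ADDRESS_DEFAULT):

use std::ffi::c_void;
use std::ptr;
use cuda_types::cuda::*;

// Illustrative only: resolve a driver entry point by name, the way runtime layers do.
unsafe fn lookup_mem_alloc() -> *mut c_void {
    let mut pfn: *mut c_void = ptr::null_mut();
    // 11030 = CUDA version the caller targets (illustrative);
    // 0 = CU_GET_PROC_ADDRESS_DEFAULT (assumed numeric value).
    let _ = cuGetProcAddress(c"cuMemAlloc".as_ptr(), &mut pfn, 11030, 0);
    pfn
}
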
/** \brief Initialize the profiling.
\deprecated
@@ -19783,8 +19860,8 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
fn cuProfilerInitialize(
configFile: *const ::core::ffi::c_char,
outputFile: *const ::core::ffi::c_char,
- outputMode: cuda_types::CUoutput_mode,
- ) -> cuda_types::CUresult;
+ outputMode: cuda_types::cuda::CUoutput_mode,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Enable profiling.
Enables profile collection by the active profiling tool for the
@@ -19805,7 +19882,7 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuProfilerInitialize,
::cuProfilerStop,
::cudaProfilerStart*/
- fn cuProfilerStart() -> cuda_types::CUresult;
+ fn cuProfilerStart() -> cuda_types::cuda::CUresult;
/** \brief Disable profiling.
Disables profile collection by the active profiling tool for the
@@ -19825,7 +19902,7 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuProfilerInitialize,
::cuProfilerStart,
::cudaProfilerStop*/
- fn cuProfilerStop() -> cuda_types::CUresult;
+ fn cuProfilerStop() -> cuda_types::cuda::CUresult;
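
cuProfilerStart and cuProfilerStop simply bracket the region a profiler should record when the tool is configured for selective collection. A minimal sketch (helper name invented, errors ignored):

use cuda_types::cuda::*;

// Illustrative only: restrict profile collection to one region of interest.
unsafe fn profiled_region(run_workload: impl FnOnce()) {
    let _ = cuProfilerStart(); // counters start accumulating here
    run_workload();
    let _ = cuProfilerStop(); // and stop here; work outside the bracket is not recorded
}
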
/** \brief Registers an OpenGL buffer object
Registers the buffer object specified by \p buffer for access by
@@ -19861,10 +19938,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsResourceGetMappedPointer,
::cudaGraphicsGLRegisterBuffer*/
fn cuGraphicsGLRegisterBuffer(
- pCudaResource: *mut cuda_types::CUgraphicsResource,
- buffer: cuda_types::GLuint,
+ pCudaResource: *mut cuda_types::cuda::CUgraphicsResource,
+ buffer: cuda_types::cuda::GLuint,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Register an OpenGL texture or renderbuffer object
Registers the texture or renderbuffer object specified by \p image for access by CUDA.
@@ -19921,11 +19998,11 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsSubResourceGetMappedArray,
::cudaGraphicsGLRegisterImage*/
fn cuGraphicsGLRegisterImage(
- pCudaResource: *mut cuda_types::CUgraphicsResource,
- image: cuda_types::GLuint,
- target: cuda_types::GLenum,
+ pCudaResource: *mut cuda_types::cuda::CUgraphicsResource,
+ image: cuda_types::cuda::GLuint,
+ target: cuda_types::cuda::GLenum,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
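
OpenGL interop follows a register-once, map-per-use pattern: register the GL object, then map it before CUDA touches it and unmap it afterwards (cuGraphicsMapResources and cuGraphicsUnmapResources are declared earlier in this block). A sketch; the 0 register flag (CU_GRAPHICS_REGISTER_FLAGS_NONE) is an assumed value and the helper name is invented:

use std::mem::MaybeUninit;
use cuda_types::cuda::*;

// Illustrative only: register a GL buffer once, then map/unmap it around CUDA use.
unsafe fn register_and_map(gl_buffer: GLuint, stream: CUstream) -> CUgraphicsResource {
    let mut resource = MaybeUninit::<CUgraphicsResource>::uninit();
    // 0 = CU_GRAPHICS_REGISTER_FLAGS_NONE (assumed numeric value).
    let _ = cuGraphicsGLRegisterBuffer(resource.as_mut_ptr(), gl_buffer, 0);
    let mut resource = resource.assume_init();
    let _ = cuGraphicsMapResources(1, &mut resource, stream);
    // ... CUDA reads or writes the buffer's contents here ...
    let _ = cuGraphicsUnmapResources(1, &mut resource, stream);
    resource
}
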
/** \brief Gets the CUDA devices associated with the current OpenGL context
Returns in \p *pCudaDeviceCount the number of CUDA-compatible devices
@@ -19962,10 +20039,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cudaGLGetDevices*/
fn cuGLGetDevices_v2(
pCudaDeviceCount: *mut ::core::ffi::c_uint,
- pCudaDevices: *mut cuda_types::CUdevice,
+ pCudaDevices: *mut cuda_types::cuda::CUdevice,
cudaDeviceCount: ::core::ffi::c_uint,
- deviceList: cuda_types::CUGLDeviceList,
- ) -> cuda_types::CUresult;
+ deviceList: cuda_types::cuda::CUGLDeviceList,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a CUDA context for interoperability with OpenGL
\deprecated This function is deprecated as of Cuda 5.0.
@@ -19993,10 +20070,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGLUnmapBufferObjectAsync, ::cuGLSetBufferObjectMapFlags,
::cuWGLGetDevice*/
fn cuGLCtxCreate_v2(
- pCtx: *mut cuda_types::CUcontext,
+ pCtx: *mut cuda_types::cuda::CUcontext,
Flags: ::core::ffi::c_uint,
- device: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ device: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Initializes OpenGL interoperability
\deprecated This function is deprecated as of Cuda 3.0.
@@ -20018,7 +20095,7 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGLUnregisterBufferObject, ::cuGLMapBufferObjectAsync,
::cuGLUnmapBufferObjectAsync, ::cuGLSetBufferObjectMapFlags,
::cuWGLGetDevice*/
- fn cuGLInit() -> cuda_types::CUresult;
+ fn cuGLInit() -> cuda_types::cuda::CUresult;
/** \brief Registers an OpenGL buffer object
\deprecated This function is deprecated as of Cuda 3.0.
@@ -20040,7 +20117,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\notefnerr
\sa ::cuGraphicsGLRegisterBuffer*/
- fn cuGLRegisterBufferObject(buffer: cuda_types::GLuint) -> cuda_types::CUresult;
+ fn cuGLRegisterBufferObject(
+ buffer: cuda_types::cuda::GLuint,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Maps an OpenGL buffer object
\deprecated This function is deprecated as of Cuda 3.0.
@@ -20072,10 +20151,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\sa ::cuGraphicsMapResources*/
fn cuGLMapBufferObject_v2_ptds(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
size: *mut usize,
- buffer: cuda_types::GLuint,
- ) -> cuda_types::CUresult;
+ buffer: cuda_types::cuda::GLuint,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unmaps an OpenGL buffer object
\deprecated This function is deprecated as of Cuda 3.0.
@@ -20101,7 +20180,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\notefnerr
\sa ::cuGraphicsUnmapResources*/
- fn cuGLUnmapBufferObject(buffer: cuda_types::GLuint) -> cuda_types::CUresult;
+ fn cuGLUnmapBufferObject(
+ buffer: cuda_types::cuda::GLuint,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unregister an OpenGL buffer object
\deprecated This function is deprecated as of Cuda 3.0.
@@ -20127,7 +20208,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\notefnerr
\sa ::cuGraphicsUnregisterResource*/
- fn cuGLUnregisterBufferObject(buffer: cuda_types::GLuint) -> cuda_types::CUresult;
+ fn cuGLUnregisterBufferObject(
+ buffer: cuda_types::cuda::GLuint,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Set the map flags for an OpenGL buffer object
\deprecated This function is deprecated as of Cuda 3.0.
@@ -20168,9 +20251,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\sa ::cuGraphicsResourceSetMapFlags*/
fn cuGLSetBufferObjectMapFlags(
- buffer: cuda_types::GLuint,
+ buffer: cuda_types::cuda::GLuint,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Maps an OpenGL buffer object
\deprecated This function is deprecated as of Cuda 3.0.
@@ -20203,11 +20286,11 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\sa ::cuGraphicsMapResources*/
fn cuGLMapBufferObjectAsync_v2_ptsz(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
size: *mut usize,
- buffer: cuda_types::GLuint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ buffer: cuda_types::cuda::GLuint,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unmaps an OpenGL buffer object
\deprecated This function is deprecated as of Cuda 3.0.
@@ -20235,42 +20318,42 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\sa ::cuGraphicsUnmapResources*/
fn cuGLUnmapBufferObjectAsync(
- buffer: cuda_types::GLuint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ buffer: cuda_types::cuda::GLuint,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuGLGetDevices(
pCudaDeviceCount: *mut ::core::ffi::c_uint,
- pCudaDevices: *mut cuda_types::CUdevice,
+ pCudaDevices: *mut cuda_types::cuda::CUdevice,
cudaDeviceCount: ::core::ffi::c_uint,
- deviceList: cuda_types::CUGLDeviceList,
- ) -> cuda_types::CUresult;
+ deviceList: cuda_types::cuda::CUGLDeviceList,
+ ) -> cuda_types::cuda::CUresult;
fn cuGLMapBufferObject_v2(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
size: *mut usize,
- buffer: cuda_types::GLuint,
- ) -> cuda_types::CUresult;
+ buffer: cuda_types::cuda::GLuint,
+ ) -> cuda_types::cuda::CUresult;
fn cuGLMapBufferObjectAsync_v2(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
size: *mut usize,
- buffer: cuda_types::GLuint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ buffer: cuda_types::cuda::GLuint,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuGLCtxCreate(
- pCtx: *mut cuda_types::CUcontext,
+ pCtx: *mut cuda_types::cuda::CUcontext,
Flags: ::core::ffi::c_uint,
- device: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ device: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
fn cuGLMapBufferObject(
- dptr: *mut cuda_types::CUdeviceptr_v1,
+ dptr: *mut cuda_types::cuda::CUdeviceptr_v1,
size: *mut ::core::ffi::c_uint,
- buffer: cuda_types::GLuint,
- ) -> cuda_types::CUresult;
+ buffer: cuda_types::cuda::GLuint,
+ ) -> cuda_types::cuda::CUresult;
fn cuGLMapBufferObjectAsync(
- dptr: *mut cuda_types::CUdeviceptr_v1,
+ dptr: *mut cuda_types::cuda::CUdeviceptr_v1,
size: *mut ::core::ffi::c_uint,
- buffer: cuda_types::GLuint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ buffer: cuda_types::cuda::GLuint,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Registers an EGL image
Registers the EGLImageKHR specified by \p image for access by
@@ -20317,10 +20400,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsUnmapResources,
::cudaGraphicsEGLRegisterImage*/
fn cuGraphicsEGLRegisterImage(
- pCudaResource: *mut cuda_types::CUgraphicsResource,
- image: cuda_types::EGLImageKHR,
+ pCudaResource: *mut cuda_types::cuda::CUgraphicsResource,
+ image: cuda_types::cuda::EGLImageKHR,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Connect CUDA to EGLStream as a consumer.
Connect CUDA as a consumer to EGLStreamKHR specified by \p stream.
@@ -20340,9 +20423,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame,
::cudaEGLStreamConsumerConnect*/
fn cuEGLStreamConsumerConnect(
- conn: *mut cuda_types::CUeglStreamConnection,
- stream: cuda_types::EGLStreamKHR,
- ) -> cuda_types::CUresult;
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ stream: cuda_types::cuda::EGLStreamKHR,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Connect CUDA to EGLStream as a consumer with given flags.
Connect CUDA as a consumer to EGLStreamKHR specified by \p stream with specified \p flags defined by CUeglResourceLocationFlags.
@@ -20363,10 +20446,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame,
::cudaEGLStreamConsumerConnectWithFlags*/
fn cuEGLStreamConsumerConnectWithFlags(
- conn: *mut cuda_types::CUeglStreamConnection,
- stream: cuda_types::EGLStreamKHR,
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ stream: cuda_types::cuda::EGLStreamKHR,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Disconnect CUDA as a consumer to EGLStream .
Disconnect CUDA as a consumer to EGLStreamKHR.
@@ -20382,8 +20465,8 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame,
::cudaEGLStreamConsumerDisconnect*/
fn cuEGLStreamConsumerDisconnect(
- conn: *mut cuda_types::CUeglStreamConnection,
- ) -> cuda_types::CUresult;
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Acquire an image frame from the EGLStream with CUDA as a consumer.
Acquire an image frame from EGLStreamKHR. This API can also acquire an old frame presented
@@ -20410,11 +20493,11 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame,
::cudaEGLStreamConsumerAcquireFrame*/
fn cuEGLStreamConsumerAcquireFrame(
- conn: *mut cuda_types::CUeglStreamConnection,
- pCudaResource: *mut cuda_types::CUgraphicsResource,
- pStream: *mut cuda_types::CUstream,
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ pCudaResource: *mut cuda_types::cuda::CUgraphicsResource,
+ pStream: *mut cuda_types::cuda::CUstream,
timeout: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Releases the last frame acquired from the EGLStream.
Release the acquired image frame specified by \p pCudaResource to EGLStreamKHR.
@@ -20434,10 +20517,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame,
::cudaEGLStreamConsumerReleaseFrame*/
fn cuEGLStreamConsumerReleaseFrame(
- conn: *mut cuda_types::CUeglStreamConnection,
- pCudaResource: cuda_types::CUgraphicsResource,
- pStream: *mut cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ pCudaResource: cuda_types::cuda::CUgraphicsResource,
+ pStream: *mut cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Connect CUDA to EGLStream as a producer.
Connect CUDA as a producer to EGLStreamKHR specified by \p stream.
@@ -20459,11 +20542,11 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuEGLStreamProducerPresentFrame,
::cudaEGLStreamProducerConnect*/
fn cuEGLStreamProducerConnect(
- conn: *mut cuda_types::CUeglStreamConnection,
- stream: cuda_types::EGLStreamKHR,
- width: cuda_types::EGLint,
- height: cuda_types::EGLint,
- ) -> cuda_types::CUresult;
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ stream: cuda_types::cuda::EGLStreamKHR,
+ width: cuda_types::cuda::EGLint,
+ height: cuda_types::cuda::EGLint,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Disconnect CUDA as a producer to EGLStream .
Disconnect CUDA as a producer to EGLStreamKHR.
@@ -20479,8 +20562,8 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuEGLStreamProducerPresentFrame,
::cudaEGLStreamProducerDisconnect*/
fn cuEGLStreamProducerDisconnect(
- conn: *mut cuda_types::CUeglStreamConnection,
- ) -> cuda_types::CUresult;
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Present a CUDA eglFrame to the EGLStream with CUDA as a producer.
When a frame is presented by the producer, it gets associated with the EGLStream
@@ -20526,10 +20609,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuEGLStreamProducerReturnFrame,
::cudaEGLStreamProducerPresentFrame*/
fn cuEGLStreamProducerPresentFrame(
- conn: *mut cuda_types::CUeglStreamConnection,
- eglframe: cuda_types::CUeglFrame,
- pStream: *mut cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ eglframe: cuda_types::cuda::CUeglFrame,
+ pStream: *mut cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Return the CUDA eglFrame to the EGLStream released by the consumer.
This API can potentially return CUDA_ERROR_LAUNCH_TIMEOUT if the consumer has not
@@ -20548,10 +20631,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuEGLStreamProducerPresentFrame,
::cudaEGLStreamProducerReturnFrame*/
fn cuEGLStreamProducerReturnFrame(
- conn: *mut cuda_types::CUeglStreamConnection,
- eglframe: *mut cuda_types::CUeglFrame,
- pStream: *mut cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ eglframe: *mut cuda_types::cuda::CUeglFrame,
+ pStream: *mut cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Get an eglFrame through which to access a registered EGL graphics resource.
Returns in \p *eglFrame an eglFrame pointer through which the registered graphics resource
@@ -20599,11 +20682,11 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsResourceGetMappedPointer,
::cudaGraphicsResourceGetMappedEglFrame*/
fn cuGraphicsResourceGetMappedEglFrame(
- eglFrame: *mut cuda_types::CUeglFrame,
- resource: cuda_types::CUgraphicsResource,
+ eglFrame: *mut cuda_types::cuda::CUeglFrame,
+ resource: cuda_types::cuda::CUgraphicsResource,
index: ::core::ffi::c_uint,
mipLevel: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates an event from EGLSync object
Creates an event *phEvent from an EGLSyncKHR eglSync with the flags specified
@@ -20639,10 +20722,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuEventSynchronize,
::cuEventDestroy*/
fn cuEventCreateFromEGLSync(
- phEvent: *mut cuda_types::CUevent,
- eglSync: cuda_types::EGLSyncKHR,
+ phEvent: *mut cuda_types::cuda::CUevent,
+ eglSync: cuda_types::cuda::EGLSyncKHR,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the CUDA device associated with a VDPAU device
Returns in \p *pDevice the CUDA device associated with a \p vdpDevice, if
@@ -20666,10 +20749,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
::cudaVDPAUGetDevice*/
fn cuVDPAUGetDevice(
- pDevice: *mut cuda_types::CUdevice,
- vdpDevice: cuda_types::VdpDevice,
- vdpGetProcAddress: cuda_types::VdpGetProcAddress,
- ) -> cuda_types::CUresult;
+ pDevice: *mut cuda_types::cuda::CUdevice,
+ vdpDevice: cuda_types::cuda::VdpDevice,
+ vdpGetProcAddress: cuda_types::cuda::VdpGetProcAddress,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a CUDA context for interoperability with VDPAU
Creates a new CUDA context, initializes VDPAU interoperability, and
@@ -20699,12 +20782,12 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
::cuVDPAUGetDevice*/
fn cuVDPAUCtxCreate_v2(
- pCtx: *mut cuda_types::CUcontext,
+ pCtx: *mut cuda_types::cuda::CUcontext,
flags: ::core::ffi::c_uint,
- device: cuda_types::CUdevice,
- vdpDevice: cuda_types::VdpDevice,
- vdpGetProcAddress: cuda_types::VdpGetProcAddress,
- ) -> cuda_types::CUresult;
+ device: cuda_types::cuda::CUdevice,
+ vdpDevice: cuda_types::cuda::VdpDevice,
+ vdpGetProcAddress: cuda_types::cuda::VdpGetProcAddress,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Registers a VDPAU VdpVideoSurface object
Registers the VdpVideoSurface specified by \p vdpSurface for access by
@@ -20776,10 +20859,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuVDPAUGetDevice,
::cudaGraphicsVDPAURegisterVideoSurface*/
fn cuGraphicsVDPAURegisterVideoSurface(
- pCudaResource: *mut cuda_types::CUgraphicsResource,
- vdpSurface: cuda_types::VdpVideoSurface,
+ pCudaResource: *mut cuda_types::cuda::CUgraphicsResource,
+ vdpSurface: cuda_types::cuda::VdpVideoSurface,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Registers a VDPAU VdpOutputSurface object
Registers the VdpOutputSurface specified by \p vdpSurface for access by
@@ -20838,15 +20921,15 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuVDPAUGetDevice,
::cudaGraphicsVDPAURegisterOutputSurface*/
fn cuGraphicsVDPAURegisterOutputSurface(
- pCudaResource: *mut cuda_types::CUgraphicsResource,
- vdpSurface: cuda_types::VdpOutputSurface,
+ pCudaResource: *mut cuda_types::cuda::CUgraphicsResource,
+ vdpSurface: cuda_types::cuda::VdpOutputSurface,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuVDPAUCtxCreate(
- pCtx: *mut cuda_types::CUcontext,
+ pCtx: *mut cuda_types::cuda::CUcontext,
flags: ::core::ffi::c_uint,
- device: cuda_types::CUdevice,
- vdpDevice: cuda_types::VdpDevice,
- vdpGetProcAddress: cuda_types::VdpGetProcAddress,
- ) -> cuda_types::CUresult;
+ device: cuda_types::cuda::CUdevice,
+ vdpDevice: cuda_types::cuda::VdpDevice,
+ vdpGetProcAddress: cuda_types::cuda::VdpGetProcAddress,
+ ) -> cuda_types::cuda::CUresult;
}
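The hunks above are one mechanical change applied throughout the generated bindings: every type is now referenced through the `cuda_types::cuda` submodule instead of the crate root, making room for the sibling `cuda_types::nvml` module introduced by this commit. A minimal sketch of what the new paths look like from a hypothetical downstream caller (the function name and body are illustrative only, not part of this commit):

    // Hypothetical downstream code, shown only to illustrate the path change;
    // assumes the cuda_types crate from this tree is a dependency.
    use cuda_types::cuda::{CUdeviceptr, CUresult, GLuint};

    // Previously this signature would have used the flat paths
    // `cuda_types::CUdeviceptr`, `cuda_types::GLuint` and `cuda_types::CUresult`.
    #[allow(unused_variables)]
    fn map_buffer(dptr: *mut CUdeviceptr, size: *mut usize, buffer: GLuint) -> CUresult {
        todo!("forward to the host interop implementation")
    }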
diff --git a/cuda_base/src/lib.rs b/cuda_base/src/lib.rs
index 833d372..58f5eae 100644
--- a/cuda_base/src/lib.rs
+++ b/cuda_base/src/lib.rs
@@ -14,6 +14,7 @@ use syn::{
};
const CUDA_RS: &'static str = include_str! {"cuda.rs"};
+const NVML_RS: &'static str = include_str! {"nvml.rs"};
// This macro accepts following arguments:
// * `normal_macro`: ident for a normal macro
@@ -31,9 +32,13 @@ const CUDA_RS: &'static str = include_str! {"cuda.rs"};
// Additionally, it does a fixup of CUDA types so they get prefixed with `type_path`
#[proc_macro]
pub fn cuda_function_declarations(tokens: TokenStream) -> TokenStream {
+ function_declarations(tokens, CUDA_RS)
+}
+
+fn function_declarations(tokens: TokenStream, module: &str) -> TokenStream {
let input = parse_macro_input!(tokens as FnDeclInput);
+ let mut cuda_module = syn::parse_str::<File>(module).unwrap();
let mut choose_macro = ChooseMacro::new(input);
- let mut cuda_module = syn::parse_str::<File>(CUDA_RS).unwrap();
syn::visit_mut::visit_file_mut(&mut FixFnSignatures, &mut cuda_module);
let extern_ = if let Item::ForeignMod(extern_) = cuda_module.items.pop().unwrap() {
extern_
@@ -68,6 +73,11 @@ pub fn cuda_function_declarations(tokens: TokenStream) -> TokenStream {
}
result.into()
}
+
+#[proc_macro]
+pub fn nvml_function_declarations(tokens: TokenStream) -> TokenStream {
+ function_declarations(tokens, NVML_RS)
+}
struct FnDeclInput {
normal_macro: Path,
overrides: Punctuated<OverrideMacro, Token![,]>,
@@ -193,6 +203,7 @@ fn join(fn_: Vec<String>, find_module: bool) -> Punctuated<Ident, Token![::]> {
"func" => &["function"],
"mem" => &["memory"],
"memcpy" => &["memory", "copy"],
+ "memset" => &["memory", "set"],
_ => return None,
})
}
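The lib.rs change factors the body of `cuda_function_declarations` into a shared `function_declarations` helper parameterized over the embedded source, so the new `nvml_function_declarations` macro reuses the same parsing, override dispatch and signature fixups for the bundled `nvml.rs`; the `join` table additionally maps the `memset` name fragment to the `["memory", "set"]` path segments. A hedged sketch of how a consumer crate could drive the new macro; the callback macro name is hypothetical, and its catch-all pattern deliberately sidesteps the exact token shape `cuda_base` hands to it:

    // Hypothetical consumer crate, assuming `cuda_base` from this tree is a dependency.
    // The proc macro expands every extern declaration from the bundled nvml.rs through
    // the macro it is given; a catch-all pattern is used because the precise token
    // shape is defined inside cuda_base, not in this sketch.
    macro_rules! log_declarations {
        ($($tokens:tt)*) => {
            // A real consumer (e.g. zluda_ml) would generate #[no_mangle] stubs here.
        };
    }

    cuda_base::nvml_function_declarations!(log_declarations);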
diff --git a/cuda_base/src/nvml.rs b/cuda_base/src/nvml.rs
new file mode 100644
index 0000000..b89ef7a
--- /dev/null
+++ b/cuda_base/src/nvml.rs
@@ -0,0 +1,7857 @@
+// Generated automatically by zluda_bindgen
+// DO NOT EDIT MANUALLY
+#![allow(warnings)]
+extern "system" {
+ #[must_use]
+ /** Initialize NVML, but don't initialize any GPUs yet.
+
+ \note nvmlInit_v3 introduces a "flags" argument, that allows passing boolean values
+ modifying the behaviour of nvmlInit().
+ \note In NVML 5.319 new nvmlInit_v2 has replaced nvmlInit"_v1" (default in NVML 4.304 and older) that
+ did initialize all GPU devices in the system.
+
+ This allows NVML to communicate with a GPU
+ when other GPUs in the system are unstable or in a bad state. When using this API, GPUs are
+ discovered and initialized in nvmlDeviceGetHandleBy* functions instead.
+
+ \note To contrast nvmlInit_v2 with nvmlInit"_v1", NVML 4.304 nvmlInit"_v1" will fail when any detected GPU is in
+ a bad or unstable state.
+
+ For all products.
+
+ This method should be called once before invoking any other methods in the library.
+ A reference count of the number of initializations is maintained. Shutdown only occurs
+ when the reference count reaches zero.
+
+ @return
+ - \ref NVML_SUCCESS if NVML has been properly initialized
+ - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running
+ - \ref NVML_ERROR_NO_PERMISSION if NVML does not have permission to talk to the driver
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlInit_v2() -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** nvmlInitWithFlags is a variant of nvmlInit(), that allows passing a set of boolean values
+ modifying the behaviour of nvmlInit().
+ Other than the "flags" parameter it is completely similar to \ref nvmlInit_v2.
+
+ For all products.
+
+ @param flags behaviour modifier flags
+
+ @return
+ - \ref NVML_SUCCESS if NVML has been properly initialized
+ - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running
+ - \ref NVML_ERROR_NO_PERMISSION if NVML does not have permission to talk to the driver
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlInitWithFlags(flags: ::core::ffi::c_uint) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Shut down NVML by releasing all GPU resources previously allocated with \ref nvmlInit_v2().
+
+ For all products.
+
+ This method should be called after NVML work is done, once for each call to \ref nvmlInit_v2()
+ A reference count of the number of initializations is maintained. Shutdown only occurs
+ when the reference count reaches zero. For backwards compatibility, no error is reported if
+ nvmlShutdown() is called more times than nvmlInit().
+
+ @return
+ - \ref NVML_SUCCESS if NVML has been properly shut down
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlShutdown() -> cuda_types::nvml::nvmlReturn_t;
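The `nvmlInit_v2`/`nvmlShutdown` pair above is reference counted, so every successful init must eventually be balanced by a shutdown. A minimal lifecycle sketch using a local extern block rather than the generated bindings; it assumes `nvmlReturn_t` is an int-sized C enum with 0 meaning NVML_SUCCESS and that libnvidia-ml is available at link time:

    use std::os::raw::c_int;

    #[link(name = "nvidia-ml")]
    extern "system" {
        fn nvmlInit_v2() -> c_int;
        fn nvmlShutdown() -> c_int;
    }

    fn main() {
        unsafe {
            // NVML keeps an internal reference count: teardown only happens once the
            // number of successful nvmlInit_v2 calls has been matched by nvmlShutdown.
            assert_eq!(nvmlInit_v2(), 0, "NVML failed to initialize");
            assert_eq!(nvmlShutdown(), 0, "NVML failed to shut down");
        }
    }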
+ /** Helper method for converting NVML error codes into readable strings.
+
+ For all products.
+
+ @param result NVML error code to convert
+
+ @return String representation of the error.
+*/
+ fn nvmlErrorString(
+ result: cuda_types::nvml::nvmlReturn_t,
+ ) -> *const ::core::ffi::c_char;
+ #[must_use]
+ /** Retrieves the version of the system's graphics driver.
+
+ For all products.
+
+ The version identifier is an alphanumeric string. It will not exceed 80 characters in length
+ (including the NULL terminator). See \ref nvmlConstants::NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE.
+
+ @param version Reference in which to return the version identifier
+ @param length The maximum allowed length of the string returned in \a version
+
+ @return
+ - \ref NVML_SUCCESS if \a version has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small*/
+ fn nvmlSystemGetDriverVersion(
+ version: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
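The version queries here follow one convention: the caller supplies a character buffer and its length, and NVML writes a NUL-terminated string no longer than the documented buffer size. A sketch of that pattern for the driver version (local extern declaration; 80 is the documented driver-version buffer size; assumes NVML is already initialized and that the return type is an int-sized enum with 0 meaning NVML_SUCCESS):

    use std::ffi::CStr;
    use std::os::raw::{c_char, c_int, c_uint};

    #[link(name = "nvidia-ml")]
    extern "system" {
        fn nvmlSystemGetDriverVersion(version: *mut c_char, length: c_uint) -> c_int;
    }

    fn driver_version() -> Option<String> {
        let mut buf = [0 as c_char; 80];
        let status = unsafe { nvmlSystemGetDriverVersion(buf.as_mut_ptr(), buf.len() as c_uint) };
        if status != 0 {
            return None;
        }
        // On success NVML NUL-terminates the string, so CStr parsing is sound.
        Some(unsafe { CStr::from_ptr(buf.as_ptr()) }.to_string_lossy().into_owned())
    }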
+ #[must_use]
+ /** Retrieves the version of the NVML library.
+
+ For all products.
+
+ The version identifier is an alphanumeric string. It will not exceed 80 characters in length
+ (including the NULL terminator). See \ref nvmlConstants::NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE.
+
+ @param version Reference in which to return the version identifier
+ @param length The maximum allowed length of the string returned in \a version
+
+ @return
+ - \ref NVML_SUCCESS if \a version has been set
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small*/
+ fn nvmlSystemGetNVMLVersion(
+ version: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the version of the CUDA driver.
+
+ For all products.
+
+ The CUDA driver version returned will be retrieved from the currently installed version of CUDA.
+ If the cuda library is not found, this function will return a known supported version number.
+
+ @param cudaDriverVersion Reference in which to return the version identifier
+
+ @return
+ - \ref NVML_SUCCESS if \a cudaDriverVersion has been set
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a cudaDriverVersion is NULL*/
+ fn nvmlSystemGetCudaDriverVersion(
+ cudaDriverVersion: *mut ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the version of the CUDA driver from the shared library.
+
+ For all products.
+
+ The returned CUDA driver version is obtained by calling cuDriverGetVersion() from the shared library.
+
+ @param cudaDriverVersion Reference in which to return the version identifier
+
+ @return
+ - \ref NVML_SUCCESS if \a cudaDriverVersion has been set
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a cudaDriverVersion is NULL
+ - \ref NVML_ERROR_LIBRARY_NOT_FOUND if \a libcuda.so.1 or libcuda.dll is not found
+ - \ref NVML_ERROR_FUNCTION_NOT_FOUND if \a cuDriverGetVersion() is not found in the shared library*/
+ fn nvmlSystemGetCudaDriverVersion_v2(
+ cudaDriverVersion: *mut ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
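Both CUDA driver version queries above hand back a single packed integer; the conventional CUDA encoding is major * 1000 + minor * 10 (NVML's headers expose the same split through their major/minor helper macros). A small decoder as a worked example:

    // Decode the packed CUDA driver version integer, e.g. 12040 -> (12, 4).
    fn split_cuda_version(version: i32) -> (i32, i32) {
        (version / 1000, (version % 1000) / 10)
    }

    fn main() {
        assert_eq!(split_cuda_version(12040), (12, 4));
        assert_eq!(split_cuda_version(11080), (11, 8));
    }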
+ #[must_use]
+ /** Gets name of the process with provided process id
+
+ For all products.
+
+ The returned process name is cropped to the provided length.
+ The name string is encoded in ANSI.
+
+ @param pid The identifier of the process
+ @param name Reference in which to return the process name
+ @param length The maximum allowed length of the string returned in \a name
+
+ @return
+ - \ref NVML_SUCCESS if \a name has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a name is NULL or \a length is 0.
+ - \ref NVML_ERROR_NOT_FOUND if the process doesn't exist
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlSystemGetProcessName(
+ pid: ::core::ffi::c_uint,
+ name: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the IDs and firmware versions for any Host Interface Cards (HICs) in the system.
+
+ For S-class products.
+
+ The \a hwbcCount argument is expected to be set to the size of the input \a hwbcEntries array.
+ The HIC must be connected to an S-class system for it to be reported by this function.
+
+ @param hwbcCount Size of hwbcEntries array
+ @param hwbcEntries Array holding information about hwbc
+
+ @return
+ - \ref NVML_SUCCESS if \a hwbcCount and \a hwbcEntries have been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if either \a hwbcCount or \a hwbcEntries is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a hwbcCount indicates that the \a hwbcEntries array is too small*/
+ fn nvmlSystemGetHicVersion(
+ hwbcCount: *mut ::core::ffi::c_uint,
+ hwbcEntries: *mut cuda_types::nvml::nvmlHwbcEntry_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the set of GPUs that have a CPU affinity with the given CPU number
+ For all products.
+ Supported on Linux only.
+
+ @param cpuNumber The CPU number
+ @param count When zero, is set to the number of matching GPUs such that \a deviceArray
+ can be malloc'd. When non-zero, \a deviceArray will be filled with \a count
+ number of device handles.
+ @param deviceArray An array of device handles for GPUs found with affinity to \a cpuNumber
+
+ @return
+ - \ref NVML_SUCCESS if \a deviceArray or \a count (if initially zero) has been set
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a cpuNumber, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature
+ - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery*/
+ fn nvmlSystemGetTopologyGpuSet(
+ cpuNumber: ::core::ffi::c_uint,
+ count: *mut ::core::ffi::c_uint,
+ deviceArray: *mut cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
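nvmlSystemGetTopologyGpuSet documents the usual two-call sizing idiom: call once with *count == 0 to learn the required array size, then call again with an allocated array. A sketch of that idiom (local extern declaration; the opaque nvmlDevice_t handle is modeled as a raw pointer and the return type as an int-sized enum; assumes NVML is already initialized):

    use std::os::raw::{c_int, c_uint, c_void};
    use std::ptr;

    type NvmlDevice = *mut c_void; // stand-in for the opaque nvmlDevice_t handle

    #[link(name = "nvidia-ml")]
    extern "system" {
        fn nvmlSystemGetTopologyGpuSet(
            cpuNumber: c_uint,
            count: *mut c_uint,
            deviceArray: *mut NvmlDevice,
        ) -> c_int;
    }

    fn gpus_with_affinity_to(cpu: c_uint) -> Vec<NvmlDevice> {
        let mut count: c_uint = 0;
        // First call: with count == 0 NVML only reports how many handles it would return.
        unsafe { nvmlSystemGetTopologyGpuSet(cpu, &mut count, ptr::null_mut()) };
        let mut devices = vec![ptr::null_mut(); count as usize];
        // Second call: count is now non-zero, so deviceArray is filled with `count` handles.
        unsafe { nvmlSystemGetTopologyGpuSet(cpu, &mut count, devices.as_mut_ptr()) };
        devices
    }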
+ #[must_use]
+ /** Retrieves the number of units in the system.
+
+ For S-class products.
+
+ @param unitCount Reference in which to return the number of units
+
+ @return
+ - \ref NVML_SUCCESS if \a unitCount has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a unitCount is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlUnitGetCount(
+ unitCount: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Acquire the handle for a particular unit, based on its index.
+
+ For S-class products.
+
+ Valid indices are derived from the \a unitCount returned by \ref nvmlUnitGetCount().
+ For example, if \a unitCount is 2 the valid indices are 0 and 1, corresponding to UNIT 0 and UNIT 1.
+
+ The order in which NVML enumerates units has no guarantees of consistency between reboots.
+
+ @param index The index of the target unit, >= 0 and < \a unitCount
+ @param unit Reference in which to return the unit handle
+
+ @return
+ - \ref NVML_SUCCESS if \a unit has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a unit is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlUnitGetHandleByIndex(
+ index: ::core::ffi::c_uint,
+ unit: *mut cuda_types::nvml::nvmlUnit_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the static information associated with a unit.
+
+ For S-class products.
+
+ See \ref nvmlUnitInfo_t for details on available unit info.
+
+ @param unit The identifier of the target unit
+ @param info Reference in which to return the unit information
+
+ @return
+ - \ref NVML_SUCCESS if \a info has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a info is NULL*/
+ fn nvmlUnitGetUnitInfo(
+ unit: cuda_types::nvml::nvmlUnit_t,
+ info: *mut cuda_types::nvml::nvmlUnitInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the LED state associated with this unit.
+
+ For S-class products.
+
+ See \ref nvmlLedState_t for details on allowed states.
+
+ @param unit The identifier of the target unit
+ @param state Reference in which to return the current LED state
+
+ @return
+ - \ref NVML_SUCCESS if \a state has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a state is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlUnitSetLedState()*/
+ fn nvmlUnitGetLedState(
+ unit: cuda_types::nvml::nvmlUnit_t,
+ state: *mut cuda_types::nvml::nvmlLedState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the PSU stats for the unit.
+
+ For S-class products.
+
+ See \ref nvmlPSUInfo_t for details on available PSU info.
+
+ @param unit The identifier of the target unit
+ @param psu Reference in which to return the PSU information
+
+ @return
+ - \ref NVML_SUCCESS if \a psu has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a psu is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlUnitGetPsuInfo(
+ unit: cuda_types::nvml::nvmlUnit_t,
+ psu: *mut cuda_types::nvml::nvmlPSUInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the temperature readings for the unit, in degrees C.
+
+ For S-class products.
+
+ Depending on the product, readings may be available for intake (type=0),
+ exhaust (type=1) and board (type=2).
+
+ @param unit The identifier of the target unit
+ @param type The type of reading to take
+ @param temp Reference in which to return the intake temperature
+
+ @return
+ - \ref NVML_SUCCESS if \a temp has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit or \a type is invalid or \a temp is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlUnitGetTemperature(
+ unit: cuda_types::nvml::nvmlUnit_t,
+ type_: ::core::ffi::c_uint,
+ temp: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the fan speed readings for the unit.
+
+ For S-class products.
+
+ See \ref nvmlUnitFanSpeeds_t for details on available fan speed info.
+
+ @param unit The identifier of the target unit
+ @param fanSpeeds Reference in which to return the fan speed information
+
+ @return
+ - \ref NVML_SUCCESS if \a fanSpeeds has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a fanSpeeds is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlUnitGetFanSpeedInfo(
+ unit: cuda_types::nvml::nvmlUnit_t,
+ fanSpeeds: *mut cuda_types::nvml::nvmlUnitFanSpeeds_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the set of GPU devices that are attached to the specified unit.
+
+ For S-class products.
+
+ The \a deviceCount argument is expected to be set to the size of the input \a devices array.
+
+ @param unit The identifier of the target unit
+ @param deviceCount Reference in which to provide the \a devices array size, and
+ to return the number of attached GPU devices
+ @param devices Reference in which to return the references to the attached GPU devices
+
+ @return
+ - \ref NVML_SUCCESS if \a deviceCount and \a devices have been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a deviceCount indicates that the \a devices array is too small
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid, either of \a deviceCount or \a devices is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlUnitGetDevices(
+ unit: cuda_types::nvml::nvmlUnit_t,
+ deviceCount: *mut ::core::ffi::c_uint,
+ devices: *mut cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the number of compute devices in the system. A compute device is a single GPU.
+
+ For all products.
+
+ Note: New nvmlDeviceGetCount_v2 (default in NVML 5.319) returns count of all devices in the system
+ even if nvmlDeviceGetHandleByIndex_v2 returns NVML_ERROR_NO_PERMISSION for such device.
+ Update your code to handle this error, or use NVML 4.304 or older nvml header file.
+ For backward binary compatibility reasons _v1 version of the API is still present in the shared
+ library.
+ Old _v1 version of nvmlDeviceGetCount doesn't count devices that NVML has no permission to talk to.
+
+ @param deviceCount Reference in which to return the number of accessible devices
+
+ @return
+ - \ref NVML_SUCCESS if \a deviceCount has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a deviceCount is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetCount_v2(
+ deviceCount: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get attributes (engine counts etc.) for the given NVML device handle.
+
+ @note This API currently only supports MIG device handles.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device NVML device handle
+ @param attributes Device attributes
+
+ @return
+ - \ref NVML_SUCCESS if \a device attributes were successfully retrieved
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device handle is invalid
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetAttributes_v2(
+ device: cuda_types::nvml::nvmlDevice_t,
+ attributes: *mut cuda_types::nvml::nvmlDeviceAttributes_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Acquire the handle for a particular device, based on its index.
+
+ For all products.
+
+ Valid indices are derived from the \a accessibleDevices count returned by
+ \ref nvmlDeviceGetCount_v2(). For example, if \a accessibleDevices is 2 the valid indices
+ are 0 and 1, corresponding to GPU 0 and GPU 1.
+
+ The order in which NVML enumerates devices has no guarantees of consistency between reboots. For that reason it
+ is recommended that devices be looked up by their PCI ids or UUID. See
+ \ref nvmlDeviceGetHandleByUUID() and \ref nvmlDeviceGetHandleByPciBusId_v2().
+
+ Note: The NVML index may not correlate with other APIs, such as the CUDA device index.
+
+ Starting from NVML 5, this API causes NVML to initialize the target GPU.
+ NVML may initialize additional GPUs if:
+ - The target GPU is an SLI slave
+
+ Note: New nvmlDeviceGetCount_v2 (default in NVML 5.319) returns count of all devices in the system
+ even if nvmlDeviceGetHandleByIndex_v2 returns NVML_ERROR_NO_PERMISSION for such device.
+ Update your code to handle this error, or use NVML 4.304 or older nvml header file.
+ For backward binary compatibility reasons _v1 version of the API is still present in the shared
+ library.
+ Old _v1 version of nvmlDeviceGetCount doesn't count devices that NVML has no permission to talk to.
+
+ This means that nvmlDeviceGetHandleByIndex_v2 and _v1 can return different devices for the same index.
+ If you don't touch macros that map old (_v1) versions to _v2 versions at the top of the file you don't
+ need to worry about that.
+
+ @param index The index of the target GPU, >= 0 and < \a accessibleDevices
+ @param device Reference in which to return the device handle
+
+ @return
+ - \ref NVML_SUCCESS if \a device has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a device is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to talk to this device
+ - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetIndex
+ @see nvmlDeviceGetCount*/
+ fn nvmlDeviceGetHandleByIndex_v2(
+ index: ::core::ffi::c_uint,
+ device: *mut cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
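Device enumeration combines nvmlDeviceGetCount_v2 with nvmlDeviceGetHandleByIndex_v2 over indices 0..count; the notes above also warn that an individual index can still fail with NVML_ERROR_NO_PERMISSION, so per-index errors should be tolerated rather than treated as fatal. A sketch under the same local-extern assumptions as the earlier snippets (NVML already initialized):

    use std::os::raw::{c_int, c_uint, c_void};

    type NvmlDevice = *mut c_void; // stand-in for the opaque nvmlDevice_t handle

    #[link(name = "nvidia-ml")]
    extern "system" {
        fn nvmlDeviceGetCount_v2(deviceCount: *mut c_uint) -> c_int;
        fn nvmlDeviceGetHandleByIndex_v2(index: c_uint, device: *mut NvmlDevice) -> c_int;
    }

    fn accessible_devices() -> Vec<NvmlDevice> {
        let mut count: c_uint = 0;
        if unsafe { nvmlDeviceGetCount_v2(&mut count) } != 0 {
            return Vec::new();
        }
        let mut devices = Vec::new();
        for index in 0..count {
            let mut device: NvmlDevice = std::ptr::null_mut();
            // Skip indices the caller has no permission for instead of aborting.
            if unsafe { nvmlDeviceGetHandleByIndex_v2(index, &mut device) } == 0 {
                devices.push(device);
            }
        }
        devices
    }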
+ #[must_use]
+ /** Acquire the handle for a particular device, based on its board serial number.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ This number corresponds to the value printed directly on the board, and to the value returned by
+ \ref nvmlDeviceGetSerial().
+
+ @deprecated Since more than one GPU can exist on a single board this function is deprecated in favor
+ of \ref nvmlDeviceGetHandleByUUID.
+ For dual GPU boards this function will return NVML_ERROR_INVALID_ARGUMENT.
+
+ Starting from NVML 5, this API causes NVML to initialize the target GPU.
+ NVML may initialize additional GPUs as it searches for the target GPU.
+
+ @param serial The board serial number of the target GPU
+ @param device Reference in which to return the device handle
+
+ @return
+ - \ref NVML_SUCCESS if \a device has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a serial is invalid, \a device is NULL or more than one
+ device has the same serial (dual GPU boards)
+ - \ref NVML_ERROR_NOT_FOUND if \a serial does not match a valid device on the system
+ - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables
+ - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs
+ - \ref NVML_ERROR_GPU_IS_LOST if any GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetSerial
+ @see nvmlDeviceGetHandleByUUID*/
+ fn nvmlDeviceGetHandleBySerial(
+ serial: *const ::core::ffi::c_char,
+ device: *mut cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Acquire the handle for a particular device, based on its globally unique immutable UUID associated with each device.
+
+ For all products.
+
+ @param uuid The UUID of the target GPU or MIG instance
+ @param device Reference in which to return the device handle or MIG device handle
+
+ Starting from NVML 5, this API causes NVML to initialize the target GPU.
+ NVML may initialize additional GPUs as it searches for the target GPU.
+
+ @return
+ - \ref NVML_SUCCESS if \a device has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a uuid is invalid or \a device is null
+ - \ref NVML_ERROR_NOT_FOUND if \a uuid does not match a valid device on the system
+ - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables
+ - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs
+ - \ref NVML_ERROR_GPU_IS_LOST if any GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetUUID*/
+ fn nvmlDeviceGetHandleByUUID(
+ uuid: *const ::core::ffi::c_char,
+ device: *mut cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Acquire the handle for a particular device, based on its PCI bus id.
+
+ For all products.
+
+ This value corresponds to the nvmlPciInfo_t::busId returned by \ref nvmlDeviceGetPciInfo_v3().
+
+ Starting from NVML 5, this API causes NVML to initialize the target GPU.
+ NVML may initialize additional GPUs if:
+ - The target GPU is an SLI slave
+
+ \note NVML 4.304 and older version of nvmlDeviceGetHandleByPciBusId"_v1" returns NVML_ERROR_NOT_FOUND
+ instead of NVML_ERROR_NO_PERMISSION.
+
+ @param pciBusId The PCI bus id of the target GPU
+ @param device Reference in which to return the device handle
+
+ @return
+ - \ref NVML_SUCCESS if \a device has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciBusId is invalid or \a device is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a pciBusId does not match a valid device on the system
+ - \ref NVML_ERROR_INSUFFICIENT_POWER if the attached device has improperly attached external power cables
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to talk to this device
+ - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetHandleByPciBusId_v2(
+ pciBusId: *const ::core::ffi::c_char,
+ device: *mut cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the name of this device.
+
+ For all products.
+
+ The name is an alphanumeric string that denotes a particular product, e.g. Tesla &tm; C2070. It will not
+ exceed 96 characters in length (including the NULL terminator). See \ref
+ nvmlConstants::NVML_DEVICE_NAME_V2_BUFFER_SIZE.
+
+ When used with MIG device handles the API returns MIG device names which can be used to identify devices
+ based on their attributes.
+
+ @param device The identifier of the target device
+ @param name Reference in which to return the product name
+ @param length The maximum allowed length of the string returned in \a name
+
+ @return
+ - \ref NVML_SUCCESS if \a name has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a name is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetName(
+ device: cuda_types::nvml::nvmlDevice_t,
+ name: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the brand of this device.
+
+ For all products.
+
+ The type is a member of \ref nvmlBrandType_t defined above.
+
+ @param device The identifier of the target device
+ @param type Reference in which to return the product brand type
+
+ @return
+ - \ref NVML_SUCCESS if \a name has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a type is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetBrand(
+ device: cuda_types::nvml::nvmlDevice_t,
+ type_: *mut cuda_types::nvml::nvmlBrandType_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the NVML index of this device.
+
+ For all products.
+
+ Valid indices are derived from the \a accessibleDevices count returned by
+ \ref nvmlDeviceGetCount_v2(). For example, if \a accessibleDevices is 2 the valid indices
+ are 0 and 1, corresponding to GPU 0 and GPU 1.
+
+ The order in which NVML enumerates devices has no guarantees of consistency between reboots. For that reason it
+ is recommended that devices be looked up by their PCI ids or GPU UUID. See
+ \ref nvmlDeviceGetHandleByPciBusId_v2() and \ref nvmlDeviceGetHandleByUUID().
+
+ When used with MIG device handles this API returns indices that can be
+ passed to \ref nvmlDeviceGetMigDeviceHandleByIndex to retrieve an identical handle.
+ MIG device indices are unique within a device.
+
+ Note: The NVML index may not correlate with other APIs, such as the CUDA device index.
+
+ @param device The identifier of the target device
+ @param index Reference in which to return the NVML index of the device
+
+ @return
+ - \ref NVML_SUCCESS if \a index has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a index is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetHandleByIndex()
+ @see nvmlDeviceGetCount()*/
+ fn nvmlDeviceGetIndex(
+ device: cuda_types::nvml::nvmlDevice_t,
+ index: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the globally unique board serial number associated with this device's board.
+
+ For all products with an inforom.
+
+ The serial number is an alphanumeric string that will not exceed 30 characters (including the NULL terminator).
+ This number matches the serial number tag that is physically attached to the board. See \ref
+ nvmlConstants::NVML_DEVICE_SERIAL_BUFFER_SIZE.
+
+ @param device The identifier of the target device
+ @param serial Reference in which to return the board/module serial number
+ @param length The maximum allowed length of the string returned in \a serial
+
+ @return
+ - \ref NVML_SUCCESS if \a serial has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a serial is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetSerial(
+ device: cuda_types::nvml::nvmlDevice_t,
+ serial: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ fn nvmlDeviceGetModuleId(
+ device: cuda_types::nvml::nvmlDevice_t,
+ moduleId: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the Device's C2C Mode information
+
+ @param device The identifier of the target device
+ @param c2cModeInfo Output struct containing the device's C2C Mode info
+
+ @return
+ - \ref NVML_SUCCESS if the C2C Mode Info query is successful
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a c2cModeInfo is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetC2cModeInfoV(
+ device: cuda_types::nvml::nvmlDevice_t,
+ c2cModeInfo: *mut cuda_types::nvml::nvmlC2cModeInfo_v1_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves an array of unsigned ints (sized to nodeSetSize) of bitmasks with
+ the ideal memory affinity within node or socket for the device.
+ For example, if NUMA nodes 0 and 1 are ideal within the socket for the device and nodeSetSize == 1,
+ result[0] = 0x3
+
+ \note If requested scope is not applicable to the target topology, the API
+ will fall back to reporting the memory affinity for the immediate non-I/O
+ ancestor of the device.
+
+ For Kepler &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device The identifier of the target device
+ @param nodeSetSize The size of the nodeSet array that is safe to access
+ @param nodeSet Array reference in which to return a bitmask of NODEs, 64 NODEs per
+ unsigned long on 64-bit machines, 32 on 32-bit machines
+ @param scope Scope that changes the default behavior
+
+ @return
+ - \ref NVML_SUCCESS if \a NUMA node Affinity has been filled
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, nodeSetSize == 0, nodeSet is NULL or scope is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMemoryAffinity(
+ device: cuda_types::nvml::nvmlDevice_t,
+ nodeSetSize: ::core::ffi::c_uint,
+ nodeSet: *mut ::core::ffi::c_ulong,
+ scope: cuda_types::nvml::nvmlAffinityScope_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves an array of unsigned ints (sized to cpuSetSize) of bitmasks with the
+ ideal CPU affinity within node or socket for the device.
+ For example, if processors 0, 1, 32, and 33 are ideal for the device and cpuSetSize == 2,
+ result[0] = 0x3, result[1] = 0x3
+
+ \note If requested scope is not applicable to the target topology, the API
+ will fall back to reporting the CPU affinity for the immediate non-I/O
+ ancestor of the device.
+
+ For Kepler &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device The identifier of the target device
+ @param cpuSetSize The size of the cpuSet array that is safe to access
+ @param cpuSet Array reference in which to return a bitmask of CPUs, 64 CPUs per
+ unsigned long on 64-bit machines, 32 on 32-bit machines
+ @param scope Scope that changes the default behavior
+
+ @return
+ - \ref NVML_SUCCESS if \a cpuAffinity has been filled
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, cpuSetSize == 0, cpuSet is NULL or scope is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetCpuAffinityWithinScope(
+ device: cuda_types::nvml::nvmlDevice_t,
+ cpuSetSize: ::core::ffi::c_uint,
+ cpuSet: *mut ::core::ffi::c_ulong,
+ scope: cuda_types::nvml::nvmlAffinityScope_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves an array of unsigned ints (sized to cpuSetSize) of bitmasks with the ideal CPU affinity for the device
+ For example, if processors 0, 1, 32, and 33 are ideal for the device and cpuSetSize == 2,
+ result[0] = 0x3, result[1] = 0x3
+ This is equivalent to calling \ref nvmlDeviceGetCpuAffinityWithinScope with \ref NVML_AFFINITY_SCOPE_NODE.
+
+ For Kepler &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device The identifier of the target device
+ @param cpuSetSize The size of the cpuSet array that is safe to access
+ @param cpuSet Array reference in which to return a bitmask of CPUs, 64 CPUs per
+ unsigned long on 64-bit machines, 32 on 32-bit machines
+
+ @return
+ - \ref NVML_SUCCESS if \a cpuAffinity has been filled
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, cpuSetSize == 0, or cpuSet is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetCpuAffinity(
+ device: cuda_types::nvml::nvmlDevice_t,
+ cpuSetSize: ::core::ffi::c_uint,
+ cpuSet: *mut ::core::ffi::c_ulong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
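The affinity queries above return bitmask words in which bit i of word w marks CPU (or NUMA node) number w * bits_per_word + i; the header's own example (ideal CPUs 0, 1, 32 and 33 with 32-bit words) therefore comes out as [0x3, 0x3]. A small decoder as a worked example:

    // Decode affinity bitmask words into the indices of their set bits.
    fn decode_affinity(words: &[u64], bits_per_word: u32) -> Vec<u32> {
        let mut ids = Vec::new();
        for (w, &word) in words.iter().enumerate() {
            for bit in 0..bits_per_word {
                if word & (1u64 << bit) != 0 {
                    ids.push(w as u32 * bits_per_word + bit);
                }
            }
        }
        ids
    }

    fn main() {
        // Matches the documentation example: CPUs 0, 1, 32 and 33 with 32 CPUs per word.
        assert_eq!(decode_affinity(&[0x3, 0x3], 32), vec![0, 1, 32, 33]);
    }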
+ #[must_use]
+ /** Sets the ideal affinity for the calling thread and device using the guidelines
+ given in nvmlDeviceGetCpuAffinity(). Note, this is a change as of version 8.0.
+ Older versions set the affinity for a calling process and all children.
+ Currently supports up to 1024 processors.
+
+ For Kepler &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device The identifier of the target device
+
+ @return
+ - \ref NVML_SUCCESS if the calling process has been successfully bound
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetCpuAffinity(
+ device: cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Clear all affinity bindings for the calling thread. Note, this is a change as of version
+ 8.0 as older versions cleared the affinity for a calling process and all children.
+
+ For Kepler &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device The identifier of the target device
+
+ @return
+ - \ref NVML_SUCCESS if the calling process has been successfully unbound
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceClearCpuAffinity(
+ device: cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get the NUMA node of the given GPU device.
+ This only applies to platforms where the GPUs are NUMA nodes.
+
+ @param[in] device The device handle
+ @param[out] node NUMA node ID of the device
+
+ @returns
+ - \ref NVML_SUCCESS if the NUMA node is retrieved successfully
+ - \ref NVML_ERROR_NOT_SUPPORTED if request is not supported on the current platform
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a node is invalid*/
+ fn nvmlDeviceGetNumaNodeId(
+ device: cuda_types::nvml::nvmlDevice_t,
+ node: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /// @}
+ fn nvmlDeviceGetTopologyCommonAncestor(
+ device1: cuda_types::nvml::nvmlDevice_t,
+ device2: cuda_types::nvml::nvmlDevice_t,
+ pathInfo: *mut cuda_types::nvml::nvmlGpuTopologyLevel_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the set of GPUs that are nearest to a given device at a specific interconnectivity level
+ For all products.
+ Supported on Linux only.
+
+ @param device The identifier of the first device
+ @param level The \ref nvmlGpuTopologyLevel_t level to search for other GPUs
+ @param count When zero, is set to the number of matching GPUs such that \a deviceArray
+ can be malloc'd. When non-zero, \a deviceArray will be filled with \a count
+ number of device handles.
+ @param deviceArray An array of device handles for GPUs found at \a level
+
+ @return
+ - \ref NVML_SUCCESS if \a deviceArray or \a count (if initially zero) has been set
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a level, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature
+ - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery*/
+ fn nvmlDeviceGetTopologyNearestGpus(
+ device: cuda_types::nvml::nvmlDevice_t,
+ level: cuda_types::nvml::nvmlGpuTopologyLevel_t,
+ count: *mut ::core::ffi::c_uint,
+ deviceArray: *mut cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the status for a given p2p capability index between a given pair of GPU
+
+ @param device1 The first device
+ @param device2 The second device
+ @param p2pIndex p2p Capability Index being looked for between \a device1 and \a device2
+ @param p2pStatus Reference in which to return the status of the \a p2pIndex
+ between \a device1 and \a device2
+ @return
+ - \ref NVML_SUCCESS if \a p2pStatus has been populated
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device1 or \a device2 or \a p2pIndex is invalid or \a p2pStatus is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetP2PStatus(
+ device1: cuda_types::nvml::nvmlDevice_t,
+ device2: cuda_types::nvml::nvmlDevice_t,
+ p2pIndex: cuda_types::nvml::nvmlGpuP2PCapsIndex_t,
+ p2pStatus: *mut cuda_types::nvml::nvmlGpuP2PStatus_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the globally unique immutable UUID associated with this device, as a 5 part hexadecimal string,
+ that augments the immutable, board serial identifier.
+
+ For all products.
+
+ The UUID is a globally unique identifier. It is the only available identifier for pre-Fermi-architecture products.
+ It does NOT correspond to any identifier printed on the board. It will not exceed 96 characters in length
+ (including the NULL terminator). See \ref nvmlConstants::NVML_DEVICE_UUID_V2_BUFFER_SIZE.
+
+ When used with MIG device handles the API returns globally unique UUIDs which can be used to identify MIG
+ devices across both GPU and MIG devices. UUIDs are immutable for the lifetime of a MIG device.
+
+ @param device The identifier of the target device
+ @param uuid Reference in which to return the GPU UUID
+ @param length The maximum allowed length of the string returned in \a uuid
+
+ @return
+ - \ref NVML_SUCCESS if \a uuid has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a uuid is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetUUID(
+ device: cuda_types::nvml::nvmlDevice_t,
+ uuid: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves minor number for the device. The minor number for the device is such that the Nvidia device node file for
+ each GPU will have the form /dev/nvidia[minor number].
+
+ For all products.
+ Supported only for Linux
+
+ @param device The identifier of the target device
+ @param minorNumber Reference in which to return the minor number for the device
+ @return
+ - \ref NVML_SUCCESS if the minor number is successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minorNumber is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMinorNumber(
+ device: cuda_types::nvml::nvmlDevice_t,
+ minorNumber: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the device board part number which is programmed into the board's InfoROM
+
+ For all products.
+
+ @param device Identifier of the target device
+ @param partNumber Reference to the buffer to return
+ @param length Length of the buffer reference
+
+ @return
+ - \ref NVML_SUCCESS if \a partNumber has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_NOT_SUPPORTED if the needed VBIOS fields have not been filled
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a partNumber is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetBoardPartNumber(
+ device: cuda_types::nvml::nvmlDevice_t,
+ partNumber: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the version information for the device's infoROM object.
+
+ For all products with an inforom.
+
+ Fermi and higher parts have non-volatile on-board memory for persisting device info, such as aggregate
+ ECC counts. The version of the data structures in this memory may change from time to time. It will not
+ exceed 16 characters in length (including the NULL terminator).
+ See \ref nvmlConstants::NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE.
+
+ See \ref nvmlInforomObject_t for details on the available infoROM objects.
+
+ @param device The identifier of the target device
+ @param object The target infoROM object
+ @param version Reference in which to return the infoROM version
+ @param length The maximum allowed length of the string returned in \a version
+
+ @return
+ - \ref NVML_SUCCESS if \a version has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have an infoROM
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetInforomImageVersion*/
+ fn nvmlDeviceGetInforomVersion(
+ device: cuda_types::nvml::nvmlDevice_t,
+ object: cuda_types::nvml::nvmlInforomObject_t,
+ version: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the global infoROM image version
+
+ For all products with an inforom.
+
+ The image version, just like the VBIOS version, uniquely describes the exact version of the infoROM flashed on the board,
+ in contrast to the infoROM object version, which is only an indicator of supported features.
+ Version string will not exceed 16 characters in length (including the NULL terminator).
+ See \ref nvmlConstants::NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE.
+
+ @param device The identifier of the target device
+ @param version Reference in which to return the infoROM image version
+ @param length The maximum allowed length of the string returned in \a version
+
+ @return
+ - \ref NVML_SUCCESS if \a version has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have an infoROM
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetInforomVersion*/
+ fn nvmlDeviceGetInforomImageVersion(
+ device: cuda_types::nvml::nvmlDevice_t,
+ version: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the checksum of the configuration stored in the device's infoROM.
+
+ For all products with an inforom.
+
+ Can be used to make sure that two GPUs have the exact same configuration.
+ The current checksum takes into account the configuration stored in the PWR and ECC infoROM objects.
+ The checksum can change between driver releases or when the user changes the configuration (e.g. disabling/enabling ECC).
+
+ @param device The identifier of the target device
+ @param checksum Reference in which to return the infoROM configuration checksum
+
+ @return
+ - \ref NVML_SUCCESS if \a checksum has been set
+ - \ref NVML_ERROR_CORRUPTED_INFOROM if the device's checksum couldn't be retrieved due to infoROM corruption
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a checksum is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetInforomConfigurationChecksum(
+ device: cuda_types::nvml::nvmlDevice_t,
+ checksum: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Reads the infoROM from the flash and verifies the checksums.
+
+ For all products with an inforom.
+
+ @param device The identifier of the target device
+
+ @return
+ - \ref NVML_SUCCESS if infoROM is not corrupted
+ - \ref NVML_ERROR_CORRUPTED_INFOROM if the device's infoROM is corrupted
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceValidateInforom(
+ device: cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the timestamp and the duration of the last flush of the BBX (blackbox) infoROM object during the current run.
+
+ For all products with an inforom.
+
+ @param device The identifier of the target device
+ @param timestamp The start timestamp of the last BBX Flush
+ @param durationUs The duration (us) of the last BBX Flush
+
+ @return
+ - \ref NVML_SUCCESS if \a timestamp and \a durationUs are successfully retrieved
+ - \ref NVML_ERROR_NOT_READY if the BBX object has not been flushed yet
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have an infoROM
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetInforomVersion*/
+ fn nvmlDeviceGetLastBBXFlushTime(
+ device: cuda_types::nvml::nvmlDevice_t,
+ timestamp: *mut ::core::ffi::c_ulonglong,
+ durationUs: *mut ::core::ffi::c_ulong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the display mode for the device.
+
+ For all products.
+
+ This method indicates whether a physical display (e.g. monitor) is currently connected to
+ any of the device's connectors.
+
+ See \ref nvmlEnableState_t for details on allowed modes.
+
+ @param device The identifier of the target device
+ @param display Reference in which to return the display mode
+
+ @return
+ - \ref NVML_SUCCESS if \a display has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a display is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetDisplayMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ display: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the display active state for the device.
+
+ For all products.
+
+ This method indicates whether a display is initialized on the device.
+ For example whether X Server is attached to this device and has allocated memory for the screen.
+
+ Display can be active even when no monitor is physically attached.
+
+ See \ref nvmlEnableState_t for details on allowed modes.
+
+ @param device The identifier of the target device
+ @param isActive Reference in which to return the display active state
+
+ @return
+ - \ref NVML_SUCCESS if \a isActive has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isActive is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetDisplayActive(
+ device: cuda_types::nvml::nvmlDevice_t,
+ isActive: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the persistence mode associated with this device.
+
+ For all products.
+ For Linux only.
+
+ When driver persistence mode is enabled the driver software state is not torn down when the last
+ client disconnects. By default this feature is disabled.
+
+ See \ref nvmlEnableState_t for details on allowed modes.
+
+ @param device The identifier of the target device
+ @param mode Reference in which to return the current driver persistence mode
+
+ @return
+ - \ref NVML_SUCCESS if \a mode has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceSetPersistenceMode()*/
+ fn nvmlDeviceGetPersistenceMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ mode: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves PCI attributes of this device.
+
+ For all products.
+
+ See \ref nvmlPciInfoExt_t for details on the available PCI info.
+
+ @param device The identifier of the target device
+ @param pci Reference in which to return the PCI info
+
+ @return
+ - \ref NVML_SUCCESS if \a pci has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pci is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPciInfoExt(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pci: *mut cuda_types::nvml::nvmlPciInfoExt_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the PCI attributes of this device.
+
+ For all products.
+
+ See \ref nvmlPciInfo_t for details on the available PCI info.
+
+ @param device The identifier of the target device
+ @param pci Reference in which to return the PCI info
+
+ @return
+ - \ref NVML_SUCCESS if \a pci has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pci is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPciInfo_v3(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pci: *mut cuda_types::nvml::nvmlPciInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
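+ // Illustrative sketch, not part of the generated bindings: the struct-output pattern
+ // used by this call, assuming a valid `device` handle is in scope.
+ //
+ //     let mut pci = ::core::mem::MaybeUninit::<cuda_types::nvml::nvmlPciInfo_t>::uninit();
+ //     let status = unsafe { nvmlDeviceGetPciInfo_v3(device, pci.as_mut_ptr()) };
+ //     // Treat `pci` as initialized only if `status` signals success:
+ //     // let pci = unsafe { pci.assume_init() };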
+ #[must_use]
+ /** Retrieves the maximum PCIe link generation possible with this device and system
+
+ I.e., for a generation 2 PCIe device attached to a generation 1 PCIe bus, the max link generation this function will
+ report is generation 1.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param maxLinkGen Reference in which to return the max PCIe link generation
+
+ @return
+ - \ref NVML_SUCCESS if \a maxLinkGen has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a maxLinkGen is null
+ - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMaxPcieLinkGeneration(
+ device: cuda_types::nvml::nvmlDevice_t,
+ maxLinkGen: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the maximum PCIe link generation supported by this device
+
+ For Fermi &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param maxLinkGenDevice Reference in which to return the max PCIe link generation
+
+ @return
+ - \ref NVML_SUCCESS if \a maxLinkGenDevice has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a maxLinkGenDevice is null
+ - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetGpuMaxPcieLinkGeneration(
+ device: cuda_types::nvml::nvmlDevice_t,
+ maxLinkGenDevice: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the maximum PCIe link width possible with this device and system
+
+ I.e., for a device with a 16x PCIe bus width attached to an 8x PCIe system bus, this function will report
+ a max link width of 8.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param maxLinkWidth Reference in which to return the max PCIe link width
+
+ @return
+ - \ref NVML_SUCCESS if \a maxLinkWidth has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a maxLinkWidth is null
+ - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMaxPcieLinkWidth(
+ device: cuda_types::nvml::nvmlDevice_t,
+ maxLinkWidth: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current PCIe link generation
+
+ For Fermi &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param currLinkGen Reference in which to return the current PCIe link generation
+
+ @return
+ - \ref NVML_SUCCESS if \a currLinkGen has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a currLinkGen is null
+ - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetCurrPcieLinkGeneration(
+ device: cuda_types::nvml::nvmlDevice_t,
+ currLinkGen: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current PCIe link width
+
+ For Fermi &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param currLinkWidth Reference in which to return the current PCIe link width
+
+ @return
+ - \ref NVML_SUCCESS if \a currLinkWidth has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a currLinkWidth is null
+ - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetCurrPcieLinkWidth(
+ device: cuda_types::nvml::nvmlDevice_t,
+ currLinkWidth: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve PCIe utilization information.
+ This function queries a byte counter over a 20ms interval and thus reports the
+ PCIe throughput over that interval.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ This method is not supported in virtual machines running virtual GPU (vGPU).
+
+ @param device The identifier of the target device
+ @param counter The specific counter that should be queried \ref nvmlPcieUtilCounter_t
+ @param value Reference in which to return throughput in KB/s
+
+ @return
+ - \ref NVML_SUCCESS if \a value has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a counter is invalid, or \a value is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPcieThroughput(
+ device: cuda_types::nvml::nvmlDevice_t,
+ counter: cuda_types::nvml::nvmlPcieUtilCounter_t,
+ value: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the PCIe replay counter.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param value Reference in which to return the counter's value
+
+ @return
+ - \ref NVML_SUCCESS if \a value has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a value is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPcieReplayCounter(
+ device: cuda_types::nvml::nvmlDevice_t,
+ value: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current clock speeds for the device.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ See \ref nvmlClockType_t for details on available clock information.
+
+ @param device The identifier of the target device
+ @param type Identify which clock domain to query
+ @param clock Reference in which to return the clock speed in MHz
+
+ @return
+ - \ref NVML_SUCCESS if \a clock has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device cannot report the specified clock
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetClockInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ type_: cuda_types::nvml::nvmlClockType_t,
+ clock: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
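+ // Illustrative sketch, not part of the generated bindings: `clock_type` below is assumed
+ // to hold one of the nvmlClockType_t domain values (graphics, SM, memory, ...); the exact
+ // constant names are defined in cuda_types::nvml and not spelled out here.
+ //
+ //     let mut clock_mhz: ::core::ffi::c_uint = 0;
+ //     let status = unsafe { nvmlDeviceGetClockInfo(device, clock_type, &mut clock_mhz) };
+ //     // On success `clock_mhz` holds the current clock of that domain in MHz.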
+ #[must_use]
+ /** Retrieves the maximum clock speeds for the device.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ See \ref nvmlClockType_t for details on available clock information.
+
+ \note On GPUs from the Fermi family, current P0 clocks (reported by \ref nvmlDeviceGetClockInfo) can differ from max clocks
+ by a few MHz.
+
+ @param device The identifier of the target device
+ @param type Identify which clock domain to query
+ @param clock Reference in which to return the clock speed in MHz
+
+ @return
+ - \ref NVML_SUCCESS if \a clock has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device cannot report the specified clock
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMaxClockInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ type_: cuda_types::nvml::nvmlClockType_t,
+ clock: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the GPCCLK VF offset value
+ @param[in] device The identifier of the target device
+ @param[out] offset The retrieved GPCCLK VF offset value
+
+ @return
+ - \ref NVML_SUCCESS if \a offset has been successfully queried
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetGpcClkVfOffset(
+ device: cuda_types::nvml::nvmlDevice_t,
+ offset: *mut ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current setting of a clock that applications will use unless an overspec situation occurs.
+ Can be changed using \ref nvmlDeviceSetApplicationsClocks.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param clockType Identify which clock domain to query
+ @param clockMHz Reference in which to return the clock in MHz
+
+ @return
+ - \ref NVML_SUCCESS if \a clockMHz has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetApplicationsClock(
+ device: cuda_types::nvml::nvmlDevice_t,
+ clockType: cuda_types::nvml::nvmlClockType_t,
+ clockMHz: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the default applications clock that the GPU boots with or
+ defaults to after a \ref nvmlDeviceResetApplicationsClocks call.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param clockType Identify which clock domain to query
+ @param clockMHz Reference in which to return the default clock in MHz
+
+ @return
+ - \ref NVML_SUCCESS if \a clockMHz has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ \see nvmlDeviceGetApplicationsClock*/
+ fn nvmlDeviceGetDefaultApplicationsClock(
+ device: cuda_types::nvml::nvmlDevice_t,
+ clockType: cuda_types::nvml::nvmlClockType_t,
+ clockMHz: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the clock speed for the clock specified by the clock type and clock ID.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param clockType Identify which clock domain to query
+ @param clockId Identify which clock in the domain to query
+ @param clockMHz Reference in which to return the clock in MHz
+
+ @return
+ - \ref NVML_SUCCESS if \a clockMHz has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetClock(
+ device: cuda_types::nvml::nvmlDevice_t,
+ clockType: cuda_types::nvml::nvmlClockType_t,
+ clockId: cuda_types::nvml::nvmlClockId_t,
+ clockMHz: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the customer defined maximum boost clock speed specified by the given clock type.
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param clockType Identify which clock domain to query
+ @param clockMHz Reference in which to return the clock in MHz
+
+ @return
+ - \ref NVML_SUCCESS if \a clockMHz has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device or the \a clockType on this device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMaxCustomerBoostClock(
+ device: cuda_types::nvml::nvmlDevice_t,
+ clockType: cuda_types::nvml::nvmlClockType_t,
+ clockMHz: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the list of possible memory clocks that can be used as an argument for \ref nvmlDeviceSetApplicationsClocks.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param count Reference in which to provide the \a clocksMHz array size, and
+ to return the number of elements
+ @param clocksMHz Reference in which to return the clock in MHz
+
+ @return
+ - \ref NVML_SUCCESS if \a count and \a clocksMHz have been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a count is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to the number of
+ required elements)
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceSetApplicationsClocks
+ @see nvmlDeviceGetSupportedGraphicsClocks*/
+ fn nvmlDeviceGetSupportedMemoryClocks(
+ device: cuda_types::nvml::nvmlDevice_t,
+ count: *mut ::core::ffi::c_uint,
+ clocksMHz: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the list of possible graphics clocks that can be used as an argument for \ref nvmlDeviceSetApplicationsClocks.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param memoryClockMHz Memory clock for which to return possible graphics clocks
+ @param count Reference in which to provide the \a clocksMHz array size, and
+ to return the number of elements
+ @param clocksMHz Reference in which to return the clocks in MHz
+
+ @return
+ - \ref NVML_SUCCESS if \a count and \a clocksMHz have been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_NOT_FOUND if the specified \a memoryClockMHz is not a supported frequency
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceSetApplicationsClocks
+ @see nvmlDeviceGetSupportedMemoryClocks*/
+ fn nvmlDeviceGetSupportedGraphicsClocks(
+ device: cuda_types::nvml::nvmlDevice_t,
+ memoryClockMHz: ::core::ffi::c_uint,
+ count: *mut ::core::ffi::c_uint,
+ clocksMHz: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
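+ // Illustrative sketch, not part of the generated bindings: the count-in/count-out pattern
+ // shared by the two "supported clocks" queries above. The probe behaviour
+ // (NVML_ERROR_INSUFFICIENT_SIZE with `count` updated) is taken from the docs; std's vec!
+ // is used only for brevity.
+ //
+ //     let mut count: ::core::ffi::c_uint = 0;
+ //     let mut probe: ::core::ffi::c_uint = 0;
+ //     let _ = unsafe { nvmlDeviceGetSupportedMemoryClocks(device, &mut count, &mut probe) };
+ //     let mut mem_clocks = vec![0u32; count as usize];
+ //     let status = unsafe {
+ //         nvmlDeviceGetSupportedMemoryClocks(device, &mut count, mem_clocks.as_mut_ptr())
+ //     };
+ //     // Each entry of `mem_clocks` can then be passed to nvmlDeviceGetSupportedGraphicsClocks
+ //     // as `memoryClockMHz`, using the same two-call pattern for its `count`/`clocksMHz` pair.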
+ #[must_use]
+ /** Retrieve the current state of Auto Boosted clocks on a device and store it in \a isEnabled
+
+ For Kepler &tm; or newer fully supported devices.
+
+ Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates
+ to maximize performance as thermal limits allow.
+
+ On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks.
+ Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost
+ behavior.
+
+ @param device The identifier of the target device
+ @param isEnabled Where to store the current state of Auto Boosted clocks of the target device
+ @param defaultIsEnabled Where to store the default Auto Boosted clocks behavior of the target device that the device will
+ revert to when no applications are using the GPU
+
+ @return
+ - \ref NVML_SUCCESS if \a isEnabled has been set with the Auto Boosted clocks state of \a device
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isEnabled is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+*/
+ fn nvmlDeviceGetAutoBoostedClocksEnabled(
+ device: cuda_types::nvml::nvmlDevice_t,
+ isEnabled: *mut cuda_types::nvml::nvmlEnableState_t,
+ defaultIsEnabled: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
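+ // Illustrative sketch, not part of the generated bindings: both outputs are
+ // nvmlEnableState_t values, so MaybeUninit slots are handed to the call and read back
+ // only if `status` signals success.
+ //
+ //     let mut is_enabled = ::core::mem::MaybeUninit::uninit();
+ //     let mut default_enabled = ::core::mem::MaybeUninit::uninit();
+ //     let status = unsafe {
+ //         nvmlDeviceGetAutoBoostedClocksEnabled(
+ //             device,
+ //             is_enabled.as_mut_ptr(),
+ //             default_enabled.as_mut_ptr(),
+ //         )
+ //     };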
+ #[must_use]
+ /** Retrieves the intended operating speed of the device's fan.
+
+ Note: The reported speed is the intended fan speed. If the fan is physically blocked and unable to spin, the
+ output will not match the actual fan speed.
+
+ For all discrete products with dedicated fans.
+
+ The fan speed is expressed as a percentage of the product's maximum noise tolerance fan speed.
+ This value may exceed 100% in certain cases.
+
+ @param device The identifier of the target device
+ @param speed Reference in which to return the fan speed percentage
+
+ @return
+ - \ref NVML_SUCCESS if \a speed has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a speed is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a fan
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetFanSpeed(
+ device: cuda_types::nvml::nvmlDevice_t,
+ speed: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the intended operating speed of the device's specified fan.
+
+ Note: The reported speed is the intended fan speed. If the fan is physically blocked and unable to spin, the
+ output will not match the actual fan speed.
+
+ For all discrete products with dedicated fans.
+
+ The fan speed is expressed as a percentage of the product's maximum noise tolerance fan speed.
+ This value may exceed 100% in certain cases.
+
+ @param device The identifier of the target device
+ @param fan The index of the target fan, zero indexed.
+ @param speed Reference in which to return the fan speed percentage
+
+ @return
+ - \ref NVML_SUCCESS if \a speed has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a fan is not an acceptable index, or \a speed is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a fan or is newer than Maxwell
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetFanSpeed_v2(
+ device: cuda_types::nvml::nvmlDevice_t,
+ fan: ::core::ffi::c_uint,
+ speed: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the intended target speed of the device's specified fan.
+
+ Normally, the driver dynamically adjusts the fan based on
+ the needs of the GPU. But when the user sets the fan speed using nvmlDeviceSetFanSpeed_v2,
+ the driver will attempt to make the fan achieve that setting. The actual current speed
+ of the fan is reported by nvmlDeviceGetFanSpeed_v2.
+
+ For all discrete products with dedicated fans.
+
+ The fan speed is expressed as a percentage of the product's maximum noise tolerance fan speed.
+ This value may exceed 100% in certain cases.
+
+ @param device The identifier of the target device
+ @param fan The index of the target fan, zero indexed.
+ @param targetSpeed Reference in which to return the fan speed percentage
+
+ @return
+ - \ref NVML_SUCCESS if \a targetSpeed has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a fan is not an acceptable index, or \a targetSpeed is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a fan or is newer than Maxwell
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetTargetFanSpeed(
+ device: cuda_types::nvml::nvmlDevice_t,
+ fan: ::core::ffi::c_uint,
+ targetSpeed: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the min and max fan speed that user can set for the GPU fan.
+
+ For all cuda-capable discrete products with fans.
+
+ @param device The identifier of the target device
+ @param minSpeed The minimum speed allowed to set
+ @param maxSpeed The maximum speed allowed to set
+
+ @return
+ - \ref NVML_SUCCESS if \a minSpeed and \a maxSpeed have been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ (doesn't have fans)
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMinMaxFanSpeed(
+ device: cuda_types::nvml::nvmlDevice_t,
+ minSpeed: *mut ::core::ffi::c_uint,
+ maxSpeed: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Gets current fan control policy.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ For all cuda-capable discrete products with fans.
+
+ @param device The identifier of the target \a device
+ @param fan The index of the target fan, zero indexed
+ @param policy Reference in which to return the fan control \a policy
+
+ @return
+ - \ref NVML_SUCCESS if \a policy has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a policy is null or the \a fan given doesn't reference
+ a fan that exists.
+ - \ref NVML_ERROR_NOT_SUPPORTED if the \a device is older than Maxwell
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetFanControlPolicy_v2(
+ device: cuda_types::nvml::nvmlDevice_t,
+ fan: ::core::ffi::c_uint,
+ policy: *mut cuda_types::nvml::nvmlFanControlPolicy_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the number of fans on the device.
+
+ For all discrete products with dedicated fans.
+
+ @param device The identifier of the target device
+ @param numFans The number of fans
+
+ @return
+ - \ref NVML_SUCCESS if \a fan number query was successful
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a numFans is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a fan
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetNumFans(
+ device: cuda_types::nvml::nvmlDevice_t,
+ numFans: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
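+ // Illustrative sketch, not part of the generated bindings: enumerating fans with the
+ // declaration above and querying each one through nvmlDeviceGetFanSpeed_v2.
+ //
+ //     let mut num_fans: ::core::ffi::c_uint = 0;
+ //     let _ = unsafe { nvmlDeviceGetNumFans(device, &mut num_fans) };
+ //     for fan in 0..num_fans {
+ //         let mut speed: ::core::ffi::c_uint = 0;
+ //         let status = unsafe { nvmlDeviceGetFanSpeed_v2(device, fan, &mut speed) };
+ //         // `speed` is a percentage of the maximum noise-tolerance fan speed and may exceed 100.
+ //     }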
+ #[must_use]
+ /** Retrieves the current temperature readings for the device, in degrees C.
+
+ For all products.
+
+ See \ref nvmlTemperatureSensors_t for details on available temperature sensors.
+
+ @param device The identifier of the target device
+ @param sensorType Flag that indicates which sensor reading to retrieve
+ @param temp Reference in which to return the temperature reading
+
+ @return
+ - \ref NVML_SUCCESS if \a temp has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a sensorType is invalid or \a temp is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have the specified sensor
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetTemperature(
+ device: cuda_types::nvml::nvmlDevice_t,
+ sensorType: cuda_types::nvml::nvmlTemperatureSensors_t,
+ temp: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
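+ // Illustrative sketch, not part of the generated bindings: `sensor` below is assumed to
+ // hold the nvmlTemperatureSensors_t value for the desired sensor (e.g. the on-die GPU
+ // sensor); the reading comes back in degrees Celsius.
+ //
+ //     let mut temp_c: ::core::ffi::c_uint = 0;
+ //     let status = unsafe { nvmlDeviceGetTemperature(device, sensor, &mut temp_c) };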
+ #[must_use]
+ /** Retrieves the temperature threshold for the GPU with the specified threshold type in degrees C.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ See \ref nvmlTemperatureThresholds_t for details on available temperature thresholds.
+
+ Note: This API is no longer the preferred interface for retrieving the following temperature thresholds
+ on Ada and later architectures: NVML_TEMPERATURE_THRESHOLD_SHUTDOWN, NVML_TEMPERATURE_THRESHOLD_SLOWDOWN,
+ NVML_TEMPERATURE_THRESHOLD_MEM_MAX and NVML_TEMPERATURE_THRESHOLD_GPU_MAX.
+
+ Support for reading these temperature thresholds for Ada and later architectures will be removed from this
+ API in future releases. Please use \ref nvmlDeviceGetFieldValues with NVML_FI_DEV_TEMPERATURE_* fields to retrieve
+ temperature thresholds on these architectures.
+
+ @param device The identifier of the target device
+ @param thresholdType The type of threshold value queried
+ @param temp Reference in which to return the temperature reading
+ @return
+ - \ref NVML_SUCCESS if \a temp has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a thresholdType is invalid or \a temp is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a temperature sensor or is unsupported
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetTemperatureThreshold(
+ device: cuda_types::nvml::nvmlDevice_t,
+ thresholdType: cuda_types::nvml::nvmlTemperatureThresholds_t,
+ temp: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Used to execute a list of thermal system instructions.
+
+ @param device The identifier of the target device
+ @param sensorIndex The index of the thermal sensor
+ @param pThermalSettings Reference in which to return the thermal sensor information
+
+ @return
+ - \ref NVML_SUCCESS if \a pThermalSettings has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pThermalSettings is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetThermalSettings(
+ device: cuda_types::nvml::nvmlDevice_t,
+ sensorIndex: ::core::ffi::c_uint,
+ pThermalSettings: *mut cuda_types::nvml::nvmlGpuThermalSettings_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current performance state for the device.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ See \ref nvmlPstates_t for details on allowed performance states.
+
+ @param device The identifier of the target device
+ @param pState Reference in which to return the performance state reading
+
+ @return
+ - \ref NVML_SUCCESS if \a pState has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pState is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPerformanceState(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pState: *mut cuda_types::nvml::nvmlPstates_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves current clocks event reasons.
+
+ For all fully supported products.
+
+ \note More than one bit can be enabled at the same time. Multiple reasons can be affecting clocks at once.
+
+ @param device The identifier of the target device
+ @param clocksEventReasons Reference in which to return bitmask of active clocks event
+ reasons
+
+ @return
+ - \ref NVML_SUCCESS if \a clocksEventReasons has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clocksEventReasons is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlClocksEventReasons
+ @see nvmlDeviceGetSupportedClocksEventReasons*/
+ fn nvmlDeviceGetCurrentClocksEventReasons(
+ device: cuda_types::nvml::nvmlDevice_t,
+ clocksEventReasons: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
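+ // Illustrative sketch, not part of the generated bindings: the result is a bitmask, so
+ // individual reasons are tested with bitwise AND against the nvmlClocksEventReason* mask
+ // constants (names not spelled out here).
+ //
+ //     let mut reasons: ::core::ffi::c_ulonglong = 0;
+ //     let status = unsafe { nvmlDeviceGetCurrentClocksEventReasons(device, &mut reasons) };
+ //     // e.g.: let reason_active = reasons & some_reason_mask != 0;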
+ #[must_use]
+ /// @deprecated Use \ref nvmlDeviceGetCurrentClocksEventReasons instead
+ fn nvmlDeviceGetCurrentClocksThrottleReasons(
+ device: cuda_types::nvml::nvmlDevice_t,
+ clocksThrottleReasons: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves bitmask of supported clocks event reasons that can be returned by
+ \ref nvmlDeviceGetCurrentClocksEventReasons
+
+ For all fully supported products.
+
+ This method is not supported in virtual machines running virtual GPU (vGPU).
+
+ @param device The identifier of the target device
+ @param supportedClocksEventReasons Reference in which to return bitmask of supported
+ clocks event reasons
+
+ @return
+ - \ref NVML_SUCCESS if \a supportedClocksEventReasons has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a supportedClocksEventReasons is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlClocksEventReasons
+ @see nvmlDeviceGetCurrentClocksEventReasons*/
+ fn nvmlDeviceGetSupportedClocksEventReasons(
+ device: cuda_types::nvml::nvmlDevice_t,
+ supportedClocksEventReasons: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /// @deprecated Use \ref nvmlDeviceGetSupportedClocksEventReasons instead
+ fn nvmlDeviceGetSupportedClocksThrottleReasons(
+ device: cuda_types::nvml::nvmlDevice_t,
+ supportedClocksThrottleReasons: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Deprecated: Use \ref nvmlDeviceGetPerformanceState. This function exposes an incorrect generalization.
+
+ Retrieve the current performance state for the device.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ See \ref nvmlPstates_t for details on allowed performance states.
+
+ @param device The identifier of the target device
+ @param pState Reference in which to return the performance state reading
+
+ @return
+ - \ref NVML_SUCCESS if \a pState has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pState is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPowerState(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pState: *mut cuda_types::nvml::nvmlPstates_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve performance monitor samples from the associated subdevice.
+
+ @param device
+ @param pDynamicPstatesInfo
+
+ @return
+ - \ref NVML_SUCCESS if \a pDynamicPstatesInfo has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pDynamicPstatesInfo is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetDynamicPstatesInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pDynamicPstatesInfo: *mut cuda_types::nvml::nvmlGpuDynamicPstatesInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the MemClk (Memory Clock) VF offset value.
+ @param[in] device The identifier of the target device
+ @param[out] offset The retrieved MemClk VF offset value
+
+ @return
+ - \ref NVML_SUCCESS if \a offset has been successfully queried
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMemClkVfOffset(
+ device: cuda_types::nvml::nvmlDevice_t,
+ offset: *mut ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve min and max clocks of some clock domain for a given PState
+
+ @param device The identifier of the target device
+ @param type Clock domain
+ @param pstate PState to query
+ @param minClockMHz Reference in which to return min clock frequency
+ @param maxClockMHz Reference in which to return max clock frequency
+
+ @return
+ - \ref NVML_SUCCESS if everything worked
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a type or \a pstate are invalid or both
+ \a minClockMHz and \a maxClockMHz are NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature*/
+ fn nvmlDeviceGetMinMaxClockOfPState(
+ device: cuda_types::nvml::nvmlDevice_t,
+ type_: cuda_types::nvml::nvmlClockType_t,
+ pstate: cuda_types::nvml::nvmlPstates_t,
+ minClockMHz: *mut ::core::ffi::c_uint,
+ maxClockMHz: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get all supported Performance States (P-States) for the device.
+
+ The returned array will contain a contiguous list of valid P-States supported by
+ the device. If the number of supported P-States is fewer than the size of the array
+ supplied, the missing elements will contain \a NVML_PSTATE_UNKNOWN.
+
+ The number of elements in the returned list will never exceed \a NVML_MAX_GPU_PERF_PSTATES.
+
+ @param device The identifier of the target device
+ @param pstates Container to return the list of performance states
+ supported by device
+ @param size Size of the supplied \a pstates array in bytes
+
+ @return
+ - \ref NVML_SUCCESS if \a pstates array has been retrieved
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if the container supplied was not large enough to
+ hold the resulting list
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a pstates is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support performance state readings
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetSupportedPerformanceStates(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pstates: *mut cuda_types::nvml::nvmlPstates_t,
+ size: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
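+ // Illustrative sketch, not part of the generated bindings: note that `size` is in bytes,
+ // not elements. The array length 16 stands in for NVML_MAX_GPU_PERF_PSTATES, and
+ // zero-initialization is used only to have defined contents before the call.
+ //
+ //     let mut pstates: [cuda_types::nvml::nvmlPstates_t; 16] = unsafe { ::core::mem::zeroed() };
+ //     let size_bytes = ::core::mem::size_of_val(&pstates) as ::core::ffi::c_uint;
+ //     let status = unsafe {
+ //         nvmlDeviceGetSupportedPerformanceStates(device, pstates.as_mut_ptr(), size_bytes)
+ //     };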
+ #[must_use]
+ /** Retrieve the GPCCLK min max VF offset value.
+ @param[in] device The identifier of the target device
+ @param[out] minOffset The retrieved GPCCLK VF min offset value
+ @param[out] maxOffset The retrieved GPCCLK VF max offset value
+
+ @return
+ - \ref NVML_SUCCESS if \a offset has been successfully queried
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetGpcClkMinMaxVfOffset(
+ device: cuda_types::nvml::nvmlDevice_t,
+ minOffset: *mut ::core::ffi::c_int,
+ maxOffset: *mut ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the MemClk (Memory Clock) min max VF offset value.
+ @param[in] device The identifier of the target device
+ @param[out] minOffset The retrieved MemClk VF min offset value
+ @param[out] maxOffset The retrieved MemClk VF max offset value
+
+ @return
+ - \ref NVML_SUCCESS if \a offset has been successfully queried
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMemClkMinMaxVfOffset(
+ device: cuda_types::nvml::nvmlDevice_t,
+ minOffset: *mut ::core::ffi::c_int,
+ maxOffset: *mut ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** This API has been deprecated.
+
+ Retrieves the power management mode associated with this device.
+
+ For products from the Fermi family.
+ - Requires \a NVML_INFOROM_POWER version 3.0 or higher.
+
+ For products from the Kepler or newer families.
+ - Does not require \a NVML_INFOROM_POWER object.
+
+ This flag indicates whether any power management algorithm is currently active on the device. An
+ enabled state does not necessarily mean the device is being actively throttled -- only that
+ the driver will do so if the appropriate conditions are met.
+
+ See \ref nvmlEnableState_t for details on allowed modes.
+
+ @param device The identifier of the target device
+ @param mode Reference in which to return the current power management mode
+
+ @return
+ - \ref NVML_SUCCESS if \a mode has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPowerManagementMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ mode: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the power management limit associated with this device.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ The power limit defines the upper boundary for the card's power draw. If
+ the card's total power draw reaches this limit the power management algorithm kicks in.
+
+ This reading is only available if power management mode is supported.
+ See \ref nvmlDeviceGetPowerManagementMode.
+
+ @param device The identifier of the target device
+ @param limit Reference in which to return the power management limit in milliwatts
+
+ @return
+ - \ref NVML_SUCCESS if \a limit has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a limit is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPowerManagementLimit(
+ device: cuda_types::nvml::nvmlDevice_t,
+ limit: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves information about possible values of power management limits on this device.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param minLimit Reference in which to return the minimum power management limit in milliwatts
+ @param maxLimit Reference in which to return the maximum power management limit in milliwatts
+
+ @return
+ - \ref NVML_SUCCESS if \a minLimit and \a maxLimit have been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minLimit or \a maxLimit is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceSetPowerManagementLimit*/
+ fn nvmlDeviceGetPowerManagementLimitConstraints(
+ device: cuda_types::nvml::nvmlDevice_t,
+ minLimit: *mut ::core::ffi::c_uint,
+ maxLimit: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves default power management limit on this device, in milliwatts.
+ The default power management limit is the power management limit that the device boots with.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param defaultLimit Reference in which to return the default power management limit in milliwatts
+
+ @return
+ - \ref NVML_SUCCESS if \a defaultLimit has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a defaultLimit is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPowerManagementDefaultLimit(
+ device: cuda_types::nvml::nvmlDevice_t,
+ defaultLimit: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves power usage for this GPU and its associated circuitry (e.g. memory), in milliwatts.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ On Fermi and Kepler GPUs the reading is accurate to within +/- 5% of current power draw. On Ampere
+ (except GA100) or newer GPUs, the API returns power averaged over 1 sec interval. On GA100 and
+ older architectures, instantaneous power is returned.
+
+ See \ref NVML_FI_DEV_POWER_AVERAGE and \ref NVML_FI_DEV_POWER_INSTANT to query specific power
+ values.
+
+ It is only available if power management mode is supported. See \ref nvmlDeviceGetPowerManagementMode.
+
+ @param device The identifier of the target device
+ @param power Reference in which to return the power usage information
+
+ @return
+ - \ref NVML_SUCCESS if \a power has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a power is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support power readings
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPowerUsage(
+ device: cuda_types::nvml::nvmlDevice_t,
+ power: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
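+ // Illustrative sketch, not part of the generated bindings: the reading is in milliwatts,
+ // so a watts figure is a straight division by 1000.
+ //
+ //     let mut milliwatts: ::core::ffi::c_uint = 0;
+ //     let status = unsafe { nvmlDeviceGetPowerUsage(device, &mut milliwatts) };
+ //     let watts = f64::from(milliwatts) / 1000.0;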
+ #[must_use]
+ /** Retrieves total energy consumption for this GPU in millijoules (mJ) since the driver was last reloaded
+
+ For Volta &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param energy Reference in which to return the energy consumption information
+
+ @return
+ - \ref NVML_SUCCESS if \a energy has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a energy is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support energy readings
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetTotalEnergyConsumption(
+ device: cuda_types::nvml::nvmlDevice_t,
+ energy: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get the effective power limit that the driver enforces after taking into account all limiters
+
+ Note: This can be different from the \ref nvmlDeviceGetPowerManagementLimit if other limits are set elsewhere
+ This includes the out of band power limit interface
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The device to communicate with
+ @param limit Reference in which to return the power management limit in milliwatts
+
+ @return
+ - \ref NVML_SUCCESS if \a limit has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a limit is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetEnforcedPowerLimit(
+ device: cuda_types::nvml::nvmlDevice_t,
+ limit: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current GOM and pending GOM (the one that GPU will switch to after reboot).
+
+ For GK110 M-class and X-class Tesla &tm; products from the Kepler family.
+ Modes \ref NVML_GOM_LOW_DP and \ref NVML_GOM_ALL_ON are supported on fully supported GeForce products.
+ Not supported on Quadro &reg; and Tesla &tm; C-class products.
+
+ @param device The identifier of the target device
+ @param current Reference in which to return the current GOM
+ @param pending Reference in which to return the pending GOM
+
+ @return
+ - \ref NVML_SUCCESS if \a current and \a pending have been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a current or \a pending is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlGpuOperationMode_t
+ @see nvmlDeviceSetGpuOperationMode*/
+ fn nvmlDeviceGetGpuOperationMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ current: *mut cuda_types::nvml::nvmlGpuOperationMode_t,
+ pending: *mut cuda_types::nvml::nvmlGpuOperationMode_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the amount of used, free, reserved and total memory available on the device, in bytes.
+ The reserved amount is supported on version 2 only.
+
+ For all products.
+
+ Enabling ECC reduces the amount of total available memory, due to the extra required parity bits.
+ Under WDDM most device memory is allocated and managed on startup by Windows.
+
+ Under Linux and Windows TCC, the reported amount of used memory is equal to the sum of memory allocated
+ by all active channels on the device.
+
+ See \ref nvmlMemory_v2_t for details on available memory info.
+
+ @note In MIG mode, if device handle is provided, the API returns aggregate
+ information, only if the caller has appropriate privileges. Per-instance
+ information can be queried by using specific MIG device handles.
+
+ @note nvmlDeviceGetMemoryInfo_v2 adds additional memory information.
+
+ @note On systems where GPUs are NUMA nodes, the accuracy of FB memory utilization
+ provided by this API depends on the memory accounting of the operating system.
+ This is because FB memory is managed by the operating system instead of the NVIDIA GPU driver.
+ Typically, pages allocated from FB memory are not released even after
+ the process terminates to enhance performance. In scenarios where
+ the operating system is under memory pressure, it may resort to utilizing FB memory.
+ Such actions can result in discrepancies in the accuracy of memory reporting.
+
+ @param device The identifier of the target device
+ @param memory Reference in which to return the memory information
+
+ @return
+ - \ref NVML_SUCCESS if \a memory has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memory is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMemoryInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ memory: *mut cuda_types::nvml::nvmlMemory_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
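+ /** Retrieves the amount of used, free, reserved and total memory available on the device, in bytes,
+ via the \ref nvmlMemory_v2_t layout, which additionally reports reserved memory (see the notes on
+ \ref nvmlDeviceGetMemoryInfo above).*/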
+ fn nvmlDeviceGetMemoryInfo_v2(
+ device: cuda_types::nvml::nvmlDevice_t,
+ memory: *mut cuda_types::nvml::nvmlMemory_v2_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current compute mode for the device.
+
+ For all products.
+
+ See \ref nvmlComputeMode_t for details on allowed compute modes.
+
+ @param device The identifier of the target device
+ @param mode Reference in which to return the current compute mode
+
+ @return
+ - \ref NVML_SUCCESS if \a mode has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceSetComputeMode()*/
+ fn nvmlDeviceGetComputeMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ mode: *mut cuda_types::nvml::nvmlComputeMode_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the CUDA compute capability of the device.
+
+ For all products.
+
+ Returns the major and minor compute capability version numbers of the
+ device. The major and minor versions are equivalent to the
+ CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR and
+ CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR attributes that would be
+ returned by CUDA's cuDeviceGetAttribute().
+
+ @param device The identifier of the target device
+ @param major Reference in which to return the major CUDA compute capability
+ @param minor Reference in which to return the minor CUDA compute capability
+
+ @return
+ - \ref NVML_SUCCESS if \a major and \a minor have been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a major or \a minor are NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetCudaComputeCapability(
+ device: cuda_types::nvml::nvmlDevice_t,
+ major: *mut ::core::ffi::c_int,
+ minor: *mut ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
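The same pattern with two scalar out-parameters, under the same assumptions as the sketch above:

    /// Returns (status, major, minor); e.g. (NVML_SUCCESS, 8, 6) for a compute-capability-8.6 GPU.
    unsafe fn cuda_compute_capability(
        device: cuda_types::nvml::nvmlDevice_t,
    ) -> (cuda_types::nvml::nvmlReturn_t, i32, i32) {
        let (mut major, mut minor) = (0i32, 0i32);
        let status = nvmlDeviceGetCudaComputeCapability(device, &mut major, &mut minor);
        (status, major, minor)
    }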
+ #[must_use]
+ /** Retrieves the current and pending ECC modes for the device.
+
+ For Fermi &tm; or newer fully supported devices.
+ Only applicable to devices with ECC.
+ Requires \a NVML_INFOROM_ECC version 1.0 or higher.
+
+ Changing ECC modes requires a reboot. The "pending" ECC mode refers to the target mode following
+ the next reboot.
+
+ See \ref nvmlEnableState_t for details on allowed modes.
+
+ @param device The identifier of the target device
+ @param current Reference in which to return the current ECC mode
+ @param pending Reference in which to return the pending ECC mode
+
+ @return
+ - \ref NVML_SUCCESS if \a current and \a pending have been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or either \a current or \a pending is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceSetEccMode()*/
+ fn nvmlDeviceGetEccMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ current: *mut cuda_types::nvml::nvmlEnableState_t,
+ pending: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the default ECC modes for the device.
+
+ For Fermi &tm; or newer fully supported devices.
+ Only applicable to devices with ECC.
+ Requires \a NVML_INFOROM_ECC version 1.0 or higher.
+
+ See \ref nvmlEnableState_t for details on allowed modes.
+
+ @param device The identifier of the target device
+ @param defaultMode Reference in which to return the default ECC mode
+
+ @return
+ - \ref NVML_SUCCESS if \a defaultMode has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a defaultMode is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceSetEccMode()*/
+ fn nvmlDeviceGetDefaultEccMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ defaultMode: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the device boardId from 0-N.
+ Devices with the same boardId indicate GPUs connected to the same PLX. Use in conjunction with
+ \ref nvmlDeviceGetMultiGpuBoard() to decide if they are on the same board as well.
+ The boardId returned is a unique ID for the current configuration. Uniqueness and ordering across
+ reboots and system configurations are not guaranteed (i.e. if a Tesla K40c returns 0x100 and
+ the two GPUs on a Tesla K10 in the same system return 0x200, it is not guaranteed they will
+ always return those values, but they will always be different from each other).
+
+
+ For Fermi &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param boardId Reference in which to return the device's board ID
+
+ @return
+ - \ref NVML_SUCCESS if \a boardId has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a boardId is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetBoardId(
+ device: cuda_types::nvml::nvmlDevice_t,
+ boardId: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves whether the device is on a Multi-GPU Board
+ Devices that are on multi-GPU boards will set \a multiGpuBool to a non-zero value.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param multiGpuBool Reference in which to return a zero or non-zero value
+ to indicate whether the device is on a multi GPU board
+
+ @return
+ - \ref NVML_SUCCESS if \a multiGpuBool has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a multiGpuBool is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMultiGpuBoard(
+ device: cuda_types::nvml::nvmlDevice_t,
+ multiGpuBool: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the total ECC error counts for the device.
+
+ For Fermi &tm; or newer fully supported devices.
+ Only applicable to devices with ECC.
+ Requires \a NVML_INFOROM_ECC version 1.0 or higher.
+ Requires ECC Mode to be enabled.
+
+ The total error count is the sum of errors across each of the separate memory systems, i.e. the total set of
+ errors across the entire device.
+
+ See \ref nvmlMemoryErrorType_t for a description of available error types.\n
+ See \ref nvmlEccCounterType_t for a description of available counter types.
+
+ @param device The identifier of the target device
+ @param errorType Flag that specifies the type of the errors.
+ @param counterType Flag that specifies the counter-type of the errors.
+ @param eccCounts Reference in which to return the specified ECC errors
+
+ @return
+ - \ref NVML_SUCCESS if \a eccCounts has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType or \a counterType is invalid, or \a eccCounts is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceClearEccErrorCounts()*/
+ fn nvmlDeviceGetTotalEccErrors(
+ device: cuda_types::nvml::nvmlDevice_t,
+ errorType: cuda_types::nvml::nvmlMemoryErrorType_t,
+ counterType: cuda_types::nvml::nvmlEccCounterType_t,
+ eccCounts: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
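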
+ #[must_use]
+ /** Retrieves the detailed ECC error counts for the device.
+
+ @deprecated This API supports only a fixed set of ECC error locations.
+ On different GPU architectures different locations are supported.
+ See \ref nvmlDeviceGetMemoryErrorCounter.
+
+ For Fermi &tm; or newer fully supported devices.
+ Only applicable to devices with ECC.
+ Requires \a NVML_INFOROM_ECC version 2.0 or higher to report aggregate location-based ECC counts.
+ Requires \a NVML_INFOROM_ECC version 1.0 or higher to report all other ECC counts.
+ Requires ECC Mode to be enabled.
+
+ Detailed errors provide separate ECC counts for specific parts of the memory system.
+
+ Reports zero for unsupported ECC error counters when only a subset of ECC error counters is supported.
+
+ See \ref nvmlMemoryErrorType_t for a description of available bit types.\n
+ See \ref nvmlEccCounterType_t for a description of available counter types.\n
+ See \ref nvmlEccErrorCounts_t for a description of provided detailed ECC counts.
+
+ @param device The identifier of the target device
+ @param errorType Flag that specifies the type of the errors.
+ @param counterType Flag that specifies the counter-type of the errors.
+ @param eccCounts Reference in which to return the specified ECC errors
+
+ @return
+ - \ref NVML_SUCCESS if \a eccCounts has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType or \a counterType is invalid, or \a eccCounts is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceClearEccErrorCounts()*/
+ fn nvmlDeviceGetDetailedEccErrors(
+ device: cuda_types::nvml::nvmlDevice_t,
+ errorType: cuda_types::nvml::nvmlMemoryErrorType_t,
+ counterType: cuda_types::nvml::nvmlEccCounterType_t,
+ eccCounts: *mut cuda_types::nvml::nvmlEccErrorCounts_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the requested memory error counter for the device.
+
+ For Fermi &tm; or newer fully supported devices.
+ Requires \a NVML_INFOROM_ECC version 2.0 or higher to report aggregate location-based memory error counts.
+ Requires \a NVML_INFOROM_ECC version 1.0 or higher to report all other memory error counts.
+
+ Only applicable to devices with ECC.
+
+ Requires ECC Mode to be enabled.
+
+ @note On MIG-enabled GPUs, per instance information can be queried using specific
+ MIG device handles. Per instance information is currently only supported for
+ non-DRAM uncorrectable volatile errors. Querying volatile errors using device
+ handles is currently not supported.
+
+ See \ref nvmlMemoryErrorType_t for a description of available memory error types.\n
+ See \ref nvmlEccCounterType_t for a description of available counter types.\n
+ See \ref nvmlMemoryLocation_t for a description of available counter locations.\n
+
+ @param device The identifier of the target device
+ @param errorType Flag that specifies the type of error.
+ @param counterType Flag that specifies the counter-type of the errors.
+ @param locationType Specifies the location of the counter.
+ @param count Reference in which to return the ECC counter
+
+ @return
+ - \ref NVML_SUCCESS if \a count has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType, \a counterType or \a locationType is
+ invalid, or \a count is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support ECC error reporting in the specified memory
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMemoryErrorCounter(
+ device: cuda_types::nvml::nvmlDevice_t,
+ errorType: cuda_types::nvml::nvmlMemoryErrorType_t,
+ counterType: cuda_types::nvml::nvmlEccCounterType_t,
+ locationType: cuda_types::nvml::nvmlMemoryLocation_t,
+ count: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current utilization rates for the device's major subsystems.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ See \ref nvmlUtilization_t for details on available utilization rates.
+
+ \note During driver initialization when ECC is enabled one can see high GPU and Memory Utilization readings.
+ This is caused by ECC Memory Scrubbing mechanism that is performed during driver initialization.
+
+ @note On MIG-enabled GPUs, querying device utilization rates is not currently supported.
+
+ @param device The identifier of the target device
+ @param utilization Reference in which to return the utilization information
+
+ @return
+ - \ref NVML_SUCCESS if \a utilization has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a utilization is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetUtilizationRates(
+ device: cuda_types::nvml::nvmlDevice_t,
+ utilization: *mut cuda_types::nvml::nvmlUtilization_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current utilization and sampling size in microseconds for the Encoder
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @note On MIG-enabled GPUs, querying encoder utilization is not currently supported.
+
+ @param device The identifier of the target device
+ @param utilization Reference to an unsigned int for encoder utilization info
+ @param samplingPeriodUs Reference to an unsigned int for the sampling period in US
+
+ @return
+ - \ref NVML_SUCCESS if \a utilization has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetEncoderUtilization(
+ device: cuda_types::nvml::nvmlDevice_t,
+ utilization: *mut ::core::ffi::c_uint,
+ samplingPeriodUs: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current capacity of the device's encoder, as a percentage of maximum encoder capacity with valid values in the range 0-100.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param encoderQueryType Type of encoder to query
+ @param encoderCapacity Reference to an unsigned int for the encoder capacity
+
+ @return
+ - \ref NVML_SUCCESS if \a encoderCapacity is fetched
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a encoderCapacity is NULL, or \a device or \a encoderQueryType
+ are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support the encoder specified in \a encoderQueryType
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetEncoderCapacity(
+ device: cuda_types::nvml::nvmlDevice_t,
+ encoderQueryType: cuda_types::nvml::nvmlEncoderType_t,
+ encoderCapacity: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current encoder statistics for a given device.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param sessionCount Reference to an unsigned int for count of active encoder sessions
+ @param averageFps Reference to an unsigned int for trailing average FPS of all active sessions
+ @param averageLatency Reference to an unsigned int for encode latency in microseconds
+
+ @return
+ - \ref NVML_SUCCESS if \a sessionCount, \a averageFps and \a averageLatency are fetched
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a sessionCount, \a averageFps,
+ or \a averageLatency is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetEncoderStats(
+ device: cuda_types::nvml::nvmlDevice_t,
+ sessionCount: *mut ::core::ffi::c_uint,
+ averageFps: *mut ::core::ffi::c_uint,
+ averageLatency: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves information about active encoder sessions on a target device.
+
+ An array of active encoder sessions is returned in the caller-supplied buffer pointed at by \a sessionInfos. The
+ array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions
+ written to the buffer.
+
+ If the supplied buffer is not large enough to accommodate the active session array, the function returns
+ NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlEncoderSessionInfo_t array required in \a sessionCount.
+ To query the number of active encoder sessions, call this function with *sessionCount = 0. The code will return
+ NVML_SUCCESS with number of active encoder sessions updated in *sessionCount.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param sessionCount Reference to caller supplied array size, and returns the number of sessions.
+ @param sessionInfos Reference in which to return the session information
+
+ @return
+ - \ref NVML_SUCCESS if \a sessionInfos is fetched
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is returned in \a sessionCount
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL.
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetEncoderSessions(
+ device: cuda_types::nvml::nvmlDevice_t,
+ sessionCount: *mut ::core::ffi::c_uint,
+ sessionInfos: *mut cuda_types::nvml::nvmlEncoderSessionInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
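A hand-written sketch of the two-call sizing protocol described above, assuming the extern is callable and that the generated nvmlEncoderSessionInfo_t can be zero-initialized as a scratch buffer:

    unsafe fn encoder_sessions(
        device: cuda_types::nvml::nvmlDevice_t,
    ) -> (cuda_types::nvml::nvmlReturn_t, Vec<cuda_types::nvml::nvmlEncoderSessionInfo_t>) {
        // First call with *sessionCount == 0: NVML_SUCCESS is returned and `count` is set
        // to the number of active sessions (a robust caller would check this status).
        let mut count: u32 = 0;
        let _ = nvmlDeviceGetEncoderSessions(device, &mut count, core::ptr::null_mut());
        // Second call: fetch into a buffer of that size; `count` is updated to the number written.
        let mut sessions: Vec<_> = (0..count).map(|_| core::mem::zeroed()).collect();
        let status = nvmlDeviceGetEncoderSessions(device, &mut count, sessions.as_mut_ptr());
        sessions.truncate(count as usize);
        (status, sessions)
    }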
+ #[must_use]
+ /** Retrieves the current utilization and sampling size in microseconds for the Decoder
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @note On MIG-enabled GPUs, querying decoder utilization is not currently supported.
+
+ @param device The identifier of the target device
+ @param utilization Reference to an unsigned int for decoder utilization info
+ @param samplingPeriodUs Reference to an unsigned int for the sampling period in US
+
+ @return
+ - \ref NVML_SUCCESS if \a utilization has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetDecoderUtilization(
+ device: cuda_types::nvml::nvmlDevice_t,
+ utilization: *mut ::core::ffi::c_uint,
+ samplingPeriodUs: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current utilization and sampling size in microseconds for the JPG
+
+ %TURING_OR_NEWER%
+
+ @note On MIG-enabled GPUs, querying decoder utilization is not currently supported.
+
+ @param device The identifier of the target device
+ @param utilization Reference to an unsigned int for jpg utilization info
+ @param samplingPeriodUs Reference to an unsigned int for the sampling period in US
+
+ @return
+ - \ref NVML_SUCCESS if \a utilization has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetJpgUtilization(
+ device: cuda_types::nvml::nvmlDevice_t,
+ utilization: *mut ::core::ffi::c_uint,
+ samplingPeriodUs: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current utilization and sampling size in microseconds for the OFA (Optical Flow Accelerator)
+
+ %TURING_OR_NEWER%
+
+ @note On MIG-enabled GPUs, querying decoder utilization is not currently supported.
+
+ @param device The identifier of the target device
+ @param utilization Reference to an unsigned int for ofa utilization info
+ @param samplingPeriodUs Reference to an unsigned int for the sampling period in US
+
+ @return
+ - \ref NVML_SUCCESS if \a utilization has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetOfaUtilization(
+ device: cuda_types::nvml::nvmlDevice_t,
+ utilization: *mut ::core::ffi::c_uint,
+ samplingPeriodUs: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the active frame buffer capture sessions statistics for a given device.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param fbcStats Reference to nvmlFBCStats_t structure containing NvFBC stats
+
+ @return
+ - \ref NVML_SUCCESS if \a fbcStats is fetched
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a fbcStats is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetFBCStats(
+ device: cuda_types::nvml::nvmlDevice_t,
+ fbcStats: *mut cuda_types::nvml::nvmlFBCStats_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves information about active frame buffer capture sessions on a target device.
+
+ An array of active FBC sessions is returned in the caller-supplied buffer pointed at by \a sessionInfo. The
+ array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions
+ written to the buffer.
+
+ If the supplied buffer is not large enough to accommodate the active session array, the function returns
+ NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlFBCSessionInfo_t array required in \a sessionCount.
+ To query the number of active FBC sessions, call this function with *sessionCount = 0. The code will return
+ NVML_SUCCESS with number of active FBC sessions updated in *sessionCount.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @note hResolution, vResolution, averageFPS and averageLatency data for a FBC session returned in \a sessionInfo may
+ be zero if there are no new frames captured since the session started.
+
+ @param device The identifier of the target device
+ @param sessionCount Reference to caller supplied array size, and returns the number of sessions.
+ @param sessionInfo Reference in which to return the session information
+
+ @return
+ - \ref NVML_SUCCESS if \a sessionInfo is fetched
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is returned in \a sessionCount
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL.
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetFBCSessions(
+ device: cuda_types::nvml::nvmlDevice_t,
+ sessionCount: *mut ::core::ffi::c_uint,
+ sessionInfo: *mut cuda_types::nvml::nvmlFBCSessionInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current and pending driver model for the device.
+
+ For Fermi &tm; or newer fully supported devices.
+ For windows only.
+
+ On Windows platforms the device driver can run in either WDDM or WDM (TCC) mode. If a display is attached
+ to the device it must run in WDDM mode. TCC mode is preferred if a display is not attached.
+
+ See \ref nvmlDriverModel_t for details on available driver models.
+
+ @param device The identifier of the target device
+ @param current Reference in which to return the current driver model
+ @param pending Reference in which to return the pending driver model
+
+ @return
+ - \ref NVML_SUCCESS if either \a current or \a pending has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or both \a current and \a pending are NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the platform is not windows
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceSetDriverModel()*/
+ fn nvmlDeviceGetDriverModel(
+ device: cuda_types::nvml::nvmlDevice_t,
+ current: *mut cuda_types::nvml::nvmlDriverModel_t,
+ pending: *mut cuda_types::nvml::nvmlDriverModel_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get VBIOS version of the device.
+
+ For all products.
+
+ The VBIOS version may change from time to time. It will not exceed 32 characters in length
+ (including the NULL terminator). See \ref nvmlConstants::NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE.
+
+ @param device The identifier of the target device
+ @param version Reference to which to return the VBIOS version
+ @param length The maximum allowed length of the string returned in \a version
+
+ @return
+ - \ref NVML_SUCCESS if \a version has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a version is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVbiosVersion(
+ device: cuda_types::nvml::nvmlDevice_t,
+ version: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
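A hand-written sketch of the fixed-size string out-parameter pattern: the comment above guarantees the version string fits in 32 bytes including the terminator (NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE in the C header), so a stack buffer of that length is enough. Same callability assumptions as the earlier sketches.

    unsafe fn vbios_version(
        device: cuda_types::nvml::nvmlDevice_t,
    ) -> (cuda_types::nvml::nvmlReturn_t, String) {
        // 32 == NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE (includes the NUL terminator).
        let mut buf: [::core::ffi::c_char; 32] = [0; 32];
        let status = nvmlDeviceGetVbiosVersion(device, buf.as_mut_ptr(), buf.len() as u32);
        // On failure the buffer stays zeroed, which yields an empty string here.
        let version = core::ffi::CStr::from_ptr(buf.as_ptr())
            .to_str()
            .unwrap_or("")
            .to_owned();
        (status, version)
    }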
+ #[must_use]
+ /** Get Bridge Chip Information for all the bridge chips on the board.
+
+ For all fully supported products.
+ Only applicable to multi-GPU products.
+
+ @param device The identifier of the target device
+ @param bridgeHierarchy Reference to the returned bridge chip Hierarchy
+
+ @return
+ - \ref NVML_SUCCESS if bridge chip exists
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a bridgeHierarchy is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if bridge chip not supported on the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+*/
+ fn nvmlDeviceGetBridgeChipInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ bridgeHierarchy: *mut cuda_types::nvml::nvmlBridgeChipHierarchy_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get information about processes with a compute context on a device
+
+ For Fermi &tm; or newer fully supported devices.
+
+ This function returns information only about compute running processes (e.g. CUDA application which have
+ active context). Any graphics applications (e.g. using OpenGL, DirectX) won't be listed by this function.
+
+ To query the current number of running compute processes, call this function with *infoCount = 0. The
+ return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call
+ \a infos is allowed to be NULL.
+
+ The usedGpuMemory field returned is all of the memory used by the application.
+
+ Keep in mind that information returned by this call is dynamic and the number of elements might change in
+ time. Allocate more space for \a infos table in case new compute processes are spawned.
+
+ @note In MIG mode, if device handle is provided, the API returns aggregate information, only if
+ the caller has appropriate privileges. Per-instance information can be queried by using
+ specific MIG device handles.
+ Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode.
+
+ @param device The device handle or MIG device handle
+ @param infoCount Reference in which to provide the \a infos array size, and
+ to return the number of returned elements
+ @param infos Reference in which to return the process information
+
+ @return
+ - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small
+ \a infoCount will contain minimal amount of space necessary for
+ the call to complete
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see \ref nvmlSystemGetProcessName*/
+ fn nvmlDeviceGetComputeRunningProcesses_v3(
+ device: cuda_types::nvml::nvmlDevice_t,
+ infoCount: *mut ::core::ffi::c_uint,
+ infos: *mut cuda_types::nvml::nvmlProcessInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
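A hand-written sketch of the sizing protocol spelled out above, including the headroom the previous paragraph recommends; assumptions as in the earlier sketches, with nvmlProcessInfo_t zero-initialized as a scratch buffer:

    unsafe fn compute_processes(
        device: cuda_types::nvml::nvmlDevice_t,
    ) -> (cuda_types::nvml::nvmlReturn_t, Vec<cuda_types::nvml::nvmlProcessInfo_t>) {
        // *infoCount == 0, infos == NULL: NVML_SUCCESS means nothing is running,
        // NVML_ERROR_INSUFFICIENT_SIZE means `count` now holds the required length.
        let mut count: u32 = 0;
        let _ = nvmlDeviceGetComputeRunningProcesses_v3(device, &mut count, core::ptr::null_mut());
        // Allocate a little extra in case new processes appear between the two calls.
        count += 8;
        let mut infos: Vec<_> = (0..count).map(|_| core::mem::zeroed()).collect();
        let status = nvmlDeviceGetComputeRunningProcesses_v3(device, &mut count, infos.as_mut_ptr());
        infos.truncate(count as usize);
        (status, infos)
    }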
+ #[must_use]
+ /** Get information about processes with a graphics context on a device
+
+ For Kepler &tm; or newer fully supported devices.
+
+ This function returns information only about graphics based processes
+ (eg. applications using OpenGL, DirectX)
+
+ To query the current number of running graphics processes, call this function with *infoCount = 0. The
+ return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call
+ \a infos is allowed to be NULL.
+
+ The usedGpuMemory field returned is all of the memory used by the application.
+
+ Keep in mind that information returned by this call is dynamic and the number of elements might change in
+ time. Allocate more space for \a infos table in case new graphics processes are spawned.
+
+ @note In MIG mode, if device handle is provided, the API returns aggregate information, only if
+ the caller has appropriate privileges. Per-instance information can be queried by using
+ specific MIG device handles.
+ Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode.
+
+ @param device The device handle or MIG device handle
+ @param infoCount Reference in which to provide the \a infos array size, and
+ to return the number of returned elements
+ @param infos Reference in which to return the process information
+
+ @return
+ - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small
+ \a infoCount will contain minimal amount of space necessary for
+ the call to complete
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see \ref nvmlSystemGetProcessName*/
+ fn nvmlDeviceGetGraphicsRunningProcesses_v3(
+ device: cuda_types::nvml::nvmlDevice_t,
+ infoCount: *mut ::core::ffi::c_uint,
+ infos: *mut cuda_types::nvml::nvmlProcessInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get information about processes with a MPS compute context on a device
+
+ For Volta &tm; or newer fully supported devices.
+
+ This function returns information only about compute running processes (e.g. CUDA application which have
+ active context) utilizing MPS. Any graphics applications (e.g. using OpenGL, DirectX) won't be listed by
+ this function.
+
+ To query the current number of running compute processes, call this function with *infoCount = 0. The
+ return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call
+ \a infos is allowed to be NULL.
+
+ The usedGpuMemory field returned is all of the memory used by the application.
+
+ Keep in mind that information returned by this call is dynamic and the number of elements might change in
+ time. Allocate more space for \a infos table in case new compute processes are spawned.
+
+ @note In MIG mode, if device handle is provided, the API returns aggregate information, only if
+ the caller has appropriate privileges. Per-instance information can be queried by using
+ specific MIG device handles.
+ Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode.
+
+ @param device The device handle or MIG device handle
+ @param infoCount Reference in which to provide the \a infos array size, and
+ to return the number of returned elements
+ @param infos Reference in which to return the process information
+
+ @return
+ - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small
+ \a infoCount will contain minimal amount of space necessary for
+ the call to complete
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see \ref nvmlSystemGetProcessName*/
+ fn nvmlDeviceGetMPSComputeRunningProcesses_v3(
+ device: cuda_types::nvml::nvmlDevice_t,
+ infoCount: *mut ::core::ffi::c_uint,
+ infos: *mut cuda_types::nvml::nvmlProcessInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get information about running processes on a device for input context
+
+ %HOPPER_OR_NEWER%
+
+ This function returns information only about running processes (e.g. CUDA application which have
+ active context).
+
+ To determine the size of the @ref plist->procArray array to allocate, call the function with
+ @ref plist->numProcArrayEntries set to zero and @ref plist->procArray set to NULL. The return
+ code will be either NVML_ERROR_INSUFFICIENT_SIZE (if there are valid processes of type
+ @ref plist->mode to report on, in which case the @ref plist->numProcArrayEntries field will
+ indicate the required number of entries in the array) or NVML_SUCCESS (if no processes of type
+ @ref plist->mode exist).
+
+ The usedGpuMemory field returned is all of the memory used by the application.
+ The usedGpuCcProtectedMemory field returned is all of the protected memory used by the application.
+
+ Keep in mind that information returned by this call is dynamic and the number of elements might change in
+ time. Allocate more space for \a plist->procArray table in case new processes are spawned.
+
+ @note In MIG mode, if device handle is provided, the API returns aggregate information, only if
+ the caller has appropriate privileges. Per-instance information can be queried by using
+ specific MIG device handles.
+ Querying per-instance information using MIG device handles is not supported if the device is in
+ vGPU Host virtualization mode.
+ Protected memory usage is currently not available in MIG mode and in windows.
+
+ @param device The device handle or MIG device handle
+ @param plist Reference in which to process detail list
+ @param plist->version The api version
+ @param plist->mode The process mode
+ @param plist->procArray Reference in which to return the process information
+ @param plist->numProcArrayEntries Proc array size of returned entries
+
+ @return
+ - \ref NVML_SUCCESS if \a plist->numProcArrayEntries and \a plist->procArray have been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a plist->numProcArrayEntries indicates that the \a plist->procArray is too small
+ \a plist->numProcArrayEntries will contain the minimal amount of space necessary for
+ the call to complete
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a plist is NULL, \a plist->version is invalid,
+ or \a plist->mode is invalid
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+*/
+ fn nvmlDeviceGetRunningProcessDetailList(
+ device: cuda_types::nvml::nvmlDevice_t,
+ plist: *mut cuda_types::nvml::nvmlProcessDetailList_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
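A hand-written sketch of the sizing step of the versioned-struct protocol described above. The plist field names come from the comment; the field types are assumed to match the header's unsigned ints, and the concrete version/mode constants are left to the caller because their Rust-side names depend on the generated cuda_types constants.

    unsafe fn size_process_detail_list(
        device: cuda_types::nvml::nvmlDevice_t,
        version: u32,
        mode: u32,
    ) -> (cuda_types::nvml::nvmlReturn_t, cuda_types::nvml::nvmlProcessDetailList_t) {
        let mut plist: cuda_types::nvml::nvmlProcessDetailList_t = core::mem::zeroed();
        plist.version = version;
        plist.mode = mode;
        plist.procArray = core::ptr::null_mut();
        plist.numProcArrayEntries = 0;
        // NVML_ERROR_INSUFFICIENT_SIZE => numProcArrayEntries holds the required length;
        // a second call with an allocated procArray then retrieves the actual entries.
        let status = nvmlDeviceGetRunningProcessDetailList(device, &mut plist);
        (status, plist)
    }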
+ #[must_use]
+ /** Check if the GPU devices are on the same physical board.
+
+ For all fully supported products.
+
+ @param device1 The first GPU device
+ @param device2 The second GPU device
+ @param onSameBoard Reference in which to return the status.
+ Non-zero indicates that the GPUs are on the same board.
+
+ @return
+ - \ref NVML_SUCCESS if \a onSameBoard has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device1 or \a device2 is invalid or \a onSameBoard is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this check is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if either GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceOnSameBoard(
+ device1: cuda_types::nvml::nvmlDevice_t,
+ device2: cuda_types::nvml::nvmlDevice_t,
+ onSameBoard: *mut ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the root/admin permissions on the target API. See \a nvmlRestrictedAPI_t for the list of supported APIs.
+ If an API is restricted only root users can call that API. See \a nvmlDeviceSetAPIRestriction to change current permissions.
+
+ For all fully supported products.
+
+ @param device The identifier of the target device
+ @param apiType Target API type for this operation
+ @param isRestricted Reference in which to return the current restriction
+ NVML_FEATURE_ENABLED indicates that the API is root-only
+ NVML_FEATURE_DISABLED indicates that the API is accessible to all users
+
+ @return
+ - \ref NVML_SUCCESS if \a isRestricted has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a apiType incorrect or \a isRestricted is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device or the device does not support
+ the feature that is being queried (e.g. enabling/disabling Auto Boosted clocks is
+ not supported by the device)
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlRestrictedAPI_t*/
+ fn nvmlDeviceGetAPIRestriction(
+ device: cuda_types::nvml::nvmlDevice_t,
+ apiType: cuda_types::nvml::nvmlRestrictedAPI_t,
+ isRestricted: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Gets recent samples for the GPU.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ Based on type, this method can be used to fetch the power, utilization or clock samples maintained in the buffer by
+ the driver.
+
+ Power, Utilization and Clock samples are returned as type "unsigned int" for the union nvmlValue_t.
+
+ To get the number of samples that the user needs to allocate, the method is invoked with \a samples set to NULL.
+ The returned \a sampleCount provides the number of samples that can be queried. The user then needs to
+ allocate a buffer of size sampleCount * sizeof(nvmlSample_t).
+
+ lastSeenTimeStamp represents CPU timestamp in microseconds. Set it to 0 to fetch all the samples maintained by the
+ underlying buffer. Set lastSeenTimeStamp to one of the timeStamps retrieved from the date of the previous query
+ to get more recent samples.
+
+ This method fetches the number of entries which can be accommodated in the provided samples array, and the
+ reference \a sampleCount is updated to indicate how many samples were actually retrieved. The advantage of using this
+ method for samples, in contrast to polling via existing methods, is to get higher frequency data at lower polling cost.
+
+ @note On MIG-enabled GPUs, querying the following sample types, NVML_GPU_UTILIZATION_SAMPLES, NVML_MEMORY_UTILIZATION_SAMPLES,
+ NVML_ENC_UTILIZATION_SAMPLES and NVML_DEC_UTILIZATION_SAMPLES, is not currently supported.
+
+ @param device The identifier for the target device
+ @param type Type of sampling event
+ @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp.
+ @param sampleValType Output parameter to represent the type of sample value as described in nvmlSampleVal_t
+ @param sampleCount Reference to provide the number of elements which can be queried in samples array
+ @param samples Reference in which samples are returned
+
+ @return
+ - \ref NVML_SUCCESS if samples are successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a sampleCount is NULL or
+ the value referenced by \a sampleCount is 0 for a non-NULL \a samples
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_NOT_FOUND if sample entries are not found
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetSamples(
+ device: cuda_types::nvml::nvmlDevice_t,
+ type_: cuda_types::nvml::nvmlSamplingType_t,
+ lastSeenTimeStamp: ::core::ffi::c_ulonglong,
+ sampleValType: *mut cuda_types::nvml::nvmlValueType_t,
+ sampleCount: *mut ::core::ffi::c_uint,
+ samples: *mut cuda_types::nvml::nvmlSample_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
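A hand-written sketch of the sampling protocol described above: size with \a samples set to NULL, then fetch. sampling_type and last_seen are caller-supplied; the sketch assumes the generated enum types are plain Copy values and that the extern is callable.

    unsafe fn recent_samples(
        device: cuda_types::nvml::nvmlDevice_t,
        sampling_type: cuda_types::nvml::nvmlSamplingType_t,
        last_seen: u64,
    ) -> (cuda_types::nvml::nvmlReturn_t, Vec<cuda_types::nvml::nvmlSample_t>) {
        // value_type reports how to read the nvmlValue_t union inside each sample.
        let mut value_type: cuda_types::nvml::nvmlValueType_t = core::mem::zeroed();
        let mut count: u32 = 0;
        // samples == NULL: `count` is set to the number of buffered samples available.
        let _ = nvmlDeviceGetSamples(device, sampling_type, last_seen, &mut value_type, &mut count, core::ptr::null_mut());
        let mut samples: Vec<_> = (0..count).map(|_| core::mem::zeroed()).collect();
        let status = nvmlDeviceGetSamples(device, sampling_type, last_seen, &mut value_type, &mut count, samples.as_mut_ptr());
        samples.truncate(count as usize);
        (status, samples)
    }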
+ #[must_use]
+ /** Gets Total, Available and Used size of BAR1 memory.
+
+ BAR1 is used to map the FB (device memory) so that it can be directly accessed by the CPU or by 3rd party
+ devices (peer-to-peer on the PCIE bus).
+
+ @note In MIG mode, if device handle is provided, the API returns aggregate
+ information, only if the caller has appropriate privileges. Per-instance
+ information can be queried by using specific MIG device handles.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param bar1Memory Reference in which BAR1 memory
+ information is returned.
+
+ @return
+ - \ref NVML_SUCCESS if BAR1 memory is successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a bar1Memory is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+*/
+ fn nvmlDeviceGetBAR1MemoryInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ bar1Memory: *mut cuda_types::nvml::nvmlBAR1Memory_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Gets the duration of time during which the device was throttled (lower than requested clocks) due to power
+ or thermal constraints.
+
+ The method is important to users who are trying to understand if their GPUs throttle at any point while their applications run. The
+ difference in violation time at two different reference times indicates a GPU throttling event.
+
+ Violation for thermal capping is not supported at this time.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param perfPolicyType Represents Performance policy which can trigger GPU throttling
+ @param violTime Reference to which violation time related information is returned
+
+
+ @return
+ - \ref NVML_SUCCESS if violation time is successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a perfPolicyType is invalid, or \a violTime is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+*/
+ fn nvmlDeviceGetViolationStatus(
+ device: cuda_types::nvml::nvmlDevice_t,
+ perfPolicyType: cuda_types::nvml::nvmlPerfPolicyType_t,
+ violTime: *mut cuda_types::nvml::nvmlViolationTime_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
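A hand-written sketch of the "difference between two readings" usage described above. The violationTime field name follows the C header's nvmlViolationTime_t and is an assumption about the generated struct; callability assumptions as before.

    /// Returns true when the GPU accumulated additional violation time under `policy`
    /// since the `earlier` reading was taken.
    unsafe fn throttled_since(
        device: cuda_types::nvml::nvmlDevice_t,
        policy: cuda_types::nvml::nvmlPerfPolicyType_t,
        earlier: &cuda_types::nvml::nvmlViolationTime_t,
    ) -> (cuda_types::nvml::nvmlReturn_t, bool) {
        let mut now: cuda_types::nvml::nvmlViolationTime_t = core::mem::zeroed();
        let status = nvmlDeviceGetViolationStatus(device, policy, &mut now);
        (status, now.violationTime > earlier.violationTime)
    }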
+ #[must_use]
+ /** Gets the device's interrupt number
+
+ @param device The identifier of the target device
+ @param irqNum The interrupt number associated with the specified device
+
+ @return
+ - \ref NVML_SUCCESS if irq number is successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a irqNum is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+*/
+ fn nvmlDeviceGetIrqNum(
+ device: cuda_types::nvml::nvmlDevice_t,
+ irqNum: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Gets the device's core count
+
+ @param device The identifier of the target device
+ @param numCores The number of cores for the specified device
+
+ @return
+ - \ref NVML_SUCCESS if Gpu core count is successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a numCores is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+*/
+ fn nvmlDeviceGetNumGpuCores(
+ device: cuda_types::nvml::nvmlDevice_t,
+ numCores: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Gets the devices power source
+
+ @param device The identifier of the target device
+ @param powerSource The power source of the device
+
+ @return
+ - \ref NVML_SUCCESS if the current power source was successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a powerSource is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+*/
+ fn nvmlDeviceGetPowerSource(
+ device: cuda_types::nvml::nvmlDevice_t,
+ powerSource: *mut cuda_types::nvml::nvmlPowerSource_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Gets the device's memory bus width
+
+ @param device The identifier of the target device
+ @param busWidth The device's memory bus width
+
+ @return
+ - \ref NVML_SUCCESS if the memory bus width is successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a busWidth is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+*/
+ fn nvmlDeviceGetMemoryBusWidth(
+ device: cuda_types::nvml::nvmlDevice_t,
+ busWidth: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Gets the device's PCIE Max Link speed in MBPS
+
+ @param device The identifier of the target device
+ @param maxSpeed The device's PCIE Max Link speed in MBPS
+
+ @return
+ - \ref NVML_SUCCESS if Pcie Max Link Speed is successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a maxSpeed is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+*/
+ fn nvmlDeviceGetPcieLinkMaxSpeed(
+ device: cuda_types::nvml::nvmlDevice_t,
+ maxSpeed: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Gets the device's PCIe Link speed in Mbps
+
+ @param device The identifier of the target device
+ @param pcieSpeed The device's PCIe Link speed in Mbps
+
+ @return
+ - \ref NVML_SUCCESS if \a pcieSpeed has been retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pcieSpeed is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support PCIe speed queries
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPcieSpeed(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pcieSpeed: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Gets the device's Adaptive Clock status
+
+ @param device The identifier of the target device
+ @param adaptiveClockStatus The current adaptive clocking status, either
+ @ref NVML_ADAPTIVE_CLOCKING_INFO_STATUS_DISABLED
+ or @ref NVML_ADAPTIVE_CLOCKING_INFO_STATUS_ENABLED
+
+ @return
+ - \ref NVML_SUCCESS if the current adaptive clocking status is successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a adaptiveClockStatus is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+*/
+ fn nvmlDeviceGetAdaptiveClockInfoStatus(
+ device: cuda_types::nvml::nvmlDevice_t,
+ adaptiveClockStatus: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get the type of the GPU Bus (PCIe, PCI, ...)
+
+ @param device The identifier of the target device
+ @param type The PCI Bus type
+
+ @return
+ - \ref NVML_SUCCESS if the bus \a type is successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a type is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetBusType(
+ device: cuda_types::nvml::nvmlDevice_t,
+ type_: *mut cuda_types::nvml::nvmlBusType_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Deprecated: Will be deprecated in a future release. Use \ref nvmlDeviceGetGpuFabricInfoV instead
+
+ Get fabric information associated with the device.
+
+ %HOPPER_OR_NEWER%
+
+ On Hopper + NVSwitch systems, GPU is registered with the NVIDIA Fabric Manager
+ Upon successful registration, the GPU is added to the NVLink fabric to enable
+ peer-to-peer communication.
+ This API reports the current state of the GPU in the NVLink fabric
+ along with other useful information.
+
+
+ @param device The identifier of the target device
+ @param gpuFabricInfo Information about GPU fabric state
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support gpu fabric*/
+ fn nvmlDeviceGetGpuFabricInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ gpuFabricInfo: *mut cuda_types::nvml::nvmlGpuFabricInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Versioned wrapper around \ref nvmlDeviceGetGpuFabricInfo that accepts a versioned
+ \ref nvmlGpuFabricInfo_v2_t or later output structure.
+
+ @note The caller must set the \ref nvmlGpuFabricInfoV_t.version field to the
+ appropriate version prior to calling this function. For example:
+ \code
+ nvmlGpuFabricInfoV_t fabricInfo =
+ { .version = nvmlGpuFabricInfo_v2 };
+ nvmlReturn_t result = nvmlDeviceGetGpuFabricInfoV(device,&fabricInfo);
+ \endcode
+
+ %HOPPER_OR_NEWER%
+
+ @param device The identifier of the target device
+ @param gpuFabricInfo Information about GPU fabric state
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support gpu fabric*/
+ fn nvmlDeviceGetGpuFabricInfoV(
+ device: cuda_types::nvml::nvmlDevice_t,
+ gpuFabricInfo: *mut cuda_types::nvml::nvmlGpuFabricInfoV_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
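+ // Hedged Rust-side sketch (not part of the generated bindings) of the versioned-struct
+ // pattern documented above. It assumes the generated types expose a `nvmlGpuFabricInfo_v2`
+ // version constant; that constant path is an assumption, not confirmed by this crate.
+ //
+ //     let mut fabric_info: cuda_types::nvml::nvmlGpuFabricInfoV_t =
+ //         unsafe { std::mem::zeroed() };
+ //     fabric_info.version = cuda_types::nvml::nvmlGpuFabricInfo_v2;
+ //     let status = unsafe { nvmlDeviceGetGpuFabricInfoV(device, &mut fabric_info) };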
+ #[must_use]
+ /** Get Conf Computing System capabilities.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ @param capabilities System CC capabilities
+
+ @return
+ - \ref NVML_SUCCESS if \a capabilities were successfully queried
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a capabilities is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device*/
+ fn nvmlSystemGetConfComputeCapabilities(
+ capabilities: *mut cuda_types::nvml::nvmlConfComputeSystemCaps_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get Conf Computing System State.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ @param state System CC State
+
+ @return
+ - \ref NVML_SUCCESS if \a state was successfully queried
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a state is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device*/
+ fn nvmlSystemGetConfComputeState(
+ state: *mut cuda_types::nvml::nvmlConfComputeSystemState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get Conf Computing Protected and Unprotected Memory Sizes.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ @param device Device handle
+ @param memInfo Protected/Unprotected Memory sizes
+
+ @return
+ - \ref NVML_SUCCESS if \a memInfo was successfully queried
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a memInfo or \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device*/
+ fn nvmlDeviceGetConfComputeMemSizeInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ memInfo: *mut cuda_types::nvml::nvmlConfComputeMemSizeInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get Conf Computing GPUs ready state.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ @param isAcceptingWork Returns GPU current work accepting state,
+ NVML_CC_ACCEPTING_CLIENT_REQUESTS_TRUE or
+ NVML_CC_ACCEPTING_CLIENT_REQUESTS_FALSE
+
+ @return
+ - \ref NVML_SUCCESS if the current GPUs ready state was successfully queried
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a isAcceptingWork is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device*/
+ fn nvmlSystemGetConfComputeGpusReadyState(
+ isAcceptingWork: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get Conf Computing protected memory usage.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ @param device The identifier of the target device
+ @param memory Reference in which to return the memory information
+
+ @return
+ - \ref NVML_SUCCESS if \a memory has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memory is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetConfComputeProtectedMemoryUsage(
+ device: cuda_types::nvml::nvmlDevice_t,
+ memory: *mut cuda_types::nvml::nvmlMemory_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get Conf Computing Gpu certificate details.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ @param device The identifier of the target device
+ @param gpuCert Reference in which to return the gpu certificate information
+
+ @return
+ - \ref NVML_SUCCESS if \a gpuCert has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a gpuCert is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetConfComputeGpuCertificate(
+ device: cuda_types::nvml::nvmlDevice_t,
+ gpuCert: *mut cuda_types::nvml::nvmlConfComputeGpuCertificate_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get Conf Computing Gpu attestation report.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ @param device The identifier of the target device
+ @param gpuAtstReport Reference in which to return the gpu attestation report
+
+ @return
+ - \ref NVML_SUCCESS if \a gpuAtstReport has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a gpuAtstReport is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetConfComputeGpuAttestationReport(
+ device: cuda_types::nvml::nvmlDevice_t,
+ gpuAtstReport: *mut cuda_types::nvml::nvmlConfComputeGpuAttestationReport_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get Conf Computing key rotation threshold detail.
+
+ %HOPPER_OR_NEWER%
+ Supported on Linux, Windows TCC.
+
+ @param pKeyRotationThrInfo Reference in which to return the key rotation threshold data
+
+ @return
+ - \ref NVML_SUCCESS if the key rotation threshold info has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pKeyRotationThrInfo is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlSystemGetConfComputeKeyRotationThresholdInfo(
+ pKeyRotationThrInfo: *mut cuda_types::nvml::nvmlConfComputeGetKeyRotationThresholdInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get Conf Computing System Settings.
+
+ %HOPPER_OR_NEWER%
+ Supported on Linux, Windows TCC.
+
+ @param settings System CC settings
+
+ @return
+ - \ref NVML_SUCCESS if the query was successful
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a settings is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH if the provided version is invalid/unsupported
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlSystemGetConfComputeSettings(
+ settings: *mut cuda_types::nvml::nvmlSystemConfComputeSettings_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve GSP firmware version.
+
+ The caller passes in a buffer via \a version and the corresponding GSP firmware numbered version
+ is returned with the same parameter in string format.
+
+ @param device Device handle
+ @param version The retrieved GSP firmware version
+
+ @return
+ - \ref NVML_SUCCESS if GSP firmware version is successfully retrieved
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or GSP \a version pointer is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if GSP firmware is not enabled for GPU
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetGspFirmwareVersion(
+ device: cuda_types::nvml::nvmlDevice_t,
+ version: *mut ::core::ffi::c_char,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve GSP firmware mode.
+
+ The caller passes in integer pointers. GSP firmware enablement and default mode information is returned with
+ corresponding parameters. The return value in \a isEnabled and \a defaultMode should be treated as boolean.
+
+ @param device Device handle
+ @param isEnabled Pointer to specify if GSP firmware is enabled
+ @param defaultMode Pointer to specify if GSP firmware is supported by default on \a device
+
+ @return
+ - \ref NVML_SUCCESS if GSP firmware mode is successfully retrieved
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or any of \a isEnabled or \a defaultMode is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetGspFirmwareMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ isEnabled: *mut ::core::ffi::c_uint,
+ defaultMode: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
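+ // Hedged sketch: per the note above, \a isEnabled and \a defaultMode are plain c_uint
+ // values that should be interpreted as booleans (variable names are illustrative):
+ //
+ //     let mut is_enabled = 0u32;
+ //     let mut default_mode = 0u32;
+ //     let status = unsafe {
+ //         nvmlDeviceGetGspFirmwareMode(device, &mut is_enabled, &mut default_mode)
+ //     };
+ //     let gsp_enabled = is_enabled != 0;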
+ #[must_use]
+ /** Queries the state of per process accounting mode.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ See \ref nvmlDeviceGetAccountingStats for more details.
+ See \ref nvmlDeviceSetAccountingMode
+
+ @param device The identifier of the target device
+ @param mode Reference in which to return the current accounting mode
+
+ @return
+ - \ref NVML_SUCCESS if the mode has been successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetAccountingMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ mode: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Queries process's accounting stats.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ Accounting stats capture GPU utilization and other statistics across the lifetime of a process.
+ Accounting stats can be queried during life time of the process and after its termination.
+ The time field in \ref nvmlAccountingStats_t is reported as 0 during the lifetime of the process and
+ updated to actual running time after its termination.
+ Accounting stats are kept in a circular buffer; newly created processes overwrite information about old
+ processes.
+
+ See \ref nvmlAccountingStats_t for description of each returned metric.
+ List of processes that can be queried can be retrieved from \ref nvmlDeviceGetAccountingPids.
+
+ @note Accounting Mode needs to be on. See \ref nvmlDeviceGetAccountingMode.
+ @note Only compute and graphics applications stats can be queried. Monitoring applications stats can't be
+ queried since they don't contribute to GPU utilization.
+ @note In case of pid collision stats of only the latest process (that terminated last) will be reported
+
+ @warning On Kepler devices per process statistics are accurate only if there's one process running on a GPU.
+
+ @param device The identifier of the target device
+ @param pid Process Id of the target process to query stats for
+ @param stats Reference in which to return the process's accounting stats
+
+ @return
+ - \ref NVML_SUCCESS if stats have been successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a stats is NULL
+ - \ref NVML_ERROR_NOT_FOUND if process stats were not found
+ - \ref NVML_ERROR_NOT_SUPPORTED if \a device doesn't support this feature or accounting mode is disabled
+ or on vGPU host.
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetAccountingBufferSize*/
+ fn nvmlDeviceGetAccountingStats(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pid: ::core::ffi::c_uint,
+ stats: *mut cuda_types::nvml::nvmlAccountingStats_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Queries list of processes that can be queried for accounting stats. The list of processes returned
+ can be in running or terminated state.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ To just query the number of processes ready to be queried, call this function with *count = 0 and
+ pids=NULL. The return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if list is empty.
+
+ For more details see \ref nvmlDeviceGetAccountingStats.
+
+ @note In case of PID collision some processes might not be accessible before the circular buffer is full.
+
+ @param device The identifier of the target device
+ @param count Reference in which to provide the \a pids array size, and
+ to return the number of elements ready to be queried
+ @param pids Reference in which to return list of process ids
+
+ @return
+ - \ref NVML_SUCCESS if pids were successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a count is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if \a device doesn't support this feature or accounting mode is disabled
+ or on vGPU host.
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to
+ expected value)
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetAccountingBufferSize*/
+ fn nvmlDeviceGetAccountingPids(
+ device: cuda_types::nvml::nvmlDevice_t,
+ count: *mut ::core::ffi::c_uint,
+ pids: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
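+ // Hedged sketch of the size-query pattern described above: call once with *count == 0
+ // and pids == NULL to learn the required size, then call again with an allocated buffer
+ // (error handling elided, names illustrative).
+ //
+ //     let mut count = 0u32;
+ //     let _ = unsafe {
+ //         nvmlDeviceGetAccountingPids(device, &mut count, std::ptr::null_mut())
+ //     };
+ //     let mut pids = vec![0u32; count as usize];
+ //     let status = unsafe {
+ //         nvmlDeviceGetAccountingPids(device, &mut count, pids.as_mut_ptr())
+ //     };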
+ #[must_use]
+ /** Returns the number of processes that the circular buffer with accounting pids can hold.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ This is the maximum number of processes that accounting information will be stored for before information
+ about oldest processes will get overwritten by information about new processes.
+
+ @param device The identifier of the target device
+ @param bufferSize Reference in which to provide the size (in number of elements)
+ of the circular buffer for accounting stats.
+
+ @return
+ - \ref NVML_SUCCESS if buffer size was successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a bufferSize is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature or accounting mode is disabled
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetAccountingStats
+ @see nvmlDeviceGetAccountingPids*/
+ fn nvmlDeviceGetAccountingBufferSize(
+ device: cuda_types::nvml::nvmlDevice_t,
+ bufferSize: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Returns the list of retired pages by source, including pages that are pending retirement
+ The address information provided from this API is the hardware address of the page that was retired. Note
+ that this does not match the virtual address used in CUDA, but will match the address information in XID 63
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param cause Filter page addresses by cause of retirement
+ @param pageCount Reference in which to provide the \a addresses buffer size, and
+ to return the number of retired pages that match \a cause
+ Set to 0 to query the size without allocating an \a addresses buffer
+ @param addresses Buffer to write the page addresses into
+
+ @return
+ - \ref NVML_SUCCESS if \a pageCount was populated and \a addresses was filled
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a pageCount indicates the buffer is not large enough to store all the
+ matching page addresses. \a pageCount is set to the needed size.
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a pageCount is NULL, \a cause is invalid, or
+ \a addresses is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetRetiredPages(
+ device: cuda_types::nvml::nvmlDevice_t,
+ cause: cuda_types::nvml::nvmlPageRetirementCause_t,
+ pageCount: *mut ::core::ffi::c_uint,
+ addresses: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
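+ // Hedged sketch: the same two-call pattern applies here; query the size first with
+ // *pageCount == 0, then retrieve the addresses (error handling elided, names illustrative).
+ //
+ //     let mut page_count = 0u32;
+ //     let _ = unsafe {
+ //         nvmlDeviceGetRetiredPages(device, cause, &mut page_count, std::ptr::null_mut())
+ //     };
+ //     let mut addresses = vec![0u64; page_count as usize];
+ //     let status = unsafe {
+ //         nvmlDeviceGetRetiredPages(device, cause, &mut page_count, addresses.as_mut_ptr())
+ //     };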
+ #[must_use]
+ /** Returns the list of retired pages by source, including pages that are pending retirement
+ The address information provided from this API is the hardware address of the page that was retired. Note
+ that this does not match the virtual address used in CUDA, but will match the address information in XID 63
+
+ \note nvmlDeviceGetRetiredPages_v2 adds an additional timestamps parameter to return the time of each page's
+ retirement.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param cause Filter page addresses by cause of retirement
+ @param pageCount Reference in which to provide the \a addresses buffer size, and
+ to return the number of retired pages that match \a cause
+ Set to 0 to query the size without allocating an \a addresses buffer
+ @param addresses Buffer to write the page addresses into
+ @param timestamps Buffer to write the timestamps of page retirement, additional for _v2
+
+ @return
+ - \ref NVML_SUCCESS if \a pageCount was populated and \a addresses was filled
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a pageCount indicates the buffer is not large enough to store all the
+ matching page addresses. \a pageCount is set to the needed size.
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a pageCount is NULL, \a cause is invalid, or
+ \a addresses is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetRetiredPages_v2(
+ device: cuda_types::nvml::nvmlDevice_t,
+ cause: cuda_types::nvml::nvmlPageRetirementCause_t,
+ pageCount: *mut ::core::ffi::c_uint,
+ addresses: *mut ::core::ffi::c_ulonglong,
+ timestamps: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Check if any pages are pending retirement and need a reboot to fully retire.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param isPending Reference in which to return the pending status
+
+ @return
+ - \ref NVML_SUCCESS if \a isPending was populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isPending is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetRetiredPagesPendingStatus(
+ device: cuda_types::nvml::nvmlDevice_t,
+ isPending: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get number of remapped rows. The number of rows reported will be based on
+ the cause of the remapping. isPending indicates whether or not there are
+ pending remappings. A reset will be required to actually remap the row.
+ failureOccurred will be set if a row remapping ever failed in the past. A
+ pending remapping won't affect future work on the GPU since
+ error-containment and dynamic page blacklisting will take care of that.
+
+ @note On MIG-enabled GPUs with active instances, querying the number of
+ remapped rows is not supported
+
+ For Ampere &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param corrRows Reference for number of rows remapped due to correctable errors
+ @param uncRows Reference for number of rows remapped due to uncorrectable errors
+ @param isPending Reference for whether or not remappings are pending
+ @param failureOccurred Reference that is set when a remapping has failed in the past
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a corrRows, \a uncRows, \a isPending or \a failureOccurred is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If MIG is enabled or if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN Unexpected error*/
+ fn nvmlDeviceGetRemappedRows(
+ device: cuda_types::nvml::nvmlDevice_t,
+ corrRows: *mut ::core::ffi::c_uint,
+ uncRows: *mut ::core::ffi::c_uint,
+ isPending: *mut ::core::ffi::c_uint,
+ failureOccurred: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get the row remapper histogram. Returns the remap availability for each bank
+ on the GPU.
+
+ @param device Device handle
+ @param values Histogram values
+
+ @return
+ - \ref NVML_SUCCESS On success
+ - \ref NVML_ERROR_UNKNOWN On any unexpected error*/
+ fn nvmlDeviceGetRowRemapperHistogram(
+ device: cuda_types::nvml::nvmlDevice_t,
+ values: *mut cuda_types::nvml::nvmlRowRemapperHistogramValues_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get architecture for device
+
+ @param device The identifier of the target device
+ @param arch Reference where architecture is returned, if call successful.
+ Set to NVML_DEVICE_ARCH_* upon success
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device or \a arch (output reference) are invalid
+ fn nvmlDeviceGetArchitecture(
+ device: cuda_types::nvml::nvmlDevice_t,
+ arch: *mut cuda_types::nvml::nvmlDeviceArchitecture_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the frequency monitor fault status for the device.
+
+ For Ampere &tm; or newer fully supported devices.
+ Requires root user.
+
+ See \ref nvmlClkMonStatus_t for details on decoding the status output.
+
+ @param device The identifier of the target device
+ @param status Reference in which to return the clkmon fault status
+
+ @return
+ - \ref NVML_SUCCESS if \a status has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a status is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetClkMonStatus()*/
+ fn nvmlDeviceGetClkMonStatus(
+ device: cuda_types::nvml::nvmlDevice_t,
+ status: *mut cuda_types::nvml::nvmlClkMonStatus_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current utilization and process ID
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for processes running.
+ Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer pointed at
+ by \a utilization. One utilization sample structure is returned per process running, that had some non-zero utilization
+ during the last sample period. It includes the CPU timestamp at which the samples were recorded. Individual utilization values
+ are returned as "unsigned int" values. If no valid sample entries are found since the lastSeenTimeStamp, NVML_ERROR_NOT_FOUND
+ is returned.
+
+ To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with
+ \a utilization set to NULL. The caller should allocate a buffer of size
+ processSamplesCount * sizeof(nvmlProcessUtilizationSample_t). Invoke the function again with the allocated buffer passed
+ in \a utilization, and \a processSamplesCount set to the number of entries the buffer is sized for.
+
+ On successful return, the function updates \a processSamplesCount with the number of process utilization sample
+ structures that were actually written. This may differ from a previously read value as instances are created or
+ destroyed.
+
+ lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0
+ to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp
+ to a timeStamp retrieved from a previous query to read utilization since the previous query.
+
+ @note On MIG-enabled GPUs, querying process utilization is not currently supported.
+
+ @param device The identifier of the target device
+ @param utilization Pointer to caller-supplied buffer in which guest process utilization samples are returned
+ @param processSamplesCount Pointer to caller-supplied array size, and returns number of processes running
+ @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp.
+
+ @return
+ - \ref NVML_SUCCESS if \a utilization has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a processSamplesCount is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_NOT_FOUND if sample entries are not found
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetProcessUtilization(
+ device: cuda_types::nvml::nvmlDevice_t,
+ utilization: *mut cuda_types::nvml::nvmlProcessUtilizationSample_t,
+ processSamplesCount: *mut ::core::ffi::c_uint,
+ lastSeenTimeStamp: ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
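+ // Hedged sketch of the buffer-sizing flow described above: first call with \a utilization
+ // set to NULL to obtain processSamplesCount, then allocate and call again; a
+ // lastSeenTimeStamp of 0 reads all buffered samples (names illustrative).
+ //
+ //     let mut sample_count = 0u32;
+ //     let _ = unsafe {
+ //         nvmlDeviceGetProcessUtilization(device, std::ptr::null_mut(), &mut sample_count, 0)
+ //     };
+ //     let mut samples: Vec<cuda_types::nvml::nvmlProcessUtilizationSample_t> =
+ //         (0..sample_count).map(|_| unsafe { std::mem::zeroed() }).collect();
+ //     let status = unsafe {
+ //         nvmlDeviceGetProcessUtilization(device, samples.as_mut_ptr(), &mut sample_count, 0)
+ //     };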
+ #[must_use]
+ /** Retrieves the recent utilization and process ID for all running processes
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, video decoder, JPEG decoder, and OFA (Optical Flow Accelerator)
+ for all running processes. Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer pointed at
+ by \a procesesUtilInfo->procUtilArray. One utilization sample structure is returned per process running, that had some non-zero utilization
+ during the last sample period. It includes the CPU timestamp at which the samples were recorded. Individual utilization values
+ are returned as "unsigned int" values.
+
+ The caller should allocate a buffer of size processSamplesCount * sizeof(nvmlProcessUtilizationInfo_t). If the buffer is too small, the API will
+ return \a NVML_ERROR_INSUFFICIENT_SIZE, with the recommended minimal buffer size at \a procesesUtilInfo->processSamplesCount. The caller should
+ invoke the function again with the allocated buffer passed in \a procesesUtilInfo->procUtilArray, and \a procesesUtilInfo->processSamplesCount
+ set to a number no less than the value recommended by the previous API return.
+
+ On successful return, the function updates \a procesesUtilInfo->processSamplesCount with the number of process utilization info structures
+ that were actually written. This may differ from a previously read value as instances are created or destroyed.
+
+ \a procesesUtilInfo->lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0
+ to read utilization based on all the samples maintained by the driver's internal sample buffer. Set \a procesesUtilInfo->lastSeenTimeStamp
+ to a timeStamp retrieved from a previous query to read utilization since the previous query.
+
+ \a procesesUtilInfo->version is the version number of the structure nvmlProcessesUtilizationInfo_t, the caller should set the correct version
+ number to retrieve the specific version of processes utilization information.
+
+ @note On MIG-enabled GPUs, querying process utilization is not currently supported.
+
+ @param device The identifier of the target device
+ @param procesesUtilInfo Pointer to the caller-provided structure of nvmlProcessesUtilizationInfo_t.
+
+ @return
+ - \ref NVML_SUCCESS if \a procesesUtilInfo->procUtilArray has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a procesesUtilInfo is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_NOT_FOUND if sample entries are not found
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_VERSION_MISMATCH if the version of \a procesesUtilInfo is invalid
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a procesesUtilInfo->procUtilArray is NULL, or the buffer size of procesesUtilInfo->procUtilArray is too small.
+ The caller should check the minimum array size from the returned procesesUtilInfo->processSamplesCount, and call
+ the function again with a buffer no smaller than procesesUtilInfo->processSamplesCount * sizeof(nvmlProcessUtilizationInfo_t)
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetProcessesUtilizationInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ procesesUtilInfo: *mut cuda_types::nvml::nvmlProcessesUtilizationInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
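+ // Hedged sketch combining the version field and the NVML_ERROR_INSUFFICIENT_SIZE retry
+ // described above; the `nvmlProcessesUtilizationInfo_v1` constant name is an assumption,
+ // not confirmed by this crate.
+ //
+ //     let mut info: cuda_types::nvml::nvmlProcessesUtilizationInfo_t =
+ //         unsafe { std::mem::zeroed() };
+ //     info.version = cuda_types::nvml::nvmlProcessesUtilizationInfo_v1;
+ //     // First call with procUtilArray == NULL; on NVML_ERROR_INSUFFICIENT_SIZE, allocate
+ //     // info.processSamplesCount entries for info.procUtilArray and call again.
+ //     let status = unsafe { nvmlDeviceGetProcessesUtilizationInfo(device, &mut info) };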
+ #[must_use]
+ /** Set the LED state for the unit. The LED can be either green (0) or amber (1).
+
+ For S-class products.
+ Requires root/admin permissions.
+
+ This operation takes effect immediately.
+
+
+ <b>Current S-Class products don't provide unique LEDs for each unit. As such, both front
+ and back LEDs will be toggled in unison regardless of which unit is specified with this command.</b>
+
+ See \ref nvmlLedColor_t for available colors.
+
+ @param unit The identifier of the target unit
+ @param color The target LED color
+
+ @return
+ - \ref NVML_SUCCESS if the LED color has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit or \a color is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlUnitGetLedState()*/
+ fn nvmlUnitSetLedState(
+ unit: cuda_types::nvml::nvmlUnit_t,
+ color: cuda_types::nvml::nvmlLedColor_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set the persistence mode for the device.
+
+ For all products.
+ For Linux only.
+ Requires root/admin permissions.
+
+ The persistence mode determines whether the GPU driver software is torn down after the last client
+ exits.
+
+ This operation takes effect immediately. It is not persistent across reboots. After each reboot the
+ persistence mode is reset to "Disabled".
+
+ See \ref nvmlEnableState_t for available modes.
+
+ After calling this API with mode set to NVML_FEATURE_DISABLED on a device that has its own NUMA
+ memory, the given device handle will no longer be valid, and to continue to interact with this
+ device, a new handle should be obtained from one of the nvmlDeviceGetHandleBy*() APIs. This
+ limitation is currently only applicable to devices that have a coherent NVLink connection to
+ system memory.
+
+ @param device The identifier of the target device
+ @param mode The target persistence mode
+
+ @return
+ - \ref NVML_SUCCESS if the persistence mode was set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetPersistenceMode()*/
+ fn nvmlDeviceSetPersistenceMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ mode: cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set the compute mode for the device.
+
+ For all products.
+ Requires root/admin permissions.
+
+ The compute mode determines whether a GPU can be used for compute operations and whether it can
+ be shared across contexts.
+
+ This operation takes effect immediately. Under Linux it is not persistent across reboots and
+ always resets to "Default". Under windows it is persistent.
+
+ Under windows compute mode may only be set to DEFAULT when running in WDDM
+
+ @note On MIG-enabled GPUs, compute mode would be set to DEFAULT and changing it is not supported.
+
+ See \ref nvmlComputeMode_t for details on available compute modes.
+
+ @param device The identifier of the target device
+ @param mode The target compute mode
+
+ @return
+ - \ref NVML_SUCCESS if the compute mode was set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetComputeMode()*/
+ fn nvmlDeviceSetComputeMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ mode: cuda_types::nvml::nvmlComputeMode_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set the ECC mode for the device.
+
+ For Kepler &tm; or newer fully supported devices.
+ Only applicable to devices with ECC.
+ Requires \a NVML_INFOROM_ECC version 1.0 or higher.
+ Requires root/admin permissions.
+
+ The ECC mode determines whether the GPU enables its ECC support.
+
+ This operation takes effect after the next reboot.
+
+ See \ref nvmlEnableState_t for details on available modes.
+
+ @param device The identifier of the target device
+ @param ecc The target ECC mode
+
+ @return
+ - \ref NVML_SUCCESS if the ECC mode was set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a ecc is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetEccMode()*/
+ fn nvmlDeviceSetEccMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ ecc: cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Clear the ECC error and other memory error counts for the device.
+
+ For Kepler &tm; or newer fully supported devices.
+ Only applicable to devices with ECC.
+ Requires \a NVML_INFOROM_ECC version 2.0 or higher to clear aggregate location-based ECC counts.
+ Requires \a NVML_INFOROM_ECC version 1.0 or higher to clear all other ECC counts.
+ Requires root/admin permissions.
+ Requires ECC Mode to be enabled.
+
+ Sets all of the specified ECC counters to 0, including both detailed and total counts.
+
+ This operation takes effect immediately.
+
+ See \ref nvmlMemoryErrorType_t for details on available counter types.
+
+ @param device The identifier of the target device
+ @param counterType Flag that indicates which type of errors should be cleared.
+
+ @return
+ - \ref NVML_SUCCESS if the error counts were cleared
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a counterType is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see
+ - nvmlDeviceGetDetailedEccErrors()
+ - nvmlDeviceGetTotalEccErrors()*/
+ fn nvmlDeviceClearEccErrorCounts(
+ device: cuda_types::nvml::nvmlDevice_t,
+ counterType: cuda_types::nvml::nvmlEccCounterType_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set the driver model for the device.
+
+ For Fermi &tm; or newer fully supported devices.
+ For windows only.
+ Requires root/admin permissions.
+
+ On Windows platforms the device driver can run in either WDDM or WDM (TCC) mode. If a display is attached
+ to the device it must run in WDDM mode.
+
+ It is possible to force the change to WDM (TCC) while the display is still attached with a force flag (nvmlFlagForce).
+ This should only be done if the host is subsequently powered down and the display is detached from the device
+ before the next reboot.
+
+ This operation takes effect after the next reboot.
+
+ Windows driver model may only be set to WDDM when running in DEFAULT compute mode.
+
+ Changing the driver model to WDDM is not supported when the GPU doesn't support graphics acceleration or
+ will not support it after reboot. See \ref nvmlDeviceSetGpuOperationMode.
+
+ See \ref nvmlDriverModel_t for details on available driver models.
+ See \ref nvmlFlagDefault and \ref nvmlFlagForce
+
+ @param device The identifier of the target device
+ @param driverModel The target driver model
+ @param flags Flags that change the default behavior
+
+ @return
+ - \ref NVML_SUCCESS if the driver model has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a driverModel is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the platform is not windows or the device does not support this feature
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetDriverModel()*/
+ fn nvmlDeviceSetDriverModel(
+ device: cuda_types::nvml::nvmlDevice_t,
+ driverModel: cuda_types::nvml::nvmlDriverModel_t,
+ flags: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set clocks that device will lock to.
+
+ Sets the clocks that the device will be running at to the value in the range of minGpuClockMHz to maxGpuClockMHz.
+ Setting this will supersede application clock values and take effect regardless of whether a CUDA app is running.
+ See \ref nvmlDeviceSetApplicationsClocks
+
+ Can be used as a setting to request constant performance.
+
+ This can be called with a pair of integer clock frequencies in MHz, or a pair of \ref nvmlClockLimitId_t values.
+ See the table below for valid combinations of these values.
+
+ minGpuClock | maxGpuClock | Effect
+ ------------+-------------+--------------------------------------------------
+ tdp | tdp | Lock clock to TDP
+ unlimited | tdp | Upper bound is TDP but clock may drift below this
+ tdp | unlimited | Lower bound is TDP but clock may boost above this
+ unlimited | unlimited | Unlocked (== nvmlDeviceResetGpuLockedClocks)
+
+ If one arg takes one of these values, the other must be one of these values as
+ well. Mixed numeric and symbolic calls return NVML_ERROR_INVALID_ARGUMENT.
+
+ Requires root/admin permissions.
+
+ After system reboot or driver reload applications clocks go back to their default value.
+ See \ref nvmlDeviceResetGpuLockedClocks.
+
+ For Volta &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param minGpuClockMHz Requested minimum gpu clock in MHz
+ @param maxGpuClockMHz Requested maximum gpu clock in MHz
+
+ @return
+ - \ref NVML_SUCCESS if new settings were successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minGpuClockMHz and \a maxGpuClockMHz
+ is not a valid clock combination
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetGpuLockedClocks(
+ device: cuda_types::nvml::nvmlDevice_t,
+ minGpuClockMHz: ::core::ffi::c_uint,
+ maxGpuClockMHz: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
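+ // Hedged sketch: per the table above, locking both bounds to the same numeric value pins
+ // the clock (symbolic \ref nvmlClockLimitId_t values may be passed instead). The 1410 MHz
+ // figure below is purely illustrative.
+ //
+ //     let status = unsafe { nvmlDeviceSetGpuLockedClocks(device, 1410, 1410) };
+ //     // ... later, undo the lock:
+ //     let _ = unsafe { nvmlDeviceResetGpuLockedClocks(device) };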
+ #[must_use]
+ /** Resets the gpu clock to the default value
+
+ This is the gpu clock that will be used after system reboot or driver reload.
+ Default values are idle clocks, but the current values can be changed using \ref nvmlDeviceSetApplicationsClocks.
+
+ @see nvmlDeviceSetGpuLockedClocks
+
+ For Volta &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+
+ @return
+ - \ref NVML_SUCCESS if new settings were successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceResetGpuLockedClocks(
+ device: cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set memory clocks that device will lock to.
+
+ Sets the device's memory clocks to the value in the range of minMemClockMHz to maxMemClockMHz.
+ Setting this will supersede application clock values and take effect regardless of whether a CUDA app is running.
+ See \ref nvmlDeviceSetApplicationsClocks
+
+ Can be used as a setting to request constant performance.
+
+ Requires root/admin permissions.
+
+ After system reboot or driver reload applications clocks go back to their default value.
+ See \ref nvmlDeviceResetMemoryLockedClocks.
+
+ For Ampere &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param minMemClockMHz Requested minimum memory clock in MHz
+ @param maxMemClockMHz Requested maximum memory clock in MHz
+
+ @return
+ - \ref NVML_SUCCESS if new settings were successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minMemClockMHz and \a maxMemClockMHz
+ is not a valid clock combination
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetMemoryLockedClocks(
+ device: cuda_types::nvml::nvmlDevice_t,
+ minMemClockMHz: ::core::ffi::c_uint,
+ maxMemClockMHz: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Resets the memory clock to the default value
+
+ This is the memory clock that will be used after system reboot or driver reload.
+ Default values are idle clocks, but the current values can be changed using \ref nvmlDeviceSetApplicationsClocks.
+
+ @see nvmlDeviceSetMemoryLockedClocks
+
+ For Ampere &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+
+ @return
+ - \ref NVML_SUCCESS if new settings were successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceResetMemoryLockedClocks(
+ device: cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set clocks that applications will lock to.
+
+ Sets the clocks that compute and graphics applications will be running at.
+ e.g. CUDA driver requests these clocks during context creation which means this property
+ defines clocks at which CUDA applications will be running unless some overspec event
+ occurs (e.g. over power, over thermal or external HW brake).
+
+ Can be used as a setting to request constant performance.
+
+ On Pascal and newer hardware, this will automatically disable automatic boosting of clocks.
+
+ On K80 and newer Kepler and Maxwell GPUs, users desiring fixed performance should also call
+ \ref nvmlDeviceSetAutoBoostedClocksEnabled to prevent clocks from automatically boosting
+ above the clock value being set.
+
+ For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices.
+ Requires root/admin permissions.
+
+ See \ref nvmlDeviceGetSupportedMemoryClocks and \ref nvmlDeviceGetSupportedGraphicsClocks
+ for details on how to list available clocks combinations.
+
+ After system reboot or driver reload applications clocks go back to their default value.
+ See \ref nvmlDeviceResetApplicationsClocks.
+
+ @param device The identifier of the target device
+ @param memClockMHz Requested memory clock in MHz
+ @param graphicsClockMHz Requested graphics clock in MHz
+
+ @return
+ - \ref NVML_SUCCESS if new settings were successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memClockMHz and \a graphicsClockMHz
+ is not a valid clock combination
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetApplicationsClocks(
+ device: cuda_types::nvml::nvmlDevice_t,
+ memClockMHz: ::core::ffi::c_uint,
+ graphicsClockMHz: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
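+ // Hedged sketch of the fixed-performance setup described above for pre-Pascal parts:
+ // set application clocks, then disable Auto Boost. The clock values and the
+ // nvmlEnableState_t variant path are illustrative assumptions, not confirmed here.
+ //
+ //     let status = unsafe { nvmlDeviceSetApplicationsClocks(device, 2505, 875) };
+ //     let status = unsafe {
+ //         nvmlDeviceSetAutoBoostedClocksEnabled(
+ //             device,
+ //             cuda_types::nvml::nvmlEnableState_t::NVML_FEATURE_DISABLED,
+ //         )
+ //     };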
+ #[must_use]
+ /** Resets the application clock to the default value
+
+ This is the applications clock that will be used after system reboot or driver reload.
+ Default value is constant, but the current value can be changed using \ref nvmlDeviceSetApplicationsClocks.
+
+ On Pascal and newer hardware, if clocks were previously locked with \ref nvmlDeviceSetApplicationsClocks,
+ this call will unlock clocks. This returns clocks to their default behavior of automatically boosting above
+ base clocks as thermal limits allow.
+
+ @see nvmlDeviceGetApplicationsClock
+ @see nvmlDeviceSetApplicationsClocks
+
+ For Fermi &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices.
+
+ @param device The identifier of the target device
+
+ @return
+ - \ref NVML_SUCCESS if new settings were successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceResetApplicationsClocks(
+ device: cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Try to set the current state of Auto Boosted clocks on a device.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates
+ to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock
+ rates are desired.
+
+ Non-root users may use this API by default but can be restricted by root from using this API by calling
+ \ref nvmlDeviceSetAPIRestriction with apiType=NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS.
+ Note: Persistence Mode is required to modify current Auto Boost settings, therefore, it must be enabled.
+
+ On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks.
+ Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost
+ behavior.
+
+ @param device The identifier of the target device
+ @param enabled What state to try to set Auto Boosted clocks of the target device to
+
+ @return
+ - \ref NVML_SUCCESS If the Auto Boosted clocks were successfully set to the state specified by \a enabled
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+*/
+ fn nvmlDeviceSetAutoBoostedClocksEnabled(
+ device: cuda_types::nvml::nvmlDevice_t,
+ enabled: cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Try to set the default state of Auto Boosted clocks on a device. This is the default state that Auto Boosted clocks will
+ return to when no compute processes (e.g. CUDA applications which have an active context) are running
+
+ For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices.
+ Requires root/admin permissions.
+
+ Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates
+ to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock
+ rates are desired.
+
+ On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks.
+ Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost
+ behavior.
+
+ @param device The identifier of the target device
+ @param enabled What state to try to set default Auto Boosted clocks of the target device to
+ @param flags Flags that change the default behavior. Currently Unused.
+
+ @return
+ - \ref NVML_SUCCESS If the Auto Boosted clock's default state was successfully set to the state specified by \a enabled
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_NO_PERMISSION If the calling user does not have permission to change Auto Boosted clock's default state.
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+*/
+ fn nvmlDeviceSetDefaultAutoBoostedClocksEnabled(
+ device: cuda_types::nvml::nvmlDevice_t,
+ enabled: cuda_types::nvml::nvmlEnableState_t,
+ flags: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Sets the speed of the fan control policy to default.
+
+ For all cuda-capable discrete products with fans
+
+ @param device The identifier of the target device
+ @param fan The index of the fan, starting at zero
+
+ @return
+ - \ref NVML_SUCCESS if speed has been adjusted
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this
+ (doesn't have fans)
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetDefaultFanSpeed_v2(
+ device: cuda_types::nvml::nvmlDevice_t,
+ fan: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Sets current fan control policy.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ Requires privileged user.
+
+ For all cuda-capable discrete products with fans
+
+ @param device The identifier of the target \a device
+ @param fan The index of the fan, starting at zero
+ @param policy The fan control \a policy to set
+
+ @return
+ - \ref NVML_SUCCESS if \a policy has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a policy is null or the \a fan given doesn't reference
+ a fan that exists.
+ - \ref NVML_ERROR_NOT_SUPPORTED if the \a device is older than Maxwell
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetFanControlPolicy(
+ device: cuda_types::nvml::nvmlDevice_t,
+ fan: ::core::ffi::c_uint,
+ policy: cuda_types::nvml::nvmlFanControlPolicy_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Sets the temperature threshold for the GPU with the specified threshold type in degrees C.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ See \ref nvmlTemperatureThresholds_t for details on available temperature thresholds.
+
+ @param device The identifier of the target device
+ @param thresholdType The type of threshold value to be set
+ @param temp Reference which holds the value to be set
+ @return
+ - \ref NVML_SUCCESS if \a temp has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a thresholdType is invalid or \a temp is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a temperature sensor or is unsupported
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetTemperatureThreshold(
+ device: cuda_types::nvml::nvmlDevice_t,
+ thresholdType: cuda_types::nvml::nvmlTemperatureThresholds_t,
+ temp: *mut ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
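+ /* Usage sketch (not from the NVML header): the threshold is passed through a pointer,
+    so the value must live in a mutable local. `device` and `threshold_type`
+    (an nvmlTemperatureThresholds_t) are assumed to be in scope.
+
+        let mut temp_c: ::core::ffi::c_int = 85; // degrees C
+        let ret = unsafe { nvmlDeviceSetTemperatureThreshold(device, threshold_type, &mut temp_c) };
+ */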
+ #[must_use]
+ /** Set new power limit of this device.
+
+ For Kepler &tm; or newer fully supported devices.
+ Requires root/admin permissions.
+
+ See \ref nvmlDeviceGetPowerManagementLimitConstraints to check the allowed ranges of values.
+
+ \note Limit is not persistent across reboots or driver unloads.
+ Enable persistent mode to prevent driver from unloading when no application is using the device.
+
+ @param device The identifier of the target device
+ @param limit Power management limit in milliwatts to set
+
+ @return
+ - \ref NVML_SUCCESS if \a limit has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a limit is out of range
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetPowerManagementLimitConstraints
+ @see nvmlDeviceGetPowerManagementDefaultLimit*/
+ fn nvmlDeviceSetPowerManagementLimit(
+ device: cuda_types::nvml::nvmlDevice_t,
+ limit: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
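+ /* Usage sketch (not from the NVML header): the limit is expressed in milliwatts, so a
+    250 W cap is passed as 250_000. Callers are expected to stay within the range
+    reported by nvmlDeviceGetPowerManagementLimitConstraints; `device` is assumed
+    to be a valid handle.
+
+        let limit_mw: ::core::ffi::c_uint = 250_000; // 250 W
+        let ret = unsafe { nvmlDeviceSetPowerManagementLimit(device, limit_mw) };
+ */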
+ #[must_use]
+ /** Sets new GOM. See \a nvmlGpuOperationMode_t for details.
+
+ For GK110 M-class and X-class Tesla &tm; products from the Kepler family.
+ Modes \ref NVML_GOM_LOW_DP and \ref NVML_GOM_ALL_ON are supported on fully supported GeForce products.
+ Not supported on Quadro &reg; and Tesla &tm; C-class products.
+ Requires root/admin permissions.
+
+ Changing GOMs requires a reboot.
+ The reboot requirement might be removed in the future.
+
+ Compute-only GOMs don't support graphics acceleration. Under Windows, switching to these GOMs is not
+ supported when the pending driver model is WDDM. See \ref nvmlDeviceSetDriverModel.
+
+ @param device The identifier of the target device
+ @param mode Target GOM
+
+ @return
+ - \ref NVML_SUCCESS if \a mode has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode incorrect
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support GOM or specific mode
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlGpuOperationMode_t
+ @see nvmlDeviceGetGpuOperationMode*/
+ fn nvmlDeviceSetGpuOperationMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ mode: cuda_types::nvml::nvmlGpuOperationMode_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Changes the root/admin restrictions on certain APIs. See \a nvmlRestrictedAPI_t for the list of supported APIs.
+ This method can be used by a root/admin user to give non-root/admin access to certain otherwise-restricted APIs.
+ The new setting lasts for the lifetime of the NVIDIA driver; it is not persistent. See \a nvmlDeviceGetAPIRestriction
+ to query the current restriction settings.
+
+ For Kepler &tm; or newer fully supported devices.
+ Requires root/admin permissions.
+
+ @param device The identifier of the target device
+ @param apiType Target API type for this operation
+ @param isRestricted The target restriction
+
+ @return
+ - \ref NVML_SUCCESS if \a isRestricted has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a apiType incorrect
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support changing API restrictions or the device does not support
+ the feature that API restrictions are being set for (e.g. enabling/disabling Auto
+ Boosted clocks is not supported by the device)
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlRestrictedAPI_t*/
+ fn nvmlDeviceSetAPIRestriction(
+ device: cuda_types::nvml::nvmlDevice_t,
+ apiType: cuda_types::nvml::nvmlRestrictedAPI_t,
+ isRestricted: cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Sets the speed of a specified fan.
+
+ WARNING: This function changes the fan control policy to manual. It means that YOU have to monitor
+ the temperature and adjust the fan speed accordingly.
+ If you set the fan speed too low you can burn your GPU!
+ Use nvmlDeviceSetDefaultFanSpeed_v2 to restore default control policy.
+
+ For all CUDA-capable discrete products with fans that are Maxwell or newer.
+
+ @param device The identifier of the target device
+ @param fan The index of the fan, starting at zero
+ @param speed The target speed of the fan [0-100] in % of max speed
+
+ @return
+ - \ref NVML_SUCCESS if the fan speed has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is not valid, the \a speed is outside acceptable ranges,
+ or the \a fan index doesn't reference an actual fan
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device is older than Maxwell
+ - \ref NVML_ERROR_UNKNOWN if there was an unexpected error*/
+ fn nvmlDeviceSetFanSpeed_v2(
+ device: cuda_types::nvml::nvmlDevice_t,
+ fan: ::core::ffi::c_uint,
+ speed: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
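+ /* Usage sketch (not from the NVML header): setting an explicit speed switches the fan
+    to manual policy, so pair it with nvmlDeviceSetDefaultFanSpeed_v2 to hand
+    control back to the driver afterwards. `device` is assumed valid; fan index 0,
+    speed in percent of the maximum.
+
+        let ret = unsafe { nvmlDeviceSetFanSpeed_v2(device, 0, 60) };
+        // ... monitor temperatures while under manual control ...
+        let _ = unsafe { nvmlDeviceSetDefaultFanSpeed_v2(device, 0) };
+ */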
+ #[must_use]
+ /** Set the GPCCLK VF offset value
+ @param[in] device The identifier of the target device
+ @param[in] offset The GPCCLK VF offset value to set
+
+ @return
+ - \ref NVML_SUCCESS if \a offset has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a offset is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetGpcClkVfOffset(
+ device: cuda_types::nvml::nvmlDevice_t,
+ offset: ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set the MemClk (Memory Clock) VF offset value. It requires elevated privileges.
+ @param[in] device The identifier of the target device
+ @param[in] offset The MemClk VF offset value to set
+
+ @return
+ - \ref NVML_SUCCESS if \a offset has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a offset is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetMemClkVfOffset(
+ device: cuda_types::nvml::nvmlDevice_t,
+ offset: ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set Conf Computing Unprotected Memory Size.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ @param device Device Handle
+ @param sizeKiB Unprotected Memory size to be set in KiB
+
+ @return
+ - \ref NVML_SUCCESS if \a sizeKiB successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device*/
+ fn nvmlDeviceSetConfComputeUnprotectedMemSize(
+ device: cuda_types::nvml::nvmlDevice_t,
+ sizeKiB: ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set Conf Computing GPUs ready state.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ @param isAcceptingWork GPU accepting new work, NVML_CC_ACCEPTING_CLIENT_REQUESTS_TRUE or
+ NVML_CC_ACCEPTING_CLIENT_REQUESTS_FALSE
+
+ @return
+ - \ref NVML_SUCCESS if the GPUs' ready state is successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a isAcceptingWork is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device*/
+ fn nvmlSystemSetConfComputeGpusReadyState(
+ isAcceptingWork: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set Conf Computing key rotation threshold.
+
+ %HOPPER_OR_NEWER%
+ Supported on Linux, Windows TCC.
+
+ This function sets the confidential compute key rotation threshold parameters.
+ @ref pKeyRotationThrInfo->maxAttackerAdvantage should be in the range from
+ NVML_CC_KEY_ROTATION_THRESHOLD_ATTACKER_ADVANTAGE_MIN to NVML_CC_KEY_ROTATION_THRESHOLD_ATTACKER_ADVANTAGE_MAX.
+ Default value is 60.
+
+ @param pKeyRotationThrInfo Reference to the key rotation threshold data
+
+ @return
+ - \ref NVML_SUCCESS if the key rotation threshold's max attacker advantage has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pKeyRotationThrInfo is NULL
+ - \ref NVML_ERROR_INVALID_STATE if confidential compute GPU ready state is enabled
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlSystemSetConfComputeKeyRotationThresholdInfo(
+ pKeyRotationThrInfo: *mut cuda_types::nvml::nvmlConfComputeSetKeyRotationThresholdInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Enables or disables per process accounting.
+
+ For Kepler &tm; or newer fully supported devices.
+ Requires root/admin permissions.
+
+ @note This setting is not persistent and will revert to disabled after the driver unloads.
+ Enable persistence mode to keep the setting from reverting.
+
+ @note Enabling accounting mode has no negative impact on the GPU performance.
+
+ @note Disabling accounting clears all accounting pids information.
+
+ @note On MIG-enabled GPUs, accounting mode would be set to DISABLED and changing it is not supported.
+
+ See \ref nvmlDeviceGetAccountingMode
+ See \ref nvmlDeviceGetAccountingStats
+ See \ref nvmlDeviceClearAccountingPids
+
+ @param device The identifier of the target device
+ @param mode The target accounting mode
+
+ @return
+ - \ref NVML_SUCCESS if the new mode has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a mode are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetAccountingMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ mode: cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Clears accounting information about all processes that have already terminated.
+
+ For Kepler &tm; or newer fully supported devices.
+ Requires root/admin permissions.
+
+ See \ref nvmlDeviceGetAccountingMode
+ See \ref nvmlDeviceGetAccountingStats
+ See \ref nvmlDeviceSetAccountingMode
+
+ @param device The identifier of the target device
+
+ @return
+ - \ref NVML_SUCCESS if accounting information has been cleared
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceClearAccountingPids(
+ device: cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
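+ /* Usage sketch (not from the NVML header): enable per-process accounting and clear the
+    collected history once it is no longer needed. `device` is assumed valid and
+    `enabled` is assumed to be the generated NVML_FEATURE_ENABLED value of
+    nvmlEnableState_t.
+
+        unsafe { nvmlDeviceSetAccountingMode(device, enabled) };
+        // ... run workloads; query per-process stats via nvmlDeviceGetAccountingStats ...
+        let ret = unsafe { nvmlDeviceClearAccountingPids(device) };
+ */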
+ #[must_use]
+ /** Retrieves the state of the device's NvLink for the link specified
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param link Specifies the NvLink link to be queried
+ @param isActive \a nvmlEnableState_t where NVML_FEATURE_ENABLED indicates that
+ the link is active and NVML_FEATURE_DISABLED indicates it
+ is inactive
+
+ @return
+ - \ref NVML_SUCCESS if \a isActive has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a isActive is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetNvLinkState(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ isActive: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
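+ /* Usage sketch (not from the NVML header): probe a single link; callers typically loop
+    over link indices up to the NVML_NVLINK_MAX_LINKS constant from the header
+    (an assumption about what the bindings expose) and skip links that report
+    NVML_ERROR_NOT_SUPPORTED or NVML_ERROR_INVALID_ARGUMENT.
+
+        let mut is_active: cuda_types::nvml::nvmlEnableState_t = unsafe { ::core::mem::zeroed() };
+        let ret = unsafe { nvmlDeviceGetNvLinkState(device, 0, &mut is_active) };
+ */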
+ #[must_use]
+ /** Retrieves the version of the device's NvLink for the link specified
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param link Specifies the NvLink link to be queried
+ @param version Requested NvLink version
+
+ @return
+ - \ref NVML_SUCCESS if \a version has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a version is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetNvLinkVersion(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ version: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the requested capability from the device's NvLink for the link specified
+ Please refer to the \a nvmlNvLinkCapability_t structure for the specific caps that can be queried
+ The return value should be treated as a boolean.
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param link Specifies the NvLink link to be queried
+ @param capability Specifies the \a nvmlNvLinkCapability_t to be queried
+ @param capResult A boolean for the queried capability indicating that feature is available
+
+ @return
+ - \ref NVML_SUCCESS if \a capResult has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a capability is invalid or \a capResult is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetNvLinkCapability(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ capability: cuda_types::nvml::nvmlNvLinkCapability_t,
+ capResult: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the PCI information for the remote node on a NvLink link
+ Note: pciSubSystemId is not filled in this function and is indeterminate
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param link Specifies the NvLink link to be queried
+ @param pci \a nvmlPciInfo_t of the remote node for the specified link
+
+ @return
+ - \ref NVML_SUCCESS if \a pci has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a pci is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetNvLinkRemotePciInfo_v2(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ pci: *mut cuda_types::nvml::nvmlPciInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the specified error counter value
+ Please refer to \a nvmlNvLinkErrorCounter_t for error counters that are available
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param link Specifies the NvLink link to be queried
+ @param counter Specifies the NvLink counter to be queried
+ @param counterValue Returned counter value
+
+ @return
+ - \ref NVML_SUCCESS if \a counter has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a counter is invalid or \a counterValue is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetNvLinkErrorCounter(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ counter: cuda_types::nvml::nvmlNvLinkErrorCounter_t,
+ counterValue: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Resets all error counters to zero
+ Please refer to \a nvmlNvLinkErrorCounter_t for the list of error counters that are reset
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param link Specifies the NvLink link to be queried
+
+ @return
+ - \ref NVML_SUCCESS if the reset is successful
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceResetNvLinkErrorCounters(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
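+ /* Usage sketch (not from the NVML header): read one CRC-flit error counter for link 0
+    and then clear all counters for that link. The NVML_NVLINK_ERROR_DL_CRC_FLIT
+    name is an assumption about how the generated nvmlNvLinkErrorCounter_t exposes
+    its variants; `device` is assumed valid.
+
+        let mut crc_errors: ::core::ffi::c_ulonglong = 0;
+        unsafe { nvmlDeviceGetNvLinkErrorCounter(device, 0, NVML_NVLINK_ERROR_DL_CRC_FLIT, &mut crc_errors) };
+        let ret = unsafe { nvmlDeviceResetNvLinkErrorCounters(device, 0) };
+ */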
+ #[must_use]
+ /** Deprecated: Setting utilization counter control is no longer supported.
+
+ Set the NVLINK utilization counter control information for the specified counter, 0 or 1.
+ Please refer to \a nvmlNvLinkUtilizationControl_t for the structure definition. Performs a reset
+ of the counters if the reset parameter is non-zero.
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param counter Specifies the counter that should be set (0 or 1).
+ @param link Specifies the NvLink link to be queried
+ @param control A reference to the \a nvmlNvLinkUtilizationControl_t to set
+ @param reset Resets the counters on set if non-zero
+
+ @return
+ - \ref NVML_SUCCESS if the control has been set successfully
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, \a link, or \a control is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetNvLinkUtilizationControl(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ counter: ::core::ffi::c_uint,
+ control: *mut cuda_types::nvml::nvmlNvLinkUtilizationControl_t,
+ reset: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Deprecated: Getting utilization counter control is no longer supported.
+
+ Get the NVLINK utilization counter control information for the specified counter, 0 or 1.
+ Please refer to \a nvmlNvLinkUtilizationControl_t for the structure definition
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param counter Specifies the counter that should be queried (0 or 1).
+ @param link Specifies the NvLink link to be queried
+ @param control A reference to the \a nvmlNvLinkUtilizationControl_t in which to place information
+
+ @return
+ - \ref NVML_SUCCESS if the control has been retrieved successfully
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, \a link, or \a control is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetNvLinkUtilizationControl(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ counter: ::core::ffi::c_uint,
+ control: *mut cuda_types::nvml::nvmlNvLinkUtilizationControl_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Deprecated: Use \ref nvmlDeviceGetFieldValues with NVML_FI_DEV_NVLINK_THROUGHPUT_* as field values instead.
+
+ Retrieve the NVLINK utilization counter based on the current control for a specified counter.
+ In general it is good practice to use \a nvmlDeviceSetNvLinkUtilizationControl
+ before reading the utilization counters as they have no default state
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param link Specifies the NvLink link to be queried
+ @param counter Specifies the counter that should be read (0 or 1).
+ @param rxcounter Receive counter return value
+ @param txcounter Transmit counter return value
+
+ @return
+ - \ref NVML_SUCCESS if \a rxcounter and \a txcounter have been successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, or \a link is invalid or \a rxcounter or \a txcounter are NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetNvLinkUtilizationCounter(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ counter: ::core::ffi::c_uint,
+ rxcounter: *mut ::core::ffi::c_ulonglong,
+ txcounter: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Deprecated: Freezing NVLINK utilization counters is no longer supported.
+
+ Freeze the NVLINK utilization counters
+ Both the receive and transmit counters are operated on by this function
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param link Specifies the NvLink link to be queried
+ @param counter Specifies the counter that should be frozen (0 or 1).
+ @param freeze NVML_FEATURE_ENABLED = freeze the receive and transmit counters
+ NVML_FEATURE_DISABLED = unfreeze the receive and transmit counters
+
+ @return
+ - \ref NVML_SUCCESS if counters were successfully frozen or unfrozen
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, \a counter, or \a freeze is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceFreezeNvLinkUtilizationCounter(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ counter: ::core::ffi::c_uint,
+ freeze: cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Deprecated: Resetting NVLINK utilization counters is no longer supported.
+
+ Reset the NVLINK utilization counters
+ Both the receive and transmit counters are operated on by this function
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param link Specifies the NvLink link to be reset
+ @param counter Specifies the counter that should be reset (0 or 1)
+
+ @return
+ - \ref NVML_SUCCESS if counters were successfully reset
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a counter is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceResetNvLinkUtilizationCounter(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ counter: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get the NVLink device type of the remote device connected over the given link.
+
+ @param device The device handle of the target GPU
+ @param link The NVLink link index on the target GPU
+ @param pNvLinkDeviceType Pointer in which the output remote device type is returned
+
+ @return
+ - \ref NVML_SUCCESS if \a pNvLinkDeviceType has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_NOT_SUPPORTED if NVLink is not supported
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid, or
+ \a pNvLinkDeviceType is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is
+ otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetNvLinkRemoteDeviceType(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ pNvLinkDeviceType: *mut cuda_types::nvml::nvmlIntNvLinkDeviceType_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Create an empty set of events.
+ Event set should be freed by \ref nvmlEventSetFree
+
+ For Fermi &tm; or newer fully supported devices.
+ @param set Reference in which to return the event handle
+
+ @return
+ - \ref NVML_SUCCESS if the event set has been created
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a set is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlEventSetFree*/
+ fn nvmlEventSetCreate(
+ set: *mut cuda_types::nvml::nvmlEventSet_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Starts recording events on a specified device and adds the events to the specified \ref nvmlEventSet_t
+
+ For Fermi &tm; or newer fully supported devices.
+ ECC events are available only on ECC-enabled devices (see \ref nvmlDeviceGetTotalEccErrors)
+ Power capping events are available only on Power Management enabled devices (see \ref nvmlDeviceGetPowerManagementMode)
+
+ For Linux only.
+
+ \b IMPORTANT: Operations on \a set are not thread safe
+
+ This call starts recording events on a specific device.
+ Events that occurred before this call are not recorded.
+ Checking whether an event occurred can be done with \ref nvmlEventSetWait_v2
+
+ If the function reports NVML_ERROR_UNKNOWN, the event set is in an undefined state and should be freed.
+ If the function reports NVML_ERROR_NOT_SUPPORTED, the event set can still be used; none of the requested eventTypes
+ are registered in that case.
+
+ @param device The identifier of the target device
+ @param eventTypes Bitmask of \ref nvmlEventType to record
+ @param set Set to which add new event types
+
+ @return
+ - \ref NVML_SUCCESS if the events have been registered
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a eventTypes is invalid or \a set is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the platform does not support this feature or some of requested event types
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlEventType
+ @see nvmlDeviceGetSupportedEventTypes
+ @see nvmlEventSetWait
+ @see nvmlEventSetFree*/
+ fn nvmlDeviceRegisterEvents(
+ device: cuda_types::nvml::nvmlDevice_t,
+ eventTypes: ::core::ffi::c_ulonglong,
+ set: cuda_types::nvml::nvmlEventSet_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Returns information about events supported on device
+
+ For Fermi &tm; or newer fully supported devices.
+
+ Events are not supported on Windows, so this function returns an empty mask in \a eventTypes on Windows.
+
+ @param device The identifier of the target device
+ @param eventTypes Reference in which to return bitmask of supported events
+
+ @return
+ - \ref NVML_SUCCESS if \a eventTypes has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a eventTypes is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlEventType
+ @see nvmlDeviceRegisterEvents*/
+ fn nvmlDeviceGetSupportedEventTypes(
+ device: cuda_types::nvml::nvmlDevice_t,
+ eventTypes: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Waits on events and delivers events
+
+ For Fermi &tm; or newer fully supported devices.
+
+ If some events are ready to be delivered at the time of the call, the function returns immediately.
+ If there are no events ready to be delivered, the function sleeps until an event arrives,
+ but no longer than the specified timeout. Under certain conditions this function can return before
+ the specified timeout passes (e.g. when an interrupt arrives).
+
+ On Windows, in case of xid error, the function returns the most recent xid error type seen by the system.
+ If there are multiple xid errors generated before nvmlEventSetWait is invoked then the last seen xid error
+ type is returned for all xid error events.
+
+ On Linux, every xid error event would return the associated event data and other information if applicable.
+
+ In MIG mode, if a device handle is provided, the API reports all events for the available instances
+ only if the caller has appropriate privileges. In the absence of the required privileges, only the events which
+ affect all the instances (i.e. the whole device) are reported.
+
+ This API does not currently support per-instance event reporting using MIG device handles.
+
+ @param set Reference to set of events to wait on
+ @param data Reference in which to return event data
+ @param timeoutms Maximum amount of wait time in milliseconds for registered event
+
+ @return
+ - \ref NVML_SUCCESS if the data has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a data is NULL
+ - \ref NVML_ERROR_TIMEOUT if no event arrived in specified timeout or interrupt arrived
+ - \ref NVML_ERROR_GPU_IS_LOST if a GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlEventType
+ @see nvmlDeviceRegisterEvents*/
+ fn nvmlEventSetWait_v2(
+ set: cuda_types::nvml::nvmlEventSet_t,
+ data: *mut cuda_types::nvml::nvmlEventData_t,
+ timeoutms: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Releases events in the set
+
+ For Fermi &tm; or newer fully supported devices.
+
+ @param set Reference to events to be released
+
+ @return
+ - \ref NVML_SUCCESS if the event has been successfully released
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceRegisterEvents*/
+ fn nvmlEventSetFree(
+ set: cuda_types::nvml::nvmlEventSet_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
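+ /* Usage sketch (not from the NVML header): the typical lifecycle ties the event calls
+    above together: create a set, register events, wait, then free. `device` is
+    assumed valid and `event_mask` is assumed to be a bitmask of the generated
+    nvmlEventType constants (e.g. nvmlEventTypeXidCriticalError).
+
+        let mut set: cuda_types::nvml::nvmlEventSet_t = unsafe { ::core::mem::zeroed() };
+        unsafe { nvmlEventSetCreate(&mut set) };
+        unsafe { nvmlDeviceRegisterEvents(device, event_mask, set) };
+        let mut data: cuda_types::nvml::nvmlEventData_t = unsafe { ::core::mem::zeroed() };
+        // Block for up to 10 seconds waiting for one of the registered events.
+        let ret = unsafe { nvmlEventSetWait_v2(set, &mut data, 10_000) };
+        unsafe { nvmlEventSetFree(set) };
+ */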
+ #[must_use]
+ /** Modify the drain state of a GPU. This method forces a GPU to no longer accept new incoming requests.
+ Any new NVML process will no longer see this GPU. Persistence mode for this GPU must be turned off before
+ this call is made.
+ Must be called as administrator.
+ For Linux only.
+
+ For Pascal &tm; or newer fully supported devices.
+ Some Kepler devices supported.
+
+ @param pciInfo The PCI address of the GPU drain state to be modified
+ @param newState The drain state that should be entered, see \ref nvmlEnableState_t
+
+ @return
+ - \ref NVML_SUCCESS if the drain state was successfully modified
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciInfo or \a newState is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_NO_PERMISSION if the calling process has insufficient permissions to perform operation
+ - \ref NVML_ERROR_IN_USE if the device has persistence mode turned on
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceModifyDrainState(
+ pciInfo: *mut cuda_types::nvml::nvmlPciInfo_t,
+ newState: cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Query the drain state of a GPU. This method is used to check if a GPU is in a currently draining
+ state.
+ For Linux only.
+
+ For Pascal &tm; or newer fully supported devices.
+ Some Kepler devices supported.
+
+ @param pciInfo The PCI address of the GPU drain state to be queried
+ @param currentState The current drain state for this GPU, see \ref nvmlEnableState_t
+
+ @return
+ - \ref NVML_SUCCESS if the drain state was successfully queried
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciInfo or \a currentState is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceQueryDrainState(
+ pciInfo: *mut cuda_types::nvml::nvmlPciInfo_t,
+ currentState: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** This method will remove the specified GPU from the view of both NVML and the NVIDIA kernel driver
+ as long as no other processes are attached. If other processes are attached, this call will return
+ NVML_ERROR_IN_USE and the GPU will be returned to its original "draining" state. Note: the
+ only situation where a process can still be attached after nvmlDeviceModifyDrainState() is called
+ to initiate the draining state is if that process was using, and is still using, a GPU before the
+ call was made. Also note that persistence mode counts as an attachment to the GPU, so it must be disabled
+ prior to this call.
+
+ For long-running NVML processes please note that this will change the enumeration of current GPUs.
+ For example, if there are four GPUs present and GPU1 is removed, the new enumeration will be 0-2.
+ Also, device handles after the removed GPU will not be valid and must be re-established.
+ Must be run as administrator.
+ For Linux only.
+
+ For Pascal &tm; or newer fully supported devices.
+ Some Kepler devices supported.
+
+ @param pciInfo The PCI address of the GPU to be removed
+ @param gpuState Whether the GPU is to be removed from the OS,
+ see \ref nvmlDetachGpuState_t
+ @param linkState Requested upstream PCIe link state, see \ref nvmlPcieLinkState_t
+
+ @return
+ - \ref NVML_SUCCESS if the GPU was successfully removed
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciInfo is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_IN_USE if the device is still in use and cannot be removed*/
+ fn nvmlDeviceRemoveGpu_v2(
+ pciInfo: *mut cuda_types::nvml::nvmlPciInfo_t,
+ gpuState: cuda_types::nvml::nvmlDetachGpuState_t,
+ linkState: cuda_types::nvml::nvmlPcieLinkState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Request the OS and the NVIDIA kernel driver to rediscover a portion of the PCI subsystem looking for GPUs that
+ were previously removed. The portion of the PCI tree can be narrowed by specifying a domain, bus, and device.
+ If all are zeroes then the entire PCI tree will be searched. Please note that for long-running NVML processes
+ the enumeration will change based on how many GPUs are discovered and where they are inserted in bus order.
+
+ In addition, all newly discovered GPUs will be initialized and their ECC scrubbed which may take several seconds
+ per GPU. Also, all device handles are no longer guaranteed to be valid post discovery.
+
+ Must be run as administrator.
+ For Linux only.
+
+ For Pascal &tm; or newer fully supported devices.
+ Some Kepler devices supported.
+
+ @param pciInfo The PCI tree to be searched. Only the domain, bus, and device
+ fields are used in this call.
+
+ @return
+ - \ref NVML_SUCCESS if the rediscovery was successfully initiated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciInfo is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the operating system does not support this feature
+ - \ref NVML_ERROR_OPERATING_SYSTEM if the operating system is denying this feature
+ - \ref NVML_ERROR_NO_PERMISSION if the calling process has insufficient permissions to perform operation
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceDiscoverGpus(
+ pciInfo: *mut cuda_types::nvml::nvmlPciInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Request values for a list of fields for a device. This API allows multiple fields to be queried at once.
+ If any of the underlying fieldIds are populated by the same driver call, the results for those field IDs
+ will be populated from a single call rather than making a driver call for each fieldId.
+
+ @param device The device handle of the GPU to request field values for
+ @param valuesCount Number of entries in values that should be retrieved
+ @param values Array of \a valuesCount structures to hold field values.
+ Each value's fieldId must be populated prior to this call
+
+ @return
+ - \ref NVML_SUCCESS if any values in \a values were populated. Note that you must
+ check the nvmlReturn field of each value for each individual
+ status
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a values is NULL*/
+ fn nvmlDeviceGetFieldValues(
+ device: cuda_types::nvml::nvmlDevice_t,
+ valuesCount: ::core::ffi::c_int,
+ values: *mut cuda_types::nvml::nvmlFieldValue_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
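+ /* Usage sketch (not from the NVML header): each element's fieldId must be filled in
+    before the call, and each element's own nvmlReturn field must be checked
+    afterwards. NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_TX is used as an illustrative
+    field ID; the exact constant path in the generated bindings is an assumption.
+
+        let mut value: cuda_types::nvml::nvmlFieldValue_t = unsafe { ::core::mem::zeroed() };
+        value.fieldId = NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_TX;
+        let ret = unsafe { nvmlDeviceGetFieldValues(device, 1, &mut value) };
+ */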
+ #[must_use]
+ /** Clear values for a list of fields for a device. This API allows multiple fields to be cleared at once.
+
+ @param device The device handle of the GPU to request field values for
+ @param valuesCount Number of entries in values that should be cleared
+ @param values Array of \a valuesCount structures to hold field values.
+ Each value's fieldId must be populated prior to this call
+
+ @return
+ - \ref NVML_SUCCESS if any values in \a values were cleared. Note that you must
+ check the nvmlReturn field of each value for each individual
+ status
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a values is NULL*/
+ fn nvmlDeviceClearFieldValues(
+ device: cuda_types::nvml::nvmlDevice_t,
+ valuesCount: ::core::ffi::c_int,
+ values: *mut cuda_types::nvml::nvmlFieldValue_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** This method is used to get the virtualization mode corresponding to the GPU.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device Identifier of the target device
+ @param pVirtualMode Reference to virtualization mode. One of NVML_GPU_VIRTUALIZATION_?
+
+ @return
+ - \ref NVML_SUCCESS if \a pVirtualMode is fetched
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pVirtualMode is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVirtualizationMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pVirtualMode: *mut cuda_types::nvml::nvmlGpuVirtualizationMode_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Queries if SR-IOV host operation is supported on a vGPU supported device.
+
+ Checks whether SR-IOV host capability is supported by the device and the
+ driver, and indicates device is in SR-IOV mode if both of these conditions
+ are true.
+
+ @param device The identifier of the target device
+ @param pHostVgpuMode Reference in which to return the current vGPU mode
+
+ @return
+ - \ref NVML_SUCCESS if device's vGPU mode has been successfully retrieved
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device handle is 0 or \a pHostVgpuMode is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if \a device doesn't support this feature.
+ - \ref NVML_ERROR_UNKNOWN if any unexpected error occurred*/
+ fn nvmlDeviceGetHostVgpuMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pHostVgpuMode: *mut cuda_types::nvml::nvmlHostVgpuMode_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** This method is used to set the virtualization mode corresponding to the GPU.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device Identifier of the target device
+ @param virtualMode virtualization mode. One of NVML_GPU_VIRTUALIZATION_?
+
+ @return
+ - \ref NVML_SUCCESS if \a virtualMode is set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a virtualMode is invalid
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_NOT_SUPPORTED if setting of virtualization mode is not supported.
+ - \ref NVML_ERROR_NO_PERMISSION if setting of virtualization mode is not allowed for this client.*/
+ fn nvmlDeviceSetVirtualizationMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ virtualMode: cuda_types::nvml::nvmlGpuVirtualizationMode_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get the vGPU heterogeneous mode for the device.
+
+ When in heterogeneous mode, the device can concurrently host timesliced vGPUs with differing framebuffer sizes.
+
+ On successful return, the function returns \a pHeterogeneousMode->mode with the current vGPU heterogeneous mode.
+ \a pHeterogeneousMode->version is the version number of the structure nvmlVgpuHeterogeneousMode_t, the caller should
+ set the correct version number to retrieve the vGPU heterogeneous mode.
+ \a pHeterogeneousMode->mode can either be \ref NVML_FEATURE_ENABLED or \ref NVML_FEATURE_DISABLED.
+
+ @param device The identifier of the target device
+ @param pHeterogeneousMode Pointer to the caller-provided structure of nvmlVgpuHeterogeneousMode_t
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device is invalid or \a pHeterogeneousMode is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support this feature
+ - \ref NVML_ERROR_VERSION_MISMATCH If the version of \a pHeterogeneousMode is invalid
+ - \ref NVML_ERROR_UNKNOWN On any unexpected error*/
+ fn nvmlDeviceGetVgpuHeterogeneousMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pHeterogeneousMode: *mut cuda_types::nvml::nvmlVgpuHeterogeneousMode_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
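+ /* Usage sketch (not from the NVML header): the versioned-struct pattern described
+    above: zero the struct, set its version, then query. The
+    nvmlVgpuHeterogeneousMode_v1 constant name is an assumption about what the
+    generated bindings expose for the version field; `device` is assumed valid.
+
+        let mut het: cuda_types::nvml::nvmlVgpuHeterogeneousMode_t = unsafe { ::core::mem::zeroed() };
+        het.version = nvmlVgpuHeterogeneousMode_v1;
+        let ret = unsafe { nvmlDeviceGetVgpuHeterogeneousMode(device, &mut het) };
+ */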
+ #[must_use]
+ /** Enable or disable vGPU heterogeneous mode for the device.
+
+ When in heterogeneous mode, the device can concurrently host timesliced vGPUs with differing framebuffer sizes.
+
+ API would return an appropriate error code upon unsuccessful activation. For example, the heterogeneous mode
+ set will fail with error \ref NVML_ERROR_IN_USE if any vGPU instance is active on the device. The caller of this API
+ is expected to shut down the vGPU VMs and retry setting the \a mode.
+ On successful return, the function updates the vGPU heterogeneous mode with the user provided \a pHeterogeneousMode->mode.
+ \a pHeterogeneousMode->version is the version number of the structure nvmlVgpuHeterogeneousMode_t, the caller should
+ set the correct version number to set the vGPU heterogeneous mode.
+
+ @param device Identifier of the target device
+ @param pHeterogeneousMode Pointer to the caller-provided structure of nvmlVgpuHeterogeneousMode_t
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device is invalid, \a pHeterogeneousMode is NULL, or \a pHeterogeneousMode->mode is invalid
+ - \ref NVML_ERROR_IN_USE If the \a device is in use
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_NOT_SUPPORTED If MIG is enabled or \a device doesn't support this feature
+ - \ref NVML_ERROR_VERSION_MISMATCH If the version of \a pHeterogeneousMode is invalid
+ - \ref NVML_ERROR_UNKNOWN On any unexpected error*/
+ fn nvmlDeviceSetVgpuHeterogeneousMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pHeterogeneousMode: *const cuda_types::nvml::nvmlVgpuHeterogeneousMode_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Query the placement ID of active vGPU instance.
+
+ When in vGPU heterogeneous mode, this function returns a valid placement ID as \a pPlacement->placementId;
+ otherwise NVML_INVALID_VGPU_PLACEMENT_ID is returned.
+ \a pPlacement->version is the version number of the structure nvmlVgpuPlacementId_t, the caller should
+ set the correct version number to get placement id of the vGPU instance \a vgpuInstance.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param pPlacement Pointer to vGPU placement ID structure \a nvmlVgpuPlacementId_t
+
+ @return
+ - \ref NVML_SUCCESS If information is successfully retrieved
+ - \ref NVML_ERROR_NOT_FOUND If \a vgpuInstance does not match a valid active vGPU instance
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a vgpuInstance is invalid or \a pPlacement is NULL
+ - \ref NVML_ERROR_VERSION_MISMATCH If the version of \a pPlacement is invalid
+ - \ref NVML_ERROR_UNKNOWN On any unexpected error*/
+ fn nvmlVgpuInstanceGetPlacementId(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ pPlacement: *mut cuda_types::nvml::nvmlVgpuPlacementId_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Query the supported vGPU placement ID of the vGPU type.
+
+ An array of supported vGPU placement IDs for the vGPU type ID indicated by \a vgpuTypeId is returned in the
+ caller-supplied buffer of \a pPlacementList->placementIds. Memory needed for the placementIds array should be
+ allocated based on maximum instances of a vGPU type which can be queried via \ref nvmlVgpuTypeGetMaxInstances().
+
+ This function will return supported placement IDs even if GPU is not in vGPU heterogeneous mode.
+
+ @param device Identifier of the target device
+ @param vgpuTypeId Handle to vGPU type. The vGPU type ID
+ @param pPlacementList Pointer to the vGPU placement structure \a nvmlVgpuPlacementList_t
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device or \a vgpuTypeId is invalid or \a pPlacementList is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device or \a vgpuTypeId isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_VERSION_MISMATCH If the version of \a pPlacementList is invalid
+ - \ref NVML_ERROR_UNKNOWN On any unexpected error*/
+ fn nvmlDeviceGetVgpuTypeSupportedPlacements(
+ device: cuda_types::nvml::nvmlDevice_t,
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ pPlacementList: *mut cuda_types::nvml::nvmlVgpuPlacementList_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Query the creatable vGPU placement ID of the vGPU type.
+
+ An array of creatable vGPU placement IDs for the vGPU type ID indicated by \a vgpuTypeId is returned in the
+ caller-supplied buffer of \a pPlacementList->placementIds. Memory needed for the placementIds array should be
+ allocated based on maximum instances of a vGPU type which can be queried via \ref nvmlVgpuTypeGetMaxInstances().
+ The creatable vGPU placement IDs may differ over time, as there may be restrictions on what type of vGPU the
+ vGPU instance is running.
+
+ The function will return \ref NVML_ERROR_NOT_SUPPORTED if the \a device is not in vGPU heterogeneous mode.
+
+ @param device The identifier of the target device
+ @param vgpuTypeId Handle to vGPU type. The vGPU type ID
+ @param pPlacementList Pointer to the list of vGPU placement structure \a nvmlVgpuPlacementList_t
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device or \a vgpuTypeId is invalid or \a pPlacementList is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device or \a vgpuTypeId isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_VERSION_MISMATCH If the version of \a pPlacementList is invalid
+ - \ref NVML_ERROR_UNKNOWN On any unexpected error*/
+ fn nvmlDeviceGetVgpuTypeCreatablePlacements(
+ device: cuda_types::nvml::nvmlDevice_t,
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ pPlacementList: *mut cuda_types::nvml::nvmlVgpuPlacementList_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the static GSP heap size of the vGPU type in bytes
+
+ @param vgpuTypeId Handle to vGPU type
+ @param gspHeapSize Reference to return the GSP heap size value
+ @return
+ - \ref NVML_SUCCESS Successful completion
+ - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a vgpuTypeId is invalid, or \a gspHeapSize is NULL
+ - \ref NVML_ERROR_UNKNOWN On any unexpected error*/
+ fn nvmlVgpuTypeGetGspHeapSize(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ gspHeapSize: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the static framebuffer reservation of the vGPU type in bytes
+
+ @param vgpuTypeId Handle to vGPU type
+ @param fbReservation Reference to return the framebuffer reservation
+ @return
+ - \ref NVML_SUCCESS Successful completion
+ - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a vgpuTypeId is invalid, or \a fbReservation is NULL
+ - \ref NVML_ERROR_UNKNOWN On any unexpected error*/
+ fn nvmlVgpuTypeGetFbReservation(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ fbReservation: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set the desirable vGPU capability of a device
+
+ Refer to the \a nvmlDeviceVgpuCapability_t structure for the specific capabilities that can be set.
+ See \ref nvmlEnableState_t for available state.
+
+ @param device The identifier of the target device
+ @param capability Specifies the \a nvmlDeviceVgpuCapability_t to be set
+ @param state The target capability mode
+
+ @return
+ - \ref NVML_SUCCESS Successful completion
+ - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device is invalid, or \a capability is invalid, or \a state is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state, or \a device not in vGPU mode
+ - \ref NVML_ERROR_UNKNOWN On any unexpected error*/
+ fn nvmlDeviceSetVgpuCapabilities(
+ device: cuda_types::nvml::nvmlDevice_t,
+ capability: cuda_types::nvml::nvmlDeviceVgpuCapability_t,
+ state: cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the vGPU Software licensable features.
+
+ Identifies whether the system supports vGPU Software Licensing. If it does, returns the list of licensable feature(s)
+ and their current license status.
+
+ @param device Identifier of the target device
+ @param pGridLicensableFeatures Pointer to structure in which vGPU software licensable features are returned
+
+ @return
+ - \ref NVML_SUCCESS if licensable features are successfully retrieved
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pGridLicensableFeatures is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetGridLicensableFeatures_v4(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pGridLicensableFeatures: *mut cuda_types::nvml::nvmlGridLicensableFeatures_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the requested vGPU driver capability.
+
+ Refer to the \a nvmlVgpuDriverCapability_t structure for the specific capabilities that can be queried.
+ The return value in \a capResult should be treated as a boolean, with a non-zero value indicating that the capability
+ is supported.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param capability Specifies the \a nvmlVgpuDriverCapability_t to be queried
+ @param capResult A boolean for the queried capability indicating that feature is supported
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a capability is invalid, or \a capResult is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED the API is not supported in current state or the device is not in vGPU mode
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlGetVgpuDriverCapabilities(
+ capability: cuda_types::nvml::nvmlVgpuDriverCapability_t,
+ capResult: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the requested vGPU capability for GPU.
+
+ Refer to the \a nvmlDeviceVgpuCapability_t structure for the specific capabilities that can be queried.
+ The return value in \a capResult reports a non-zero value indicating that the capability
+ is supported, and also reports the capability's data based on the queried capability.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param capability Specifies the \a nvmlDeviceVgpuCapability_t to be queried
+ @param capResult Specifies that the queried capability is supported, and also returns capability's data
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a capability is invalid, or \a capResult is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED the API is not supported in current state or \a device not in vGPU mode
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVgpuCapabilities(
+ device: cuda_types::nvml::nvmlDevice_t,
+ capability: cuda_types::nvml::nvmlDeviceVgpuCapability_t,
+ capResult: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the supported vGPU types on a physical GPU (device).
+
+ An array of supported vGPU types for the physical GPU indicated by \a device is returned in the caller-supplied buffer
+ pointed at by \a vgpuTypeIds. The element count of nvmlVgpuTypeId_t array is passed in \a vgpuCount, and \a vgpuCount
+ is used to return the number of vGPU types written to the buffer.
+
+ If the supplied buffer is not large enough to accommodate the vGPU type array, the function returns
+ NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuTypeId_t array required in \a vgpuCount.
+ To query the number of vGPU types supported for the GPU, call this function with *vgpuCount = 0.
+ The code will return NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU types are supported.
+
+ @param device The identifier of the target device
+ @param vgpuCount Pointer to caller-supplied array size, and returns number of vGPU types
+ @param vgpuTypeIds Pointer to caller-supplied array in which to return list of vGPU types
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE \a vgpuTypeIds buffer is too small, array element count is returned in \a vgpuCount
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuCount is NULL or \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetSupportedVgpus(
+ device: cuda_types::nvml::nvmlDevice_t,
+ vgpuCount: *mut ::core::ffi::c_uint,
+ vgpuTypeIds: *mut cuda_types::nvml::nvmlVgpuTypeId_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
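+ // Caller-side sketch of the two-call sizing protocol documented above. This is
+ // illustrative only: it assumes the extern resolves against a real libnvidia-ml,
+ // that `device` is a valid handle obtained elsewhere, and that
+ // `nvmlVgpuTypeId_t` is the plain integer typedef from nvml.h.
+ //
+ //     let mut count: ::core::ffi::c_uint = 0;
+ //     // First call with a null buffer to learn the required element count.
+ //     let _ = unsafe { nvmlDeviceGetSupportedVgpus(device, &mut count, ::core::ptr::null_mut()) };
+ //     let mut ids: Vec<cuda_types::nvml::nvmlVgpuTypeId_t> = vec![0; count as usize];
+ //     // Second call with a buffer sized to `count`; `count` is updated to the
+ //     // number of entries actually written.
+ //     let _ = unsafe { nvmlDeviceGetSupportedVgpus(device, &mut count, ids.as_mut_ptr()) };
+ //     ids.truncate(count as usize);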
+ #[must_use]
+ /** Retrieve the currently creatable vGPU types on a physical GPU (device).
+
+ An array of creatable vGPU types for the physical GPU indicated by \a device is returned in the caller-supplied buffer
+ pointed at by \a vgpuTypeIds. The element count of nvmlVgpuTypeId_t array is passed in \a vgpuCount, and \a vgpuCount
+ is used to return the number of vGPU types written to the buffer.
+
+ The creatable vGPU types for a device may differ over time, as there may be restrictions on what type of vGPU types
+ can concurrently run on a device. For example, if only one vGPU type is allowed at a time on a device, then the creatable
+ list will be restricted to whatever vGPU type is already running on the device.
+
+ If the supplied buffer is not large enough to accommodate the vGPU type array, the function returns
+ NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuTypeId_t array required in \a vgpuCount.
+ To query the number of vGPU types that can be created for the GPU, call this function with *vgpuCount = 0.
+ The code will return NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU types are creatable.
+
+ @param device The identifier of the target device
+ @param vgpuCount Pointer to caller-supplied array size, and returns number of vGPU types
+ @param vgpuTypeIds Pointer to caller-supplied array in which to return list of vGPU types
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE \a vgpuTypeIds buffer is too small, array element count is returned in \a vgpuCount
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuCount is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetCreatableVgpus(
+ device: cuda_types::nvml::nvmlDevice_t,
+ vgpuCount: *mut ::core::ffi::c_uint,
+ vgpuTypeIds: *mut cuda_types::nvml::nvmlVgpuTypeId_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the class of a vGPU type. It will not exceed 64 characters in length (including the NUL terminator).
+ See \ref nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param vgpuTypeClass Pointer to string array to return class in
+ @param size Size of string
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuTypeClass is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetClass(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ vgpuTypeClass: *mut ::core::ffi::c_char,
+ size: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the vGPU type name.
+
+ The name is an alphanumeric string that denotes a particular vGPU, e.g. GRID M60-2Q. It will not
+ exceed 64 characters in length (including the NUL terminator). See \ref
+ nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param vgpuTypeName Pointer to buffer to return name
+ @param size Size of buffer
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a name is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetName(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ vgpuTypeName: *mut ::core::ffi::c_char,
+ size: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the GPU Instance Profile ID for the given vGPU type ID.
+ The API returns a valid GPU Instance Profile ID for MIG-capable vGPU types; otherwise INVALID_GPU_INSTANCE_PROFILE_ID is
+ returned.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param gpuInstanceProfileId GPU Instance Profile ID
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_NOT_SUPPORTED if \a device is not in vGPU Host virtualization mode
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a gpuInstanceProfileId is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetGpuInstanceProfileId(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ gpuInstanceProfileId: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the device ID of a vGPU type.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param deviceID Device ID and vendor ID of the device contained in single 32 bit value
+ @param subsystemID Subsystem ID and subsystem vendor ID of the device contained in single 32 bit value
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a deviceId or \a subsystemID are NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetDeviceID(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ deviceID: *mut ::core::ffi::c_ulonglong,
+ subsystemID: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the vGPU framebuffer size in bytes.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param fbSize Pointer to framebuffer size in bytes
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a fbSize is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetFramebufferSize(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ fbSize: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve count of vGPU's supported display heads.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param numDisplayHeads Pointer to number of display heads
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a numDisplayHeads is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetNumDisplayHeads(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ numDisplayHeads: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve vGPU display head's maximum supported resolution.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param displayIndex Zero-based index of display head
+ @param xdim Pointer to maximum number of pixels in X dimension
+ @param ydim Pointer to maximum number of pixels in Y dimension
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a xdim or \a ydim are NULL, or \a displayIndex
+ is out of range.
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetResolution(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ displayIndex: ::core::ffi::c_uint,
+ xdim: *mut ::core::ffi::c_uint,
+ ydim: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve license requirements for a vGPU type
+
+ The license type and version required to run the specified vGPU type is returned as an alphanumeric string, in the form
+ "<license name>,<version>", for example "GRID-Virtual-PC,2.0". If a vGPU is runnable with* more than one type of license,
+ the licenses are delimited by a semicolon, for example "GRID-Virtual-PC,2.0;GRID-Virtual-WS,2.0;GRID-Virtual-WS-Ext,2.0".
+
+ The total length of the returned string will not exceed 128 characters, including the NUL terminator.
+ See \ref nvmlVgpuConstants::NVML_GRID_LICENSE_BUFFER_SIZE.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param vgpuTypeLicenseString Pointer to buffer to return license info
+ @param size Size of \a vgpuTypeLicenseString buffer
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuTypeLicenseString is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetLicense(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ vgpuTypeLicenseString: *mut ::core::ffi::c_char,
+ size: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
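+ // Illustrative sketch of reading the license string through this binding,
+ // assuming a valid `vgpu_type_id` and a real NVML behind the extern; the
+ // 128-byte buffer follows the bound stated in the comment above.
+ //
+ //     let mut buf = [0 as ::core::ffi::c_char; 128];
+ //     let _ = unsafe {
+ //         nvmlVgpuTypeGetLicense(vgpu_type_id, buf.as_mut_ptr(), buf.len() as u32)
+ //     };
+ //     // e.g. "GRID-Virtual-PC,2.0;GRID-Virtual-WS,2.0"
+ //     let license = unsafe { ::core::ffi::CStr::from_ptr(buf.as_ptr()) }.to_string_lossy();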
+ #[must_use]
+ /** Retrieve the static frame rate limit value of the vGPU type
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param frameRateLimit Reference to return the frame rate limit value
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_NOT_SUPPORTED if frame rate limiter is turned off for the vGPU type
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a frameRateLimit is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetFrameRateLimit(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ frameRateLimit: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the maximum number of vGPU instances creatable on a device for given vGPU type
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param vgpuTypeId Handle to vGPU type
+ @param vgpuInstanceCount Pointer to get the max number of vGPU instances
+ that can be created on a device for given vgpuTypeId
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid or is not supported on target device,
+ or \a vgpuInstanceCount is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetMaxInstances(
+ device: cuda_types::nvml::nvmlDevice_t,
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ vgpuInstanceCount: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the maximum number of vGPU instances supported per VM for given vGPU type
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param vgpuInstanceCountPerVm Pointer to get the max number of vGPU instances supported per VM for given \a vgpuTypeId
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuInstanceCountPerVm is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetMaxInstancesPerVm(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ vgpuInstanceCountPerVm: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the active vGPU instances on a device.
+
+ An array of active vGPU instances is returned in the caller-supplied buffer pointed at by \a vgpuInstances. The
+ array element count is passed in \a vgpuCount, and \a vgpuCount is used to return the number of vGPU instances
+ written to the buffer.
+
+ If the supplied buffer is not large enough to accommodate the vGPU instance array, the function returns
+ NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuInstance_t array required in \a vgpuCount.
+ To query the number of active vGPU instances, call this function with *vgpuCount = 0. The code will return
+ NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU instances are currently active.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param vgpuCount Pointer which passes in the array size and returns
+ the number of vGPU instances written to the buffer
+ @param vgpuInstances Pointer to array in which to return list of vGPU instances
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a vgpuCount is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small
+ - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetActiveVgpus(
+ device: cuda_types::nvml::nvmlDevice_t,
+ vgpuCount: *mut ::core::ffi::c_uint,
+ vgpuInstances: *mut cuda_types::nvml::nvmlVgpuInstance_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the VM ID associated with a vGPU instance.
+
+ The VM ID is returned as a string, not exceeding 80 characters in length (including the NUL terminator).
+ See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE.
+
+ The format of the VM ID varies by platform, and is indicated by the type identifier returned in \a vmIdType.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param vmId Pointer to caller-supplied buffer to hold VM ID
+ @param size Size of buffer in bytes
+ @param vmIdType Pointer to hold VM ID type
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vmId or \a vmIdType is NULL, or \a vgpuInstance is 0
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetVmID(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ vmId: *mut ::core::ffi::c_char,
+ size: ::core::ffi::c_uint,
+ vmIdType: *mut cuda_types::nvml::nvmlVgpuVmIdType_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the UUID of a vGPU instance.
+
+ The UUID is a globally unique identifier associated with the vGPU, and is returned as a 5-part hexadecimal string,
+ not exceeding 80 characters in length (including the NULL terminator).
+ See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param uuid Pointer to caller-supplied buffer to hold vGPU UUID
+ @param size Size of buffer in bytes
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a uuid is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetUUID(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ uuid: *mut ::core::ffi::c_char,
+ size: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the NVIDIA driver version installed in the VM associated with a vGPU.
+
+ The version is returned as an alphanumeric string in the caller-supplied buffer \a version. The length of the version
+ string will not exceed 80 characters in length (including the NUL terminator).
+ See \ref nvmlConstants::NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE.
+
+ nvmlVgpuInstanceGetVmDriverVersion() may be called at any time for a vGPU instance. The guest VM driver version is
+ returned as "Not Available" if no NVIDIA driver is installed in the VM, or the VM has not yet booted to the point where the
+ NVIDIA driver is loaded and initialized.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param version Caller-supplied buffer to return driver version string
+ @param length Size of \a version buffer
+
+ @return
+ - \ref NVML_SUCCESS if \a version has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetVmDriverVersion(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ version: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the framebuffer usage in bytes.
+
+ Framebuffer usage is the amount of vGPU framebuffer memory that is currently in use by the VM.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuInstance The identifier of the target instance
+ @param fbUsage Pointer to framebuffer usage in bytes
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a fbUsage is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetFbUsage(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ fbUsage: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** @deprecated Use \ref nvmlVgpuInstanceGetLicenseInfo_v2.
+
+ Retrieve the current licensing state of the vGPU instance.
+
+ If the vGPU is currently licensed, \a licensed is set to 1, otherwise it is set to 0.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param licensed Reference to return the licensing status
+
+ @return
+ - \ref NVML_SUCCESS if \a licensed has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a licensed is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetLicenseStatus(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ licensed: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the vGPU type of a vGPU instance.
+
+ Returns the vGPU type ID of the vGPU assigned to the vGPU instance.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param vgpuTypeId Reference to return the vgpuTypeId
+
+ @return
+ - \ref NVML_SUCCESS if \a vgpuTypeId has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a vgpuTypeId is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetType(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ vgpuTypeId: *mut cuda_types::nvml::nvmlVgpuTypeId_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the frame rate limit set for the vGPU instance.
+
+ Returns the value of the frame rate limit set for the vGPU instance
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param frameRateLimit Reference to return the frame rate limit
+
+ @return
+ - \ref NVML_SUCCESS if \a frameRateLimit has been set
+ - \ref NVML_ERROR_NOT_SUPPORTED if frame rate limiter is turned off for the vGPU type
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a frameRateLimit is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetFrameRateLimit(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ frameRateLimit: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the current ECC mode of vGPU instance.
+
+ @param vgpuInstance The identifier of the target vGPU instance
+ @param eccMode Reference in which to return the current ECC mode
+
+ @return
+ - \ref NVML_SUCCESS if the vgpuInstance's ECC mode has been successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a mode is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetEccMode(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ eccMode: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the encoder capacity of a vGPU instance, as a percentage of maximum encoder capacity with valid values in the range 0-100.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param encoderCapacity Reference to an unsigned int for the encoder capacity
+
+ @return
+ - \ref NVML_SUCCESS if \a encoderCapacity has been retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a encoderCapacity is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetEncoderCapacity(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ encoderCapacity: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set the encoder capacity of a vGPU instance, as a percentage of maximum encoder capacity with valid values in the range 0-100.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param encoderCapacity Unsigned int for the encoder capacity value
+
+ @return
+ - \ref NVML_SUCCESS if \a encoderCapacity has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a encoderCapacity is out of range of 0-100.
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceSetEncoderCapacity(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ encoderCapacity: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current encoder statistics of a vGPU Instance
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param sessionCount Reference to an unsigned int for count of active encoder sessions
+ @param averageFps Reference to an unsigned int for trailing average FPS of all active sessions
+ @param averageLatency Reference to an unsigned int for encode latency in microseconds
+
+ @return
+ - \ref NVML_SUCCESS if \a sessionCount, \a averageFps and \a averageLatency are fetched
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount, \a averageFps or \a averageLatency is NULL,
+ or \a vgpuInstance is 0.
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetEncoderStats(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ sessionCount: *mut ::core::ffi::c_uint,
+ averageFps: *mut ::core::ffi::c_uint,
+ averageLatency: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves information about all active encoder sessions on a vGPU Instance.
+
+ An array of active encoder sessions is returned in the caller-supplied buffer pointed at by \a sessionInfo. The
+ array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions
+ written to the buffer.
+
+ If the supplied buffer is not large enough to accommodate the active session array, the function returns
+ NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlEncoderSessionInfo_t array required in \a sessionCount.
+ To query the number of active encoder sessions, call this function with *sessionCount = 0. The code will return
+ NVML_SUCCESS with number of active encoder sessions updated in *sessionCount.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param sessionCount Reference to caller supplied array size, and returns
+ the number of sessions.
+ @param sessionInfo Reference to caller supplied array in which the list
+ of session information is returned.
+
+ @return
+ - \ref NVML_SUCCESS if \a sessionInfo is fetched
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is
+returned in \a sessionCount
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL, or \a vgpuInstance is 0.
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetEncoderSessions(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ sessionCount: *mut ::core::ffi::c_uint,
+ sessionInfo: *mut cuda_types::nvml::nvmlEncoderSessionInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the active frame buffer capture sessions statistics of a vGPU Instance
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param fbcStats Reference to nvmlFBCStats_t structure containing NvFBC stats
+
+ @return
+ - \ref NVML_SUCCESS if \a fbcStats is fetched
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a fbcStats is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetFBCStats(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ fbcStats: *mut cuda_types::nvml::nvmlFBCStats_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves information about active frame buffer capture sessions on a vGPU Instance.
+
+ An array of active FBC sessions is returned in the caller-supplied buffer pointed at by \a sessionInfo. The
+ array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions
+ written to the buffer.
+
+ If the supplied buffer is not large enough to accommodate the active session array, the function returns
+ NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlFBCSessionInfo_t array required in \a sessionCount.
+ To query the number of active FBC sessions, call this function with *sessionCount = 0. The code will return
+ NVML_SUCCESS with number of active FBC sessions updated in *sessionCount.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @note hResolution, vResolution, averageFPS and averageLatency data for a FBC session returned in \a sessionInfo may
+ be zero if there are no new frames captured since the session started.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param sessionCount Reference to caller supplied array size, and returns the number of sessions.
+ @param sessionInfo Reference in which to return the session information
+
+ @return
+ - \ref NVML_SUCCESS if \a sessionInfo is fetched
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a sessionCount is NULL.
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is returned in \a sessionCount
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetFBCSessions(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ sessionCount: *mut ::core::ffi::c_uint,
+ sessionInfo: *mut cuda_types::nvml::nvmlFBCSessionInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the GPU Instance ID for the given vGPU Instance.
+ The API returns a valid GPU Instance ID for a MIG-backed vGPU instance; otherwise INVALID_GPU_INSTANCE_ID is returned.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param gpuInstanceId GPU Instance ID
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a gpuInstanceId is NULL.
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetGpuInstanceId(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ gpuInstanceId: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the PCI Id of the given vGPU instance, i.e. the PCI Id of the GPU as seen inside the VM.
+
+ The vGPU PCI id is returned as "00000000:00:00.0" if the NVIDIA driver is not installed on the vGPU instance.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param vgpuPciId Caller-supplied buffer to return vGPU PCI Id string
+ @param length Size of the vgpuPciId buffer
+
+ @return
+ - \ref NVML_SUCCESS if vGPU PCI Id is successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a vgpuPciId is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running on the vGPU instance
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small, \a length is set to required length
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetGpuPciId(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ vgpuPciId: *mut ::core::ffi::c_char,
+ length: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the requested capability for a given vGPU type. Refer to the \a nvmlVgpuCapability_t structure
+ for the specific capabilities that can be queried. The return value in \a capResult should be treated as
+ a boolean, with a non-zero value indicating that the capability is supported.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param capability Specifies the \a nvmlVgpuCapability_t to be queried
+ @param capResult A boolean for the queried capability indicating that feature is supported
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a capability is invalid, or \a capResult is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetCapabilities(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ capability: cuda_types::nvml::nvmlVgpuCapability_t,
+ capResult: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
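+ // Minimal sketch of a boolean capability query as described above (illustrative;
+ // `vgpu_type_id` and `cap_to_query` are caller-supplied values of the types in
+ // the signature).
+ //
+ //     let mut cap_result: ::core::ffi::c_uint = 0;
+ //     let _ = unsafe { nvmlVgpuTypeGetCapabilities(vgpu_type_id, cap_to_query, &mut cap_result) };
+ //     let supported = cap_result != 0;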
+ #[must_use]
+ /** Retrieve the MDEV UUID of a vGPU instance.
+
+ The MDEV UUID is a globally unique identifier of the mdev device assigned to the VM, and is returned as a 5-part hexadecimal string,
+ not exceeding 80 characters in length (including the NULL terminator).
+ MDEV UUID is displayed only on KVM platform.
+ See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param mdevUuid Pointer to caller-supplied buffer to hold MDEV UUID
+ @param size Size of buffer in bytes
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_NOT_SUPPORTED on any hypervisor other than KVM
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a mdevUuid is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetMdevUUID(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ mdevUuid: *mut ::core::ffi::c_char,
+ size: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Returns vGPU metadata structure for a running vGPU. The structure contains information about the vGPU and its associated VM
+ such as the currently installed NVIDIA guest driver version, together with host driver version and an opaque data section
+ containing internal state.
+
+ nvmlVgpuInstanceGetMetadata() may be called at any time for a vGPU instance. Some fields in the returned structure are
+ dependent on information obtained from the guest VM, which may not yet have reached a state where that information
+ is available. The current state of these dependent fields is reflected in the info structure's \ref nvmlVgpuGuestInfoState_t field.
+
+ The VMM may choose to read and save the vGPU's VM info as persistent metadata associated with the VM, and provide
+ it to Virtual GPU Manager when creating a vGPU for subsequent instances of the VM.
+
+ The caller passes in a buffer via \a vgpuMetadata, with the size of the buffer in \a bufferSize. If the vGPU Metadata structure
+ is too large to fit in the supplied buffer, the function returns NVML_ERROR_INSUFFICIENT_SIZE with the size needed
+ in \a bufferSize.
+
+ @param vgpuInstance vGPU instance handle
+ @param vgpuMetadata Pointer to caller-supplied buffer into which vGPU metadata is written
+ @param bufferSize Size of vgpuMetadata buffer
+
+ @return
+ - \ref NVML_SUCCESS vGPU metadata structure was successfully returned
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE vgpuMetadata buffer is too small, required size is returned in \a bufferSize
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a bufferSize is NULL or \a vgpuInstance is 0; if \a vgpuMetadata is NULL and the value of \a bufferSize is not 0.
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetMetadata(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ vgpuMetadata: *mut cuda_types::nvml::nvmlVgpuMetadata_t,
+ bufferSize: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Returns a vGPU metadata structure for the physical GPU indicated by \a device. The structure contains information about
+ the GPU and the currently installed NVIDIA host driver version that's controlling it, together with an opaque data section
+ containing internal state.
+
+ The caller passes in a buffer via \a pgpuMetadata, with the size of the buffer in \a bufferSize. If the \a pgpuMetadata
+ structure is too large to fit in the supplied buffer, the function returns NVML_ERROR_INSUFFICIENT_SIZE with the size needed
+ in \a bufferSize.
+
+ @param device The identifier of the target device
+ @param pgpuMetadata Pointer to caller-supplied buffer into which \a pgpuMetadata is written
+ @param bufferSize Pointer to size of \a pgpuMetadata buffer
+
+ @return
+ - \ref NVML_SUCCESS GPU metadata structure was successfully returned
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE pgpuMetadata buffer is too small, required size is returned in \a bufferSize
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a bufferSize is NULL or \a device is invalid; if \a pgpuMetadata is NULL and the value of \a bufferSize is not 0.
+ - \ref NVML_ERROR_NOT_SUPPORTED vGPU is not supported by the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVgpuMetadata(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pgpuMetadata: *mut cuda_types::nvml::nvmlVgpuPgpuMetadata_t,
+ bufferSize: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Takes a vGPU instance metadata structure read from \ref nvmlVgpuInstanceGetMetadata(), and a vGPU metadata structure for a
+ physical GPU read from \ref nvmlDeviceGetVgpuMetadata(), and returns compatibility information of the vGPU instance and the
+ physical GPU.
+
+ The caller passes in a buffer via \a compatibilityInfo, into which a compatibility information structure is written. The
+ structure defines the states in which the vGPU / VM may be booted on the physical GPU. If the vGPU / VM compatibility
+ with the physical GPU is limited, a limit code indicates the factor limiting compatibility.
+ (see \ref nvmlVgpuPgpuCompatibilityLimitCode_t for details).
+
+ Note: vGPU compatibility does not take into account dynamic capacity conditions that may limit a system's ability to
+ boot a given vGPU or associated VM.
+
+ @param vgpuMetadata Pointer to caller-supplied vGPU metadata structure
+ @param pgpuMetadata Pointer to caller-supplied GPU metadata structure
+ @param compatibilityInfo Pointer to caller-supplied buffer to hold compatibility info
+
+ @return
+ - \ref NVML_SUCCESS vGPU metadata structure was successfully returned
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuMetadata or \a pgpuMetadata or \a bufferSize are NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlGetVgpuCompatibility(
+ vgpuMetadata: *mut cuda_types::nvml::nvmlVgpuMetadata_t,
+ pgpuMetadata: *mut cuda_types::nvml::nvmlVgpuPgpuMetadata_t,
+ compatibilityInfo: *mut cuda_types::nvml::nvmlVgpuPgpuCompatibility_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
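+ // Sketch of the compatibility-check flow built from the three metadata calls
+ // above. Illustrative only: `vgpu_instance` and `device` are assumed to be
+ // valid handles, and the metadata structs are assumed to be plain C structs
+ // that may be zero-initialized for a first-pass query.
+ //
+ //     use ::core::mem::{size_of, zeroed};
+ //     let mut vgpu_md: cuda_types::nvml::nvmlVgpuMetadata_t = unsafe { zeroed() };
+ //     let mut size = size_of::<cuda_types::nvml::nvmlVgpuMetadata_t>() as u32;
+ //     let _ = unsafe { nvmlVgpuInstanceGetMetadata(vgpu_instance, &mut vgpu_md, &mut size) };
+ //     let mut pgpu_md: cuda_types::nvml::nvmlVgpuPgpuMetadata_t = unsafe { zeroed() };
+ //     let mut size = size_of::<cuda_types::nvml::nvmlVgpuPgpuMetadata_t>() as u32;
+ //     let _ = unsafe { nvmlDeviceGetVgpuMetadata(device, &mut pgpu_md, &mut size) };
+ //     let mut compat: cuda_types::nvml::nvmlVgpuPgpuCompatibility_t = unsafe { zeroed() };
+ //     let _ = unsafe { nvmlGetVgpuCompatibility(&mut vgpu_md, &mut pgpu_md, &mut compat) };
+ //     // `compat` now describes in which states the vGPU / VM may boot on this GPU.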
+ #[must_use]
+ /** Returns the properties of the physical GPU indicated by the device in an ascii-encoded string format.
+
+ The caller passes in a buffer via \a pgpuMetadata, with the size of the buffer in \a bufferSize. If the
+ string is too large to fit in the supplied buffer, the function returns NVML_ERROR_INSUFFICIENT_SIZE with the size needed
+ in \a bufferSize.
+
+ @param device The identifier of the target device
+ @param pgpuMetadata Pointer to caller-supplied buffer into which \a pgpuMetadata is written
+ @param bufferSize Pointer to size of \a pgpuMetadata buffer
+
+ @return
+ - \ref NVML_SUCCESS GPU metadata structure was successfully returned
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE \a pgpuMetadata buffer is too small, required size is returned in \a bufferSize
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a bufferSize is NULL or \a device is invalid; if \a pgpuMetadata is NULL and the value of \a bufferSize is not 0.
+ - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPgpuMetadataString(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pgpuMetadata: *mut ::core::ffi::c_char,
+ bufferSize: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Returns the vGPU Software scheduler logs.
+ \a pSchedulerLog points to a caller-allocated structure to contain the logs. The number of elements returned will
+ never exceed \a NVML_SCHEDULER_SW_MAX_LOG_ENTRIES.
+
+ To retrieve the complete logs, call the function at least 5 times a second.
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target \a device
+ @param pSchedulerLog Reference in which \a pSchedulerLog is written
+
+ @return
+ - \ref NVML_SUCCESS vGPU scheduler logs were successfully obtained
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pSchedulerLog is NULL or \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state or \a device not in vGPU host mode
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVgpuSchedulerLog(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pSchedulerLog: *mut cuda_types::nvml::nvmlVgpuSchedulerLog_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Returns the vGPU scheduler state.
+ The information returned in \a nvmlVgpuSchedulerGetState_t is not relevant if the BEST EFFORT policy is set.
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target \a device
+ @param pSchedulerState Reference in which \a pSchedulerState is returned
+
+ @return
+ - \ref NVML_SUCCESS vGPU scheduler state is successfully obtained
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pSchedulerState is NULL or \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state or \a device not in vGPU host mode
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVgpuSchedulerState(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pSchedulerState: *mut cuda_types::nvml::nvmlVgpuSchedulerGetState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Returns the vGPU scheduler capabilities.
+ The list of supported vGPU schedulers returned in \a nvmlVgpuSchedulerCapabilities_t is from
+ the NVML_VGPU_SCHEDULER_POLICY_*. This list enumerates the supported scheduler policies
+ if the engine is Graphics type.
+ The other values in \a nvmlVgpuSchedulerCapabilities_t are also applicable if the engine is
+ Graphics type. For other engine types, the BEST EFFORT policy applies.
+ If ARR is supported and enabled, the scheduling frequency and averaging factor are applicable;
+ otherwise timeSlice is applicable.
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target \a device
+ @param pCapabilities Reference in which \a pCapabilities is written
+
+ @return
+ - \ref NVML_SUCCESS vGPU scheduler capabilities were successfully obtained
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pCapabilities is NULL or \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state or \a device not in vGPU host mode
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVgpuSchedulerCapabilities(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pCapabilities: *mut cuda_types::nvml::nvmlVgpuSchedulerCapabilities_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Sets the vGPU scheduler state.
+
+ For Pascal &tm; or newer fully supported devices.
+
+ The scheduler state change won't persist across module load/unload.
+ Scheduler state and parameters can be set only when no VM is running.
+ In \a nvmlVgpuSchedulerSetState_t, if enableARRMode is enabled then
+ provide avgFactorForARR and frequency as input. If enableARRMode is disabled
+ then provide timeslice as input.
+
+ @param device The identifier of the target \a device
+ @param pSchedulerState vGPU \a pSchedulerState to set
+
+ @return
+ - \ref NVML_SUCCESS vGPU scheduler state has been successfully set
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pSchedulerState is NULL or \a device is invalid
+ - \ref NVML_ERROR_RESET_REQUIRED if setting \a pSchedulerState failed with a fatal error;
+ a reboot is required to recover from this error.
+ - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state or \a device not in vGPU host mode
+ or if any vGPU instance currently exists on the \a device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetVgpuSchedulerState(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pSchedulerState: *mut cuda_types::nvml::nvmlVgpuSchedulerSetState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Query the ranges of supported vGPU versions.
+
+ This function gets the linear range of supported vGPU versions that is preset for the NVIDIA vGPU Manager and the range set by an administrator.
+ If the preset range has not been overridden by \ref nvmlSetVgpuVersion, both ranges are the same.
+
+ The caller passes pointers to the following \ref nvmlVgpuVersion_t structures, into which the NVIDIA vGPU Manager writes the ranges:
+ 1. \a supported structure that represents the preset range of vGPU versions supported by the NVIDIA vGPU Manager.
+ 2. \a current structure that represents the range of supported vGPU versions set by an administrator. By default, this range is the same as the preset range.
+
+ @param supported Pointer to the structure in which the preset range of vGPU versions supported by the NVIDIA vGPU Manager is written
+ @param current Pointer to the structure in which the range of supported vGPU versions set by an administrator is written
+
+ @return
+ - \ref NVML_SUCCESS The vGPU version range structures were successfully obtained.
+ - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported.
+ - \ref NVML_ERROR_INVALID_ARGUMENT The \a supported parameter or the \a current parameter is NULL.
+ - \ref NVML_ERROR_UNKNOWN An error occurred while the data was being fetched.*/
+ fn nvmlGetVgpuVersion(
+ supported: *mut cuda_types::nvml::nvmlVgpuVersion_t,
+ current: *mut cuda_types::nvml::nvmlVgpuVersion_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Override the preset range of vGPU versions supported by the NVIDIA vGPU Manager with a range set by an administrator.
+
+ This function configures the NVIDIA vGPU Manager with a range of supported vGPU versions set by an administrator. This range must be a subset of the
+ preset range that the NVIDIA vGPU Manager supports. The custom range set by an administrator takes precedence over the preset range and is advertised to
+ the guest VM for negotiating the vGPU version. See \ref nvmlGetVgpuVersion for details of how to query the preset range of versions supported.
+
+ This function takes a pointer to vGPU version range structure \ref nvmlVgpuVersion_t as input to override the preset vGPU version range that the NVIDIA vGPU Manager supports.
+
+ After host system reboot or driver reload, the range of supported versions reverts to the range that is preset for the NVIDIA vGPU Manager.
+
+ @note 1. The range set by the administrator must be a subset of the preset range that the NVIDIA vGPU Manager supports. Otherwise, an error is returned.
+ 2. If the range of supported guest driver versions does not overlap the range set by the administrator, the guest driver fails to load.
+ 3. If the range of supported guest driver versions overlaps the range set by the administrator, the guest driver will load with a negotiated
+ vGPU version that is the maximum value in the overlapping range.
+ 4. No VMs must be running on the host when this function is called. If a VM is running on the host, the call to this function fails.
+
+ @param vgpuVersion Pointer to a caller-supplied range of supported vGPU versions.
+
+ @return
+ - \ref NVML_SUCCESS The preset range of supported vGPU versions was successfully overridden.
+ - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported.
+ - \ref NVML_ERROR_IN_USE The range was not overridden because a VM is running on the host.
+ - \ref NVML_ERROR_INVALID_ARGUMENT The \a vgpuVersion parameter specifies a range that is outside the range supported by the NVIDIA vGPU Manager or if \a vgpuVersion is NULL.*/
+ fn nvmlSetVgpuVersion(
+ vgpuVersion: *mut cuda_types::nvml::nvmlVgpuVersion_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
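+ // Sketch of pairing the two version-range calls above (illustrative; it assumes
+ // no VM is running, as required, and that `nvmlVgpuVersion_t` may be
+ // zero-initialized before the query).
+ //
+ //     let mut supported: cuda_types::nvml::nvmlVgpuVersion_t = unsafe { ::core::mem::zeroed() };
+ //     let mut current: cuda_types::nvml::nvmlVgpuVersion_t = unsafe { ::core::mem::zeroed() };
+ //     let _ = unsafe { nvmlGetVgpuVersion(&mut supported, &mut current) };
+ //     // Re-applying the preset range is always a legal override, since any
+ //     // admin-set range must be a subset of the preset range.
+ //     let _ = unsafe { nvmlSetVgpuVersion(&mut supported) };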
+ #[must_use]
+ /** Retrieves current utilization for vGPUs on a physical GPU (device).
+
+ For Kepler &tm; or newer fully supported devices.
+
+ Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for vGPU instances running
+ on a device. Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer
+ pointed at by \a utilizationSamples. One utilization sample structure is returned per vGPU instance, and includes the
+ CPU timestamp at which the samples were recorded. Individual utilization values are returned as "unsigned int" values
+ in nvmlValue_t unions. The function sets the caller-supplied \a sampleValType to NVML_VALUE_TYPE_UNSIGNED_INT to
+ indicate the returned value type.
+
+ To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with
+ \a utilizationSamples set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current vGPU instance
+ count in \a vgpuInstanceSamplesCount, or NVML_SUCCESS if the current vGPU instance count is zero. The caller should allocate
+ a buffer of size vgpuInstanceSamplesCount * sizeof(nvmlVgpuInstanceUtilizationSample_t). Invoke the function again with
+ the allocated buffer passed in \a utilizationSamples, and \a vgpuInstanceSamplesCount set to the number of entries the
+ buffer is sized for.
+
+ On successful return, the function updates \a vgpuInstanceSamplesCount with the number of vGPU utilization sample
+ structures that were actually written. This may differ from a previously read value as vGPU instances are created or
+ destroyed.
+
+ lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0
+ to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp
+ to a timeStamp retrieved from a previous query to read utilization since the previous query.
+
+ @param device The identifier for the target device
+ @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp.
+ @param sampleValType Pointer to caller-supplied buffer to hold the type of returned sample values
+ @param vgpuInstanceSamplesCount Pointer to caller-supplied array size, and returns number of vGPU instances
+ @param utilizationSamples Pointer to caller-supplied buffer in which vGPU utilization samples are returned
+
+ @return
+ - \ref NVML_SUCCESS if utilization samples are successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a vgpuInstanceSamplesCount or \a sampleValType is
+ NULL, or a sample count of 0 is passed with a non-NULL \a utilizationSamples
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if supplied \a vgpuInstanceSamplesCount is too small to return samples for all
+ vGPU instances currently executing on the device
+ - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_NOT_FOUND if sample entries are not found
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVgpuUtilization(
+ device: cuda_types::nvml::nvmlDevice_t,
+ lastSeenTimeStamp: ::core::ffi::c_ulonglong,
+ sampleValType: *mut cuda_types::nvml::nvmlValueType_t,
+ vgpuInstanceSamplesCount: *mut ::core::ffi::c_uint,
+ utilizationSamples: *mut cuda_types::nvml::nvmlVgpuInstanceUtilizationSample_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
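+ // Sketch of the sampling flow documented above (illustrative; assumes a valid
+ // `device` and that the generated sample struct is `Copy` and may be
+ // zero-initialized).
+ //
+ //     let mut val_type: cuda_types::nvml::nvmlValueType_t = unsafe { ::core::mem::zeroed() };
+ //     let mut count: ::core::ffi::c_uint = 0;
+ //     // Size query: null sample buffer; lastSeenTimeStamp = 0 reads the whole sample window.
+ //     let _ = unsafe {
+ //         nvmlDeviceGetVgpuUtilization(device, 0, &mut val_type, &mut count, ::core::ptr::null_mut())
+ //     };
+ //     let mut samples: Vec<cuda_types::nvml::nvmlVgpuInstanceUtilizationSample_t> =
+ //         vec![unsafe { ::core::mem::zeroed() }; count as usize];
+ //     let _ = unsafe {
+ //         nvmlDeviceGetVgpuUtilization(device, 0, &mut val_type, &mut count, samples.as_mut_ptr())
+ //     };
+ //     samples.truncate(count as usize);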
+ #[must_use]
+ /** Retrieves recent utilization for vGPU instances running on a physical GPU (device).
+
+ For Kepler &tm; or newer fully supported devices.
+
+ Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, video decoder, jpeg decoder, and OFA for vGPU
+ instances running on a device. Utilization values are returned as an array of utilization sample structures in the caller-supplied
+ buffer pointed at by \a vgpuUtilInfo->vgpuUtilArray. One utilization sample structure is returned per vGPU instance, and includes the
+ CPU timestamp at which the samples were recorded. Individual utilization values are returned as "unsigned int" values
+ in nvmlValue_t unions. The function sets the caller-supplied \a vgpuUtilInfo->sampleValType to NVML_VALUE_TYPE_UNSIGNED_INT to
+ indicate the returned value type.
+
+ To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with
+ \a vgpuUtilInfo->vgpuUtilArray set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current vGPU instance
+ count in \a vgpuUtilInfo->vgpuInstanceCount, or NVML_SUCCESS if the current vGPU instance count is zero. The caller should allocate
+ a buffer of size vgpuUtilInfo->vgpuInstanceCount * sizeof(nvmlVgpuInstanceUtilizationInfo_t). Invoke the function again with
+ the allocated buffer passed in \a vgpuUtilInfo->vgpuUtilArray, and \a vgpuUtilInfo->vgpuInstanceCount set to the number of entries the
+ buffer is sized for.
+
+ On successful return, the function updates \a vgpuUtilInfo->vgpuInstanceCount with the number of vGPU utilization sample
+ structures that were actually written. This may differ from a previously read value as vGPU instances are created or
+ destroyed.
+
+ \a vgpuUtilInfo->lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0
+ to read utilization based on all the samples maintained by the driver's internal sample buffer. Set \a vgpuUtilInfo->lastSeenTimeStamp
+ to a timeStamp retrieved from a previous query to read utilization since the previous query.
+
+ @param device The identifier for the target device
+ @param vgpuUtilInfo Pointer to the caller-provided structure of nvmlVgpuInstancesUtilizationInfo_t
+
+ @return
+ - \ref NVML_SUCCESS if utilization samples are successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a vgpuUtilInfo is NULL, or \a vgpuUtilInfo->vgpuInstanceCount is 0
+ - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_VERSION_MISMATCH if the version of \a vgpuUtilInfo is invalid
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a vgpuUtilInfo->vgpuUtilArray is NULL, or the buffer size of vgpuUtilInfo->vgpuInstanceCount is too small.
+ The caller should check the current vGPU instance count from the returned vgpuUtilInfo->vgpuInstanceCount, and call
+ the function again with a buffer of size vgpuUtilInfo->vgpuInstanceCount * sizeof(nvmlVgpuInstanceUtilizationInfo_t)
+ - \ref NVML_ERROR_NOT_FOUND if sample entries are not found
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVgpuInstancesUtilizationInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ vgpuUtilInfo: *mut cuda_types::nvml::nvmlVgpuInstancesUtilizationInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves current utilization for processes running on vGPUs on a physical GPU (device).
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for processes running on
+ vGPU instances active on a device. Utilization values are returned as an array of utilization sample structures in the
+ caller-supplied buffer pointed at by \a utilizationSamples. One utilization sample structure is returned per process running
+ on vGPU instances, that had some non-zero utilization during the last sample period. It includes the CPU timestamp at which
+ the samples were recorded. Individual utilization values are returned as "unsigned int" values.
+
+ To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with
+ \a utilizationSamples set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current vGPU instance
+ count in \a vgpuProcessSamplesCount. The caller should allocate a buffer of size
+ vgpuProcessSamplesCount * sizeof(nvmlVgpuProcessUtilizationSample_t). Invoke the function again with
+ the allocated buffer passed in \a utilizationSamples, and \a vgpuProcessSamplesCount set to the number of entries the
+ buffer is sized for.
+
+ On successful return, the function updates \a vgpuProcessSamplesCount with the number of vGPU sub process utilization sample
+ structures that were actually written. This may differ from a previously read value depending on the number of processes that are active
+ in any given sample period.
+
+ lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0
+ to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp
+ to a timeStamp retrieved from a previous query to read utilization since the previous query.
+
+ @param device The identifier for the target device
+ @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp.
+ @param vgpuProcessSamplesCount Pointer to caller-supplied array size, and returns number of processes running on vGPU instances
+ @param utilizationSamples Pointer to caller-supplied buffer in which vGPU sub process utilization samples are returned
+
+ @return
+ - \ref NVML_SUCCESS if utilization samples are successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a vgpuProcessSamplesCount is NULL, or a sample count of 0 is
+ passed with a non-NULL \a utilizationSamples
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if supplied \a vgpuProcessSamplesCount is too small to return samples for all
+ vGPU instances currently executing on the device
+ - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_NOT_FOUND if sample entries are not found
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVgpuProcessUtilization(
+ device: cuda_types::nvml::nvmlDevice_t,
+ lastSeenTimeStamp: ::core::ffi::c_ulonglong,
+ vgpuProcessSamplesCount: *mut ::core::ffi::c_uint,
+ utilizationSamples: *mut cuda_types::nvml::nvmlVgpuProcessUtilizationSample_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
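+ // A sketch of incremental polling through this entry point, under the same caveats as the
+ // sketch above (assumed `device` handle, unsafe FFI, no error handling). The timestamp carried
+ // in the returned samples is assumed to be reusable as the next lastSeenTimeStamp.
+ //
+ //     let mut count: u32 = 0;
+ //     // Sizing call: NULL buffer; a lastSeenTimeStamp of 0 reads the whole internal buffer.
+ //     unsafe { nvmlDeviceGetVgpuProcessUtilization(device, 0, &mut count, std::ptr::null_mut()) };
+ //     let mut samples: Vec<nvmlVgpuProcessUtilizationSample_t> =
+ //         vec![unsafe { std::mem::zeroed() }; count as usize];
+ //     unsafe { nvmlDeviceGetVgpuProcessUtilization(device, 0, &mut count, samples.as_mut_ptr()) };
+ //     // Later queries pass a timestamp taken from the previous read to get only new samples.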
+ #[must_use]
+ /** Retrieves recent utilization for processes running on vGPU instances on a physical GPU (device).
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, video decoder, jpeg decoder, and OFA for processes running
+ on vGPU instances active on a device. Utilization values are returned as an array of utilization sample structures in the caller-supplied
+ buffer pointed at by \a vgpuProcUtilInfo->vgpuProcUtilArray. One utilization sample structure is returned per process running
+ on vGPU instances that had some non-zero utilization during the last sample period. It includes the CPU timestamp at which
+ the samples were recorded. Individual utilization values are returned as "unsigned int" values.
+
+ To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with
+ \a vgpuProcUtilInfo->vgpuProcUtilArray set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current count of processes
+ running on vGPU instances in \a vgpuProcUtilInfo->vgpuProcessCount. The caller should allocate a buffer of size
+ vgpuProcUtilInfo->vgpuProcessCount * sizeof(nvmlVgpuProcessUtilizationSample_t). Invoke the function again with the allocated buffer passed
+ in \a vgpuProcUtilInfo->vgpuProcUtilArray, and \a vgpuProcUtilInfo->vgpuProcessCount set to the number of entries the buffer is sized for.
+
+ On successful return, the function updates \a vgpuProcUtilInfo->vgpuProcessCount with the number of vGPU sub process utilization sample
+ structures that were actually written. This may differ from a previously read value depending on the number of processes that are active
+ in any given sample period.
+
+ vgpuProcUtilInfo->lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0
+ to read utilization based on all the samples maintained by the driver's internal sample buffer. Set vgpuProcUtilInfo->lastSeenTimeStamp
+ to a timeStamp retrieved from a previous query to read utilization since the previous query.
+
+ @param device The identifier for the target device
+ @param vgpuProcUtilInfo Pointer to the caller-provided structure of nvmlVgpuProcessesUtilizationInfo_t
+
+ @return
+ - \ref NVML_SUCCESS if utilization samples are successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a vgpuProcUtilInfo is null
+ - \ref NVML_ERROR_VERSION_MISMATCH if the version of \a vgpuProcUtilInfo is invalid
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a vgpuProcUtilInfo->vgpuProcUtilArray is null, or supplied \a vgpuProcUtilInfo->vgpuProcessCount
+ is too small to return samples for all processes on vGPU instances currently executing on the device.
+ The caller should check the current processes count from the returned \a vgpuProcUtilInfo->vgpuProcessCount,
+ and call the function again with a buffer of size
+ vgpuProcUtilInfo->vgpuProcessCount * sizeof(nvmlVgpuProcessUtilizationSample_t)
+ - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_NOT_FOUND if sample entries are not found
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVgpuProcessesUtilizationInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ vgpuProcUtilInfo: *mut cuda_types::nvml::nvmlVgpuProcessesUtilizationInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Queries the state of per process accounting mode on vGPU.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param vgpuInstance The identifier of the target vGPU instance
+ @param mode Reference in which to return the current accounting mode
+
+ @return
+ - \ref NVML_SUCCESS if the mode has been successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a mode is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature
+ - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running on the vGPU instance
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetAccountingMode(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ mode: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Queries list of processes running on vGPU that can be queried for accounting stats. The list of processes
+ returned can be in running or terminated state.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ To just query the maximum number of processes that can be queried, call this function with *count = 0 and
+ pids=NULL. The return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if the list is empty.
+
+ For more details see \ref nvmlVgpuInstanceGetAccountingStats.
+
+ @note In case of PID collision some processes might not be accessible before the circular buffer is full.
+
+ @param vgpuInstance The identifier of the target vGPU instance
+ @param count Reference in which to provide the \a pids array size, and
+ to return the number of elements ready to be queried
+ @param pids Reference in which to return list of process ids
+
+ @return
+ - \ref NVML_SUCCESS if pids were successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a count is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature or accounting mode is disabled
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to expected value)
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlVgpuInstanceGetAccountingPids*/
+ fn nvmlVgpuInstanceGetAccountingPids(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ count: *mut ::core::ffi::c_uint,
+ pids: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Queries process's accounting stats.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ Accounting stats capture GPU utilization and other statistics across the lifetime of a process, and
+ can be queried during the lifetime of the process or after its termination.
+ The time field in \ref nvmlAccountingStats_t is reported as 0 during the lifetime of the process and
+ updated to actual running time after its termination.
+ Accounting stats are kept in a circular buffer; newly created processes overwrite information about old
+ processes.
+
+ See \ref nvmlAccountingStats_t for description of each returned metric.
+ List of processes that can be queried can be retrieved from \ref nvmlVgpuInstanceGetAccountingPids.
+
+ @note Accounting Mode needs to be on. See \ref nvmlVgpuInstanceGetAccountingMode.
+ @note Only compute and graphics applications stats can be queried. Monitoring applications stats can't be
+ queried since they don't contribute to GPU utilization.
+ @note In case of PID collision, only the stats of the latest process (the one that terminated last) will be reported
+
+ @param vgpuInstance The identifier of the target vGPU instance
+ @param pid Process Id of the target process to query stats for
+ @param stats Reference in which to return the process's accounting stats
+
+ @return
+ - \ref NVML_SUCCESS if stats have been successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a stats is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ or \a stats is not found
+ - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature or accounting mode is disabled
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetAccountingStats(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ pid: ::core::ffi::c_uint,
+ stats: *mut cuda_types::nvml::nvmlAccountingStats_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
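+ // A sketch tying the two accounting calls together: size the PID list, fetch it, then query
+ // per-process stats. Assumes `vgpu_instance` is a valid handle and accounting mode is enabled;
+ // unsafe FFI and error handling are elided, and this is not taken from the ZLUDA sources.
+ //
+ //     let mut count: u32 = 0;
+ //     // Expect NVML_ERROR_INSUFFICIENT_SIZE here; count is set to the required size.
+ //     unsafe { nvmlVgpuInstanceGetAccountingPids(vgpu_instance, &mut count, std::ptr::null_mut()) };
+ //     let mut pids = vec![0u32; count as usize];
+ //     unsafe { nvmlVgpuInstanceGetAccountingPids(vgpu_instance, &mut count, pids.as_mut_ptr()) };
+ //     for &pid in &pids[..count as usize] {
+ //         let mut stats: nvmlAccountingStats_t = unsafe { std::mem::zeroed() };
+ //         unsafe { nvmlVgpuInstanceGetAccountingStats(vgpu_instance, pid, &mut stats) };
+ //     }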
+ #[must_use]
+ /** Clears accounting information of the vGPU instance that have already terminated.
+
+ For Maxwell &tm; or newer fully supported devices.
+ Requires root/admin permissions.
+
+ @note Accounting Mode needs to be on. See \ref nvmlVgpuInstanceGetAccountingMode.
+ @note Only compute and graphics applications stats are reported and can be cleared since monitoring applications
+ stats don't contribute to GPU utilization.
+
+ @param vgpuInstance The identifier of the target vGPU instance
+
+ @return
+ - \ref NVML_SUCCESS if accounting information has been cleared
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature or accounting mode is disabled
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceClearAccountingPids(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Query the license information of the vGPU instance.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param licenseInfo Pointer to vGPU license information structure
+
+ @return
+ - \ref NVML_SUCCESS if information is successfully retrieved
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a licenseInfo is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running on the vGPU instance
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetLicenseInfo_v2(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ licenseInfo: *mut cuda_types::nvml::nvmlVgpuLicenseInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the number of excluded GPU devices in the system.
+
+ For all products.
+
+ @param deviceCount Reference in which to return the number of excluded devices
+
+ @return
+ - \ref NVML_SUCCESS if \a deviceCount has been set
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a deviceCount is NULL*/
+ fn nvmlGetExcludedDeviceCount(
+ deviceCount: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Acquire the device information for an excluded GPU device, based on its index.
+
+ For all products.
+
+ Valid indices are derived from the \a deviceCount returned by
+ \ref nvmlGetExcludedDeviceCount(). For example, if \a deviceCount is 2 the valid indices
+ are 0 and 1, corresponding to GPU 0 and GPU 1.
+
+ @param index The index of the target GPU, >= 0 and < \a deviceCount
+ @param info Reference in which to return the device information
+
+ @return
+ - \ref NVML_SUCCESS if \a device has been set
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a info is NULL
+
+ @see nvmlGetExcludedDeviceCount*/
+ fn nvmlGetExcludedDeviceInfoByIndex(
+ index: ::core::ffi::c_uint,
+ info: *mut cuda_types::nvml::nvmlExcludedDeviceInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set MIG mode for the device.
+
+ For Ampere &tm; or newer fully supported devices.
+ Requires root user.
+
+ This mode determines whether a GPU instance can be created.
+
+ This API may unbind or reset the device to activate the requested mode. Thus, the attributes associated with the
+ device, such as minor number, might change. The caller of this API is expected to query such attributes again.
+
+ On certain platforms like pass-through virtualization, where reset functionality may not be exposed directly, VM
+ reboot is required. \a activationStatus would return \ref NVML_ERROR_RESET_REQUIRED for such cases.
+
+ \a activationStatus would return the appropriate error code upon unsuccessful activation. For example, if device
+ unbind fails because the device isn't idle, \ref NVML_ERROR_IN_USE would be returned. The caller of this API
+ is expected to idle the device and retry setting the \a mode.
+
+ @note On Windows, only disabling MIG mode is supported. \a activationStatus would return \ref
+ NVML_ERROR_NOT_SUPPORTED as GPU reset is not supported on Windows through this API.
+
+ @param device The identifier of the target device
+ @param mode The mode to be set, \ref NVML_DEVICE_MIG_DISABLE or
+ \ref NVML_DEVICE_MIG_ENABLE
+ @param activationStatus The activation status
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a mode or \a activationStatus are invalid
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support MIG mode*/
+ fn nvmlDeviceSetMigMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ mode: ::core::ffi::c_uint,
+ activationStatus: *mut cuda_types::nvml::nvmlReturn_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get MIG mode for the device.
+
+ For Ampere &tm; or newer fully supported devices.
+
+ Changing MIG modes may require device unbind or reset. The "pending" MIG mode refers to the target mode following the
+ next activation trigger.
+
+ @param device The identifier of the target device
+ @param currentMode Returns the current mode, \ref NVML_DEVICE_MIG_DISABLE or
+ \ref NVML_DEVICE_MIG_ENABLE
+ @param pendingMode Returns the pending mode, \ref NVML_DEVICE_MIG_DISABLE or
+ \ref NVML_DEVICE_MIG_ENABLE
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a currentMode or \a pendingMode are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support MIG mode*/
+ fn nvmlDeviceGetMigMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ currentMode: *mut ::core::ffi::c_uint,
+ pendingMode: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
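+ // A small sketch of checking current versus pending MIG mode, using the
+ // NVML_DEVICE_MIG_ENABLE / NVML_DEVICE_MIG_DISABLE values referenced above.
+ // `device` is an assumed handle; unsafe FFI and error handling are omitted.
+ //
+ //     let (mut current, mut pending) = (0u32, 0u32);
+ //     unsafe { nvmlDeviceGetMigMode(device, &mut current, &mut pending) };
+ //     if current != pending {
+ //         // A mode change has been requested but needs a reset/unbind to take effect.
+ //     }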
+ #[must_use]
+ /** Get GPU instance profile information
+
+ Information provided by this API is immutable throughout the lifetime of a MIG mode.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device The identifier of the target device
+ @param profile One of the NVML_GPU_INSTANCE_PROFILE_*
+ @param info Returns detailed profile information
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profile or \a info are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support MIG or \a profile isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlDeviceGetGpuInstanceProfileInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ profile: ::core::ffi::c_uint,
+ info: *mut cuda_types::nvml::nvmlGpuInstanceProfileInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Versioned wrapper around \ref nvmlDeviceGetGpuInstanceProfileInfo that accepts a versioned
+ \ref nvmlGpuInstanceProfileInfo_v2_t or later output structure.
+
+ @note The caller must set the \ref nvmlGpuInstanceProfileInfo_v2_t.version field to the
+ appropriate version prior to calling this function. For example:
+ \code
+ nvmlGpuInstanceProfileInfo_v2_t profileInfo =
+ { .version = nvmlGpuInstanceProfileInfo_v2 };
+ nvmlReturn_t result = nvmlDeviceGetGpuInstanceProfileInfoV(device,
+ profile,
+ &profileInfo);
+ \endcode
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device The identifier of the target device
+ @param profile One of the NVML_GPU_INSTANCE_PROFILE_*
+ @param info Returns detailed profile information
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profile, \a info, or \a info->version are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or \a profile isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlDeviceGetGpuInstanceProfileInfoV(
+ device: cuda_types::nvml::nvmlDevice_t,
+ profile: ::core::ffi::c_uint,
+ info: *mut cuda_types::nvml::nvmlGpuInstanceProfileInfo_v2_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get GPU instance placements.
+
+ A placement represents the location of a GPU instance within a device. This API only returns all the possible
+ placements for the given profile regardless of whether MIG is enabled or not.
+ A created GPU instance occupies memory slices described by its placement. Creation of new GPU instance will
+ fail if there is overlap with the already occupied memory slices.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ @param device The identifier of the target device
+ @param profileId The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo
+ @param placements Returns placements allowed for the profile. Can be NULL to discover number
+ of allowed placements for this profile. If non-NULL must be large enough
+ to accommodate the placements supported by the profile.
+ @param count Returns number of allowed placements for the profile.
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profileId or \a count are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support MIG or \a profileId isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlDeviceGetGpuInstancePossiblePlacements_v2(
+ device: cuda_types::nvml::nvmlDevice_t,
+ profileId: ::core::ffi::c_uint,
+ placements: *mut cuda_types::nvml::nvmlGpuInstancePlacement_t,
+ count: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
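+ // A sketch of the discover-then-fetch pattern for placements (pass NULL first to learn the
+ // count). `device` and `profile_id` are assumed inputs; unsafe FFI and error handling omitted.
+ //
+ //     let mut count: u32 = 0;
+ //     unsafe {
+ //         nvmlDeviceGetGpuInstancePossiblePlacements_v2(device, profile_id, std::ptr::null_mut(), &mut count)
+ //     };
+ //     let mut placements: Vec<nvmlGpuInstancePlacement_t> =
+ //         vec![unsafe { std::mem::zeroed() }; count as usize];
+ //     unsafe {
+ //         nvmlDeviceGetGpuInstancePossiblePlacements_v2(device, profile_id, placements.as_mut_ptr(), &mut count)
+ //     };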
+ #[must_use]
+ /** Get GPU instance profile capacity.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ @param device The identifier of the target device
+ @param profileId The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo
+ @param count Returns remaining instance count for the profile ID
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profileId or \a count are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or \a profileId isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlDeviceGetGpuInstanceRemainingCapacity(
+ device: cuda_types::nvml::nvmlDevice_t,
+ profileId: ::core::ffi::c_uint,
+ count: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Create GPU instance.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ If the parent device is unbound, reset or the GPU instance is destroyed explicitly, the GPU instance handle would
+ become invalid. The GPU instance must be recreated to acquire a valid handle.
+
+ @param device The identifier of the target device
+ @param profileId The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo
+ @param gpuInstance Returns the GPU instance handle
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profile, \a profileId or \a gpuInstance are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or in vGPU guest
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_INSUFFICIENT_RESOURCES If the requested GPU instance could not be created*/
+ fn nvmlDeviceCreateGpuInstance(
+ device: cuda_types::nvml::nvmlDevice_t,
+ profileId: ::core::ffi::c_uint,
+ gpuInstance: *mut cuda_types::nvml::nvmlGpuInstance_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
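+ // A sketch of the creation flow suggested by the docs above: look up the profile, check
+ // remaining capacity, then create. `profile_id` is assumed to come from the profile info
+ // returned by nvmlDeviceGetGpuInstanceProfileInfo; unsafe FFI and error handling omitted.
+ //
+ //     let mut remaining: u32 = 0;
+ //     unsafe { nvmlDeviceGetGpuInstanceRemainingCapacity(device, profile_id, &mut remaining) };
+ //     if remaining > 0 {
+ //         let mut gpu_instance: nvmlGpuInstance_t = unsafe { std::mem::zeroed() };
+ //         unsafe { nvmlDeviceCreateGpuInstance(device, profile_id, &mut gpu_instance) };
+ //     }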
+ #[must_use]
+ /** Create GPU instance with the specified placement.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ If the parent device is unbound, reset or the GPU instance is destroyed explicitly, the GPU instance handle would
+ become invalid. The GPU instance must be recreated to acquire a valid handle.
+
+ @param device The identifier of the target device
+ @param profileId The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo
+ @param placement The requested placement. See \ref nvmlDeviceGetGpuInstancePossiblePlacements_v2
+ @param gpuInstance Returns the GPU instance handle
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profile, \a profileId, \a placement or \a gpuInstance
+ are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or in vGPU guest
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_INSUFFICIENT_RESOURCES If the requested GPU instance could not be created*/
+ fn nvmlDeviceCreateGpuInstanceWithPlacement(
+ device: cuda_types::nvml::nvmlDevice_t,
+ profileId: ::core::ffi::c_uint,
+ placement: *const cuda_types::nvml::nvmlGpuInstancePlacement_t,
+ gpuInstance: *mut cuda_types::nvml::nvmlGpuInstance_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Destroy GPU instance.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ @param gpuInstance The GPU instance handle
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or in vGPU guest
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_IN_USE If the GPU instance is in use. This error would be returned if processes
+ (e.g. CUDA application) or compute instances are active on the
+ GPU instance.*/
+ fn nvmlGpuInstanceDestroy(
+ gpuInstance: cuda_types::nvml::nvmlGpuInstance_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get GPU instances for given profile ID.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ @param device The identifier of the target device
+ @param profileId The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo
+ @param gpuInstances Returns pre-existing GPU instances; the buffer must be large enough to
+ accommodate the instances supported by the profile.
+ See \ref nvmlDeviceGetGpuInstanceProfileInfo
+ @param count The count of returned GPU instances
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profileId, \a gpuInstances or \a count are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlDeviceGetGpuInstances(
+ device: cuda_types::nvml::nvmlDevice_t,
+ profileId: ::core::ffi::c_uint,
+ gpuInstances: *mut cuda_types::nvml::nvmlGpuInstance_t,
+ count: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get GPU instances for given instance ID.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ @param device The identifier of the target device
+ @param id The GPU instance ID
+ @param gpuInstance Returns GPU instance
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a id or \a gpuInstance are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_NOT_FOUND If the GPU instance is not found.*/
+ fn nvmlDeviceGetGpuInstanceById(
+ device: cuda_types::nvml::nvmlDevice_t,
+ id: ::core::ffi::c_uint,
+ gpuInstance: *mut cuda_types::nvml::nvmlGpuInstance_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get GPU instance information.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param gpuInstance The GPU instance handle
+ @param info Return GPU instance information
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance or \a info are invalid
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlGpuInstanceGetInfo(
+ gpuInstance: cuda_types::nvml::nvmlGpuInstance_t,
+ info: *mut cuda_types::nvml::nvmlGpuInstanceInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get compute instance profile information.
+
+ Information provided by this API is immutable throughout the lifetime of a MIG mode.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param gpuInstance The identifier of the target GPU instance
+ @param profile One of the NVML_COMPUTE_INSTANCE_PROFILE_*
+ @param engProfile One of the NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_*
+ @param info Returns detailed profile information
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profile, \a engProfile or \a info are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a profile isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlGpuInstanceGetComputeInstanceProfileInfo(
+ gpuInstance: cuda_types::nvml::nvmlGpuInstance_t,
+ profile: ::core::ffi::c_uint,
+ engProfile: ::core::ffi::c_uint,
+ info: *mut cuda_types::nvml::nvmlComputeInstanceProfileInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Versioned wrapper around \ref nvmlGpuInstanceGetComputeInstanceProfileInfo that accepts a versioned
+ \ref nvmlComputeInstanceProfileInfo_v2_t or later output structure.
+
+ @note The caller must set the \ref nvmlGpuInstanceProfileInfo_v2_t.version field to the
+ appropriate version prior to calling this function. For example:
+ \code
+ nvmlComputeInstanceProfileInfo_v2_t profileInfo =
+ { .version = nvmlComputeInstanceProfileInfo_v2 };
+ nvmlReturn_t result = nvmlGpuInstanceGetComputeInstanceProfileInfoV(gpuInstance,
+ profile,
+ engProfile,
+ &profileInfo);
+ \endcode
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param gpuInstance The identifier of the target GPU instance
+ @param profile One of the NVML_COMPUTE_INSTANCE_PROFILE_*
+ @param engProfile One of the NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_*
+ @param info Returns detailed profile information
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profile, \a engProfile, \a info, or \a info->version are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a profile isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlGpuInstanceGetComputeInstanceProfileInfoV(
+ gpuInstance: cuda_types::nvml::nvmlGpuInstance_t,
+ profile: ::core::ffi::c_uint,
+ engProfile: ::core::ffi::c_uint,
+ info: *mut cuda_types::nvml::nvmlComputeInstanceProfileInfo_v2_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get compute instance profile capacity.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ @param gpuInstance The identifier of the target GPU instance
+ @param profileId The compute instance profile ID.
+ See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo
+ @param count Returns remaining instance count for the profile ID
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profileId or \a count are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a profileId isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlGpuInstanceGetComputeInstanceRemainingCapacity(
+ gpuInstance: cuda_types::nvml::nvmlGpuInstance_t,
+ profileId: ::core::ffi::c_uint,
+ count: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get compute instance placements.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ A placement represents the location of a compute instance within a GPU instance. This API only returns all the possible
+ placements for the given profile.
+ A created compute instance occupies compute slices described by its placement. Creation of new compute instance will
+ fail if there is overlap with the already occupied compute slices.
+
+ @param gpuInstance The identifier of the target GPU instance
+ @param profileId The compute instance profile ID. See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo
+ @param placements Returns placements allowed for the profile. Can be NULL to discover number
+ of allowed placements for this profile. If non-NULL must be large enough
+ to accommodate the placements supported by the profile.
+ @param count Returns number of allowed placements for the profile.
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profileId or \a count are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or \a profileId isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlGpuInstanceGetComputeInstancePossiblePlacements(
+ gpuInstance: cuda_types::nvml::nvmlGpuInstance_t,
+ profileId: ::core::ffi::c_uint,
+ placements: *mut cuda_types::nvml::nvmlComputeInstancePlacement_t,
+ count: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Create compute instance.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ If the parent device is unbound, reset or the parent GPU instance is destroyed or the compute instance is destroyed
+ explicitly, the compute instance handle would become invalid. The compute instance must be recreated to acquire
+ a valid handle.
+
+ @param gpuInstance The identifier of the target GPU instance
+ @param profileId The compute instance profile ID.
+ See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo
+ @param computeInstance Returns the compute instance handle
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profile, \a profileId or \a computeInstance
+ are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a profileId isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_INSUFFICIENT_RESOURCES If the requested compute instance could not be created*/
+ fn nvmlGpuInstanceCreateComputeInstance(
+ gpuInstance: cuda_types::nvml::nvmlGpuInstance_t,
+ profileId: ::core::ffi::c_uint,
+ computeInstance: *mut cuda_types::nvml::nvmlComputeInstance_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Create compute instance with the specified placement.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ If the parent device is unbound, reset or the parent GPU instance is destroyed or the compute instance is destroyed
+ explicitly, the compute instance handle would become invalid. The compute instance must be recreated to acquire
+ a valid handle.
+
+ @param gpuInstance The identifier of the target GPU instance
+ @param profileId The compute instance profile ID.
+ See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo
+ @param placement The requested placement. See \ref nvmlGpuInstanceGetComputeInstancePossiblePlacements
+ @param computeInstance Returns the compute instance handle
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profile, \a profileId or \a computeInstance
+ are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a profileId isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_INSUFFICIENT_RESOURCES If the requested compute instance could not be created*/
+ fn nvmlGpuInstanceCreateComputeInstanceWithPlacement(
+ gpuInstance: cuda_types::nvml::nvmlGpuInstance_t,
+ profileId: ::core::ffi::c_uint,
+ placement: *const cuda_types::nvml::nvmlComputeInstancePlacement_t,
+ computeInstance: *mut cuda_types::nvml::nvmlComputeInstance_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Destroy compute instance.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ @param computeInstance The compute instance handle
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a computeInstance is invalid
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_IN_USE If the compute instance is in use. This error would be returned if
+ processes (e.g. CUDA application) are active on the compute instance.*/
+ fn nvmlComputeInstanceDestroy(
+ computeInstance: cuda_types::nvml::nvmlComputeInstance_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get compute instances for given profile ID.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ @param gpuInstance The identifier of the target GPU instance
+ @param profileId The compute instance profile ID.
+ See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo
+ @param computeInstances Returns pre-existing compute instances; the buffer must be large enough to
+ accommodate the instances supported by the profile.
+ See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo
+ @param count The count of returned compute instances
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profileId, \a computeInstances or \a count
+ are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a profileId isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlGpuInstanceGetComputeInstances(
+ gpuInstance: cuda_types::nvml::nvmlGpuInstance_t,
+ profileId: ::core::ffi::c_uint,
+ computeInstances: *mut cuda_types::nvml::nvmlComputeInstance_t,
+ count: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get compute instance for given instance ID.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ @param gpuInstance The identifier of the target GPU instance
+ @param id The compute instance ID
+ @param computeInstance Returns compute instance
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a id or \a computeInstance are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_NOT_FOUND If the compute instance is not found.*/
+ fn nvmlGpuInstanceGetComputeInstanceById(
+ gpuInstance: cuda_types::nvml::nvmlGpuInstance_t,
+ id: ::core::ffi::c_uint,
+ computeInstance: *mut cuda_types::nvml::nvmlComputeInstance_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get compute instance information.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param computeInstance The compute instance handle
+ @param info Return compute instance information
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a computeInstance or \a info are invalid
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlComputeInstanceGetInfo_v2(
+ computeInstance: cuda_types::nvml::nvmlComputeInstance_t,
+ info: *mut cuda_types::nvml::nvmlComputeInstanceInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Test if the given handle refers to a MIG device.
+
+ A MIG device handle is an NVML abstraction which maps to a MIG compute instance.
+ These overloaded references can be used (with some restrictions) interchangeably
+ with a GPU device handle to execute queries at a per-compute instance granularity.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device NVML handle to test
+ @param isMigDevice True when handle refers to a MIG device
+
+ @return
+ - \ref NVML_SUCCESS if \a device status was successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device handle or \a isMigDevice reference is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this check is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceIsMigDeviceHandle(
+ device: cuda_types::nvml::nvmlDevice_t,
+ isMigDevice: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get GPU instance ID for the given MIG device handle.
+
+ GPU instance IDs are unique per device and remain valid until the GPU instance is destroyed.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device Target MIG device handle
+ @param id GPU instance ID
+
+ @return
+ - \ref NVML_SUCCESS if instance ID was successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a id reference is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetGpuInstanceId(
+ device: cuda_types::nvml::nvmlDevice_t,
+ id: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get compute instance ID for the given MIG device handle.
+
+ Compute instance IDs are unique per GPU instance and remain valid until the compute instance
+ is destroyed.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device Target MIG device handle
+ @param id Compute instance ID
+
+ @return
+ - \ref NVML_SUCCESS if instance ID was successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a id reference is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetComputeInstanceId(
+ device: cuda_types::nvml::nvmlDevice_t,
+ id: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get the maximum number of MIG devices that can exist under a given parent NVML device.
+
+ Returns zero if MIG is not supported or enabled.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device Target device handle
+ @param count Count of MIG devices
+
+ @return
+ - \ref NVML_SUCCESS if \a count was successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a count reference is invalid
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMaxMigDeviceCount(
+ device: cuda_types::nvml::nvmlDevice_t,
+ count: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get MIG device handle for the given index under its parent NVML device.
+
+ If the compute instance is destroyed, either explicitly or by destroying,
+ resetting or unbinding the parent GPU instance or the GPU device itself,
+ the MIG device handle becomes invalid and must be requested again
+ using this API. Handles may be reused and their properties can change in
+ the process.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device Reference to the parent GPU device handle
+ @param index Index of the MIG device
+ @param migDevice Reference to the MIG device handle
+
+ @return
+ - \ref NVML_SUCCESS if \a migDevice handle was successfully created
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a index or \a migDevice reference is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_NOT_FOUND if no valid MIG device was found at \a index
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMigDeviceHandleByIndex(
+ device: cuda_types::nvml::nvmlDevice_t,
+ index: ::core::ffi::c_uint,
+ migDevice: *mut cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
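+ // A sketch of enumerating MIG device handles under a parent device, following the two calls
+ // above. `device` is an assumed handle; unsafe FFI and error handling are mostly omitted, and
+ // this is not taken from the ZLUDA sources.
+ //
+ //     let mut max_count: u32 = 0;
+ //     unsafe { nvmlDeviceGetMaxMigDeviceCount(device, &mut max_count) };
+ //     for index in 0..max_count {
+ //         let mut mig_device: nvmlDevice_t = unsafe { std::mem::zeroed() };
+ //         // NVML_ERROR_NOT_FOUND here just means no MIG device exists at this index.
+ //         let _status = unsafe { nvmlDeviceGetMigDeviceHandleByIndex(device, index, &mut mig_device) };
+ //     }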
+ #[must_use]
+ /** Get parent device handle from a MIG device handle.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param migDevice MIG device handle
+ @param device Device handle
+
+ @return
+ - \ref NVML_SUCCESS if \a device handle was successfully created
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a migDevice or \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetDeviceHandleFromMigDeviceHandle(
+ migDevice: cuda_types::nvml::nvmlDevice_t,
+ device: *mut cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Calculate GPM metrics from two samples.
+
+ For Hopper &tm; or newer fully supported devices.
+
+ @param metricsGet IN/OUT: populated \a nvmlGpmMetricsGet_t struct
+
+ @return
+ - \ref NVML_SUCCESS on success
+ - Nonzero NVML_ERROR_? enum on error*/
+ fn nvmlGpmMetricsGet(
+ metricsGet: *mut cuda_types::nvml::nvmlGpmMetricsGet_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Free an allocated sample buffer that was allocated with \ref nvmlGpmSampleAlloc()
+
+ For Hopper &tm; or newer fully supported devices.
+
+ @param gpmSample Sample to free
+
+ @return
+ - \ref NVML_SUCCESS on success
+ - \ref NVML_ERROR_INVALID_ARGUMENT if an invalid pointer is provided*/
+ fn nvmlGpmSampleFree(
+ gpmSample: cuda_types::nvml::nvmlGpmSample_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Allocate a sample buffer to be used with NVML GPM. You will need to allocate
+ at least two of these buffers to use the NVML GPM feature.
+
+ For Hopper &tm; or newer fully supported devices.
+
+ @param gpmSample Where the allocated sample will be stored
+
+ @return
+ - \ref NVML_SUCCESS on success
+ - \ref NVML_ERROR_INVALID_ARGUMENT if an invalid pointer is provided
+ - \ref NVML_ERROR_MEMORY if system memory is insufficient*/
+ fn nvmlGpmSampleAlloc(
+ gpmSample: *mut cuda_types::nvml::nvmlGpmSample_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Read a sample of GPM metrics into the provided \a gpmSample buffer. After
+ two samples are gathered, you can call nvmlGpmMetricsGet on those samples to
+ retrieve metrics.
+
+ For Hopper &tm; or newer fully supported devices.
+
+ @param device Device to get samples for
+ @param gpmSample Buffer to read samples into
+
+ @return
+ - \ref NVML_SUCCESS on success
+ - Nonzero NVML_ERROR_? enum on error*/
+ fn nvmlGpmSampleGet(
+ device: cuda_types::nvml::nvmlDevice_t,
+ gpmSample: cuda_types::nvml::nvmlGpmSample_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
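+ // A sketch of the sampling flow described above: allocate two sample buffers, take two samples
+ // some interval apart, hand both to nvmlGpmMetricsGet via a populated nvmlGpmMetricsGet_t (its
+ // exact field layout is not spelled out here), then free the buffers. `device` is an assumed
+ // handle; unsafe FFI and error handling are omitted.
+ //
+ //     let mut sample1: nvmlGpmSample_t = unsafe { std::mem::zeroed() };
+ //     let mut sample2: nvmlGpmSample_t = unsafe { std::mem::zeroed() };
+ //     unsafe { nvmlGpmSampleAlloc(&mut sample1) };
+ //     unsafe { nvmlGpmSampleAlloc(&mut sample2) };
+ //     unsafe { nvmlGpmSampleGet(device, sample1) };
+ //     std::thread::sleep(std::time::Duration::from_millis(100));
+ //     unsafe { nvmlGpmSampleGet(device, sample2) };
+ //     // ... fill an nvmlGpmMetricsGet_t with the two samples and the requested metric IDs,
+ //     // then call nvmlGpmMetricsGet(&mut metrics_get) ...
+ //     unsafe { nvmlGpmSampleFree(sample1) };
+ //     unsafe { nvmlGpmSampleFree(sample2) };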
+ #[must_use]
+ /** Read a sample of GPM metrics into the provided \a gpmSample buffer for a MIG GPU Instance.
+
+ After two samples are gathered, you can call nvmlGpmMetricsGet on those
+ samples to retrieve metrics.
+
+ For Hopper &tm; or newer fully supported devices.
+
+ @param device Device to get samples for
+ @param gpuInstanceId MIG GPU Instance ID
+ @param gpmSample Buffer to read samples into
+
+ @return
+ - \ref NVML_SUCCESS on success
+ - Nonzero NVML_ERROR_? enum on error*/
+ fn nvmlGpmMigSampleGet(
+ device: cuda_types::nvml::nvmlDevice_t,
+ gpuInstanceId: ::core::ffi::c_uint,
+ gpmSample: cuda_types::nvml::nvmlGpmSample_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Indicate whether the supplied device supports GPM
+
+ @param device NVML device to query for
+ @param gpmSupport Structure to indicate GPM support \a nvmlGpmSupport_t. Indicates
+ GPM support per system for the supplied device
+
+ @return
+ - NVML_SUCCESS on success
+ - Nonzero NVML_ERROR_? enum if there is an error in processing the query*/
+ fn nvmlGpmQueryDeviceSupport(
+ device: cuda_types::nvml::nvmlDevice_t,
+ gpmSupport: *mut cuda_types::nvml::nvmlGpmSupport_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get GPM stream state.
+
+ %HOPPER_OR_NEWER%
+ Supported on Linux, Windows TCC.
+
+ @param device The identifier of the target device
+ @param state Returns GPM stream state
+ NVML_FEATURE_DISABLED or NVML_FEATURE_ENABLED
+
+ @return
+ - \ref NVML_SUCCESS if the current GPM stream state was successfully queried
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a state is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device*/
+ fn nvmlGpmQueryIfStreamingEnabled(
+ device: cuda_types::nvml::nvmlDevice_t,
+ state: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set GPM stream state.
+
+ %HOPPER_OR_NEWER%
+ Supported on Linux, Windows TCC.
+
+ @param device The identifier of the target device
+ @param state GPM stream state,
+ NVML_FEATURE_DISABLED or NVML_FEATURE_ENABLED
+
+ @return
+ - \ref NVML_SUCCESS if the GPM stream state is successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device*/
+ fn nvmlGpmSetStreamingEnabled(
+ device: cuda_types::nvml::nvmlDevice_t,
+ state: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set NvLink Low Power Threshold for device.
+
+ %HOPPER_OR_NEWER%
+
+ @param device The identifier of the target device
+ @param info Reference to \a nvmlNvLinkPowerThres_t struct
+ input parameters
+
+ @return
+ - \ref NVML_SUCCESS if the \a Threshold is successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a Threshold is not within range
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+*/
+ fn nvmlDeviceSetNvLinkDeviceLowPowerThreshold(
+ device: cuda_types::nvml::nvmlDevice_t,
+ info: *mut cuda_types::nvml::nvmlNvLinkPowerThres_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set the global nvlink bandwidth mode
+
+ @param nvlinkBwMode nvlink bandwidth mode
+ @return
+ - \ref NVML_SUCCESS on success
+ - \ref NVML_ERROR_INVALID_ARGUMENT if an invalid argument is provided
+ - \ref NVML_ERROR_IN_USE if P2P object exists
+ - \ref NVML_ERROR_NOT_SUPPORTED if GPU is not Hopper or newer architecture.
+ - \ref NVML_ERROR_NO_PERMISSION if not root user*/
+ fn nvmlSystemSetNvlinkBwMode(
+ nvlinkBwMode: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get the global nvlink bandwidth mode
+
+ @param nvlinkBwMode Reference in which to return the nvlink bandwidth mode
+ @return
+ - \ref NVML_SUCCESS on success
+ - \ref NVML_ERROR_INVALID_ARGUMENT if an invalid pointer is provided
+ - \ref NVML_ERROR_NOT_SUPPORTED if GPU is not Hopper or newer architecture.
+ - \ref NVML_ERROR_NO_PERMISSION if not root user*/
+ fn nvmlSystemGetNvlinkBwMode(
+ nvlinkBwMode: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set new power limit of this device.
+
+ For Kepler &tm; or newer fully supported devices.
+ Requires root/admin permissions.
+
+ See \ref nvmlDeviceGetPowerManagementLimitConstraints to check the allowed ranges of values.
+
+ See \ref nvmlPowerValue_v2_t for more information on the struct.
+
+ \note Limit is not persistent across reboots or driver unloads.
+ Enable persistent mode to prevent driver from unloading when no application is using the device.
+
+ This API replaces nvmlDeviceSetPowerManagementLimit. It can be used as a drop-in replacement for the older version.
+
+ @param device The identifier of the target device
+ @param powerValue Power management limit in milliwatts to set
+
+ @return
+ - \ref NVML_SUCCESS if \a limit has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a powerValue is NULL or contains invalid values
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see NVML_FI_DEV_POWER_AVERAGE
+ @see NVML_FI_DEV_POWER_INSTANT
+ @see NVML_FI_DEV_POWER_MIN_LIMIT
+ @see NVML_FI_DEV_POWER_MAX_LIMIT
+ @see NVML_FI_DEV_POWER_CURRENT_LIMIT*/
+ fn nvmlDeviceSetPowerManagementLimit_v2(
+ device: cuda_types::nvml::nvmlDevice_t,
+ powerValue: *mut cuda_types::nvml::nvmlPowerValue_v2_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get SRAM ECC error status of this device.
+
+ For Ampere &tm; or newer fully supported devices.
+ Requires root/admin permissions.
+
+ See \ref nvmlEccSramErrorStatus_v1_t for more information on the struct.
+
+ @param device The identifier of the target device
+ @param status Returns SRAM ECC error status
+
+ @return
+ - \ref NVML_SUCCESS if \a status has been retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a status is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_VERSION_MISMATCH if the version of \a nvmlEccSramErrorStatus_t is invalid
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetSramEccErrorStatus(
+ device: cuda_types::nvml::nvmlDevice_t,
+ status: *mut cuda_types::nvml::nvmlEccSramErrorStatus_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+}
diff --git a/cuda_types/src/cuda.rs b/cuda_types/src/cuda.rs
new file mode 100644
index 0000000..2c2716a
--- /dev/null
+++ b/cuda_types/src/cuda.rs
@@ -0,0 +1,8110 @@
+// Generated automatically by zluda_bindgen
+// DO NOT EDIT MANUALLY
+#![allow(warnings)]
+pub const CUDA_VERSION: u32 = 12040;
+pub const CU_IPC_HANDLE_SIZE: u32 = 64;
+pub const CU_COMPUTE_ACCELERATED_TARGET_BASE: u32 = 65536;
+pub const CU_GRAPH_COND_ASSIGN_DEFAULT: u32 = 1;
+pub const CU_GRAPH_KERNEL_NODE_PORT_DEFAULT: u32 = 0;
+pub const CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC: u32 = 1;
+pub const CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER: u32 = 2;
+pub const CU_MEMHOSTALLOC_PORTABLE: u32 = 1;
+pub const CU_MEMHOSTALLOC_DEVICEMAP: u32 = 2;
+pub const CU_MEMHOSTALLOC_WRITECOMBINED: u32 = 4;
+pub const CU_MEMHOSTREGISTER_PORTABLE: u32 = 1;
+pub const CU_MEMHOSTREGISTER_DEVICEMAP: u32 = 2;
+pub const CU_MEMHOSTREGISTER_IOMEMORY: u32 = 4;
+pub const CU_MEMHOSTREGISTER_READ_ONLY: u32 = 8;
+pub const CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL: u32 = 1;
+pub const CU_TENSOR_MAP_NUM_QWORDS: u32 = 16;
+pub const CUDA_EXTERNAL_MEMORY_DEDICATED: u32 = 1;
+pub const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC: u32 = 1;
+pub const CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC: u32 = 2;
+pub const CUDA_NVSCISYNC_ATTR_SIGNAL: u32 = 1;
+pub const CUDA_NVSCISYNC_ATTR_WAIT: u32 = 2;
+pub const CU_MEM_CREATE_USAGE_TILE_POOL: u32 = 1;
+pub const CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC: u32 = 1;
+pub const CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC: u32 = 2;
+pub const CUDA_ARRAY3D_LAYERED: u32 = 1;
+pub const CUDA_ARRAY3D_2DARRAY: u32 = 1;
+pub const CUDA_ARRAY3D_SURFACE_LDST: u32 = 2;
+pub const CUDA_ARRAY3D_CUBEMAP: u32 = 4;
+pub const CUDA_ARRAY3D_TEXTURE_GATHER: u32 = 8;
+pub const CUDA_ARRAY3D_DEPTH_TEXTURE: u32 = 16;
+pub const CUDA_ARRAY3D_COLOR_ATTACHMENT: u32 = 32;
+pub const CUDA_ARRAY3D_SPARSE: u32 = 64;
+pub const CUDA_ARRAY3D_DEFERRED_MAPPING: u32 = 128;
+pub const CU_TRSA_OVERRIDE_FORMAT: u32 = 1;
+pub const CU_TRSF_READ_AS_INTEGER: u32 = 1;
+pub const CU_TRSF_NORMALIZED_COORDINATES: u32 = 2;
+pub const CU_TRSF_SRGB: u32 = 16;
+pub const CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION: u32 = 32;
+pub const CU_TRSF_SEAMLESS_CUBEMAP: u32 = 64;
+pub const CU_LAUNCH_PARAM_END_AS_INT: u32 = 0;
+pub const CU_LAUNCH_PARAM_BUFFER_POINTER_AS_INT: u32 = 1;
+pub const CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT: u32 = 2;
+pub const CU_PARAM_TR_DEFAULT: i32 = -1;
+pub const CUDA_EGL_INFINITE_TIMEOUT: u32 = 4294967295;
+pub type cuuint32_t = u32;
+pub type cuuint64_t = u64;
+#[repr(transparent)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUdeviceptr_v2(pub *mut ::core::ffi::c_void);
+pub type CUdeviceptr = CUdeviceptr_v2;
+pub type CUdevice_v1 = ::core::ffi::c_int;
+pub type CUdevice = CUdevice_v1;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUctx_st {
+ _unused: [u8; 0],
+}
+#[repr(transparent)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUcontext(pub *mut CUctx_st);
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUmod_st {
+ _unused: [u8; 0],
+}
+#[repr(transparent)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmodule(pub *mut CUmod_st);
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUfunc_st {
+ _unused: [u8; 0],
+}
+#[repr(transparent)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUfunction(pub *mut CUfunc_st);
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUlib_st {
+ _unused: [u8; 0],
+}
+#[repr(transparent)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUlibrary(pub *mut CUlib_st);
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUkern_st {
+ _unused: [u8; 0],
+}
+pub type CUkernel = *mut CUkern_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUarray_st {
+ _unused: [u8; 0],
+}
+pub type CUarray = *mut CUarray_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUmipmappedArray_st {
+ _unused: [u8; 0],
+}
+pub type CUmipmappedArray = *mut CUmipmappedArray_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUtexref_st {
+ _unused: [u8; 0],
+}
+pub type CUtexref = *mut CUtexref_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUsurfref_st {
+ _unused: [u8; 0],
+}
+pub type CUsurfref = *mut CUsurfref_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUevent_st {
+ _unused: [u8; 0],
+}
+pub type CUevent = *mut CUevent_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUstream_st {
+ _unused: [u8; 0],
+}
+#[repr(transparent)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUstream(pub *mut CUstream_st);
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUgraphicsResource_st {
+ _unused: [u8; 0],
+}
+pub type CUgraphicsResource = *mut CUgraphicsResource_st;
+pub type CUtexObject_v1 = ::core::ffi::c_ulonglong;
+pub type CUtexObject = CUtexObject_v1;
+pub type CUsurfObject_v1 = ::core::ffi::c_ulonglong;
+pub type CUsurfObject = CUsurfObject_v1;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUextMemory_st {
+ _unused: [u8; 0],
+}
+pub type CUexternalMemory = *mut CUextMemory_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUextSemaphore_st {
+ _unused: [u8; 0],
+}
+pub type CUexternalSemaphore = *mut CUextSemaphore_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUgraph_st {
+ _unused: [u8; 0],
+}
+pub type CUgraph = *mut CUgraph_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUgraphNode_st {
+ _unused: [u8; 0],
+}
+pub type CUgraphNode = *mut CUgraphNode_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUgraphExec_st {
+ _unused: [u8; 0],
+}
+pub type CUgraphExec = *mut CUgraphExec_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUmemPoolHandle_st {
+ _unused: [u8; 0],
+}
+pub type CUmemoryPool = *mut CUmemPoolHandle_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUuserObject_st {
+ _unused: [u8; 0],
+}
+pub type CUuserObject = *mut CUuserObject_st;
+pub type CUgraphConditionalHandle = cuuint64_t;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUgraphDeviceUpdatableNode_st {
+ _unused: [u8; 0],
+}
+pub type CUgraphDeviceNode = *mut CUgraphDeviceUpdatableNode_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUasyncCallbackEntry_st {
+ _unused: [u8; 0],
+}
+pub type CUasyncCallbackHandle = *mut CUasyncCallbackEntry_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUuuid_st {
+ pub bytes: [::core::ffi::c_uchar; 16usize],
+}
+pub type CUuuid = CUuuid_st;
+/** Fabric handle - An opaque handle representing a memory allocation
+ that can be exported to processes in same or different nodes. For IPC
+ between processes on different nodes they must be connected via the
+ NVSwitch fabric.*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmemFabricHandle_st {
+ pub data: [::core::ffi::c_uchar; 64usize],
+}
+/** Fabric handle - An opaque handle representing a memory allocation
+ that can be exported to processes in same or different nodes. For IPC
+ between processes on different nodes they must be connected via the
+ NVSwitch fabric.*/
+pub type CUmemFabricHandle_v1 = CUmemFabricHandle_st;
+/** Fabric handle - An opaque handle representing a memory allocation
+ that can be exported to processes in same or different nodes. For IPC
+ between processes on different nodes they must be connected via the
+ NVSwitch fabric.*/
+pub type CUmemFabricHandle = CUmemFabricHandle_v1;
+/// CUDA IPC event handle
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUipcEventHandle_st {
+ pub reserved: [::core::ffi::c_char; 64usize],
+}
+/// CUDA IPC event handle
+pub type CUipcEventHandle_v1 = CUipcEventHandle_st;
+/// CUDA IPC event handle
+pub type CUipcEventHandle = CUipcEventHandle_v1;
+/// CUDA IPC mem handle
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUipcMemHandle_st {
+ pub reserved: [::core::ffi::c_char; 64usize],
+}
+/// CUDA IPC mem handle
+pub type CUipcMemHandle_v1 = CUipcMemHandle_st;
+/// CUDA IPC mem handle
+pub type CUipcMemHandle = CUipcMemHandle_v1;
+impl CUipcMem_flags_enum {
+ ///< Automatically enable peer access between remote devices as needed
+ pub const CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS: CUipcMem_flags_enum = CUipcMem_flags_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// CUDA Ipc Mem Flags
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUipcMem_flags_enum(pub ::core::ffi::c_uint);
+/// CUDA Ipc Mem Flags
+pub use self::CUipcMem_flags_enum as CUipcMem_flags;
+impl CUmemAttach_flags_enum {
+ ///< Memory can be accessed by any stream on any device
+ pub const CU_MEM_ATTACH_GLOBAL: CUmemAttach_flags_enum = CUmemAttach_flags_enum(1);
+}
+impl CUmemAttach_flags_enum {
+ ///< Memory cannot be accessed by any stream on any device
+ pub const CU_MEM_ATTACH_HOST: CUmemAttach_flags_enum = CUmemAttach_flags_enum(2);
+}
+impl CUmemAttach_flags_enum {
+ ///< Memory can only be accessed by a single stream on the associated device
+ pub const CU_MEM_ATTACH_SINGLE: CUmemAttach_flags_enum = CUmemAttach_flags_enum(4);
+}
+#[repr(transparent)]
+/// CUDA Mem Attach Flags
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmemAttach_flags_enum(pub ::core::ffi::c_uint);
+/// CUDA Mem Attach Flags
+pub use self::CUmemAttach_flags_enum as CUmemAttach_flags;
+impl CUctx_flags_enum {
+ ///< Automatic scheduling
+ pub const CU_CTX_SCHED_AUTO: CUctx_flags_enum = CUctx_flags_enum(0);
+}
+impl CUctx_flags_enum {
+ ///< Set spin as default scheduling
+ pub const CU_CTX_SCHED_SPIN: CUctx_flags_enum = CUctx_flags_enum(1);
+}
+impl CUctx_flags_enum {
+ ///< Set yield as default scheduling
+ pub const CU_CTX_SCHED_YIELD: CUctx_flags_enum = CUctx_flags_enum(2);
+}
+impl CUctx_flags_enum {
+ ///< Set blocking synchronization as default scheduling
+ pub const CU_CTX_SCHED_BLOCKING_SYNC: CUctx_flags_enum = CUctx_flags_enum(4);
+}
+impl CUctx_flags_enum {
+ /**< Set blocking synchronization as default scheduling
+ \deprecated This flag was deprecated as of CUDA 4.0
+ and was replaced with ::CU_CTX_SCHED_BLOCKING_SYNC.*/
+ pub const CU_CTX_BLOCKING_SYNC: CUctx_flags_enum = CUctx_flags_enum(4);
+}
+impl CUctx_flags_enum {
+ pub const CU_CTX_SCHED_MASK: CUctx_flags_enum = CUctx_flags_enum(7);
+}
+impl CUctx_flags_enum {
+ /**< \deprecated This flag was deprecated as of CUDA 11.0
+ and it no longer has any effect. All contexts
+ as of CUDA 3.2 behave as though the flag is enabled.*/
+ pub const CU_CTX_MAP_HOST: CUctx_flags_enum = CUctx_flags_enum(8);
+}
+impl CUctx_flags_enum {
+ ///< Keep local memory allocation after launch
+ pub const CU_CTX_LMEM_RESIZE_TO_MAX: CUctx_flags_enum = CUctx_flags_enum(16);
+}
+impl CUctx_flags_enum {
+ ///< Trigger coredumps from exceptions in this context
+ pub const CU_CTX_COREDUMP_ENABLE: CUctx_flags_enum = CUctx_flags_enum(32);
+}
+impl CUctx_flags_enum {
+ ///< Enable user pipe to trigger coredumps in this context
+ pub const CU_CTX_USER_COREDUMP_ENABLE: CUctx_flags_enum = CUctx_flags_enum(64);
+}
+impl CUctx_flags_enum {
+ ///< Ensure synchronous memory operations on this context will synchronize
+ pub const CU_CTX_SYNC_MEMOPS: CUctx_flags_enum = CUctx_flags_enum(128);
+}
+impl CUctx_flags_enum {
+ pub const CU_CTX_FLAGS_MASK: CUctx_flags_enum = CUctx_flags_enum(255);
+}
+#[repr(transparent)]
+/// Context creation flags
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUctx_flags_enum(pub ::core::ffi::c_uint);
+/// Context creation flags
+pub use self::CUctx_flags_enum as CUctx_flags;
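
The generator emits these flag sets as transparent newtypes with associated constants rather than Rust enums, so callers combine flags by OR-ing the public inner value. A minimal sketch, assuming the cuda_types crate added by this patch is available as a dependency:

use cuda_types::cuda::CUctx_flags_enum;

fn main() {
    // Combine scheduling and local-memory flags by OR-ing the inner c_uint.
    let flags = CUctx_flags_enum(
        CUctx_flags_enum::CU_CTX_SCHED_BLOCKING_SYNC.0
            | CUctx_flags_enum::CU_CTX_LMEM_RESIZE_TO_MAX.0,
    );
    // The combined value stays within the documented flag mask.
    assert_eq!(flags.0 & !CUctx_flags_enum::CU_CTX_FLAGS_MASK.0, 0);
    println!("context flags: {:?}", flags);
}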
+impl CUevent_sched_flags_enum {
+ ///< Automatic scheduling
+ pub const CU_EVENT_SCHED_AUTO: CUevent_sched_flags_enum = CUevent_sched_flags_enum(
+ 0,
+ );
+}
+impl CUevent_sched_flags_enum {
+ ///< Set spin as default scheduling
+ pub const CU_EVENT_SCHED_SPIN: CUevent_sched_flags_enum = CUevent_sched_flags_enum(
+ 1,
+ );
+}
+impl CUevent_sched_flags_enum {
+ ///< Set yield as default scheduling
+ pub const CU_EVENT_SCHED_YIELD: CUevent_sched_flags_enum = CUevent_sched_flags_enum(
+ 2,
+ );
+}
+impl CUevent_sched_flags_enum {
+ ///< Set blocking synchronization as default scheduling
+ pub const CU_EVENT_SCHED_BLOCKING_SYNC: CUevent_sched_flags_enum = CUevent_sched_flags_enum(
+ 4,
+ );
+}
+#[repr(transparent)]
+/// Event sched flags
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUevent_sched_flags_enum(pub ::core::ffi::c_uint);
+/// Event sched flags
+pub use self::CUevent_sched_flags_enum as CUevent_sched_flags;
+impl CUstream_flags_enum {
+ ///< Default stream flag
+ pub const CU_STREAM_DEFAULT: CUstream_flags_enum = CUstream_flags_enum(0);
+}
+impl CUstream_flags_enum {
+ ///< Stream does not synchronize with stream 0 (the NULL stream)
+ pub const CU_STREAM_NON_BLOCKING: CUstream_flags_enum = CUstream_flags_enum(1);
+}
+#[repr(transparent)]
+/// Stream creation flags
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUstream_flags_enum(pub ::core::ffi::c_uint);
+/// Stream creation flags
+pub use self::CUstream_flags_enum as CUstream_flags;
+impl CUevent_flags_enum {
+ ///< Default event flag
+ pub const CU_EVENT_DEFAULT: CUevent_flags_enum = CUevent_flags_enum(0);
+}
+impl CUevent_flags_enum {
+ ///< Event uses blocking synchronization
+ pub const CU_EVENT_BLOCKING_SYNC: CUevent_flags_enum = CUevent_flags_enum(1);
+}
+impl CUevent_flags_enum {
+ ///< Event will not record timing data
+ pub const CU_EVENT_DISABLE_TIMING: CUevent_flags_enum = CUevent_flags_enum(2);
+}
+impl CUevent_flags_enum {
+ ///< Event is suitable for interprocess use. CU_EVENT_DISABLE_TIMING must be set
+ pub const CU_EVENT_INTERPROCESS: CUevent_flags_enum = CUevent_flags_enum(4);
+}
+#[repr(transparent)]
+/// Event creation flags
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUevent_flags_enum(pub ::core::ffi::c_uint);
+/// Event creation flags
+pub use self::CUevent_flags_enum as CUevent_flags;
+impl CUevent_record_flags_enum {
+ ///< Default event record flag
+ pub const CU_EVENT_RECORD_DEFAULT: CUevent_record_flags_enum = CUevent_record_flags_enum(
+ 0,
+ );
+}
+impl CUevent_record_flags_enum {
+ /**< When using stream capture, create an event record node
+ instead of the default behavior. This flag is invalid
+ when used outside of capture.*/
+ pub const CU_EVENT_RECORD_EXTERNAL: CUevent_record_flags_enum = CUevent_record_flags_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Event record flags
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUevent_record_flags_enum(pub ::core::ffi::c_uint);
+/// Event record flags
+pub use self::CUevent_record_flags_enum as CUevent_record_flags;
+impl CUevent_wait_flags_enum {
+ ///< Default event wait flag
+ pub const CU_EVENT_WAIT_DEFAULT: CUevent_wait_flags_enum = CUevent_wait_flags_enum(
+ 0,
+ );
+}
+impl CUevent_wait_flags_enum {
+ /**< When using stream capture, create an event wait node
+ instead of the default behavior. This flag is invalid
+ when used outside of capture.*/
+ pub const CU_EVENT_WAIT_EXTERNAL: CUevent_wait_flags_enum = CUevent_wait_flags_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Event wait flags
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUevent_wait_flags_enum(pub ::core::ffi::c_uint);
+/// Event wait flags
+pub use self::CUevent_wait_flags_enum as CUevent_wait_flags;
+impl CUstreamWaitValue_flags_enum {
+ /**< Wait until (int32_t)(*addr - value) >= 0 (or int64_t for 64 bit
+values). Note this is a cyclic comparison which ignores wraparound.
+(Default behavior.)*/
+ pub const CU_STREAM_WAIT_VALUE_GEQ: CUstreamWaitValue_flags_enum = CUstreamWaitValue_flags_enum(
+ 0,
+ );
+}
+impl CUstreamWaitValue_flags_enum {
+ ///< Wait until *addr == value.
+ pub const CU_STREAM_WAIT_VALUE_EQ: CUstreamWaitValue_flags_enum = CUstreamWaitValue_flags_enum(
+ 1,
+ );
+}
+impl CUstreamWaitValue_flags_enum {
+ ///< Wait until (*addr & value) != 0.
+ pub const CU_STREAM_WAIT_VALUE_AND: CUstreamWaitValue_flags_enum = CUstreamWaitValue_flags_enum(
+ 2,
+ );
+}
+impl CUstreamWaitValue_flags_enum {
+ /**< Wait until ~(*addr | value) != 0. Support for this operation can be
+queried with ::cuDeviceGetAttribute() and
+::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR.*/
+ pub const CU_STREAM_WAIT_VALUE_NOR: CUstreamWaitValue_flags_enum = CUstreamWaitValue_flags_enum(
+ 3,
+ );
+}
+impl CUstreamWaitValue_flags_enum {
+ /**< Follow the wait operation with a flush of outstanding remote writes. This
+means that, if a remote write operation is guaranteed to have reached the
+device before the wait can be satisfied, that write is guaranteed to be
+visible to downstream device work. The device is permitted to reorder
+remote writes internally. For example, this flag would be required if
+two remote writes arrive in a defined order, the wait is satisfied by the
+second write, and downstream work needs to observe the first write.
+Support for this operation is restricted to selected platforms and can be
+queried with ::CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES.*/
+ pub const CU_STREAM_WAIT_VALUE_FLUSH: CUstreamWaitValue_flags_enum = CUstreamWaitValue_flags_enum(
+ 1073741824,
+ );
+}
+#[repr(transparent)]
+/// Flags for ::cuStreamWaitValue32 and ::cuStreamWaitValue64
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUstreamWaitValue_flags_enum(pub ::core::ffi::c_uint);
+/// Flags for ::cuStreamWaitValue32 and ::cuStreamWaitValue64
+pub use self::CUstreamWaitValue_flags_enum as CUstreamWaitValue_flags;
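
The GEQ wait described above is a cyclic comparison on 32-bit values. A host-side model of that predicate, purely illustrative and not part of the bindings:

// Models CU_STREAM_WAIT_VALUE_GEQ: interpret the wrapping difference as signed.
fn geq_satisfied(current: u32, target: u32) -> bool {
    current.wrapping_sub(target) as i32 >= 0
}

fn main() {
    assert!(geq_satisfied(5, 3));
    assert!(!geq_satisfied(3, 5));
    // Wraparound is ignored: a counter that wrapped past the target still satisfies the wait.
    assert!(geq_satisfied(2, u32::MAX - 1));
}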
+impl CUstreamWriteValue_flags_enum {
+ ///< Default behavior
+ pub const CU_STREAM_WRITE_VALUE_DEFAULT: CUstreamWriteValue_flags_enum = CUstreamWriteValue_flags_enum(
+ 0,
+ );
+}
+impl CUstreamWriteValue_flags_enum {
+ /**< Permits the write to be reordered with writes which were issued
+before it, as a performance optimization. Normally,
+::cuStreamWriteValue32 will provide a memory fence before the
+write, which has similar semantics to
+__threadfence_system() but is scoped to the stream
+rather than a CUDA thread.
+This flag is not supported in the v2 API.*/
+ pub const CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER: CUstreamWriteValue_flags_enum = CUstreamWriteValue_flags_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Flags for ::cuStreamWriteValue32
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUstreamWriteValue_flags_enum(pub ::core::ffi::c_uint);
+/// Flags for ::cuStreamWriteValue32
+pub use self::CUstreamWriteValue_flags_enum as CUstreamWriteValue_flags;
+impl CUstreamBatchMemOpType_enum {
+ ///< Represents a ::cuStreamWaitValue32 operation
+ pub const CU_STREAM_MEM_OP_WAIT_VALUE_32: CUstreamBatchMemOpType_enum = CUstreamBatchMemOpType_enum(
+ 1,
+ );
+}
+impl CUstreamBatchMemOpType_enum {
+ ///< Represents a ::cuStreamWriteValue32 operation
+ pub const CU_STREAM_MEM_OP_WRITE_VALUE_32: CUstreamBatchMemOpType_enum = CUstreamBatchMemOpType_enum(
+ 2,
+ );
+}
+impl CUstreamBatchMemOpType_enum {
+ ///< Represents a ::cuStreamWaitValue64 operation
+ pub const CU_STREAM_MEM_OP_WAIT_VALUE_64: CUstreamBatchMemOpType_enum = CUstreamBatchMemOpType_enum(
+ 4,
+ );
+}
+impl CUstreamBatchMemOpType_enum {
+ ///< Represents a ::cuStreamWriteValue64 operation
+ pub const CU_STREAM_MEM_OP_WRITE_VALUE_64: CUstreamBatchMemOpType_enum = CUstreamBatchMemOpType_enum(
+ 5,
+ );
+}
+impl CUstreamBatchMemOpType_enum {
+ ///< Insert a memory barrier of the specified type
+ pub const CU_STREAM_MEM_OP_BARRIER: CUstreamBatchMemOpType_enum = CUstreamBatchMemOpType_enum(
+ 6,
+ );
+}
+impl CUstreamBatchMemOpType_enum {
+ /**< This has the same effect as ::CU_STREAM_WAIT_VALUE_FLUSH, but as a
+standalone operation.*/
+ pub const CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES: CUstreamBatchMemOpType_enum = CUstreamBatchMemOpType_enum(
+ 3,
+ );
+}
+#[repr(transparent)]
+/// Operations for ::cuStreamBatchMemOp
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUstreamBatchMemOpType_enum(pub ::core::ffi::c_uint);
+/// Operations for ::cuStreamBatchMemOp
+pub use self::CUstreamBatchMemOpType_enum as CUstreamBatchMemOpType;
+impl CUstreamMemoryBarrier_flags_enum {
+ ///< System-wide memory barrier.
+ pub const CU_STREAM_MEMORY_BARRIER_TYPE_SYS: CUstreamMemoryBarrier_flags_enum = CUstreamMemoryBarrier_flags_enum(
+ 0,
+ );
+}
+impl CUstreamMemoryBarrier_flags_enum {
+ ///< Limit memory barrier scope to the GPU.
+ pub const CU_STREAM_MEMORY_BARRIER_TYPE_GPU: CUstreamMemoryBarrier_flags_enum = CUstreamMemoryBarrier_flags_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Flags for ::cuStreamMemoryBarrier
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUstreamMemoryBarrier_flags_enum(pub ::core::ffi::c_uint);
+/// Flags for ::cuStreamMemoryBarrier
+pub use self::CUstreamMemoryBarrier_flags_enum as CUstreamMemoryBarrier_flags;
+/// Per-operation parameters for ::cuStreamBatchMemOp
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union CUstreamBatchMemOpParams_union {
+ pub operation: CUstreamBatchMemOpType,
+ pub waitValue: CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st,
+ pub writeValue: CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st,
+ pub flushRemoteWrites: CUstreamBatchMemOpParams_union_CUstreamMemOpFlushRemoteWritesParams_st,
+ pub memoryBarrier: CUstreamBatchMemOpParams_union_CUstreamMemOpMemoryBarrierParams_st,
+ pub pad: [cuuint64_t; 6usize],
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st {
+ pub operation: CUstreamBatchMemOpType,
+ pub address: CUdeviceptr,
+ pub __bindgen_anon_1: CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st__bindgen_ty_1,
+ pub flags: ::core::ffi::c_uint,
+ ///< For driver internal use. Initial value is unimportant.
+ pub alias: CUdeviceptr,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st__bindgen_ty_1 {
+ pub value: cuuint32_t,
+ pub value64: cuuint64_t,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st {
+ pub operation: CUstreamBatchMemOpType,
+ pub address: CUdeviceptr,
+ pub __bindgen_anon_1: CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st__bindgen_ty_1,
+ pub flags: ::core::ffi::c_uint,
+ ///< For driver internal use. Initial value is unimportant.
+ pub alias: CUdeviceptr,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st__bindgen_ty_1 {
+ pub value: cuuint32_t,
+ pub value64: cuuint64_t,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpFlushRemoteWritesParams_st {
+ pub operation: CUstreamBatchMemOpType,
+ pub flags: ::core::ffi::c_uint,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpMemoryBarrierParams_st {
+ pub operation: CUstreamBatchMemOpType,
+ pub flags: ::core::ffi::c_uint,
+}
+/// Per-operation parameters for ::cuStreamBatchMemOp
+pub type CUstreamBatchMemOpParams_v1 = CUstreamBatchMemOpParams_union;
+/// Per-operation parameters for ::cuStreamBatchMemOp
+pub type CUstreamBatchMemOpParams = CUstreamBatchMemOpParams_v1;
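
A sketch of filling one of these per-operation unions for a 32-bit wait-value operation, again assuming the cuda_types crate from this patch as a dependency; the null device address is only a placeholder, not a usable value:

use cuda_types::cuda::*;

fn main() {
    // Start from a zeroed union and fill in the 32-bit wait-value variant.
    let mut op: CUstreamBatchMemOpParams = unsafe { core::mem::zeroed() };
    op.waitValue = CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st {
        operation: CUstreamBatchMemOpType::CU_STREAM_MEM_OP_WAIT_VALUE_32,
        // Placeholder; a real call would use a device allocation.
        address: CUdeviceptr_v2(core::ptr::null_mut()),
        __bindgen_anon_1: CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st__bindgen_ty_1 {
            value: 1,
        },
        flags: CUstreamWaitValue_flags::CU_STREAM_WAIT_VALUE_GEQ.0,
        // Driver-internal; the generated comment says the initial value is unimportant.
        alias: CUdeviceptr_v2(core::ptr::null_mut()),
    };
    // Every variant starts with `operation`, so the tag can be read back from any of them.
    println!("operation = {:?}", unsafe { op.operation });
}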
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st {
+ pub ctx: CUcontext,
+ pub count: ::core::ffi::c_uint,
+ pub paramArray: *mut CUstreamBatchMemOpParams,
+ pub flags: ::core::ffi::c_uint,
+}
+pub type CUDA_BATCH_MEM_OP_NODE_PARAMS_v1 = CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st;
+pub type CUDA_BATCH_MEM_OP_NODE_PARAMS = CUDA_BATCH_MEM_OP_NODE_PARAMS_v1;
+/// Batch memory operation node parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st {
+ ///< Context to use for the operations.
+ pub ctx: CUcontext,
+ ///< Number of operations in paramArray.
+ pub count: ::core::ffi::c_uint,
+ ///< Array of batch memory operations.
+ pub paramArray: *mut CUstreamBatchMemOpParams,
+ ///< Flags to control the node.
+ pub flags: ::core::ffi::c_uint,
+}
+/// Batch memory operation node parameters
+pub type CUDA_BATCH_MEM_OP_NODE_PARAMS_v2 = CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st;
+impl CUoccupancy_flags_enum {
+ ///< Default behavior
+ pub const CU_OCCUPANCY_DEFAULT: CUoccupancy_flags_enum = CUoccupancy_flags_enum(0);
+}
+impl CUoccupancy_flags_enum {
+ ///< Assume global caching is enabled and cannot be automatically turned off
+ pub const CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE: CUoccupancy_flags_enum = CUoccupancy_flags_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Occupancy calculator flag
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUoccupancy_flags_enum(pub ::core::ffi::c_uint);
+/// Occupancy calculator flag
+pub use self::CUoccupancy_flags_enum as CUoccupancy_flags;
+impl CUstreamUpdateCaptureDependencies_flags_enum {
+ ///< Add new nodes to the dependency set
+ pub const CU_STREAM_ADD_CAPTURE_DEPENDENCIES: CUstreamUpdateCaptureDependencies_flags_enum = CUstreamUpdateCaptureDependencies_flags_enum(
+ 0,
+ );
+}
+impl CUstreamUpdateCaptureDependencies_flags_enum {
+ ///< Replace the dependency set with the new nodes
+ pub const CU_STREAM_SET_CAPTURE_DEPENDENCIES: CUstreamUpdateCaptureDependencies_flags_enum = CUstreamUpdateCaptureDependencies_flags_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Flags for ::cuStreamUpdateCaptureDependencies
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUstreamUpdateCaptureDependencies_flags_enum(pub ::core::ffi::c_uint);
+/// Flags for ::cuStreamUpdateCaptureDependencies
+pub use self::CUstreamUpdateCaptureDependencies_flags_enum as CUstreamUpdateCaptureDependencies_flags;
+impl CUasyncNotificationType_enum {
+ pub const CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET: CUasyncNotificationType_enum = CUasyncNotificationType_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Types of async notification that can be sent
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUasyncNotificationType_enum(pub ::core::ffi::c_uint);
+/// Types of async notification that can be sent
+pub use self::CUasyncNotificationType_enum as CUasyncNotificationType;
+/// Information passed to the user via the async notification callback
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CUasyncNotificationInfo_st {
+ pub type_: CUasyncNotificationType,
+ pub info: CUasyncNotificationInfo_st__bindgen_ty_1,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union CUasyncNotificationInfo_st__bindgen_ty_1 {
+ pub overBudget: CUasyncNotificationInfo_st__bindgen_ty_1__bindgen_ty_1,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUasyncNotificationInfo_st__bindgen_ty_1__bindgen_ty_1 {
+ pub bytesOverBudget: ::core::ffi::c_ulonglong,
+}
+/// Information passed to the user via the async notification callback
+pub type CUasyncNotificationInfo = CUasyncNotificationInfo_st;
+/** CUDA async notification callback
+ \param info Information describing what actions to take as a result of this trim notification.
+ \param userData Pointer to user defined data provided at registration.
+ \param callback The callback handle associated with this specific callback.*/
+pub type CUasyncCallback = ::core::option::Option<
+ unsafe extern "system" fn(
+ info: *mut CUasyncNotificationInfo,
+ userData: *mut ::core::ffi::c_void,
+ callback: CUasyncCallbackHandle,
+ ),
+>;
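
A sketch of a callback matching this signature, assuming the cuda_types crate from this patch as a dependency:

use cuda_types::cuda::*;

// Only inspects the over-budget payload described above.
unsafe extern "system" fn on_async_notification(
    info: *mut CUasyncNotificationInfo,
    _user_data: *mut core::ffi::c_void,
    _callback: CUasyncCallbackHandle,
) {
    unsafe {
        let info = &*info;
        if info.type_ == CUasyncNotificationType::CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET {
            // type_ selects the union variant, so this read is sound.
            println!("over budget by {} bytes", info.info.overBudget.bytesOverBudget);
        }
    }
}

fn main() {
    // Wrapping the function item in Option matches the generated callback type.
    let callback: CUasyncCallback = Some(on_async_notification);
    assert!(callback.is_some());
}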
+impl CUarray_format_enum {
+ ///< Unsigned 8-bit integers
+ pub const CU_AD_FORMAT_UNSIGNED_INT8: CUarray_format_enum = CUarray_format_enum(1);
+}
+impl CUarray_format_enum {
+ ///< Unsigned 16-bit integers
+ pub const CU_AD_FORMAT_UNSIGNED_INT16: CUarray_format_enum = CUarray_format_enum(2);
+}
+impl CUarray_format_enum {
+ ///< Unsigned 32-bit integers
+ pub const CU_AD_FORMAT_UNSIGNED_INT32: CUarray_format_enum = CUarray_format_enum(3);
+}
+impl CUarray_format_enum {
+ ///< Signed 8-bit integers
+ pub const CU_AD_FORMAT_SIGNED_INT8: CUarray_format_enum = CUarray_format_enum(8);
+}
+impl CUarray_format_enum {
+ ///< Signed 16-bit integers
+ pub const CU_AD_FORMAT_SIGNED_INT16: CUarray_format_enum = CUarray_format_enum(9);
+}
+impl CUarray_format_enum {
+ ///< Signed 32-bit integers
+ pub const CU_AD_FORMAT_SIGNED_INT32: CUarray_format_enum = CUarray_format_enum(10);
+}
+impl CUarray_format_enum {
+ ///< 16-bit floating point
+ pub const CU_AD_FORMAT_HALF: CUarray_format_enum = CUarray_format_enum(16);
+}
+impl CUarray_format_enum {
+ ///< 32-bit floating point
+ pub const CU_AD_FORMAT_FLOAT: CUarray_format_enum = CUarray_format_enum(32);
+}
+impl CUarray_format_enum {
+ ///< 8-bit YUV planar format, with 4:2:0 sampling
+ pub const CU_AD_FORMAT_NV12: CUarray_format_enum = CUarray_format_enum(176);
+}
+impl CUarray_format_enum {
+ ///< 1 channel unsigned 8-bit normalized integer
+ pub const CU_AD_FORMAT_UNORM_INT8X1: CUarray_format_enum = CUarray_format_enum(192);
+}
+impl CUarray_format_enum {
+ ///< 2 channel unsigned 8-bit normalized integer
+ pub const CU_AD_FORMAT_UNORM_INT8X2: CUarray_format_enum = CUarray_format_enum(193);
+}
+impl CUarray_format_enum {
+ ///< 4 channel unsigned 8-bit normalized integer
+ pub const CU_AD_FORMAT_UNORM_INT8X4: CUarray_format_enum = CUarray_format_enum(194);
+}
+impl CUarray_format_enum {
+ ///< 1 channel unsigned 16-bit normalized integer
+ pub const CU_AD_FORMAT_UNORM_INT16X1: CUarray_format_enum = CUarray_format_enum(195);
+}
+impl CUarray_format_enum {
+ ///< 2 channel unsigned 16-bit normalized integer
+ pub const CU_AD_FORMAT_UNORM_INT16X2: CUarray_format_enum = CUarray_format_enum(196);
+}
+impl CUarray_format_enum {
+ ///< 4 channel unsigned 16-bit normalized integer
+ pub const CU_AD_FORMAT_UNORM_INT16X4: CUarray_format_enum = CUarray_format_enum(197);
+}
+impl CUarray_format_enum {
+ ///< 1 channel signed 8-bit normalized integer
+ pub const CU_AD_FORMAT_SNORM_INT8X1: CUarray_format_enum = CUarray_format_enum(198);
+}
+impl CUarray_format_enum {
+ ///< 2 channel signed 8-bit normalized integer
+ pub const CU_AD_FORMAT_SNORM_INT8X2: CUarray_format_enum = CUarray_format_enum(199);
+}
+impl CUarray_format_enum {
+ ///< 4 channel signed 8-bit normalized integer
+ pub const CU_AD_FORMAT_SNORM_INT8X4: CUarray_format_enum = CUarray_format_enum(200);
+}
+impl CUarray_format_enum {
+ ///< 1 channel signed 16-bit normalized integer
+ pub const CU_AD_FORMAT_SNORM_INT16X1: CUarray_format_enum = CUarray_format_enum(201);
+}
+impl CUarray_format_enum {
+ ///< 2 channel signed 16-bit normalized integer
+ pub const CU_AD_FORMAT_SNORM_INT16X2: CUarray_format_enum = CUarray_format_enum(202);
+}
+impl CUarray_format_enum {
+ ///< 4 channel signed 16-bit normalized integer
+ pub const CU_AD_FORMAT_SNORM_INT16X4: CUarray_format_enum = CUarray_format_enum(203);
+}
+impl CUarray_format_enum {
+ ///< 4 channel unsigned normalized block-compressed (BC1 compression) format
+ pub const CU_AD_FORMAT_BC1_UNORM: CUarray_format_enum = CUarray_format_enum(145);
+}
+impl CUarray_format_enum {
+ ///< 4 channel unsigned normalized block-compressed (BC1 compression) format with sRGB encoding
+ pub const CU_AD_FORMAT_BC1_UNORM_SRGB: CUarray_format_enum = CUarray_format_enum(
+ 146,
+ );
+}
+impl CUarray_format_enum {
+ ///< 4 channel unsigned normalized block-compressed (BC2 compression) format
+ pub const CU_AD_FORMAT_BC2_UNORM: CUarray_format_enum = CUarray_format_enum(147);
+}
+impl CUarray_format_enum {
+ ///< 4 channel unsigned normalized block-compressed (BC2 compression) format with sRGB encoding
+ pub const CU_AD_FORMAT_BC2_UNORM_SRGB: CUarray_format_enum = CUarray_format_enum(
+ 148,
+ );
+}
+impl CUarray_format_enum {
+ ///< 4 channel unsigned normalized block-compressed (BC3 compression) format
+ pub const CU_AD_FORMAT_BC3_UNORM: CUarray_format_enum = CUarray_format_enum(149);
+}
+impl CUarray_format_enum {
+ ///< 4 channel unsigned normalized block-compressed (BC3 compression) format with sRGB encoding
+ pub const CU_AD_FORMAT_BC3_UNORM_SRGB: CUarray_format_enum = CUarray_format_enum(
+ 150,
+ );
+}
+impl CUarray_format_enum {
+ ///< 1 channel unsigned normalized block-compressed (BC4 compression) format
+ pub const CU_AD_FORMAT_BC4_UNORM: CUarray_format_enum = CUarray_format_enum(151);
+}
+impl CUarray_format_enum {
+ ///< 1 channel signed normalized block-compressed (BC4 compression) format
+ pub const CU_AD_FORMAT_BC4_SNORM: CUarray_format_enum = CUarray_format_enum(152);
+}
+impl CUarray_format_enum {
+ ///< 2 channel unsigned normalized block-compressed (BC5 compression) format
+ pub const CU_AD_FORMAT_BC5_UNORM: CUarray_format_enum = CUarray_format_enum(153);
+}
+impl CUarray_format_enum {
+ ///< 2 channel signed normalized block-compressed (BC5 compression) format
+ pub const CU_AD_FORMAT_BC5_SNORM: CUarray_format_enum = CUarray_format_enum(154);
+}
+impl CUarray_format_enum {
+ ///< 3 channel unsigned half-float block-compressed (BC6H compression) format
+ pub const CU_AD_FORMAT_BC6H_UF16: CUarray_format_enum = CUarray_format_enum(155);
+}
+impl CUarray_format_enum {
+ ///< 3 channel signed half-float block-compressed (BC6H compression) format
+ pub const CU_AD_FORMAT_BC6H_SF16: CUarray_format_enum = CUarray_format_enum(156);
+}
+impl CUarray_format_enum {
+ ///< 4 channel unsigned normalized block-compressed (BC7 compression) format
+ pub const CU_AD_FORMAT_BC7_UNORM: CUarray_format_enum = CUarray_format_enum(157);
+}
+impl CUarray_format_enum {
+ ///< 4 channel unsigned normalized block-compressed (BC7 compression) format with sRGB encoding
+ pub const CU_AD_FORMAT_BC7_UNORM_SRGB: CUarray_format_enum = CUarray_format_enum(
+ 158,
+ );
+}
+#[repr(transparent)]
+/// Array formats
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUarray_format_enum(pub ::core::ffi::c_uint);
+/// Array formats
+pub use self::CUarray_format_enum as CUarray_format;
+impl CUaddress_mode_enum {
+ ///< Wrapping address mode
+ pub const CU_TR_ADDRESS_MODE_WRAP: CUaddress_mode_enum = CUaddress_mode_enum(0);
+}
+impl CUaddress_mode_enum {
+ ///< Clamp to edge address mode
+ pub const CU_TR_ADDRESS_MODE_CLAMP: CUaddress_mode_enum = CUaddress_mode_enum(1);
+}
+impl CUaddress_mode_enum {
+ ///< Mirror address mode
+ pub const CU_TR_ADDRESS_MODE_MIRROR: CUaddress_mode_enum = CUaddress_mode_enum(2);
+}
+impl CUaddress_mode_enum {
+ ///< Border address mode
+ pub const CU_TR_ADDRESS_MODE_BORDER: CUaddress_mode_enum = CUaddress_mode_enum(3);
+}
+#[repr(transparent)]
+/// Texture reference addressing modes
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUaddress_mode_enum(pub ::core::ffi::c_uint);
+/// Texture reference addressing modes
+pub use self::CUaddress_mode_enum as CUaddress_mode;
+impl CUfilter_mode_enum {
+ ///< Point filter mode
+ pub const CU_TR_FILTER_MODE_POINT: CUfilter_mode_enum = CUfilter_mode_enum(0);
+}
+impl CUfilter_mode_enum {
+ ///< Linear filter mode
+ pub const CU_TR_FILTER_MODE_LINEAR: CUfilter_mode_enum = CUfilter_mode_enum(1);
+}
+#[repr(transparent)]
+/// Texture reference filtering modes
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUfilter_mode_enum(pub ::core::ffi::c_uint);
+/// Texture reference filtering modes
+pub use self::CUfilter_mode_enum as CUfilter_mode;
+impl CUdevice_attribute_enum {
+ ///< Maximum number of threads per block
+ pub const CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 1,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum block dimension X
+ pub const CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 2,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum block dimension Y
+ pub const CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 3,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum block dimension Z
+ pub const CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 4,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum grid dimension X
+ pub const CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 5,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum grid dimension Y
+ pub const CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 6,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum grid dimension Z
+ pub const CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 7,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum shared memory available per block in bytes
+ pub const CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 8,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK
+ pub const CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 8,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Memory available on device for __constant__ variables in a CUDA C kernel in bytes
+ pub const CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 9,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Warp size in threads
+ pub const CU_DEVICE_ATTRIBUTE_WARP_SIZE: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 10,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum pitch in bytes allowed by memory copies
+ pub const CU_DEVICE_ATTRIBUTE_MAX_PITCH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 11,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum number of 32-bit registers available per block
+ pub const CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 12,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK
+ pub const CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 12,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Typical clock frequency in kilohertz
+ pub const CU_DEVICE_ATTRIBUTE_CLOCK_RATE: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 13,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Alignment requirement for textures
+ pub const CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 14,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device can possibly copy memory and execute a kernel concurrently. Deprecated. Use instead CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT.
+ pub const CU_DEVICE_ATTRIBUTE_GPU_OVERLAP: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 15,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Number of multiprocessors on device
+ pub const CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 16,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Specifies whether there is a run time limit on kernels
+ pub const CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 17,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device is integrated with host memory
+ pub const CU_DEVICE_ATTRIBUTE_INTEGRATED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 18,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device can map host memory into CUDA address space
+ pub const CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 19,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Compute mode (See ::CUcomputemode for details)
+ pub const CU_DEVICE_ATTRIBUTE_COMPUTE_MODE: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 20,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 1D texture width
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 21,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 2D texture width
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 22,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 2D texture height
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 23,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 3D texture width
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 24,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 3D texture height
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 25,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 3D texture depth
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 26,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 2D layered texture width
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 27,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 2D layered texture height
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 28,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum layers in a 2D layered texture
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 29,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 27,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 28,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 29,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Alignment requirement for surfaces
+ pub const CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 30,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device can possibly execute multiple kernels concurrently
+ pub const CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 31,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device has ECC support enabled
+ pub const CU_DEVICE_ATTRIBUTE_ECC_ENABLED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 32,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< PCI bus ID of the device
+ pub const CU_DEVICE_ATTRIBUTE_PCI_BUS_ID: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 33,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< PCI device ID of the device
+ pub const CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 34,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device is using TCC driver model
+ pub const CU_DEVICE_ATTRIBUTE_TCC_DRIVER: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 35,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Peak memory clock frequency in kilohertz
+ pub const CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 36,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Global memory bus width in bits
+ pub const CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 37,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Size of L2 cache in bytes
+ pub const CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 38,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum resident threads per multiprocessor
+ pub const CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 39,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Number of asynchronous engines
+ pub const CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 40,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device shares a unified address space with the host
+ pub const CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 41,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 1D layered texture width
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 42,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum layers in a 1D layered texture
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 43,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Deprecated, do not use.
+ pub const CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 44,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 2D texture width if CUDA_ARRAY3D_TEXTURE_GATHER is set
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 45,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 2D texture height if CUDA_ARRAY3D_TEXTURE_GATHER is set
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 46,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Alternate maximum 3D texture width
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 47,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Alternate maximum 3D texture height
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 48,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Alternate maximum 3D texture depth
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 49,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< PCI domain ID of the device
+ pub const CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 50,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Pitch alignment requirement for textures
+ pub const CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 51,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum cubemap texture width/height
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 52,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum cubemap layered texture width/height
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 53,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum layers in a cubemap layered texture
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 54,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 1D surface width
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 55,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 2D surface width
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 56,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 2D surface height
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 57,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 3D surface width
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 58,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 3D surface height
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 59,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 3D surface depth
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 60,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 1D layered surface width
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 61,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum layers in a 1D layered surface
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 62,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 2D layered surface width
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 63,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 2D layered surface height
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 64,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum layers in a 2D layered surface
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 65,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum cubemap surface width
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 66,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum cubemap layered surface width
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 67,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum layers in a cubemap layered surface
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 68,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Deprecated, do not use. Use cudaDeviceGetTexture1DLinearMaxWidth() or cuDeviceGetTexture1DLinearMaxWidth() instead.
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 69,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 2D linear texture width
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 70,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 2D linear texture height
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 71,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum 2D linear texture pitch in bytes
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 72,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum mipmapped 2D texture width
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 73,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum mipmapped 2D texture height
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 74,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Major compute capability version number
+ pub const CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 75,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Minor compute capability version number
+ pub const CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 76,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum mipmapped 1D texture width
+ pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 77,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports stream priorities
+ pub const CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 78,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports caching globals in L1
+ pub const CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 79,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports caching locals in L1
+ pub const CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 80,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum shared memory available per multiprocessor in bytes
+ pub const CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 81,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum number of 32-bit registers available per multiprocessor
+ pub const CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 82,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device can allocate managed memory on this system
+ pub const CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 83,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device is on a multi-GPU board
+ pub const CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 84,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Unique id for a group of devices on the same multi-GPU board
+ pub const CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 85,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Link between the device and the host supports native atomic operations (this is a placeholder attribute, and is not supported on any current hardware)
+ pub const CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 86,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Ratio of single precision performance (in floating-point operations per second) to double precision performance
+ pub const CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 87,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports coherently accessing pageable memory without calling cudaHostRegister on it
+ pub const CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 88,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device can coherently access managed memory concurrently with the CPU
+ pub const CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 89,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports compute preemption.
+ pub const CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 90,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device can access host registered memory at the same virtual address as the CPU
+ pub const CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 91,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Deprecated, along with v1 MemOps API, ::cuStreamBatchMemOp and related APIs are supported.
+ pub const CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 92,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Deprecated, along with v1 MemOps API, 64-bit operations are supported in ::cuStreamBatchMemOp and related APIs.
+ pub const CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 93,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Deprecated, along with v1 MemOps API, ::CU_STREAM_WAIT_VALUE_NOR is supported.
+ pub const CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 94,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports launching cooperative kernels via ::cuLaunchCooperativeKernel
+ pub const CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 95,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Deprecated, ::cuLaunchCooperativeKernelMultiDevice is deprecated.
+ pub const CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 96,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum optin shared memory per block
+ pub const CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 97,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< The ::CU_STREAM_WAIT_VALUE_FLUSH flag and the ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the device. See \ref CUDA_MEMOP for additional details.
+ pub const CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 98,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports host memory registration via ::cudaHostRegister.
+ pub const CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 99,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device accesses pageable memory via the host's page tables.
+ pub const CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 100,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< The host can directly access managed memory on the device without migration.
+ pub const CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 101,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Deprecated, Use CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED
+ pub const CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 102,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports virtual memory management APIs like ::cuMemAddressReserve, ::cuMemCreate, ::cuMemMap and related APIs
+ pub const CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 102,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports exporting memory to a posix file descriptor with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate
+ pub const CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 103,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports exporting memory to a Win32 NT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate
+ pub const CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 104,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports exporting memory to a Win32 KMT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate
+ pub const CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 105,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum number of blocks per multiprocessor
+ pub const CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 106,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports compression of memory
+ pub const CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 107,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum L2 persisting lines capacity setting in bytes.
+ pub const CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 108,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Maximum value of CUaccessPolicyWindow::num_bytes.
+ pub const CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 109,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports specifying the GPUDirect RDMA flag with ::cuMemCreate
+ pub const CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 110,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Shared memory reserved by CUDA driver per block in bytes
+ pub const CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 111,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays
+ pub const CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 112,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports using the ::cuMemHostRegister flag ::CU_MEMHOSTREGISTER_READ_ONLY to register memory that must be mapped as read-only to the GPU
+ pub const CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 113,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< External timeline semaphore interop is supported on the device
+ pub const CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 114,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports using the ::cuMemAllocAsync and ::cuMemPool family of APIs
+ pub const CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 115,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information)
+ pub const CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 116,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< The returned attribute shall be interpreted as a bitmask, where the individual bits are described by the ::CUflushGPUDirectRDMAWritesOptions enum
+ pub const CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 117,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See ::CUGPUDirectRDMAWritesOrdering for the numerical values returned here.
+ pub const CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 118,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Handle types supported with mempool based IPC
+ pub const CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 119,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Indicates device supports cluster launch
+ pub const CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 120,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays
+ pub const CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 121,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< 64-bit operations are supported in ::cuStreamBatchMemOp and related MemOp APIs.
+ pub const CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 122,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< ::CU_STREAM_WAIT_VALUE_NOR is supported by MemOp APIs.
+ pub const CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 123,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports buffer sharing with dma_buf mechanism.
+ pub const CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 124,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports IPC Events.
+ pub const CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 125,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Number of memory domains the device supports.
+ pub const CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 126,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports accessing memory using Tensor Map.
+ pub const CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 127,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports exporting memory to a fabric handle with cuMemExportToShareableHandle() or requested with cuMemCreate()
+ pub const CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 128,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports unified function pointers.
+ pub const CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 129,
+ );
+}
+impl CUdevice_attribute_enum {
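+ ///< NUMA configuration of a device: value is of type ::CUdeviceNumaConfig enum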
+ pub const CU_DEVICE_ATTRIBUTE_NUMA_CONFIG: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 130,
+ );
+}
+impl CUdevice_attribute_enum {
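+ ///< NUMA ID of the device's memory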
+ pub const CU_DEVICE_ATTRIBUTE_NUMA_ID: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 131,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Device supports switch multicast and reduction operations.
+ pub const CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 132,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< Indicates if contexts created on this device will be shared via MPS
+ pub const CU_DEVICE_ATTRIBUTE_MPS_ENABLED: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 133,
+ );
+}
+impl CUdevice_attribute_enum {
+ ///< NUMA ID of the host node closest to the device. Returns -1 when the system does not support NUMA.
+ pub const CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 134,
+ );
+}
+impl CUdevice_attribute_enum {
+ pub const CU_DEVICE_ATTRIBUTE_MAX: CUdevice_attribute_enum = CUdevice_attribute_enum(
+ 135,
+ );
+}
+#[repr(transparent)]
+/// Device properties
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUdevice_attribute_enum(pub ::core::ffi::c_uint);
+/// Device properties
+pub use self::CUdevice_attribute_enum as CUdevice_attribute;
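The attribute enums here are emitted as #[repr(transparent)] newtypes over a raw integer rather than Rust enums, so values returned by a newer driver that these bindings do not know about stay representable, and the type can be passed across the FFI boundary unchanged. A minimal usage sketch follows; it is illustrative only and not part of the generated bindings. It assumes the CUdevice_attribute type above is in scope and writes out a cuDeviceGetAttribute declaration by hand, with CUdevice and CUresult approximated as plain integers:

use core::ffi::c_int;

extern "system" {
    // Hand-written declaration for illustration; assumed to mirror the CUDA driver ABI.
    fn cuDeviceGetAttribute(
        pi: *mut c_int,
        attrib: CUdevice_attribute, // repr(transparent) over c_uint, so FFI-safe
        dev: c_int,                 // CUdevice modelled as a plain device ordinal
    ) -> c_int; // raw CUresult code; 0 means CUDA_SUCCESS
}

// Query the opt-in per-block shared memory limit (attribute 97) for a device.
unsafe fn max_optin_shared_mem(dev: c_int) -> Option<c_int> {
    let mut value: c_int = 0;
    let status = cuDeviceGetAttribute(
        &mut value,
        CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN,
        dev,
    );
    if status == 0 { Some(value) } else { None }
}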
+/// Legacy device properties
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUdevprop_st {
+ ///< Maximum number of threads per block
+ pub maxThreadsPerBlock: ::core::ffi::c_int,
+ ///< Maximum size of each dimension of a block
+ pub maxThreadsDim: [::core::ffi::c_int; 3usize],
+ ///< Maximum size of each dimension of a grid
+ pub maxGridSize: [::core::ffi::c_int; 3usize],
+ ///< Shared memory available per block in bytes
+ pub sharedMemPerBlock: ::core::ffi::c_int,
+ ///< Constant memory available on device in bytes
+ pub totalConstantMemory: ::core::ffi::c_int,
+ ///< Warp size in threads
+ pub SIMDWidth: ::core::ffi::c_int,
+ ///< Maximum pitch in bytes allowed by memory copies
+ pub memPitch: ::core::ffi::c_int,
+ ///< 32-bit registers available per block
+ pub regsPerBlock: ::core::ffi::c_int,
+ ///< Clock frequency in kilohertz
+ pub clockRate: ::core::ffi::c_int,
+ ///< Alignment requirement for textures
+ pub textureAlign: ::core::ffi::c_int,
+}
+/// Legacy device properties
+pub type CUdevprop_v1 = CUdevprop_st;
+/// Legacy device properties
+pub type CUdevprop = CUdevprop_v1;
+impl CUpointer_attribute_enum {
+ ///< The ::CUcontext on which a pointer was allocated or registered
+ pub const CU_POINTER_ATTRIBUTE_CONTEXT: CUpointer_attribute_enum = CUpointer_attribute_enum(
+ 1,
+ );
+}
+impl CUpointer_attribute_enum {
+ ///< The ::CUmemorytype describing the physical location of a pointer
+ pub const CU_POINTER_ATTRIBUTE_MEMORY_TYPE: CUpointer_attribute_enum = CUpointer_attribute_enum(
+ 2,
+ );
+}
+impl CUpointer_attribute_enum {
+ ///< The address at which a pointer's memory may be accessed on the device
+ pub const CU_POINTER_ATTRIBUTE_DEVICE_POINTER: CUpointer_attribute_enum = CUpointer_attribute_enum(
+ 3,
+ );
+}
+impl CUpointer_attribute_enum {
+ ///< The address at which a pointer's memory may be accessed on the host
+ pub const CU_POINTER_ATTRIBUTE_HOST_POINTER: CUpointer_attribute_enum = CUpointer_attribute_enum(
+ 4,
+ );
+}
+impl CUpointer_attribute_enum {
+ ///< A pair of tokens for use with the nv-p2p.h Linux kernel interface
+ pub const CU_POINTER_ATTRIBUTE_P2P_TOKENS: CUpointer_attribute_enum = CUpointer_attribute_enum(
+ 5,
+ );
+}
+impl CUpointer_attribute_enum {
+ ///< Synchronize every synchronous memory operation initiated on this region
+ pub const CU_POINTER_ATTRIBUTE_SYNC_MEMOPS: CUpointer_attribute_enum = CUpointer_attribute_enum(
+ 6,
+ );
+}
+impl CUpointer_attribute_enum {
+ ///< A process-wide unique ID for an allocated memory region
+ pub const CU_POINTER_ATTRIBUTE_BUFFER_ID: CUpointer_attribute_enum = CUpointer_attribute_enum(
+ 7,
+ );
+}
+impl CUpointer_attribute_enum {
+ ///< Indicates if the pointer points to managed memory
+ pub const CU_POINTER_ATTRIBUTE_IS_MANAGED: CUpointer_attribute_enum = CUpointer_attribute_enum(
+ 8,
+ );
+}
+impl CUpointer_attribute_enum {
+ ///< A device ordinal of a device on which a pointer was allocated or registered
+ pub const CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL: CUpointer_attribute_enum = CUpointer_attribute_enum(
+ 9,
+ );
+}
+impl CUpointer_attribute_enum {
+ ///< 1 if this pointer maps to an allocation that is suitable for ::cudaIpcGetMemHandle, 0 otherwise
+ pub const CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE: CUpointer_attribute_enum = CUpointer_attribute_enum(
+ 10,
+ );
+}
+impl CUpointer_attribute_enum {
+ ///< Starting address for this requested pointer
+ pub const CU_POINTER_ATTRIBUTE_RANGE_START_ADDR: CUpointer_attribute_enum = CUpointer_attribute_enum(
+ 11,
+ );
+}
+impl CUpointer_attribute_enum {
+ ///< Size of the address range for this requested pointer
+ pub const CU_POINTER_ATTRIBUTE_RANGE_SIZE: CUpointer_attribute_enum = CUpointer_attribute_enum(
+ 12,
+ );
+}
+impl CUpointer_attribute_enum {
+ ///< 1 if this pointer is in a valid address range that is mapped to a backing allocation, 0 otherwise
+ pub const CU_POINTER_ATTRIBUTE_MAPPED: CUpointer_attribute_enum = CUpointer_attribute_enum(
+ 13,
+ );
+}
+impl CUpointer_attribute_enum {
+ ///< Bitmask of allowed ::CUmemAllocationHandleType for this allocation
+ pub const CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES: CUpointer_attribute_enum = CUpointer_attribute_enum(
+ 14,
+ );
+}
+impl CUpointer_attribute_enum {
+ ///< 1 if the memory this pointer is referencing can be used with the GPUDirect RDMA API
+ pub const CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE: CUpointer_attribute_enum = CUpointer_attribute_enum(
+ 15,
+ );
+}
+impl CUpointer_attribute_enum {
+ ///< Returns the access flags the device associated with the current context has on the corresponding memory referenced by the pointer given
+ pub const CU_POINTER_ATTRIBUTE_ACCESS_FLAGS: CUpointer_attribute_enum = CUpointer_attribute_enum(
+ 16,
+ );
+}
+impl CUpointer_attribute_enum {
+ ///< Returns the mempool handle for the allocation if it was allocated from a mempool. Otherwise returns NULL.
+ pub const CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE: CUpointer_attribute_enum = CUpointer_attribute_enum(
+ 17,
+ );
+}
+impl CUpointer_attribute_enum {
+ ///< Size of the actual underlying mapping that the pointer belongs to
+ pub const CU_POINTER_ATTRIBUTE_MAPPING_SIZE: CUpointer_attribute_enum = CUpointer_attribute_enum(
+ 18,
+ );
+}
+impl CUpointer_attribute_enum {
+ ///< The start address of the mapping that the pointer belongs to
+ pub const CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR: CUpointer_attribute_enum = CUpointer_attribute_enum(
+ 19,
+ );
+}
+impl CUpointer_attribute_enum {
+ ///< A process-wide unique id corresponding to the physical allocation the pointer belongs to
+ pub const CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID: CUpointer_attribute_enum = CUpointer_attribute_enum(
+ 20,
+ );
+}
+#[repr(transparent)]
+/// Pointer information
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUpointer_attribute_enum(pub ::core::ffi::c_uint);
+/// Pointer information
+pub use self::CUpointer_attribute_enum as CUpointer_attribute;
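Which type the driver writes through the data pointer depends on the attribute being queried: CU_POINTER_ATTRIBUTE_MEMORY_TYPE yields an unsigned int, CU_POINTER_ATTRIBUTE_RANGE_START_ADDR a device pointer, CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE a handle, and so on, which is why the query takes an untyped out-pointer. An illustrative sketch, not part of the generated bindings, with the cuPointerGetAttribute declaration written by hand and CUdeviceptr approximated as a 64-bit integer:

use core::ffi::{c_int, c_uint, c_void};

extern "system" {
    // Hand-written declaration for illustration; assumed to mirror the CUDA driver ABI.
    fn cuPointerGetAttribute(
        data: *mut c_void,
        attribute: CUpointer_attribute,
        ptr: u64, // CUdeviceptr modelled as a 64-bit value
    ) -> c_int; // raw CUresult code; 0 means CUDA_SUCCESS
}

// Ask whether an address refers to host, device, array or unified memory.
unsafe fn memory_type(ptr: u64) -> Option<c_uint> {
    let mut mem_type: c_uint = 0;
    let status = cuPointerGetAttribute(
        &mut mem_type as *mut c_uint as *mut c_void,
        CUpointer_attribute::CU_POINTER_ATTRIBUTE_MEMORY_TYPE,
        ptr,
    );
    if status == 0 { Some(mem_type) } else { None }
}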
+impl CUfunction_attribute_enum {
+ /** The maximum number of threads per block, beyond which a launch of the
+ function would fail. This number depends on both the function and the
+ device on which the function is currently loaded.*/
+ pub const CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK: CUfunction_attribute_enum = CUfunction_attribute_enum(
+ 0,
+ );
+}
+impl CUfunction_attribute_enum {
+ /** The size in bytes of statically-allocated shared memory required by
+ this function. This does not include dynamically-allocated shared
+ memory requested by the user at runtime.*/
+ pub const CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES: CUfunction_attribute_enum = CUfunction_attribute_enum(
+ 1,
+ );
+}
+impl CUfunction_attribute_enum {
+ /** The size in bytes of user-allocated constant memory required by this
+ function.*/
+ pub const CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES: CUfunction_attribute_enum = CUfunction_attribute_enum(
+ 2,
+ );
+}
+impl CUfunction_attribute_enum {
+ /// The size in bytes of local memory used by each thread of this function.
+ pub const CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES: CUfunction_attribute_enum = CUfunction_attribute_enum(
+ 3,
+ );
+}
+impl CUfunction_attribute_enum {
+ /// The number of registers used by each thread of this function.
+ pub const CU_FUNC_ATTRIBUTE_NUM_REGS: CUfunction_attribute_enum = CUfunction_attribute_enum(
+ 4,
+ );
+}
+impl CUfunction_attribute_enum {
+ /** The PTX virtual architecture version for which the function was
+ compiled. This value is the major PTX version * 10 + the minor PTX
+ version, so a PTX version 1.3 function would return the value 13.
+ Note that this may return the undefined value of 0 for cubins
+ compiled prior to CUDA 3.0.*/
+ pub const CU_FUNC_ATTRIBUTE_PTX_VERSION: CUfunction_attribute_enum = CUfunction_attribute_enum(
+ 5,
+ );
+}
+impl CUfunction_attribute_enum {
+ /** The binary architecture version for which the function was compiled.
+ This value is the major binary version * 10 + the minor binary version,
+ so a binary version 1.3 function would return the value 13. Note that
+ this will return a value of 10 for legacy cubins that do not have a
+ properly-encoded binary architecture version.*/
+ pub const CU_FUNC_ATTRIBUTE_BINARY_VERSION: CUfunction_attribute_enum = CUfunction_attribute_enum(
+ 6,
+ );
+}
+impl CUfunction_attribute_enum {
+ /** The attribute to indicate whether the function has been compiled with
+ user-specified option "-Xptxas --dlcm=ca" set.*/
+ pub const CU_FUNC_ATTRIBUTE_CACHE_MODE_CA: CUfunction_attribute_enum = CUfunction_attribute_enum(
+ 7,
+ );
+}
+impl CUfunction_attribute_enum {
+ /** The maximum size in bytes of dynamically-allocated shared memory that can be used by
+ this function. If the user-specified dynamic shared memory size is larger than this
+ value, the launch will fail.
+ See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/
+ pub const CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: CUfunction_attribute_enum = CUfunction_attribute_enum(
+ 8,
+ );
+}
+impl CUfunction_attribute_enum {
+ /** On devices where the L1 cache and shared memory use the same hardware resources,
+ this sets the shared memory carveout preference, in percent of the total shared memory.
+ Refer to ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR.
+ This is only a hint, and the driver can choose a different ratio if required to execute the function.
+ See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/
+ pub const CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: CUfunction_attribute_enum = CUfunction_attribute_enum(
+ 9,
+ );
+}
+impl CUfunction_attribute_enum {
+ /** If this attribute is set, the kernel must launch with a valid cluster
+ size specified.
+ See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/
+ pub const CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET: CUfunction_attribute_enum = CUfunction_attribute_enum(
+ 10,
+ );
+}
+impl CUfunction_attribute_enum {
+ /** The required cluster width in blocks. The values must either all be 0 or
+ all be positive. The validity of the cluster dimensions is otherwise
+ checked at launch time.
+
+ If the value is set during compile time, it cannot be set at runtime.
+ Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED.
+ See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/
+ pub const CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH: CUfunction_attribute_enum = CUfunction_attribute_enum(
+ 11,
+ );
+}
+impl CUfunction_attribute_enum {
+ /** The required cluster height in blocks. The values must either all be 0 or
+ all be positive. The validity of the cluster dimensions is otherwise
+ checked at launch time.
+
+ If the value is set during compile time, it cannot be set at runtime.
+ Setting it at runtime should return CUDA_ERROR_NOT_PERMITTED.
+ See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/
+ pub const CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT: CUfunction_attribute_enum = CUfunction_attribute_enum(
+ 12,
+ );
+}
+impl CUfunction_attribute_enum {
+ /** The required cluster depth in blocks. The values must either all be 0 or
+ all be positive. The validity of the cluster dimensions is otherwise
+ checked at launch time.
+
+ If the value is set during compile time, it cannot be set at runtime.
+ Setting it at runtime should return CUDA_ERROR_NOT_PERMITTED.
+ See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/
+ pub const CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH: CUfunction_attribute_enum = CUfunction_attribute_enum(
+ 13,
+ );
+}
+impl CUfunction_attribute_enum {
+ /** Whether the function can be launched with non-portable cluster size. 1 is
+ allowed, 0 is disallowed. A non-portable cluster size may only function
+ on the specific SKUs the program is tested on. The launch might fail if
+ the program is run on a different hardware platform.
+
+ CUDA API provides cudaOccupancyMaxActiveClusters to assist with checking
+ whether the desired size can be launched on the current device.
+
+ Portable Cluster Size
+
+ A portable cluster size is guaranteed to be functional on all compute
+ capabilities higher than the target compute capability. The portable
+ cluster size for sm_90 is 8 blocks per cluster. This value may increase
+ for future compute capabilities.
+
+ The specific hardware unit may support higher cluster sizes that are not
+ guaranteed to be portable.
+ See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/
+ pub const CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED: CUfunction_attribute_enum = CUfunction_attribute_enum(
+ 14,
+ );
+}
+impl CUfunction_attribute_enum {
+ /** The block scheduling policy of a function. The value type is
+ CUclusterSchedulingPolicy / cudaClusterSchedulingPolicy.
+ See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/
+ pub const CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: CUfunction_attribute_enum = CUfunction_attribute_enum(
+ 15,
+ );
+}
+impl CUfunction_attribute_enum {
+ /// Sentinel value: the number of function attributes (one greater than the last valid attribute).
+ pub const CU_FUNC_ATTRIBUTE_MAX: CUfunction_attribute_enum = CUfunction_attribute_enum(
+ 16,
+ );
+}
+#[repr(transparent)]
+/// Function properties
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUfunction_attribute_enum(pub ::core::ffi::c_uint);
+/// Function properties
+pub use self::CUfunction_attribute_enum as CUfunction_attribute;
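Most function attributes are read-only queries, but the comments above note that CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES and CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT can also be set through cuFuncSetAttribute. An illustrative sketch of opting a kernel into a larger dynamic shared memory limit, not part of the generated bindings; the declaration is written by hand and CUfunction is approximated as an opaque pointer:

use core::ffi::{c_int, c_void};

extern "system" {
    // Hand-written declaration for illustration; assumed to mirror the CUDA driver ABI.
    fn cuFuncSetAttribute(
        hfunc: *mut c_void, // CUfunction modelled as an opaque handle
        attrib: CUfunction_attribute,
        value: c_int,
    ) -> c_int; // raw CUresult code; 0 means CUDA_SUCCESS
}

// Allow `kernel` to be launched with up to 96 KiB of dynamic shared memory,
// assuming the device's opt-in limit permits it.
unsafe fn opt_in_to_large_shared_mem(kernel: *mut c_void) -> bool {
    cuFuncSetAttribute(
        kernel,
        CUfunction_attribute::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
        96 * 1024,
    ) == 0
}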
+impl CUfunc_cache_enum {
+ ///< no preference for shared memory or L1 (default)
+ pub const CU_FUNC_CACHE_PREFER_NONE: CUfunc_cache_enum = CUfunc_cache_enum(0);
+}
+impl CUfunc_cache_enum {
+ ///< prefer larger shared memory and smaller L1 cache
+ pub const CU_FUNC_CACHE_PREFER_SHARED: CUfunc_cache_enum = CUfunc_cache_enum(1);
+}
+impl CUfunc_cache_enum {
+ ///< prefer larger L1 cache and smaller shared memory
+ pub const CU_FUNC_CACHE_PREFER_L1: CUfunc_cache_enum = CUfunc_cache_enum(2);
+}
+impl CUfunc_cache_enum {
+ ///< prefer equal sized L1 cache and shared memory
+ pub const CU_FUNC_CACHE_PREFER_EQUAL: CUfunc_cache_enum = CUfunc_cache_enum(3);
+}
+#[repr(transparent)]
+/// Function cache configurations
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUfunc_cache_enum(pub ::core::ffi::c_uint);
+/// Function cache configurations
+pub use self::CUfunc_cache_enum as CUfunc_cache;
+impl CUsharedconfig_enum {
+ ///< set default shared memory bank size
+ pub const CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE: CUsharedconfig_enum = CUsharedconfig_enum(
+ 0,
+ );
+}
+impl CUsharedconfig_enum {
+ ///< set shared memory bank width to four bytes
+ pub const CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: CUsharedconfig_enum = CUsharedconfig_enum(
+ 1,
+ );
+}
+impl CUsharedconfig_enum {
+ ///< set shared memory bank width to eight bytes
+ pub const CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: CUsharedconfig_enum = CUsharedconfig_enum(
+ 2,
+ );
+}
+#[repr(transparent)]
+/** \deprecated
+
+ Shared memory configurations*/
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUsharedconfig_enum(pub ::core::ffi::c_uint);
+/** \deprecated
+
+ Shared memory configurations*/
+pub use self::CUsharedconfig_enum as CUsharedconfig;
+impl CUshared_carveout_enum {
+ ///< No preference for shared memory or L1 (default)
+ pub const CU_SHAREDMEM_CARVEOUT_DEFAULT: CUshared_carveout_enum = CUshared_carveout_enum(
+ -1,
+ );
+}
+impl CUshared_carveout_enum {
+ ///< Prefer maximum available shared memory, minimum L1 cache
+ pub const CU_SHAREDMEM_CARVEOUT_MAX_SHARED: CUshared_carveout_enum = CUshared_carveout_enum(
+ 100,
+ );
+}
+impl CUshared_carveout_enum {
+ ///< Prefer maximum available L1 cache, minimum shared memory
+ pub const CU_SHAREDMEM_CARVEOUT_MAX_L1: CUshared_carveout_enum = CUshared_carveout_enum(
+ 0,
+ );
+}
+#[repr(transparent)]
+/// Shared memory carveout configurations. These may be passed to ::cuFuncSetAttribute or ::cuKernelSetAttribute
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUshared_carveout_enum(pub ::core::ffi::c_int);
+/// Shared memory carveout configurations. These may be passed to ::cuFuncSetAttribute or ::cuKernelSetAttribute
+pub use self::CUshared_carveout_enum as CUshared_carveout;
+impl CUmemorytype_enum {
+ ///< Host memory
+ pub const CU_MEMORYTYPE_HOST: CUmemorytype_enum = CUmemorytype_enum(1);
+}
+impl CUmemorytype_enum {
+ ///< Device memory
+ pub const CU_MEMORYTYPE_DEVICE: CUmemorytype_enum = CUmemorytype_enum(2);
+}
+impl CUmemorytype_enum {
+ ///< Array memory
+ pub const CU_MEMORYTYPE_ARRAY: CUmemorytype_enum = CUmemorytype_enum(3);
+}
+impl CUmemorytype_enum {
+ ///< Unified device or host memory
+ pub const CU_MEMORYTYPE_UNIFIED: CUmemorytype_enum = CUmemorytype_enum(4);
+}
+#[repr(transparent)]
+/// Memory types
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmemorytype_enum(pub ::core::ffi::c_uint);
+/// Memory types
+pub use self::CUmemorytype_enum as CUmemorytype;
+impl CUcomputemode_enum {
+ ///< Default compute mode (Multiple contexts allowed per device)
+ pub const CU_COMPUTEMODE_DEFAULT: CUcomputemode_enum = CUcomputemode_enum(0);
+}
+impl CUcomputemode_enum {
+ ///< Compute-prohibited mode (No contexts can be created on this device at this time)
+ pub const CU_COMPUTEMODE_PROHIBITED: CUcomputemode_enum = CUcomputemode_enum(2);
+}
+impl CUcomputemode_enum {
+ ///< Compute-exclusive-process mode (Only one context used by a single process can be present on this device at a time)
+ pub const CU_COMPUTEMODE_EXCLUSIVE_PROCESS: CUcomputemode_enum = CUcomputemode_enum(
+ 3,
+ );
+}
+#[repr(transparent)]
+/// Compute Modes
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUcomputemode_enum(pub ::core::ffi::c_uint);
+/// Compute Modes
+pub use self::CUcomputemode_enum as CUcomputemode;
+impl CUmem_advise_enum {
+ ///< Data will mostly be read and only occasionally be written to
+ pub const CU_MEM_ADVISE_SET_READ_MOSTLY: CUmem_advise_enum = CUmem_advise_enum(1);
+}
+impl CUmem_advise_enum {
+ ///< Undo the effect of ::CU_MEM_ADVISE_SET_READ_MOSTLY
+ pub const CU_MEM_ADVISE_UNSET_READ_MOSTLY: CUmem_advise_enum = CUmem_advise_enum(2);
+}
+impl CUmem_advise_enum {
+ ///< Set the preferred location for the data as the specified device
+ pub const CU_MEM_ADVISE_SET_PREFERRED_LOCATION: CUmem_advise_enum = CUmem_advise_enum(
+ 3,
+ );
+}
+impl CUmem_advise_enum {
+ ///< Clear the preferred location for the data
+ pub const CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION: CUmem_advise_enum = CUmem_advise_enum(
+ 4,
+ );
+}
+impl CUmem_advise_enum {
+ ///< Data will be accessed by the specified device, so prevent page faults as much as possible
+ pub const CU_MEM_ADVISE_SET_ACCESSED_BY: CUmem_advise_enum = CUmem_advise_enum(5);
+}
+impl CUmem_advise_enum {
+ ///< Let the Unified Memory subsystem decide on the page faulting policy for the specified device
+ pub const CU_MEM_ADVISE_UNSET_ACCESSED_BY: CUmem_advise_enum = CUmem_advise_enum(6);
+}
+#[repr(transparent)]
+/// Memory advise values
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmem_advise_enum(pub ::core::ffi::c_uint);
+/// Memory advise values
+pub use self::CUmem_advise_enum as CUmem_advise;
+impl CUmem_range_attribute_enum {
+ ///< Whether the range will mostly be read and only occasionally be written to
+ pub const CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY: CUmem_range_attribute_enum = CUmem_range_attribute_enum(
+ 1,
+ );
+}
+impl CUmem_range_attribute_enum {
+ ///< The preferred location of the range
+ pub const CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION: CUmem_range_attribute_enum = CUmem_range_attribute_enum(
+ 2,
+ );
+}
+impl CUmem_range_attribute_enum {
+ ///< Memory range has ::CU_MEM_ADVISE_SET_ACCESSED_BY set for specified device
+ pub const CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY: CUmem_range_attribute_enum = CUmem_range_attribute_enum(
+ 3,
+ );
+}
+impl CUmem_range_attribute_enum {
+ ///< The last location to which the range was prefetched
+ pub const CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION: CUmem_range_attribute_enum = CUmem_range_attribute_enum(
+ 4,
+ );
+}
+impl CUmem_range_attribute_enum {
+ ///< The preferred location type of the range
+ pub const CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE: CUmem_range_attribute_enum = CUmem_range_attribute_enum(
+ 5,
+ );
+}
+impl CUmem_range_attribute_enum {
+ ///< The preferred location id of the range
+ pub const CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID: CUmem_range_attribute_enum = CUmem_range_attribute_enum(
+ 6,
+ );
+}
+impl CUmem_range_attribute_enum {
+ ///< The last location type to which the range was prefetched
+ pub const CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE: CUmem_range_attribute_enum = CUmem_range_attribute_enum(
+ 7,
+ );
+}
+impl CUmem_range_attribute_enum {
+ ///< The last location id to which the range was prefetched
+ pub const CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID: CUmem_range_attribute_enum = CUmem_range_attribute_enum(
+ 8,
+ );
+}
+#[repr(transparent)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmem_range_attribute_enum(pub ::core::ffi::c_uint);
+pub use self::CUmem_range_attribute_enum as CUmem_range_attribute;
+impl CUjit_option_enum {
+ /** Max number of registers that a thread may use.\n
+ Option type: unsigned int\n
+ Applies to: compiler only*/
+ pub const CU_JIT_MAX_REGISTERS: CUjit_option_enum = CUjit_option_enum(0);
+}
+impl CUjit_option_enum {
+ /** IN: Specifies minimum number of threads per block to target compilation
+ for\n
+ OUT: Returns the number of threads the compiler actually targeted.
+ This restricts the resource utilization of the compiler (e.g. max
+ registers) such that a block with the given number of threads should be
+ able to launch based on register limitations. Note, this option does not
+ currently take into account any other resource limitations, such as
+ shared memory utilization.\n
+ Cannot be combined with ::CU_JIT_TARGET.\n
+ Option type: unsigned int\n
+ Applies to: compiler only*/
+ pub const CU_JIT_THREADS_PER_BLOCK: CUjit_option_enum = CUjit_option_enum(1);
+}
+impl CUjit_option_enum {
+ /** Overwrites the option value with the total wall clock time, in
+ milliseconds, spent in the compiler and linker\n
+ Option type: float\n
+ Applies to: compiler and linker*/
+ pub const CU_JIT_WALL_TIME: CUjit_option_enum = CUjit_option_enum(2);
+}
+impl CUjit_option_enum {
+ /** Pointer to a buffer in which to print any log messages
+ that are informational in nature (the buffer size is specified via
+ option ::CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES)\n
+ Option type: char *\n
+ Applies to: compiler and linker*/
+ pub const CU_JIT_INFO_LOG_BUFFER: CUjit_option_enum = CUjit_option_enum(3);
+}
+impl CUjit_option_enum {
+ /** IN: Log buffer size in bytes. Log messages will be capped at this size
+ (including null terminator)\n
+ OUT: Amount of log buffer filled with messages\n
+ Option type: unsigned int\n
+ Applies to: compiler and linker*/
+ pub const CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES: CUjit_option_enum = CUjit_option_enum(
+ 4,
+ );
+}
+impl CUjit_option_enum {
+ /** Pointer to a buffer in which to print any log messages that
+ reflect errors (the buffer size is specified via option
+ ::CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES)\n
+ Option type: char *\n
+ Applies to: compiler and linker*/
+ pub const CU_JIT_ERROR_LOG_BUFFER: CUjit_option_enum = CUjit_option_enum(5);
+}
+impl CUjit_option_enum {
+ /** IN: Log buffer size in bytes. Log messages will be capped at this size
+ (including null terminator)\n
+ OUT: Amount of log buffer filled with messages\n
+ Option type: unsigned int\n
+ Applies to: compiler and linker*/
+ pub const CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES: CUjit_option_enum = CUjit_option_enum(
+ 6,
+ );
+}
+impl CUjit_option_enum {
+ /** Level of optimizations to apply to generated code (0 - 4), with 4
+ being the default and highest level of optimizations.\n
+ Option type: unsigned int\n
+ Applies to: compiler only*/
+ pub const CU_JIT_OPTIMIZATION_LEVEL: CUjit_option_enum = CUjit_option_enum(7);
+}
+impl CUjit_option_enum {
+ /** No option value required. Determines the target based on the current
+ attached context (default)\n
+ Option type: No option value needed\n
+ Applies to: compiler and linker*/
+ pub const CU_JIT_TARGET_FROM_CUCONTEXT: CUjit_option_enum = CUjit_option_enum(8);
+}
+impl CUjit_option_enum {
+ /** Target is chosen based on supplied ::CUjit_target. Cannot be
+ combined with ::CU_JIT_THREADS_PER_BLOCK.\n
+ Option type: unsigned int for enumerated type ::CUjit_target\n
+ Applies to: compiler and linker*/
+ pub const CU_JIT_TARGET: CUjit_option_enum = CUjit_option_enum(9);
+}
+impl CUjit_option_enum {
+ /** Specifies choice of fallback strategy if matching cubin is not found.
+ Choice is based on supplied ::CUjit_fallback. This option cannot be
+ used with cuLink* APIs as the linker requires exact matches.\n
+ Option type: unsigned int for enumerated type ::CUjit_fallback\n
+ Applies to: compiler only*/
+ pub const CU_JIT_FALLBACK_STRATEGY: CUjit_option_enum = CUjit_option_enum(10);
+}
+impl CUjit_option_enum {
+ /** Specifies whether to create debug information in output (-g)
+ (0: false, default)\n
+ Option type: int\n
+ Applies to: compiler and linker*/
+ pub const CU_JIT_GENERATE_DEBUG_INFO: CUjit_option_enum = CUjit_option_enum(11);
+}
+impl CUjit_option_enum {
+ /** Generate verbose log messages (0: false, default)\n
+ Option type: int\n
+ Applies to: compiler and linker*/
+ pub const CU_JIT_LOG_VERBOSE: CUjit_option_enum = CUjit_option_enum(12);
+}
+impl CUjit_option_enum {
+ /** Generate line number information (-lineinfo) (0: false, default)\n
+ Option type: int\n
+ Applies to: compiler only*/
+ pub const CU_JIT_GENERATE_LINE_INFO: CUjit_option_enum = CUjit_option_enum(13);
+}
+impl CUjit_option_enum {
+ /** Specifies whether to enable caching explicitly (-dlcm) \n
+ Choice is based on supplied ::CUjit_cacheMode_enum.\n
+ Option type: unsigned int for enumerated type ::CUjit_cacheMode_enum\n
+ Applies to: compiler only*/
+ pub const CU_JIT_CACHE_MODE: CUjit_option_enum = CUjit_option_enum(14);
+}
+impl CUjit_option_enum {
+ /** \deprecated
+ This jit option is deprecated and should not be used.*/
+ pub const CU_JIT_NEW_SM3X_OPT: CUjit_option_enum = CUjit_option_enum(15);
+}
+impl CUjit_option_enum {
+ /// This jit option is used for internal purpose only.
+ pub const CU_JIT_FAST_COMPILE: CUjit_option_enum = CUjit_option_enum(16);
+}
+impl CUjit_option_enum {
+ /** Array of device symbol names that will be relocated to the corresponding
+ host addresses stored in ::CU_JIT_GLOBAL_SYMBOL_ADDRESSES.\n
+ Must contain ::CU_JIT_GLOBAL_SYMBOL_COUNT entries.\n
+ When loading a device module, driver will relocate all encountered
+ unresolved symbols to the host addresses.\n
+ It is only allowed to register symbols that correspond to unresolved
+ global variables.\n
+ It is illegal to register the same device symbol at multiple addresses.\n
+ Option type: const char **\n
+ Applies to: dynamic linker only*/
+ pub const CU_JIT_GLOBAL_SYMBOL_NAMES: CUjit_option_enum = CUjit_option_enum(17);
+}
+impl CUjit_option_enum {
+ /** Array of host addresses that will be used to relocate corresponding
+ device symbols stored in ::CU_JIT_GLOBAL_SYMBOL_NAMES.\n
+ Must contain ::CU_JIT_GLOBAL_SYMBOL_COUNT entries.\n
+ Option type: void **\n
+ Applies to: dynamic linker only*/
+ pub const CU_JIT_GLOBAL_SYMBOL_ADDRESSES: CUjit_option_enum = CUjit_option_enum(18);
+}
+impl CUjit_option_enum {
+ /** Number of entries in ::CU_JIT_GLOBAL_SYMBOL_NAMES and
+ ::CU_JIT_GLOBAL_SYMBOL_ADDRESSES arrays.\n
+ Option type: unsigned int\n
+ Applies to: dynamic linker only*/
+ pub const CU_JIT_GLOBAL_SYMBOL_COUNT: CUjit_option_enum = CUjit_option_enum(19);
+}
+impl CUjit_option_enum {
+ /** \deprecated
+ Enable link-time optimization (-dlto) for device code (Disabled by default).\n
+ This option is not supported on 32-bit platforms.\n
+ Option type: int\n
+ Applies to: compiler and linker
+
+ Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
+ pub const CU_JIT_LTO: CUjit_option_enum = CUjit_option_enum(20);
+}
+impl CUjit_option_enum {
+ /** \deprecated
+ Control single-precision denormals (-ftz) support (0: false, default).
+ 1 : flushes denormal values to zero
+ 0 : preserves denormal values
+ Option type: int\n
+ Applies to: link-time optimization specified with CU_JIT_LTO
+
+ Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
+ pub const CU_JIT_FTZ: CUjit_option_enum = CUjit_option_enum(21);
+}
+impl CUjit_option_enum {
+ /** \deprecated
+ Control single-precision floating-point division and reciprocals
+ (-prec-div) support (1: true, default).
+ 1 : Enables the IEEE round-to-nearest mode
+ 0 : Enables the fast approximation mode
+ Option type: int\n
+ Applies to: link-time optimization specified with CU_JIT_LTO
+
+ Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
+ pub const CU_JIT_PREC_DIV: CUjit_option_enum = CUjit_option_enum(22);
+}
+impl CUjit_option_enum {
+ /** \deprecated
+ Control single-precision floating-point square root
+ (-prec-sqrt) support (1: true, default).
+ 1 : Enables the IEEE round-to-nearest mode
+ 0 : Enables the fast approximation mode
+ Option type: int\n
+ Applies to: link-time optimization specified with CU_JIT_LTO
+
+ Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
+ pub const CU_JIT_PREC_SQRT: CUjit_option_enum = CUjit_option_enum(23);
+}
+impl CUjit_option_enum {
+ /** \deprecated
+ Enable/Disable the contraction of floating-point multiplies
+ and adds/subtracts into floating-point multiply-add (-fma)
+ operations (1: Enable, default; 0: Disable).
+ Option type: int\n
+ Applies to: link-time optimization specified with CU_JIT_LTO
+
+ Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
+ pub const CU_JIT_FMA: CUjit_option_enum = CUjit_option_enum(24);
+}
+impl CUjit_option_enum {
+ /** \deprecated
+ Array of kernel names that should be preserved at link time while others
+ can be removed.\n
+ Must contain ::CU_JIT_REFERENCED_KERNEL_COUNT entries.\n
+ Note that kernel names can be mangled by the compiler in which case the
+ mangled name needs to be specified.\n
+ Wildcard "*" can be used to represent zero or more characters instead of
+ specifying the full or mangled name.\n
+ It is important to note that the wildcard "*" is also added implicitly.
+ For example, specifying "foo" will match "foobaz", "barfoo", "barfoobaz" and
+ thus preserve all kernels with those names. This can be avoided by providing
+ a more specific name like "barfoobaz".\n
+ Option type: const char **\n
+ Applies to: dynamic linker only
+
+ Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
+ pub const CU_JIT_REFERENCED_KERNEL_NAMES: CUjit_option_enum = CUjit_option_enum(25);
+}
+impl CUjit_option_enum {
+ /** \deprecated
+ Number of entries in ::CU_JIT_REFERENCED_KERNEL_NAMES array.\n
+ Option type: unsigned int\n
+ Applies to: dynamic linker only
+
+ Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
+ pub const CU_JIT_REFERENCED_KERNEL_COUNT: CUjit_option_enum = CUjit_option_enum(26);
+}
+impl CUjit_option_enum {
+ /** \deprecated
+ Array of variable names (__device__ and/or __constant__) that should be
+ preserved at link time while others can be removed.\n
+ Must contain ::CU_JIT_REFERENCED_VARIABLE_COUNT entries.\n
+ Note that variable names can be mangled by the compiler in which case the
+ mangled name needs to be specified.\n
+ Wildcard "*" can be used to represent zero or more characters instead of
+ specifying the full or mangled name.\n
+ It is important to note that the wildcard "*" is also added implicitly.
+ For example, specifying "foo" will match "foobaz", "barfoo", "barfoobaz" and
+ thus preserve all variables with those names. This can be avoided by providing
+ a more specific name like "barfoobaz".\n
+ Option type: const char **\n
+ Applies to: link-time optimization specified with CU_JIT_LTO
+
+ Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
+ pub const CU_JIT_REFERENCED_VARIABLE_NAMES: CUjit_option_enum = CUjit_option_enum(
+ 27,
+ );
+}
+impl CUjit_option_enum {
+ /** \deprecated
+ Number of entries in ::CU_JIT_REFERENCED_VARIABLE_NAMES array.\n
+ Option type: unsigned int\n
+ Applies to: link-time optimization specified with CU_JIT_LTO
+
+ Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
+ pub const CU_JIT_REFERENCED_VARIABLE_COUNT: CUjit_option_enum = CUjit_option_enum(
+ 28,
+ );
+}
+impl CUjit_option_enum {
+ /** \deprecated
+ This option serves as a hint to enable the JIT compiler/linker
+ to remove constant (__constant__) and device (__device__) variables
+ unreferenced in device code (Disabled by default).\n
+ Note that host references to constant and device variables using APIs like
+ ::cuModuleGetGlobal() with this option specified may result in undefined behavior unless
+ the variables are explicitly specified using ::CU_JIT_REFERENCED_VARIABLE_NAMES.\n
+ Option type: int\n
+ Applies to: link-time optimization specified with CU_JIT_LTO
+
+ Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
+ pub const CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES: CUjit_option_enum = CUjit_option_enum(
+ 29,
+ );
+}
+impl CUjit_option_enum {
+ /** Generate position independent code (0: false)\n
+ Option type: int\n
+ Applies to: compiler only*/
+ pub const CU_JIT_POSITION_INDEPENDENT_CODE: CUjit_option_enum = CUjit_option_enum(
+ 30,
+ );
+}
+impl CUjit_option_enum {
+ /** This option hints to the JIT compiler the minimum number of CTAs from the
+ kernel’s grid to be mapped to a SM. This option is ignored when used together
+ with ::CU_JIT_MAX_REGISTERS or ::CU_JIT_THREADS_PER_BLOCK.
+ Optimizations based on this option need ::CU_JIT_MAX_THREADS_PER_BLOCK to
+ be specified as well. For kernels already using PTX directive .minnctapersm,
+ this option will be ignored by default. Use ::CU_JIT_OVERRIDE_DIRECTIVE_VALUES
+ to let this option take precedence over the PTX directive.
+ Option type: unsigned int\n
+ Applies to: compiler only*/
+ pub const CU_JIT_MIN_CTA_PER_SM: CUjit_option_enum = CUjit_option_enum(31);
+}
+impl CUjit_option_enum {
+ /** Maximum number of threads in a thread block, computed as the product of
+ the maximum extent specified for each dimension of the block. This limit
+ is guaranteed not to be exceeded in any invocation of the kernel. Exceeding
+ the maximum number of threads results in a runtime error or kernel launch
+ failure. For kernels already using PTX directive .maxntid, this option will
+ be ignored by default. Use ::CU_JIT_OVERRIDE_DIRECTIVE_VALUES to let this
+ option take precedence over the PTX directive.
+ Option type: int\n
+ Applies to: compiler only*/
+ pub const CU_JIT_MAX_THREADS_PER_BLOCK: CUjit_option_enum = CUjit_option_enum(32);
+}
+impl CUjit_option_enum {
+ /** This option lets the values specified using ::CU_JIT_MAX_REGISTERS,
+ ::CU_JIT_THREADS_PER_BLOCK, ::CU_JIT_MAX_THREADS_PER_BLOCK and
+ ::CU_JIT_MIN_CTA_PER_SM take precedence over any PTX directives.
+ (0: Disable, default; 1: Enable)
+ Option type: int\n
+ Applies to: compiler only*/
+ pub const CU_JIT_OVERRIDE_DIRECTIVE_VALUES: CUjit_option_enum = CUjit_option_enum(
+ 33,
+ );
+}
+impl CUjit_option_enum {
+ /// Sentinel value: the number of JIT options (one greater than the last valid option).
+ pub const CU_JIT_NUM_OPTIONS: CUjit_option_enum = CUjit_option_enum(34);
+}
+#[repr(transparent)]
+/// Online compiler and linker options
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUjit_option_enum(pub ::core::ffi::c_uint);
+/// Online compiler and linker options
+pub use self::CUjit_option_enum as CUjit_option;
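As the option comments above note, JIT options travel as two parallel arrays: one of CUjit_option values and one of type-erased values whose meaning depends on the option (a buffer pointer for CU_JIT_INFO_LOG_BUFFER, a size smuggled through the void* for CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES). An illustrative sketch of capturing the JIT info log while loading a PTX image, not part of the generated bindings; the cuModuleLoadDataEx declaration is written by hand and CUmodule is approximated as an opaque pointer:

use core::ffi::{c_int, c_uint, c_void};

extern "system" {
    // Hand-written declaration for illustration; assumed to mirror the CUDA driver ABI.
    fn cuModuleLoadDataEx(
        module: *mut *mut c_void, // CUmodule modelled as an opaque handle
        image: *const c_void,
        num_options: c_uint,
        options: *mut CUjit_option,
        option_values: *mut *mut c_void,
    ) -> c_int; // raw CUresult code; 0 means CUDA_SUCCESS
}

// Load a NUL-terminated PTX image and return the driver status plus the info log.
unsafe fn load_with_log(ptx: &[u8]) -> (c_int, Vec<u8>) {
    let mut log = vec![0u8; 16 * 1024];
    let mut options = [
        CUjit_option::CU_JIT_INFO_LOG_BUFFER,
        CUjit_option::CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES,
    ];
    let mut option_values = [
        log.as_mut_ptr() as *mut c_void, // where the info log is written
        log.len() as *mut c_void,        // buffer size, passed through the void*
    ];
    let mut module = core::ptr::null_mut();
    let status = cuModuleLoadDataEx(
        &mut module, // the module handle is ignored in this sketch
        ptx.as_ptr() as *const c_void,
        options.len() as c_uint,
        options.as_mut_ptr(),
        option_values.as_mut_ptr(),
    );
    (status, log)
}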
+impl CUjit_target_enum {
+ ///< Compute device class 3.0
+ pub const CU_TARGET_COMPUTE_30: CUjit_target_enum = CUjit_target_enum(30);
+}
+impl CUjit_target_enum {
+ ///< Compute device class 3.2
+ pub const CU_TARGET_COMPUTE_32: CUjit_target_enum = CUjit_target_enum(32);
+}
+impl CUjit_target_enum {
+ ///< Compute device class 3.5
+ pub const CU_TARGET_COMPUTE_35: CUjit_target_enum = CUjit_target_enum(35);
+}
+impl CUjit_target_enum {
+ ///< Compute device class 3.7
+ pub const CU_TARGET_COMPUTE_37: CUjit_target_enum = CUjit_target_enum(37);
+}
+impl CUjit_target_enum {
+ ///< Compute device class 5.0
+ pub const CU_TARGET_COMPUTE_50: CUjit_target_enum = CUjit_target_enum(50);
+}
+impl CUjit_target_enum {
+ ///< Compute device class 5.2
+ pub const CU_TARGET_COMPUTE_52: CUjit_target_enum = CUjit_target_enum(52);
+}
+impl CUjit_target_enum {
+ ///< Compute device class 5.3
+ pub const CU_TARGET_COMPUTE_53: CUjit_target_enum = CUjit_target_enum(53);
+}
+impl CUjit_target_enum {
+ ///< Compute device class 6.0.
+ pub const CU_TARGET_COMPUTE_60: CUjit_target_enum = CUjit_target_enum(60);
+}
+impl CUjit_target_enum {
+ ///< Compute device class 6.1.
+ pub const CU_TARGET_COMPUTE_61: CUjit_target_enum = CUjit_target_enum(61);
+}
+impl CUjit_target_enum {
+ ///< Compute device class 6.2.
+ pub const CU_TARGET_COMPUTE_62: CUjit_target_enum = CUjit_target_enum(62);
+}
+impl CUjit_target_enum {
+ ///< Compute device class 7.0.
+ pub const CU_TARGET_COMPUTE_70: CUjit_target_enum = CUjit_target_enum(70);
+}
+impl CUjit_target_enum {
+ ///< Compute device class 7.2.
+ pub const CU_TARGET_COMPUTE_72: CUjit_target_enum = CUjit_target_enum(72);
+}
+impl CUjit_target_enum {
+ ///< Compute device class 7.5.
+ pub const CU_TARGET_COMPUTE_75: CUjit_target_enum = CUjit_target_enum(75);
+}
+impl CUjit_target_enum {
+ ///< Compute device class 8.0.
+ pub const CU_TARGET_COMPUTE_80: CUjit_target_enum = CUjit_target_enum(80);
+}
+impl CUjit_target_enum {
+ ///< Compute device class 8.6.
+ pub const CU_TARGET_COMPUTE_86: CUjit_target_enum = CUjit_target_enum(86);
+}
+impl CUjit_target_enum {
+ ///< Compute device class 8.7.
+ pub const CU_TARGET_COMPUTE_87: CUjit_target_enum = CUjit_target_enum(87);
+}
+impl CUjit_target_enum {
+ ///< Compute device class 8.9.
+ pub const CU_TARGET_COMPUTE_89: CUjit_target_enum = CUjit_target_enum(89);
+}
+impl CUjit_target_enum {
+ ///< Compute device class 9.0.
+ pub const CU_TARGET_COMPUTE_90: CUjit_target_enum = CUjit_target_enum(90);
+}
+impl CUjit_target_enum {
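+ ///< Compute device class 9.0 with accelerated features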
+ pub const CU_TARGET_COMPUTE_90A: CUjit_target_enum = CUjit_target_enum(65626);
+}
+#[repr(transparent)]
+/// Online compilation targets
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUjit_target_enum(pub ::core::ffi::c_uint);
+/// Online compilation targets
+pub use self::CUjit_target_enum as CUjit_target;
+impl CUjit_fallback_enum {
+ ///< Prefer to compile ptx if exact binary match not found
+ pub const CU_PREFER_PTX: CUjit_fallback_enum = CUjit_fallback_enum(0);
+}
+impl CUjit_fallback_enum {
+ ///< Prefer to fall back to compatible binary code if exact match not found
+ pub const CU_PREFER_BINARY: CUjit_fallback_enum = CUjit_fallback_enum(1);
+}
+#[repr(transparent)]
+/// Cubin matching fallback strategies
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUjit_fallback_enum(pub ::core::ffi::c_uint);
+/// Cubin matching fallback strategies
+pub use self::CUjit_fallback_enum as CUjit_fallback;
+impl CUjit_cacheMode_enum {
+ ///< Compile with no -dlcm flag specified
+ pub const CU_JIT_CACHE_OPTION_NONE: CUjit_cacheMode_enum = CUjit_cacheMode_enum(0);
+}
+impl CUjit_cacheMode_enum {
+ ///< Compile with L1 cache disabled
+ pub const CU_JIT_CACHE_OPTION_CG: CUjit_cacheMode_enum = CUjit_cacheMode_enum(1);
+}
+impl CUjit_cacheMode_enum {
+ ///< Compile with L1 cache enabled
+ pub const CU_JIT_CACHE_OPTION_CA: CUjit_cacheMode_enum = CUjit_cacheMode_enum(2);
+}
+#[repr(transparent)]
+/// Caching modes for dlcm
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUjit_cacheMode_enum(pub ::core::ffi::c_uint);
+/// Caching modes for dlcm
+pub use self::CUjit_cacheMode_enum as CUjit_cacheMode;
+impl CUjitInputType_enum {
+ /** Compiled device-class-specific device code\n
+ Applicable options: none*/
+ pub const CU_JIT_INPUT_CUBIN: CUjitInputType_enum = CUjitInputType_enum(0);
+}
+impl CUjitInputType_enum {
+ /** PTX source code\n
+ Applicable options: PTX compiler options*/
+ pub const CU_JIT_INPUT_PTX: CUjitInputType_enum = CUjitInputType_enum(1);
+}
+impl CUjitInputType_enum {
+ /** Bundle of multiple cubins and/or PTX of some device code\n
+ Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY*/
+ pub const CU_JIT_INPUT_FATBINARY: CUjitInputType_enum = CUjitInputType_enum(2);
+}
+impl CUjitInputType_enum {
+ /** Host object with embedded device code\n
+ Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY*/
+ pub const CU_JIT_INPUT_OBJECT: CUjitInputType_enum = CUjitInputType_enum(3);
+}
+impl CUjitInputType_enum {
+ /** Archive of host objects with embedded device code\n
+ Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY*/
+ pub const CU_JIT_INPUT_LIBRARY: CUjitInputType_enum = CUjitInputType_enum(4);
+}
+impl CUjitInputType_enum {
+ /** \deprecated
+ High-level intermediate code for link-time optimization\n
+ Applicable options: NVVM compiler options, PTX compiler options
+
+ Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
+ pub const CU_JIT_INPUT_NVVM: CUjitInputType_enum = CUjitInputType_enum(5);
+}
+impl CUjitInputType_enum {
+ /// Sentinel value: the number of JIT input types.
+ pub const CU_JIT_NUM_INPUT_TYPES: CUjitInputType_enum = CUjitInputType_enum(6);
+}
+#[repr(transparent)]
+/// Device code formats
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUjitInputType_enum(pub ::core::ffi::c_uint);
+/// Device code formats
+pub use self::CUjitInputType_enum as CUjitInputType;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUlinkState_st {
+ _unused: [u8; 0],
+}
+pub type CUlinkState = *mut CUlinkState_st;
+impl CUgraphicsRegisterFlags_enum {
+ pub const CU_GRAPHICS_REGISTER_FLAGS_NONE: CUgraphicsRegisterFlags_enum = CUgraphicsRegisterFlags_enum(
+ 0,
+ );
+}
+impl CUgraphicsRegisterFlags_enum {
+ pub const CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY: CUgraphicsRegisterFlags_enum = CUgraphicsRegisterFlags_enum(
+ 1,
+ );
+}
+impl CUgraphicsRegisterFlags_enum {
+ pub const CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD: CUgraphicsRegisterFlags_enum = CUgraphicsRegisterFlags_enum(
+ 2,
+ );
+}
+impl CUgraphicsRegisterFlags_enum {
+ pub const CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST: CUgraphicsRegisterFlags_enum = CUgraphicsRegisterFlags_enum(
+ 4,
+ );
+}
+impl CUgraphicsRegisterFlags_enum {
+ pub const CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER: CUgraphicsRegisterFlags_enum = CUgraphicsRegisterFlags_enum(
+ 8,
+ );
+}
+#[repr(transparent)]
+/// Flags to register a graphics resource
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUgraphicsRegisterFlags_enum(pub ::core::ffi::c_uint);
+/// Flags to register a graphics resource
+pub use self::CUgraphicsRegisterFlags_enum as CUgraphicsRegisterFlags;
+impl CUgraphicsMapResourceFlags_enum {
+ pub const CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: CUgraphicsMapResourceFlags_enum = CUgraphicsMapResourceFlags_enum(
+ 0,
+ );
+}
+impl CUgraphicsMapResourceFlags_enum {
+ pub const CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: CUgraphicsMapResourceFlags_enum = CUgraphicsMapResourceFlags_enum(
+ 1,
+ );
+}
+impl CUgraphicsMapResourceFlags_enum {
+ pub const CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: CUgraphicsMapResourceFlags_enum = CUgraphicsMapResourceFlags_enum(
+ 2,
+ );
+}
+#[repr(transparent)]
+/// Flags for mapping and unmapping interop resources
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUgraphicsMapResourceFlags_enum(pub ::core::ffi::c_uint);
+/// Flags for mapping and unmapping interop resources
+pub use self::CUgraphicsMapResourceFlags_enum as CUgraphicsMapResourceFlags;
+impl CUarray_cubemap_face_enum {
+ ///< Positive X face of cubemap
+ pub const CU_CUBEMAP_FACE_POSITIVE_X: CUarray_cubemap_face_enum = CUarray_cubemap_face_enum(
+ 0,
+ );
+}
+impl CUarray_cubemap_face_enum {
+ ///< Negative X face of cubemap
+ pub const CU_CUBEMAP_FACE_NEGATIVE_X: CUarray_cubemap_face_enum = CUarray_cubemap_face_enum(
+ 1,
+ );
+}
+impl CUarray_cubemap_face_enum {
+ ///< Positive Y face of cubemap
+ pub const CU_CUBEMAP_FACE_POSITIVE_Y: CUarray_cubemap_face_enum = CUarray_cubemap_face_enum(
+ 2,
+ );
+}
+impl CUarray_cubemap_face_enum {
+ ///< Negative Y face of cubemap
+ pub const CU_CUBEMAP_FACE_NEGATIVE_Y: CUarray_cubemap_face_enum = CUarray_cubemap_face_enum(
+ 3,
+ );
+}
+impl CUarray_cubemap_face_enum {
+ ///< Positive Z face of cubemap
+ pub const CU_CUBEMAP_FACE_POSITIVE_Z: CUarray_cubemap_face_enum = CUarray_cubemap_face_enum(
+ 4,
+ );
+}
+impl CUarray_cubemap_face_enum {
+ ///< Negative Z face of cubemap
+ pub const CU_CUBEMAP_FACE_NEGATIVE_Z: CUarray_cubemap_face_enum = CUarray_cubemap_face_enum(
+ 5,
+ );
+}
+#[repr(transparent)]
+/// Array indices for cube faces
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUarray_cubemap_face_enum(pub ::core::ffi::c_uint);
+/// Array indices for cube faces
+pub use self::CUarray_cubemap_face_enum as CUarray_cubemap_face;
+impl CUlimit_enum {
+ ///< GPU thread stack size
+ pub const CU_LIMIT_STACK_SIZE: CUlimit_enum = CUlimit_enum(0);
+}
+impl CUlimit_enum {
+ ///< GPU printf FIFO size
+ pub const CU_LIMIT_PRINTF_FIFO_SIZE: CUlimit_enum = CUlimit_enum(1);
+}
+impl CUlimit_enum {
+ ///< GPU malloc heap size
+ pub const CU_LIMIT_MALLOC_HEAP_SIZE: CUlimit_enum = CUlimit_enum(2);
+}
+impl CUlimit_enum {
+ ///< GPU device runtime launch synchronize depth
+ pub const CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH: CUlimit_enum = CUlimit_enum(3);
+}
+impl CUlimit_enum {
+ ///< GPU device runtime pending launch count
+ pub const CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT: CUlimit_enum = CUlimit_enum(4);
+}
+impl CUlimit_enum {
+ ///< A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in bytes). This is a hint.
+ pub const CU_LIMIT_MAX_L2_FETCH_GRANULARITY: CUlimit_enum = CUlimit_enum(5);
+}
+impl CUlimit_enum {
+ ///< A size in bytes for L2 persisting lines cache size
+ pub const CU_LIMIT_PERSISTING_L2_CACHE_SIZE: CUlimit_enum = CUlimit_enum(6);
+}
+impl CUlimit_enum {
+ pub const CU_LIMIT_MAX: CUlimit_enum = CUlimit_enum(7);
+}
+#[repr(transparent)]
+/// Limits
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUlimit_enum(pub ::core::ffi::c_uint);
+/// Limits
+pub use self::CUlimit_enum as CUlimit;
+impl CUresourcetype_enum {
+ ///< Array resource
+ pub const CU_RESOURCE_TYPE_ARRAY: CUresourcetype_enum = CUresourcetype_enum(0);
+}
+impl CUresourcetype_enum {
+ ///< Mipmapped array resource
+ pub const CU_RESOURCE_TYPE_MIPMAPPED_ARRAY: CUresourcetype_enum = CUresourcetype_enum(
+ 1,
+ );
+}
+impl CUresourcetype_enum {
+ ///< Linear resource
+ pub const CU_RESOURCE_TYPE_LINEAR: CUresourcetype_enum = CUresourcetype_enum(2);
+}
+impl CUresourcetype_enum {
+ ///< Pitch 2D resource
+ pub const CU_RESOURCE_TYPE_PITCH2D: CUresourcetype_enum = CUresourcetype_enum(3);
+}
+#[repr(transparent)]
+/// Resource types
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUresourcetype_enum(pub ::core::ffi::c_uint);
+/// Resource types
+pub use self::CUresourcetype_enum as CUresourcetype;
+/** CUDA host function
+ \param userData Argument value passed to the function*/
+pub type CUhostFn = ::core::option::Option<
+ unsafe extern "system" fn(userData: *mut ::core::ffi::c_void),
+>;
+impl CUaccessProperty_enum {
+ ///< Normal cache persistence.
+ pub const CU_ACCESS_PROPERTY_NORMAL: CUaccessProperty_enum = CUaccessProperty_enum(
+ 0,
+ );
+}
+impl CUaccessProperty_enum {
+ ///< Streaming access is less likely to persist in cache.
+ pub const CU_ACCESS_PROPERTY_STREAMING: CUaccessProperty_enum = CUaccessProperty_enum(
+ 1,
+ );
+}
+impl CUaccessProperty_enum {
+ ///< Persisting access is more likely to persist in cache.
+ pub const CU_ACCESS_PROPERTY_PERSISTING: CUaccessProperty_enum = CUaccessProperty_enum(
+ 2,
+ );
+}
+#[repr(transparent)]
+/// Specifies performance hint with ::CUaccessPolicyWindow for hitProp and missProp members.
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUaccessProperty_enum(pub ::core::ffi::c_uint);
+/// Specifies performance hint with ::CUaccessPolicyWindow for hitProp and missProp members.
+pub use self::CUaccessProperty_enum as CUaccessProperty;
+/** Specifies an access policy for a window, a contiguous extent of memory
+ beginning at base_ptr and ending at base_ptr + num_bytes.
+ num_bytes is limited by CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE.
+ Partition into many segments and assign segments such that:
+ sum of "hit segments" / window == approx. ratio.
+ sum of "miss segments" / window == approx 1-ratio.
+ Segments and ratio specifications are fitted to the capabilities of
+ the architecture.
+ Accesses in a hit segment apply the hitProp access policy.
+ Accesses in a miss segment apply the missProp access policy.*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, PartialEq)]
+pub struct CUaccessPolicyWindow_st {
+ ///< Starting address of the access policy window. CUDA driver may align it.
+ pub base_ptr: *mut ::core::ffi::c_void,
+ ///< Size in bytes of the window policy. CUDA driver may restrict the maximum size and alignment.
+ pub num_bytes: usize,
+ ///< hitRatio specifies percentage of lines assigned hitProp, rest are assigned missProp.
+ pub hitRatio: f32,
+ ///< ::CUaccessProperty set for hit.
+ pub hitProp: CUaccessProperty,
+ ///< ::CUaccessProperty set for miss. Must be either NORMAL or STREAMING
+ pub missProp: CUaccessProperty,
+}
+/** Specifies an access policy for a window, a contiguous extent of memory
+ beginning at base_ptr and ending at base_ptr + num_bytes.
+ num_bytes is limited by CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE.
+ Partition into many segments and assign segments such that:
+ sum of "hit segments" / window == approx. ratio.
+ sum of "miss segments" / window == approx 1-ratio.
+ Segments and ratio specifications are fitted to the capabilities of
+ the architecture.
+ Accesses in a hit segment apply the hitProp access policy.
+ Accesses in a miss segment apply the missProp access policy.*/
+pub type CUaccessPolicyWindow_v1 = CUaccessPolicyWindow_st;
+/// Access policy window
+pub type CUaccessPolicyWindow = CUaccessPolicyWindow_v1;
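// Illustrative sketch, not part of the generated bindings: filling the access-policy
// window described above. A real `base_ptr` would come from cuMemAlloc; a null
// placeholder keeps the snippet self-contained.
fn example_policy_window(window_bytes: usize) -> CUaccessPolicyWindow {
    CUaccessPolicyWindow {
        base_ptr: ::core::ptr::null_mut(), // placeholder for a device allocation
        num_bytes: window_bytes, // capped by CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE
        hitRatio: 0.6,           // roughly 60% of accesses receive hitProp
        hitProp: CUaccessProperty::CU_ACCESS_PROPERTY_PERSISTING,
        missProp: CUaccessProperty::CU_ACCESS_PROPERTY_STREAMING, // must be NORMAL or STREAMING
    }
}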
+/// GPU kernel node parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_KERNEL_NODE_PARAMS_st {
+ ///< Kernel to launch
+ pub func: CUfunction,
+ ///< Width of grid in blocks
+ pub gridDimX: ::core::ffi::c_uint,
+ ///< Height of grid in blocks
+ pub gridDimY: ::core::ffi::c_uint,
+ ///< Depth of grid in blocks
+ pub gridDimZ: ::core::ffi::c_uint,
+ ///< X dimension of each thread block
+ pub blockDimX: ::core::ffi::c_uint,
+ ///< Y dimension of each thread block
+ pub blockDimY: ::core::ffi::c_uint,
+ ///< Z dimension of each thread block
+ pub blockDimZ: ::core::ffi::c_uint,
+ ///< Dynamic shared-memory size per thread block in bytes
+ pub sharedMemBytes: ::core::ffi::c_uint,
+ ///< Array of pointers to kernel parameters
+ pub kernelParams: *mut *mut ::core::ffi::c_void,
+ ///< Extra options
+ pub extra: *mut *mut ::core::ffi::c_void,
+}
+/// GPU kernel node parameters
+pub type CUDA_KERNEL_NODE_PARAMS_v1 = CUDA_KERNEL_NODE_PARAMS_st;
+/// GPU kernel node parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_KERNEL_NODE_PARAMS_v2_st {
+ ///< Kernel to launch
+ pub func: CUfunction,
+ ///< Width of grid in blocks
+ pub gridDimX: ::core::ffi::c_uint,
+ ///< Height of grid in blocks
+ pub gridDimY: ::core::ffi::c_uint,
+ ///< Depth of grid in blocks
+ pub gridDimZ: ::core::ffi::c_uint,
+ ///< X dimension of each thread block
+ pub blockDimX: ::core::ffi::c_uint,
+ ///< Y dimension of each thread block
+ pub blockDimY: ::core::ffi::c_uint,
+ ///< Z dimension of each thread block
+ pub blockDimZ: ::core::ffi::c_uint,
+ ///< Dynamic shared-memory size per thread block in bytes
+ pub sharedMemBytes: ::core::ffi::c_uint,
+ ///< Array of pointers to kernel parameters
+ pub kernelParams: *mut *mut ::core::ffi::c_void,
+ ///< Extra options
+ pub extra: *mut *mut ::core::ffi::c_void,
+ ///< Kernel to launch, will only be referenced if func is NULL
+ pub kern: CUkernel,
+ ///< Context for the kernel task to run in. The value NULL will indicate the current context should be used by the api. This field is ignored if func is set.
+ pub ctx: CUcontext,
+}
+/// GPU kernel node parameters
+pub type CUDA_KERNEL_NODE_PARAMS_v2 = CUDA_KERNEL_NODE_PARAMS_v2_st;
+/// GPU kernel node parameters
+pub type CUDA_KERNEL_NODE_PARAMS = CUDA_KERNEL_NODE_PARAMS_v2;
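// Illustrative sketch, not part of the generated bindings: a v2 kernel-node descriptor
// for a one-dimensional launch. `func` would come from cuModuleGetFunction and `args`
// points at the kernel's parameter pointers; the zeroed `kern`/`ctx` handles mean
// "use `func`" and "use the current context" respectively.
fn example_kernel_node(
    func: CUfunction,
    args: *mut *mut ::core::ffi::c_void,
) -> CUDA_KERNEL_NODE_PARAMS {
    CUDA_KERNEL_NODE_PARAMS {
        func,
        gridDimX: 256,
        gridDimY: 1,
        gridDimZ: 1,
        blockDimX: 128,
        blockDimY: 1,
        blockDimZ: 1,
        sharedMemBytes: 0,
        kernelParams: args,
        extra: ::core::ptr::null_mut(),
        kern: unsafe { ::core::mem::zeroed() }, // only consulted when func is NULL
        ctx: unsafe { ::core::mem::zeroed() },  // NULL selects the current context
    }
}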
+/// GPU kernel node parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_KERNEL_NODE_PARAMS_v3_st {
+ ///< Kernel to launch
+ pub func: CUfunction,
+ ///< Width of grid in blocks
+ pub gridDimX: ::core::ffi::c_uint,
+ ///< Height of grid in blocks
+ pub gridDimY: ::core::ffi::c_uint,
+ ///< Depth of grid in blocks
+ pub gridDimZ: ::core::ffi::c_uint,
+ ///< X dimension of each thread block
+ pub blockDimX: ::core::ffi::c_uint,
+ ///< Y dimension of each thread block
+ pub blockDimY: ::core::ffi::c_uint,
+ ///< Z dimension of each thread block
+ pub blockDimZ: ::core::ffi::c_uint,
+ ///< Dynamic shared-memory size per thread block in bytes
+ pub sharedMemBytes: ::core::ffi::c_uint,
+ ///< Array of pointers to kernel parameters
+ pub kernelParams: *mut *mut ::core::ffi::c_void,
+ ///< Extra options
+ pub extra: *mut *mut ::core::ffi::c_void,
+ ///< Kernel to launch, will only be referenced if func is NULL
+ pub kern: CUkernel,
+ ///< Context for the kernel task to run in. The value NULL will indicate the current context should be used by the api. This field is ignored if func is set.
+ pub ctx: CUcontext,
+}
+/// GPU kernel node parameters
+pub type CUDA_KERNEL_NODE_PARAMS_v3 = CUDA_KERNEL_NODE_PARAMS_v3_st;
+/// Memset node parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_MEMSET_NODE_PARAMS_st {
+ ///< Destination device pointer
+ pub dst: CUdeviceptr,
+ ///< Pitch of destination device pointer. Unused if height is 1
+ pub pitch: usize,
+ ///< Value to be set
+ pub value: ::core::ffi::c_uint,
+ ///< Size of each element in bytes. Must be 1, 2, or 4.
+ pub elementSize: ::core::ffi::c_uint,
+ ///< Width of the row in elements
+ pub width: usize,
+ ///< Number of rows
+ pub height: usize,
+}
+/// Memset node parameters
+pub type CUDA_MEMSET_NODE_PARAMS_v1 = CUDA_MEMSET_NODE_PARAMS_st;
+/// Memset node parameters
+pub type CUDA_MEMSET_NODE_PARAMS = CUDA_MEMSET_NODE_PARAMS_v1;
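// Illustrative sketch, not part of the generated bindings: a memset node that clears
// `n` 32-bit elements in a single row, so the pitch is unused.
fn example_memset_node(dst: CUdeviceptr, n: usize) -> CUDA_MEMSET_NODE_PARAMS {
    CUDA_MEMSET_NODE_PARAMS {
        dst,
        pitch: 0,       // ignored because height is 1
        value: 0,
        elementSize: 4, // must be 1, 2, or 4
        width: n,       // width is measured in elements, not bytes
        height: 1,
    }
}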
+/// Memset node parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_MEMSET_NODE_PARAMS_v2_st {
+ ///< Destination device pointer
+ pub dst: CUdeviceptr,
+ ///< Pitch of destination device pointer. Unused if height is 1
+ pub pitch: usize,
+ ///< Value to be set
+ pub value: ::core::ffi::c_uint,
+ ///< Size of each element in bytes. Must be 1, 2, or 4.
+ pub elementSize: ::core::ffi::c_uint,
+ ///< Width of the row in elements
+ pub width: usize,
+ ///< Number of rows
+ pub height: usize,
+ ///< Context on which to run the node
+ pub ctx: CUcontext,
+}
+/// Memset node parameters
+pub type CUDA_MEMSET_NODE_PARAMS_v2 = CUDA_MEMSET_NODE_PARAMS_v2_st;
+/// Host node parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash)]
+pub struct CUDA_HOST_NODE_PARAMS_st {
+ ///< The function to call when the node executes
+ pub fn_: CUhostFn,
+ ///< Argument to pass to the function
+ pub userData: *mut ::core::ffi::c_void,
+}
+/// Host node parameters
+pub type CUDA_HOST_NODE_PARAMS_v1 = CUDA_HOST_NODE_PARAMS_st;
+/// Host node parameters
+pub type CUDA_HOST_NODE_PARAMS = CUDA_HOST_NODE_PARAMS_v1;
+/// Host node parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_HOST_NODE_PARAMS_v2_st {
+ ///< The function to call when the node executes
+ pub fn_: CUhostFn,
+ ///< Argument to pass to the function
+ pub userData: *mut ::core::ffi::c_void,
+}
+/// Host node parameters
+pub type CUDA_HOST_NODE_PARAMS_v2 = CUDA_HOST_NODE_PARAMS_v2_st;
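// Illustrative sketch, not part of the generated bindings: a callback matching the
// CUhostFn signature defined earlier and the v2 host-node descriptor that carries it.
// The runtime passes `userData` back verbatim; host callbacks must not call CUDA APIs.
unsafe extern "system" fn notify_done(user_data: *mut ::core::ffi::c_void) {
    let done = user_data.cast::<bool>();
    if !done.is_null() {
        unsafe { done.write(true) };
    }
}

fn example_host_node(done_flag: *mut bool) -> CUDA_HOST_NODE_PARAMS_v2 {
    CUDA_HOST_NODE_PARAMS_v2 {
        fn_: Some(notify_done),
        userData: done_flag.cast::<::core::ffi::c_void>(),
    }
}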
+impl CUgraphConditionalNodeType_enum {
+ ///< Conditional 'if' Node. Body executed once if condition value is non-zero.
+ pub const CU_GRAPH_COND_TYPE_IF: CUgraphConditionalNodeType_enum = CUgraphConditionalNodeType_enum(
+ 0,
+ );
+}
+impl CUgraphConditionalNodeType_enum {
+ ///< Conditional 'while' Node. Body executed repeatedly while condition value is non-zero.
+ pub const CU_GRAPH_COND_TYPE_WHILE: CUgraphConditionalNodeType_enum = CUgraphConditionalNodeType_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Conditional node types
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUgraphConditionalNodeType_enum(pub ::core::ffi::c_uint);
+/// Conditional node types
+pub use self::CUgraphConditionalNodeType_enum as CUgraphConditionalNodeType;
+/// Conditional node parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_CONDITIONAL_NODE_PARAMS {
+ /**< Conditional node handle.
+Handles must be created in advance of creating the node
+using ::cuGraphConditionalHandleCreate.*/
+ pub handle: CUgraphConditionalHandle,
+ ///< Type of conditional node.
+ pub type_: CUgraphConditionalNodeType,
+ ///< Size of graph output array. Must be 1.
+ pub size: ::core::ffi::c_uint,
+ /**< CUDA-owned array populated with conditional node child graphs during creation of the node.
+Valid for the lifetime of the conditional node.
+The contents of the graph(s) are subject to the following constraints:
+
+- Allowed node types are kernel nodes, empty nodes, child graphs, memsets,
+memcopies, and conditionals. This applies recursively to child graphs and conditional bodies.
+- All kernels, including kernels in nested conditionals or child graphs at any level,
+must belong to the same CUDA context.
+
+These graphs may be populated using graph node creation APIs or ::cuStreamBeginCaptureToGraph.*/
+ pub phGraph_out: *mut CUgraph,
+ ///< Context on which to run the node. Must match context used to create the handle and all body nodes.
+ pub ctx: CUcontext,
+}
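// Illustrative sketch, not part of the generated bindings: parameters for an 'if'
// conditional node. The handle must have been created beforehand with
// cuGraphConditionalHandleCreate; phGraph_out starts null because the driver fills it
// with the CUDA-owned body-graph array when the node is added.
fn example_conditional_node(
    handle: CUgraphConditionalHandle,
    ctx: CUcontext,
) -> CUDA_CONDITIONAL_NODE_PARAMS {
    CUDA_CONDITIONAL_NODE_PARAMS {
        handle,
        type_: CUgraphConditionalNodeType::CU_GRAPH_COND_TYPE_IF,
        size: 1, // the output-graph array size must currently be 1
        phGraph_out: ::core::ptr::null_mut(),
        ctx,
    }
}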
+impl CUgraphNodeType_enum {
+ ///< GPU kernel node
+ pub const CU_GRAPH_NODE_TYPE_KERNEL: CUgraphNodeType_enum = CUgraphNodeType_enum(0);
+}
+impl CUgraphNodeType_enum {
+ ///< Memcpy node
+ pub const CU_GRAPH_NODE_TYPE_MEMCPY: CUgraphNodeType_enum = CUgraphNodeType_enum(1);
+}
+impl CUgraphNodeType_enum {
+ ///< Memset node
+ pub const CU_GRAPH_NODE_TYPE_MEMSET: CUgraphNodeType_enum = CUgraphNodeType_enum(2);
+}
+impl CUgraphNodeType_enum {
+ ///< Host (executable) node
+ pub const CU_GRAPH_NODE_TYPE_HOST: CUgraphNodeType_enum = CUgraphNodeType_enum(3);
+}
+impl CUgraphNodeType_enum {
+ ///< Node which executes an embedded graph
+ pub const CU_GRAPH_NODE_TYPE_GRAPH: CUgraphNodeType_enum = CUgraphNodeType_enum(4);
+}
+impl CUgraphNodeType_enum {
+ ///< Empty (no-op) node
+ pub const CU_GRAPH_NODE_TYPE_EMPTY: CUgraphNodeType_enum = CUgraphNodeType_enum(5);
+}
+impl CUgraphNodeType_enum {
+ ///< External event wait node
+ pub const CU_GRAPH_NODE_TYPE_WAIT_EVENT: CUgraphNodeType_enum = CUgraphNodeType_enum(
+ 6,
+ );
+}
+impl CUgraphNodeType_enum {
+ ///< External event record node
+ pub const CU_GRAPH_NODE_TYPE_EVENT_RECORD: CUgraphNodeType_enum = CUgraphNodeType_enum(
+ 7,
+ );
+}
+impl CUgraphNodeType_enum {
+ ///< External semaphore signal node
+ pub const CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL: CUgraphNodeType_enum = CUgraphNodeType_enum(
+ 8,
+ );
+}
+impl CUgraphNodeType_enum {
+ ///< External semaphore wait node
+ pub const CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT: CUgraphNodeType_enum = CUgraphNodeType_enum(
+ 9,
+ );
+}
+impl CUgraphNodeType_enum {
+ ///< Memory Allocation Node
+ pub const CU_GRAPH_NODE_TYPE_MEM_ALLOC: CUgraphNodeType_enum = CUgraphNodeType_enum(
+ 10,
+ );
+}
+impl CUgraphNodeType_enum {
+ ///< Memory Free Node
+ pub const CU_GRAPH_NODE_TYPE_MEM_FREE: CUgraphNodeType_enum = CUgraphNodeType_enum(
+ 11,
+ );
+}
+impl CUgraphNodeType_enum {
+ ///< Batch MemOp Node
+ pub const CU_GRAPH_NODE_TYPE_BATCH_MEM_OP: CUgraphNodeType_enum = CUgraphNodeType_enum(
+ 12,
+ );
+}
+impl CUgraphNodeType_enum {
+ /**< Conditional Node
+
+May be used to implement a conditional execution path or loop
+inside of a graph. The graph(s) contained within the body of the conditional node
+can be selectively executed or iterated upon based on the value of a conditional
+variable.
+
+Handles must be created in advance of creating the node
+using ::cuGraphConditionalHandleCreate.
+
+The following restrictions apply to graphs which contain conditional nodes:
+The graph cannot be used in a child node.
+Only one instantiation of the graph may exist at any point in time.
+The graph cannot be cloned.
+
+To set the control value, supply a default value when creating the handle and/or
+call ::cudaGraphSetConditional from device code.*/
+ pub const CU_GRAPH_NODE_TYPE_CONDITIONAL: CUgraphNodeType_enum = CUgraphNodeType_enum(
+ 13,
+ );
+}
+#[repr(transparent)]
+/// Graph node types
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUgraphNodeType_enum(pub ::core::ffi::c_uint);
+/// Graph node types
+pub use self::CUgraphNodeType_enum as CUgraphNodeType;
+impl CUgraphDependencyType_enum {
+ ///< This is an ordinary dependency.
+ pub const CU_GRAPH_DEPENDENCY_TYPE_DEFAULT: CUgraphDependencyType_enum = CUgraphDependencyType_enum(
+ 0,
+ );
+}
+impl CUgraphDependencyType_enum {
+ /**< This dependency type allows the downstream node to
+use \c cudaGridDependencySynchronize(). It may only be used
+between kernel nodes, and must be used with either the
+::CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC or
+::CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER outgoing port.*/
+ pub const CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC: CUgraphDependencyType_enum = CUgraphDependencyType_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Type annotations that can be applied to graph edges as part of ::CUgraphEdgeData.
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUgraphDependencyType_enum(pub ::core::ffi::c_uint);
+/// Type annotations that can be applied to graph edges as part of ::CUgraphEdgeData.
+pub use self::CUgraphDependencyType_enum as CUgraphDependencyType;
+/** Optional annotation for edges in a CUDA graph. Note, all edges implicitly have annotations and
+ default to a zero-initialized value if not specified. A zero-initialized struct indicates a
+ standard full serialization of two nodes with memory visibility.*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUgraphEdgeData_st {
+ /**< This indicates when the dependency is triggered from the upstream
+node on the edge. The meaning is specific to the node type. A value
+of 0 in all cases means full completion of the upstream node, with
+memory visibility to the downstream node or portion thereof
+(indicated by \c to_port).
+<br>
+Only kernel nodes define non-zero ports. A kernel node
+can use the following output port types:
+::CU_GRAPH_KERNEL_NODE_PORT_DEFAULT, ::CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC,
+or ::CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER.*/
+ pub from_port: ::core::ffi::c_uchar,
+ /**< This indicates what portion of the downstream node is dependent on
+the upstream node or portion thereof (indicated by \c from_port). The
+meaning is specific to the node type. A value of 0 in all cases means
+the entirety of the downstream node is dependent on the upstream work.
+<br>
+Currently no node types define non-zero ports. Accordingly, this field
+must be set to zero.*/
+ pub to_port: ::core::ffi::c_uchar,
+ /**< This should be populated with a value from ::CUgraphDependencyType. (It
+is typed as char due to compiler-specific layout of bitfields.) See
+::CUgraphDependencyType.*/
+ pub type_: ::core::ffi::c_uchar,
+ /**< These bytes are unused and must be zeroed. This ensures
+compatibility if additional fields are added in the future.*/
+ pub reserved: [::core::ffi::c_uchar; 5usize],
+}
+/** Optional annotation for edges in a CUDA graph. Note, all edges implicitly have annotations and
+ default to a zero-initialized value if not specified. A zero-initialized struct indicates a
+ standard full serialization of two nodes with memory visibility.*/
+pub type CUgraphEdgeData = CUgraphEdgeData_st;
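// Illustrative sketch, not part of the generated bindings: two edge annotations. The
// zero-initialized value is the ordinary full-serialization edge described above; the
// second marks a programmatic dependency between kernel nodes (in a real graph its
// from_port would also be set to the programmatic output port).
fn example_edges() -> (CUgraphEdgeData, CUgraphEdgeData) {
    let default_edge = CUgraphEdgeData {
        from_port: 0,
        to_port: 0,
        type_: 0, // CU_GRAPH_DEPENDENCY_TYPE_DEFAULT
        reserved: [0; 5],
    };
    let programmatic_edge = CUgraphEdgeData {
        from_port: 0,
        to_port: 0,
        type_: CUgraphDependencyType::CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC.0
            as ::core::ffi::c_uchar,
        reserved: [0; 5], // reserved bytes must stay zeroed
    };
    (default_edge, programmatic_edge)
}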
+impl CUgraphInstantiateResult_enum {
+ ///< Instantiation succeeded
+ pub const CUDA_GRAPH_INSTANTIATE_SUCCESS: CUgraphInstantiateResult_enum = CUgraphInstantiateResult_enum(
+ 0,
+ );
+}
+impl CUgraphInstantiateResult_enum {
+ ///< Instantiation failed for an unexpected reason which is described in the return value of the function
+ pub const CUDA_GRAPH_INSTANTIATE_ERROR: CUgraphInstantiateResult_enum = CUgraphInstantiateResult_enum(
+ 1,
+ );
+}
+impl CUgraphInstantiateResult_enum {
+ ///< Instantiation failed due to invalid structure, such as cycles
+ pub const CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE: CUgraphInstantiateResult_enum = CUgraphInstantiateResult_enum(
+ 2,
+ );
+}
+impl CUgraphInstantiateResult_enum {
+ ///< Instantiation for device launch failed because the graph contained an unsupported operation
+ pub const CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED: CUgraphInstantiateResult_enum = CUgraphInstantiateResult_enum(
+ 3,
+ );
+}
+impl CUgraphInstantiateResult_enum {
+ ///< Instantiation for device launch failed due to the nodes belonging to different contexts
+ pub const CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED: CUgraphInstantiateResult_enum = CUgraphInstantiateResult_enum(
+ 4,
+ );
+}
+#[repr(transparent)]
+/// Graph instantiation results
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUgraphInstantiateResult_enum(pub ::core::ffi::c_uint);
+/// Graph instantiation results
+pub use self::CUgraphInstantiateResult_enum as CUgraphInstantiateResult;
+/// Graph instantiation parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_GRAPH_INSTANTIATE_PARAMS_st {
+ ///< Instantiation flags
+ pub flags: cuuint64_t,
+ ///< Upload stream
+ pub hUploadStream: CUstream,
+ ///< The node which caused instantiation to fail, if any
+ pub hErrNode_out: CUgraphNode,
+ ///< Whether instantiation was successful. If it failed, the reason why
+ pub result_out: CUgraphInstantiateResult,
+}
+/// Graph instantiation parameters
+pub type CUDA_GRAPH_INSTANTIATE_PARAMS = CUDA_GRAPH_INSTANTIATE_PARAMS_st;
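// Illustrative sketch, not part of the generated bindings: the instantiation parameters
// above are in/out. `flags` and `hUploadStream` are inputs; `hErrNode_out` and
// `result_out` are written by cuGraphInstantiateWithParams, so a caller inspects them
// afterwards like this.
fn instantiation_succeeded(params: &CUDA_GRAPH_INSTANTIATE_PARAMS) -> bool {
    params.result_out == CUgraphInstantiateResult::CUDA_GRAPH_INSTANTIATE_SUCCESS
}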
+impl CUsynchronizationPolicy_enum {
+ pub const CU_SYNC_POLICY_AUTO: CUsynchronizationPolicy_enum = CUsynchronizationPolicy_enum(
+ 1,
+ );
+}
+impl CUsynchronizationPolicy_enum {
+ pub const CU_SYNC_POLICY_SPIN: CUsynchronizationPolicy_enum = CUsynchronizationPolicy_enum(
+ 2,
+ );
+}
+impl CUsynchronizationPolicy_enum {
+ pub const CU_SYNC_POLICY_YIELD: CUsynchronizationPolicy_enum = CUsynchronizationPolicy_enum(
+ 3,
+ );
+}
+impl CUsynchronizationPolicy_enum {
+ pub const CU_SYNC_POLICY_BLOCKING_SYNC: CUsynchronizationPolicy_enum = CUsynchronizationPolicy_enum(
+ 4,
+ );
+}
+#[repr(transparent)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUsynchronizationPolicy_enum(pub ::core::ffi::c_uint);
+pub use self::CUsynchronizationPolicy_enum as CUsynchronizationPolicy;
+impl CUclusterSchedulingPolicy_enum {
+ ///< the default policy
+ pub const CU_CLUSTER_SCHEDULING_POLICY_DEFAULT: CUclusterSchedulingPolicy_enum = CUclusterSchedulingPolicy_enum(
+ 0,
+ );
+}
+impl CUclusterSchedulingPolicy_enum {
+ ///< spread the blocks within a cluster to the SMs
+ pub const CU_CLUSTER_SCHEDULING_POLICY_SPREAD: CUclusterSchedulingPolicy_enum = CUclusterSchedulingPolicy_enum(
+ 1,
+ );
+}
+impl CUclusterSchedulingPolicy_enum {
+ ///< allow the hardware to load-balance the blocks in a cluster to the SMs
+ pub const CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING: CUclusterSchedulingPolicy_enum = CUclusterSchedulingPolicy_enum(
+ 2,
+ );
+}
+#[repr(transparent)]
+/// Cluster scheduling policies. These may be passed to ::cuFuncSetAttribute or ::cuKernelSetAttribute
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUclusterSchedulingPolicy_enum(pub ::core::ffi::c_uint);
+/// Cluster scheduling policies. These may be passed to ::cuFuncSetAttribute or ::cuKernelSetAttribute
+pub use self::CUclusterSchedulingPolicy_enum as CUclusterSchedulingPolicy;
+impl CUlaunchMemSyncDomain_enum {
+ ///< Launch kernels in the default domain
+ pub const CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT: CUlaunchMemSyncDomain_enum = CUlaunchMemSyncDomain_enum(
+ 0,
+ );
+}
+impl CUlaunchMemSyncDomain_enum {
+ ///< Launch kernels in the remote domain
+ pub const CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE: CUlaunchMemSyncDomain_enum = CUlaunchMemSyncDomain_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/** Memory Synchronization Domain
+
+ A kernel can be launched in a specified memory synchronization domain that affects all memory operations issued by
+ that kernel. A memory barrier issued in one domain will only order memory operations in that domain, thus eliminating
+ latency increase from memory barriers ordering unrelated traffic.
+
+ By default, kernels are launched in domain 0. Kernels launched with ::CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE will have a
+ different domain ID. Users may also alter the domain ID with ::CUlaunchMemSyncDomainMap for a specific stream /
+ graph node / kernel launch. See ::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN, ::cuStreamSetAttribute, ::cuLaunchKernelEx,
+ ::cuGraphKernelNodeSetAttribute.
+
+ Memory operations done in kernels launched in different domains are considered system-scope distanced. In other
+ words, a GPU scoped memory synchronization is not sufficient for memory order to be observed by kernels in another
+ memory synchronization domain even if they are on the same GPU.*/
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUlaunchMemSyncDomain_enum(pub ::core::ffi::c_uint);
+/** Memory Synchronization Domain
+
+ A kernel can be launched in a specified memory synchronization domain that affects all memory operations issued by
+ that kernel. A memory barrier issued in one domain will only order memory operations in that domain, thus eliminating
+ latency increase from memory barriers ordering unrelated traffic.
+
+ By default, kernels are launched in domain 0. Kernels launched with ::CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE will have a
+ different domain ID. Users may also alter the domain ID with ::CUlaunchMemSyncDomainMap for a specific stream /
+ graph node / kernel launch. See ::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN, ::cuStreamSetAttribute, ::cuLaunchKernelEx,
+ ::cuGraphKernelNodeSetAttribute.
+
+ Memory operations done in kernels launched in different domains are considered system-scope distanced. In other
+ words, a GPU scoped memory synchronization is not sufficient for memory order to be observed by kernels in another
+ memory synchronization domain even if they are on the same GPU.*/
+pub use self::CUlaunchMemSyncDomain_enum as CUlaunchMemSyncDomain;
+/** Memory Synchronization Domain map
+
+ See ::cudaLaunchMemSyncDomain.
+
+ By default, kernels are launched in domain 0. Kernels launched with ::CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE will have a
+ different domain ID. Users may also alter the domain ID with ::CUlaunchMemSyncDomainMap for a specific stream /
+ graph node / kernel launch. See ::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP.
+
+ Domain ID range is available through ::CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT.*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUlaunchMemSyncDomainMap_st {
+ ///< The default domain ID to use for designated kernels
+ pub default_: ::core::ffi::c_uchar,
+ ///< The remote domain ID to use for designated kernels
+ pub remote: ::core::ffi::c_uchar,
+}
+/** Memory Synchronization Domain map
+
+ See ::cudaLaunchMemSyncDomain.
+
+ By default, kernels are launched in domain 0. Kernels launched with ::CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE will have a
+ different domain ID. Users may also alter the domain ID with ::CUlaunchMemSyncDomainMap for a specific stream /
+ graph node / kernel launch. See ::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP.
+
+ Domain ID range is available through ::CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT.*/
+pub type CUlaunchMemSyncDomainMap = CUlaunchMemSyncDomainMap_st;
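// Illustrative sketch, not part of the generated bindings: a map that keeps ordinary
// launches in domain 0 and routes CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE launches to domain 1.
// Valid IDs are bounded by CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT.
const EXAMPLE_DOMAIN_MAP: CUlaunchMemSyncDomainMap = CUlaunchMemSyncDomainMap {
    default_: 0,
    remote: 1,
};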
+impl CUlaunchAttributeID_enum {
+ ///< Ignored entry, for convenient composition
+ pub const CU_LAUNCH_ATTRIBUTE_IGNORE: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
+ 0,
+ );
+}
+impl CUlaunchAttributeID_enum {
+ /**< Valid for streams, graph nodes, launches. See
+::CUlaunchAttributeValue::accessPolicyWindow.*/
+ pub const CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
+ 1,
+ );
+}
+impl CUlaunchAttributeID_enum {
+ /**< Valid for graph nodes, launches. See
+::CUlaunchAttributeValue::cooperative.*/
+ pub const CU_LAUNCH_ATTRIBUTE_COOPERATIVE: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
+ 2,
+ );
+}
+impl CUlaunchAttributeID_enum {
+ /**< Valid for streams. See
+::CUlaunchAttributeValue::syncPolicy.*/
+ pub const CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
+ 3,
+ );
+}
+impl CUlaunchAttributeID_enum {
+ ///< Valid for graph nodes, launches. See ::CUlaunchAttributeValue::clusterDim.
+ pub const CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
+ 4,
+ );
+}
+impl CUlaunchAttributeID_enum {
+ ///< Valid for graph nodes, launches. See ::CUlaunchAttributeValue::clusterSchedulingPolicyPreference.
+ pub const CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
+ 5,
+ );
+}
+impl CUlaunchAttributeID_enum {
+ /**< Valid for launches. Setting
+::CUlaunchAttributeValue::programmaticStreamSerializationAllowed
+to non-0 signals that the kernel will use programmatic
+means to resolve its stream dependency, so that the
+CUDA runtime should opportunistically allow the grid's
+execution to overlap with the previous kernel in the
+stream, if that kernel requests the overlap. The
+dependent launches can choose to wait on the
+dependency using the programmatic sync
+(cudaGridDependencySynchronize() or equivalent PTX
+instructions).*/
+ pub const CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
+ 6,
+ );
+}
+impl CUlaunchAttributeID_enum {
+ /**< Valid for launches. Set
+::CUlaunchAttributeValue::programmaticEvent to
+record the event. Event recorded through this
+launch attribute is guaranteed to only trigger
+after all blocks in the associated kernel trigger
+the event. A block can trigger the event through
+PTX launchdep.release or CUDA builtin function
+cudaTriggerProgrammaticLaunchCompletion(). A
+trigger can also be inserted at the beginning of
+each block's execution if triggerAtBlockStart is
+set to non-0. The dependent launches can choose to
+wait on the dependency using the programmatic sync
+(cudaGridDependencySynchronize() or equivalent PTX
+instructions). Note that dependents (including the
+CPU thread calling cuEventSynchronize()) are not
+guaranteed to observe the release precisely when
+it is released. For example, cuEventSynchronize()
+may only observe the event trigger long after the
+associated kernel has completed. This recording
+type is primarily meant for establishing
+programmatic dependency between device tasks. Note
+also this type of dependency allows, but does not
+guarantee, concurrent execution of tasks.
+<br>
+The event supplied must not be an interprocess or
+interop event. The event must disable timing (i.e.
+must be created with the ::CU_EVENT_DISABLE_TIMING
+flag set).*/
+ pub const CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
+ 7,
+ );
+}
+impl CUlaunchAttributeID_enum {
+ /**< Valid for streams, graph nodes, launches. See
+::CUlaunchAttributeValue::priority.*/
+ pub const CU_LAUNCH_ATTRIBUTE_PRIORITY: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
+ 8,
+ );
+}
+impl CUlaunchAttributeID_enum {
+ /**< Valid for streams, graph nodes, launches. See
+::CUlaunchAttributeValue::memSyncDomainMap.*/
+ pub const CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
+ 9,
+ );
+}
+impl CUlaunchAttributeID_enum {
+ /**< Valid for streams, graph nodes, launches. See
+::CUlaunchAttributeValue::memSyncDomain.*/
+ pub const CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
+ 10,
+ );
+}
+impl CUlaunchAttributeID_enum {
+ /**< Valid for launches. Set
+::CUlaunchAttributeValue::launchCompletionEvent to record the
+event.
+<br>
+Nominally, the event is triggered once all blocks of the kernel
+have begun execution. Currently this is a best effort. If a kernel
+B has a launch completion dependency on a kernel A, B may wait
+until A is complete. Alternatively, blocks of B may begin before
+all blocks of A have begun, for example if B can claim execution
+resources unavailable to A (e.g. they run on different GPUs) or
+if B has a higher priority than A.
+Exercise caution if such an ordering inversion could lead
+to deadlock.
+<br>
+A launch completion event is nominally similar to a programmatic
+event with \c triggerAtBlockStart set except that it is not
+visible to \c cudaGridDependencySynchronize() and can be used with
+compute capability less than 9.0.
+<br>
+The event supplied must not be an interprocess or interop
+event. The event must disable timing (i.e. must be created
+with the ::CU_EVENT_DISABLE_TIMING flag set).*/
+ pub const CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
+ 12,
+ );
+}
+impl CUlaunchAttributeID_enum {
+ /**< Valid for graph nodes, launches. This attribute is graphs-only,
+and passing it to a launch in a non-capturing stream will result
+in an error.
+<br>
+::CUlaunchAttributeValue::deviceUpdatableKernelNode::deviceUpdatable can
+only be set to 0 or 1. Setting the field to 1 indicates that the
+corresponding kernel node should be device-updatable. On success, a handle
+will be returned via
+::CUlaunchAttributeValue::deviceUpdatableKernelNode::devNode which can be
+passed to the various device-side update functions to update the node's
+kernel parameters from within another kernel. For more information on the
+types of device updates that can be made, as well as the relevant limitations
+thereof, see ::cudaGraphKernelNodeUpdatesApply.
+<br>
+Nodes which are device-updatable have additional restrictions compared to
+regular kernel nodes. Firstly, device-updatable nodes cannot be removed
+from their graph via ::cuGraphDestroyNode. Additionally, once opted-in
+to this functionality, a node cannot opt out, and any attempt to set the
+deviceUpdatable attribute to 0 will result in an error. Device-updatable
+kernel nodes also cannot have their attributes copied to/from another kernel
+node via ::cuGraphKernelNodeCopyAttributes. Graphs containing one or more
+device-updatable nodes also do not allow multiple instantiation, and neither
+the graph nor its instantiated version can be passed to ::cuGraphExecUpdate.
+<br>
+If a graph contains device-updatable nodes and updates those nodes from the device
+from within the graph, the graph must be uploaded with ::cuGraphUpload before it
+is launched. For such a graph, if host-side executable graph updates are made to the
+device-updatable nodes, the graph must be uploaded before it is launched again.*/
+ pub const CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
+ 13,
+ );
+}
+impl CUlaunchAttributeID_enum {
+ pub const CU_LAUNCH_ATTRIBUTE_MAX: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
+ 14,
+ );
+}
+#[repr(transparent)]
+/// Launch attributes enum; used as id field of ::CUlaunchAttribute
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUlaunchAttributeID_enum(pub ::core::ffi::c_uint);
+/// Launch attributes enum; used as id field of ::CUlaunchAttribute
+pub use self::CUlaunchAttributeID_enum as CUlaunchAttributeID;
+/// Launch attributes union; used as value field of ::CUlaunchAttribute
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union CUlaunchAttributeValue_union {
+ pub pad: [::core::ffi::c_char; 64usize],
+ ///< Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW.
+ pub accessPolicyWindow: CUaccessPolicyWindow,
+ /**< Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_COOPERATIVE. Nonzero indicates a cooperative
+kernel (see ::cuLaunchCooperativeKernel).*/
+ pub cooperative: ::core::ffi::c_int,
+ /**< Value of launch attribute
+::CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY. ::CUsynchronizationPolicy for
+work queued up in this stream*/
+ pub syncPolicy: CUsynchronizationPolicy,
+ pub clusterDim: CUlaunchAttributeValue_union__bindgen_ty_1,
+ /**< Value of launch attribute
+::CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE. Cluster
+scheduling policy preference for the kernel.*/
+ pub clusterSchedulingPolicyPreference: CUclusterSchedulingPolicy,
+ /**< Value of launch attribute
+::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION.*/
+ pub programmaticStreamSerializationAllowed: ::core::ffi::c_int,
+ ///< Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT.
+ pub programmaticEvent: CUlaunchAttributeValue_union__bindgen_ty_2,
+ ///< Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT.
+ pub launchCompletionEvent: CUlaunchAttributeValue_union__bindgen_ty_3,
+ ///< Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_PRIORITY. Execution priority of the kernel.
+ pub priority: ::core::ffi::c_int,
+ /**< Value of launch attribute
+::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP. See
+::CUlaunchMemSyncDomainMap.*/
+ pub memSyncDomainMap: CUlaunchMemSyncDomainMap,
+ /**< Value of launch attribute
+::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN. See ::CUlaunchMemSyncDomain.*/
+ pub memSyncDomain: CUlaunchMemSyncDomain,
+ ///< Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE.
+ pub deviceUpdatableKernelNode: CUlaunchAttributeValue_union__bindgen_ty_4,
+}
+/** Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION that
+ represents the desired cluster dimensions for the kernel. Opaque type
+ with the following fields:
+ - \p x - The X dimension of the cluster, in blocks. Must be a divisor
+ of the grid X dimension.
+ - \p y - The Y dimension of the cluster, in blocks. Must be a divisor
+ of the grid Y dimension.
+ - \p z - The Z dimension of the cluster, in blocks. Must be a divisor
+ of the grid Z dimension.*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUlaunchAttributeValue_union__bindgen_ty_1 {
+ pub x: ::core::ffi::c_uint,
+ pub y: ::core::ffi::c_uint,
+ pub z: ::core::ffi::c_uint,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUlaunchAttributeValue_union__bindgen_ty_2 {
+ ///< Event to fire when all blocks trigger it
+ pub event: CUevent,
+ /**< Event record flags, see ::cuEventRecordWithFlags. Does not accept
+::CU_EVENT_RECORD_EXTERNAL.*/
+ pub flags: ::core::ffi::c_int,
+ ///< If this is set to non-0, each block launch will automatically trigger the event
+ pub triggerAtBlockStart: ::core::ffi::c_int,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUlaunchAttributeValue_union__bindgen_ty_3 {
+ ///< Event to fire when the last block launches
+ pub event: CUevent,
+ ///< Event record flags, see ::cuEventRecordWithFlags. Does not accept ::CU_EVENT_RECORD_EXTERNAL.
+ pub flags: ::core::ffi::c_int,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUlaunchAttributeValue_union__bindgen_ty_4 {
+ ///< Whether or not the resulting kernel node should be device-updatable.
+ pub deviceUpdatable: ::core::ffi::c_int,
+ ///< Returns a handle to pass to the various device-side update functions.
+ pub devNode: CUgraphDeviceNode,
+}
+/// Launch attributes union; used as value field of ::CUlaunchAttribute
+pub type CUlaunchAttributeValue = CUlaunchAttributeValue_union;
+/// Launch attribute
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CUlaunchAttribute_st {
+ ///< Attribute to set
+ pub id: CUlaunchAttributeID,
+ pub pad: [::core::ffi::c_char; 4usize],
+ ///< Value of the attribute
+ pub value: CUlaunchAttributeValue,
+}
+/// Launch attribute
+pub type CUlaunchAttribute = CUlaunchAttribute_st;
+/// CUDA extensible launch configuration
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUlaunchConfig_st {
+ ///< Width of grid in blocks
+ pub gridDimX: ::core::ffi::c_uint,
+ ///< Height of grid in blocks
+ pub gridDimY: ::core::ffi::c_uint,
+ ///< Depth of grid in blocks
+ pub gridDimZ: ::core::ffi::c_uint,
+ ///< X dimension of each thread block
+ pub blockDimX: ::core::ffi::c_uint,
+ ///< Y dimension of each thread block
+ pub blockDimY: ::core::ffi::c_uint,
+ ///< Z dimension of each thread block
+ pub blockDimZ: ::core::ffi::c_uint,
+ ///< Dynamic shared-memory size per thread block in bytes
+ pub sharedMemBytes: ::core::ffi::c_uint,
+ ///< Stream identifier
+ pub hStream: CUstream,
+ ///< List of attributes; nullable if ::CUlaunchConfig::numAttrs == 0
+ pub attrs: *mut CUlaunchAttribute,
+ ///< Number of attributes populated in ::CUlaunchConfig::attrs
+ pub numAttrs: ::core::ffi::c_uint,
+}
+/// CUDA extensible launch configuration
+pub type CUlaunchConfig = CUlaunchConfig_st;
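// Illustrative sketch, not part of the generated bindings: building one launch attribute
// (a priority override) and the extensible launch configuration that points at it, as a
// cuLaunchKernelEx call would consume. The attribute storage must outlive that call.
fn example_launch_config(
    stream: CUstream,
    attrs: &mut [CUlaunchAttribute; 1],
) -> CUlaunchConfig {
    attrs[0] = CUlaunchAttribute {
        id: CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_PRIORITY,
        pad: [0; 4],
        value: CUlaunchAttributeValue { priority: 3 },
    };
    CUlaunchConfig {
        gridDimX: 64,
        gridDimY: 1,
        gridDimZ: 1,
        blockDimX: 256,
        blockDimY: 1,
        blockDimZ: 1,
        sharedMemBytes: 0,
        hStream: stream,
        attrs: attrs.as_mut_ptr(),
        numAttrs: 1,
    }
}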
+/// Launch attributes enum; used as id field of ::CUlaunchAttribute
+pub use self::CUlaunchAttributeID as CUkernelNodeAttrID;
+/// Launch attributes union; used as value field of ::CUlaunchAttribute
+pub type CUkernelNodeAttrValue_v1 = CUlaunchAttributeValue;
+/// Launch attributes union; used as value field of ::CUlaunchAttribute
+pub type CUkernelNodeAttrValue = CUkernelNodeAttrValue_v1;
+impl CUstreamCaptureStatus_enum {
+ ///< Stream is not capturing
+ pub const CU_STREAM_CAPTURE_STATUS_NONE: CUstreamCaptureStatus_enum = CUstreamCaptureStatus_enum(
+ 0,
+ );
+}
+impl CUstreamCaptureStatus_enum {
+ ///< Stream is actively capturing
+ pub const CU_STREAM_CAPTURE_STATUS_ACTIVE: CUstreamCaptureStatus_enum = CUstreamCaptureStatus_enum(
+ 1,
+ );
+}
+impl CUstreamCaptureStatus_enum {
+ /**< Stream is part of a capture sequence that
+has been invalidated, but not terminated*/
+ pub const CU_STREAM_CAPTURE_STATUS_INVALIDATED: CUstreamCaptureStatus_enum = CUstreamCaptureStatus_enum(
+ 2,
+ );
+}
+#[repr(transparent)]
+/// Possible stream capture statuses returned by ::cuStreamIsCapturing
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUstreamCaptureStatus_enum(pub ::core::ffi::c_uint);
+/// Possible stream capture statuses returned by ::cuStreamIsCapturing
+pub use self::CUstreamCaptureStatus_enum as CUstreamCaptureStatus;
+impl CUstreamCaptureMode_enum {
+ pub const CU_STREAM_CAPTURE_MODE_GLOBAL: CUstreamCaptureMode_enum = CUstreamCaptureMode_enum(
+ 0,
+ );
+}
+impl CUstreamCaptureMode_enum {
+ pub const CU_STREAM_CAPTURE_MODE_THREAD_LOCAL: CUstreamCaptureMode_enum = CUstreamCaptureMode_enum(
+ 1,
+ );
+}
+impl CUstreamCaptureMode_enum {
+ pub const CU_STREAM_CAPTURE_MODE_RELAXED: CUstreamCaptureMode_enum = CUstreamCaptureMode_enum(
+ 2,
+ );
+}
+#[repr(transparent)]
+/** Possible modes for stream capture thread interactions. For more details see
+ ::cuStreamBeginCapture and ::cuThreadExchangeStreamCaptureMode*/
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUstreamCaptureMode_enum(pub ::core::ffi::c_uint);
+/// Launch attributes enum; used as id field of ::CUlaunchAttribute
+pub use self::CUlaunchAttributeID as CUstreamAttrID;
+/** Possible modes for stream capture thread interactions. For more details see
+ ::cuStreamBeginCapture and ::cuThreadExchangeStreamCaptureMode*/
+pub use self::CUstreamCaptureMode_enum as CUstreamCaptureMode;
+/// Launch attributes union; used as value field of ::CUlaunchAttribute
+pub type CUstreamAttrValue_v1 = CUlaunchAttributeValue;
+/// Launch attributes union; used as value field of ::CUlaunchAttribute
+pub type CUstreamAttrValue = CUstreamAttrValue_v1;
+impl CUdriverProcAddress_flags_enum {
+ ///< Default search mode for driver symbols.
+ pub const CU_GET_PROC_ADDRESS_DEFAULT: CUdriverProcAddress_flags_enum = CUdriverProcAddress_flags_enum(
+ 0,
+ );
+}
+impl CUdriverProcAddress_flags_enum {
+ ///< Search for legacy versions of driver symbols.
+ pub const CU_GET_PROC_ADDRESS_LEGACY_STREAM: CUdriverProcAddress_flags_enum = CUdriverProcAddress_flags_enum(
+ 1,
+ );
+}
+impl CUdriverProcAddress_flags_enum {
+ ///< Search for per-thread versions of driver symbols.
+ pub const CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM: CUdriverProcAddress_flags_enum = CUdriverProcAddress_flags_enum(
+ 2,
+ );
+}
+#[repr(transparent)]
+/// Flags to specify search options. For more details see ::cuGetProcAddress
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUdriverProcAddress_flags_enum(pub ::core::ffi::c_uint);
+/// Flags to specify search options. For more details see ::cuGetProcAddress
+pub use self::CUdriverProcAddress_flags_enum as CUdriverProcAddress_flags;
+impl CUdriverProcAddressQueryResult_enum {
+ ///< Symbol was successfully found
+ pub const CU_GET_PROC_ADDRESS_SUCCESS: CUdriverProcAddressQueryResult_enum = CUdriverProcAddressQueryResult_enum(
+ 0,
+ );
+}
+impl CUdriverProcAddressQueryResult_enum {
+ ///< Symbol was not found in search
+ pub const CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND: CUdriverProcAddressQueryResult_enum = CUdriverProcAddressQueryResult_enum(
+ 1,
+ );
+}
+impl CUdriverProcAddressQueryResult_enum {
+ ///< Symbol was found but version supplied was not sufficient
+ pub const CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT: CUdriverProcAddressQueryResult_enum = CUdriverProcAddressQueryResult_enum(
+ 2,
+ );
+}
+#[repr(transparent)]
+/// Flags to indicate search status. For more details see ::cuGetProcAddress
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUdriverProcAddressQueryResult_enum(pub ::core::ffi::c_uint);
+/// Flags to indicate search status. For more details see ::cuGetProcAddress
+pub use self::CUdriverProcAddressQueryResult_enum as CUdriverProcAddressQueryResult;
+impl CUexecAffinityType_enum {
+ ///< Create a context with limited SMs.
+ pub const CU_EXEC_AFFINITY_TYPE_SM_COUNT: CUexecAffinityType_enum = CUexecAffinityType_enum(
+ 0,
+ );
+}
+impl CUexecAffinityType_enum {
+ pub const CU_EXEC_AFFINITY_TYPE_MAX: CUexecAffinityType_enum = CUexecAffinityType_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Execution Affinity Types
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUexecAffinityType_enum(pub ::core::ffi::c_uint);
+/// Execution Affinity Types
+pub use self::CUexecAffinityType_enum as CUexecAffinityType;
+/// Value for ::CU_EXEC_AFFINITY_TYPE_SM_COUNT
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUexecAffinitySmCount_st {
+ ///< The number of SMs the context is limited to use.
+ pub val: ::core::ffi::c_uint,
+}
+/// Value for ::CU_EXEC_AFFINITY_TYPE_SM_COUNT
+pub type CUexecAffinitySmCount_v1 = CUexecAffinitySmCount_st;
+/// Value for ::CU_EXEC_AFFINITY_TYPE_SM_COUNT
+pub type CUexecAffinitySmCount = CUexecAffinitySmCount_v1;
+/// Execution Affinity Parameters
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CUexecAffinityParam_st {
+ pub type_: CUexecAffinityType,
+ pub param: CUexecAffinityParam_st__bindgen_ty_1,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union CUexecAffinityParam_st__bindgen_ty_1 {
+ pub smCount: CUexecAffinitySmCount,
+}
+/// Execution Affinity Parameters
+pub type CUexecAffinityParam_v1 = CUexecAffinityParam_st;
+/// Execution Affinity Parameters
+pub type CUexecAffinityParam = CUexecAffinityParam_v1;
+impl CUlibraryOption_enum {
+ pub const CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE: CUlibraryOption_enum = CUlibraryOption_enum(
+ 0,
+ );
+}
+impl CUlibraryOption_enum {
+ /** Specifies that the argument \p code passed to ::cuLibraryLoadData() will be preserved.
+ Specifying this option will let the driver know that \p code can be accessed at any point
+ until ::cuLibraryUnload(). The default behavior is for the driver to allocate and
+ maintain its own copy of \p code. Note that this is only a memory usage optimization
+ hint and the driver can choose to ignore it if required.
+ Specifying this option with ::cuLibraryLoadFromFile() is invalid and
+ will return ::CUDA_ERROR_INVALID_VALUE.*/
+ pub const CU_LIBRARY_BINARY_IS_PRESERVED: CUlibraryOption_enum = CUlibraryOption_enum(
+ 1,
+ );
+}
+impl CUlibraryOption_enum {
+ ///< Number of library load options.
+ pub const CU_LIBRARY_NUM_OPTIONS: CUlibraryOption_enum = CUlibraryOption_enum(2);
+}
+#[repr(transparent)]
+/// Library options to be specified with ::cuLibraryLoadData() or ::cuLibraryLoadFromFile()
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUlibraryOption_enum(pub ::core::ffi::c_uint);
+/// Library options to be specified with ::cuLibraryLoadData() or ::cuLibraryLoadFromFile()
+pub use self::CUlibraryOption_enum as CUlibraryOption;
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUlibraryHostUniversalFunctionAndDataTable_st {
+ pub functionTable: *mut ::core::ffi::c_void,
+ pub functionWindowSize: usize,
+ pub dataTable: *mut ::core::ffi::c_void,
+ pub dataWindowSize: usize,
+}
+pub type CUlibraryHostUniversalFunctionAndDataTable = CUlibraryHostUniversalFunctionAndDataTable_st;
+/// Error codes
+#[must_use]
+pub type cudaError_enum = ::core::ffi::c_uint;
+impl CUdevice_P2PAttribute_enum {
+ ///< A relative value indicating the performance of the link between two devices
+ pub const CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK: CUdevice_P2PAttribute_enum = CUdevice_P2PAttribute_enum(
+ 1,
+ );
+}
+impl CUdevice_P2PAttribute_enum {
+ ///< P2P Access is enabled
+ pub const CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED: CUdevice_P2PAttribute_enum = CUdevice_P2PAttribute_enum(
+ 2,
+ );
+}
+impl CUdevice_P2PAttribute_enum {
+ ///< Atomic operation over the link supported
+ pub const CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED: CUdevice_P2PAttribute_enum = CUdevice_P2PAttribute_enum(
+ 3,
+ );
+}
+impl CUdevice_P2PAttribute_enum {
+ ///< \deprecated use CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED instead
+ pub const CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED: CUdevice_P2PAttribute_enum = CUdevice_P2PAttribute_enum(
+ 4,
+ );
+}
+impl CUdevice_P2PAttribute_enum {
+ ///< Accessing CUDA arrays over the link supported
+ pub const CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED: CUdevice_P2PAttribute_enum = CUdevice_P2PAttribute_enum(
+ 4,
+ );
+}
+#[repr(transparent)]
+/// P2P Attributes
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUdevice_P2PAttribute_enum(pub ::core::ffi::c_uint);
+/// P2P Attributes
+pub use self::CUdevice_P2PAttribute_enum as CUdevice_P2PAttribute;
+/** CUDA stream callback
+ \param hStream The stream the callback was added to, as passed to ::cuStreamAddCallback. May be NULL.
+ \param status ::CUDA_SUCCESS or any persistent error on the stream.
+ \param userData User parameter provided at registration.*/
+pub type CUstreamCallback = ::core::option::Option<
+ unsafe extern "system" fn(
+ hStream: CUstream,
+ status: CUresult,
+ userData: *mut ::core::ffi::c_void,
+ ),
+>;
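// Illustrative sketch, not part of the generated bindings: a function with the exact
// CUstreamCallback shape above, ready to be wrapped in Some(..) for cuStreamAddCallback.
unsafe extern "system" fn on_stream_done(
    _stream: CUstream,
    status: CUresult,
    _user_data: *mut ::core::ffi::c_void,
) {
    // `status` is CUDA_SUCCESS or the first persistent error on the stream.
    let _ = status;
}

const EXAMPLE_STREAM_CALLBACK: CUstreamCallback = Some(on_stream_done);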
+/** Block size to per-block dynamic shared memory mapping for a certain kernel.
+ \param blockSize Block size of the kernel.
+
+ \return The dynamic shared memory needed by a block.*/
+pub type CUoccupancyB2DSize = ::core::option::Option<
+ unsafe extern "system" fn(blockSize: ::core::ffi::c_int) -> usize,
+>;
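// Illustrative sketch, not part of the generated bindings: a CUoccupancyB2DSize mapping
// for a kernel assumed to need 16 bytes of dynamic shared memory per thread, as the
// occupancy calculator (cuOccupancyMaxPotentialBlockSize) would invoke it.
unsafe extern "system" fn smem_per_block(block_size: ::core::ffi::c_int) -> usize {
    block_size as usize * 16
}

const EXAMPLE_B2D_SIZE: CUoccupancyB2DSize = Some(smem_per_block);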
+/// 2D memory copy parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_MEMCPY2D_st {
+ ///< Source X in bytes
+ pub srcXInBytes: usize,
+ ///< Source Y
+ pub srcY: usize,
+ ///< Source memory type (host, device, array)
+ pub srcMemoryType: CUmemorytype,
+ ///< Source host pointer
+ pub srcHost: *const ::core::ffi::c_void,
+ ///< Source device pointer
+ pub srcDevice: CUdeviceptr,
+ ///< Source array reference
+ pub srcArray: CUarray,
+ ///< Source pitch (ignored when src is array)
+ pub srcPitch: usize,
+ ///< Destination X in bytes
+ pub dstXInBytes: usize,
+ ///< Destination Y
+ pub dstY: usize,
+ ///< Destination memory type (host, device, array)
+ pub dstMemoryType: CUmemorytype,
+ ///< Destination host pointer
+ pub dstHost: *mut ::core::ffi::c_void,
+ ///< Destination device pointer
+ pub dstDevice: CUdeviceptr,
+ ///< Destination array reference
+ pub dstArray: CUarray,
+ ///< Destination pitch (ignored when dst is array)
+ pub dstPitch: usize,
+ ///< Width of 2D memory copy in bytes
+ pub WidthInBytes: usize,
+ ///< Height of 2D memory copy
+ pub Height: usize,
+}
+/// 2D memory copy parameters
+pub type CUDA_MEMCPY2D_v2 = CUDA_MEMCPY2D_st;
+/// 2D memory copy parameters
+pub type CUDA_MEMCPY2D = CUDA_MEMCPY2D_v2;
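// Illustrative sketch, not part of the generated bindings: a host-to-device copy of
// `rows` rows of `row_bytes` bytes with independent pitches on each side. Unused
// handles are zeroed; the CU_MEMORYTYPE_* constants are assumed to be the usual ones
// defined earlier in these bindings.
fn example_memcpy2d(
    src: *const ::core::ffi::c_void,
    src_pitch: usize,
    dst: CUdeviceptr,
    dst_pitch: usize,
    row_bytes: usize,
    rows: usize,
) -> CUDA_MEMCPY2D {
    CUDA_MEMCPY2D {
        srcXInBytes: 0,
        srcY: 0,
        srcMemoryType: CUmemorytype::CU_MEMORYTYPE_HOST,
        srcHost: src,
        srcDevice: unsafe { ::core::mem::zeroed() }, // ignored for host sources
        srcArray: unsafe { ::core::mem::zeroed() },  // ignored unless the source is an array
        srcPitch: src_pitch,
        dstXInBytes: 0,
        dstY: 0,
        dstMemoryType: CUmemorytype::CU_MEMORYTYPE_DEVICE,
        dstHost: ::core::ptr::null_mut(), // ignored for device destinations
        dstDevice: dst,
        dstArray: unsafe { ::core::mem::zeroed() },
        dstPitch: dst_pitch,
        WidthInBytes: row_bytes,
        Height: rows,
    }
}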
+/// 3D memory copy parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_MEMCPY3D_st {
+ ///< Source X in bytes
+ pub srcXInBytes: usize,
+ ///< Source Y
+ pub srcY: usize,
+ ///< Source Z
+ pub srcZ: usize,
+ ///< Source LOD
+ pub srcLOD: usize,
+ ///< Source memory type (host, device, array)
+ pub srcMemoryType: CUmemorytype,
+ ///< Source host pointer
+ pub srcHost: *const ::core::ffi::c_void,
+ ///< Source device pointer
+ pub srcDevice: CUdeviceptr,
+ ///< Source array reference
+ pub srcArray: CUarray,
+ ///< Must be NULL
+ pub reserved0: *mut ::core::ffi::c_void,
+ ///< Source pitch (ignored when src is array)
+ pub srcPitch: usize,
+ ///< Source height (ignored when src is array; may be 0 if Depth==1)
+ pub srcHeight: usize,
+ ///< Destination X in bytes
+ pub dstXInBytes: usize,
+ ///< Destination Y
+ pub dstY: usize,
+ ///< Destination Z
+ pub dstZ: usize,
+ ///< Destination LOD
+ pub dstLOD: usize,
+ ///< Destination memory type (host, device, array)
+ pub dstMemoryType: CUmemorytype,
+ ///< Destination host pointer
+ pub dstHost: *mut ::core::ffi::c_void,
+ ///< Destination device pointer
+ pub dstDevice: CUdeviceptr,
+ ///< Destination array reference
+ pub dstArray: CUarray,
+ ///< Must be NULL
+ pub reserved1: *mut ::core::ffi::c_void,
+ ///< Destination pitch (ignored when dst is array)
+ pub dstPitch: usize,
+ ///< Destination height (ignored when dst is array; may be 0 if Depth==1)
+ pub dstHeight: usize,
+ ///< Width of 3D memory copy in bytes
+ pub WidthInBytes: usize,
+ ///< Height of 3D memory copy
+ pub Height: usize,
+ ///< Depth of 3D memory copy
+ pub Depth: usize,
+}
+/// 3D memory copy parameters
+pub type CUDA_MEMCPY3D_v2 = CUDA_MEMCPY3D_st;
+/// 3D memory copy parameters
+pub type CUDA_MEMCPY3D = CUDA_MEMCPY3D_v2;
+/// 3D memory cross-context copy parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_MEMCPY3D_PEER_st {
+ ///< Source X in bytes
+ pub srcXInBytes: usize,
+ ///< Source Y
+ pub srcY: usize,
+ ///< Source Z
+ pub srcZ: usize,
+ ///< Source LOD
+ pub srcLOD: usize,
+ ///< Source memory type (host, device, array)
+ pub srcMemoryType: CUmemorytype,
+ ///< Source host pointer
+ pub srcHost: *const ::core::ffi::c_void,
+ ///< Source device pointer
+ pub srcDevice: CUdeviceptr,
+ ///< Source array reference
+ pub srcArray: CUarray,
+ ///< Source context (ignored when srcMemoryType is ::CU_MEMORYTYPE_ARRAY)
+ pub srcContext: CUcontext,
+ ///< Source pitch (ignored when src is array)
+ pub srcPitch: usize,
+ ///< Source height (ignored when src is array; may be 0 if Depth==1)
+ pub srcHeight: usize,
+ ///< Destination X in bytes
+ pub dstXInBytes: usize,
+ ///< Destination Y
+ pub dstY: usize,
+ ///< Destination Z
+ pub dstZ: usize,
+ ///< Destination LOD
+ pub dstLOD: usize,
+ ///< Destination memory type (host, device, array)
+ pub dstMemoryType: CUmemorytype,
+ ///< Destination host pointer
+ pub dstHost: *mut ::core::ffi::c_void,
+ ///< Destination device pointer
+ pub dstDevice: CUdeviceptr,
+ ///< Destination array reference
+ pub dstArray: CUarray,
+ ///< Destination context (ignored when dstMemoryType is ::CU_MEMORYTYPE_ARRAY)
+ pub dstContext: CUcontext,
+ ///< Destination pitch (ignored when dst is array)
+ pub dstPitch: usize,
+ ///< Destination height (ignored when dst is array; may be 0 if Depth==1)
+ pub dstHeight: usize,
+ ///< Width of 3D memory copy in bytes
+ pub WidthInBytes: usize,
+ ///< Height of 3D memory copy
+ pub Height: usize,
+ ///< Depth of 3D memory copy
+ pub Depth: usize,
+}
+/// 3D memory cross-context copy parameters
+pub type CUDA_MEMCPY3D_PEER_v1 = CUDA_MEMCPY3D_PEER_st;
+/// 3D memory cross-context copy parameters
+pub type CUDA_MEMCPY3D_PEER = CUDA_MEMCPY3D_PEER_v1;
+/// Memcpy node parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_MEMCPY_NODE_PARAMS_st {
+ ///< Must be zero
+ pub flags: ::core::ffi::c_int,
+ ///< Must be zero
+ pub reserved: ::core::ffi::c_int,
+ ///< Context on which to run the node
+ pub copyCtx: CUcontext,
+ ///< Parameters for the memory copy
+ pub copyParams: CUDA_MEMCPY3D,
+}
+/// Memcpy node parameters
+pub type CUDA_MEMCPY_NODE_PARAMS = CUDA_MEMCPY_NODE_PARAMS_st;
+/// Array descriptor
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_ARRAY_DESCRIPTOR_st {
+ ///< Width of array
+ pub Width: usize,
+ ///< Height of array
+ pub Height: usize,
+ ///< Array format
+ pub Format: CUarray_format,
+ ///< Channels per array element
+ pub NumChannels: ::core::ffi::c_uint,
+}
+/// Array descriptor
+pub type CUDA_ARRAY_DESCRIPTOR_v2 = CUDA_ARRAY_DESCRIPTOR_st;
+/// Array descriptor
+pub type CUDA_ARRAY_DESCRIPTOR = CUDA_ARRAY_DESCRIPTOR_v2;
+/// 3D array descriptor
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_ARRAY3D_DESCRIPTOR_st {
+ ///< Width of 3D array
+ pub Width: usize,
+ ///< Height of 3D array
+ pub Height: usize,
+ ///< Depth of 3D array
+ pub Depth: usize,
+ ///< Array format
+ pub Format: CUarray_format,
+ ///< Channels per array element
+ pub NumChannels: ::core::ffi::c_uint,
+ ///< Flags
+ pub Flags: ::core::ffi::c_uint,
+}
+/// 3D array descriptor
+pub type CUDA_ARRAY3D_DESCRIPTOR_v2 = CUDA_ARRAY3D_DESCRIPTOR_st;
+/// 3D array descriptor
+pub type CUDA_ARRAY3D_DESCRIPTOR = CUDA_ARRAY3D_DESCRIPTOR_v2;
+/// CUDA array sparse properties
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_ARRAY_SPARSE_PROPERTIES_st {
+ pub tileExtent: CUDA_ARRAY_SPARSE_PROPERTIES_st__bindgen_ty_1,
+ /// First mip level at which the mip tail begins.
+ pub miptailFirstLevel: ::core::ffi::c_uint,
+ /// Total size of the mip tail.
+ pub miptailSize: ::core::ffi::c_ulonglong,
+ /// Flags will either be zero or ::CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL
+ pub flags: ::core::ffi::c_uint,
+ pub reserved: [::core::ffi::c_uint; 4usize],
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_ARRAY_SPARSE_PROPERTIES_st__bindgen_ty_1 {
+ ///< Width of sparse tile in elements
+ pub width: ::core::ffi::c_uint,
+ ///< Height of sparse tile in elements
+ pub height: ::core::ffi::c_uint,
+ ///< Depth of sparse tile in elements
+ pub depth: ::core::ffi::c_uint,
+}
+/// CUDA array sparse properties
+pub type CUDA_ARRAY_SPARSE_PROPERTIES_v1 = CUDA_ARRAY_SPARSE_PROPERTIES_st;
+/// CUDA array sparse properties
+pub type CUDA_ARRAY_SPARSE_PROPERTIES = CUDA_ARRAY_SPARSE_PROPERTIES_v1;
+/// CUDA array memory requirements
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_ARRAY_MEMORY_REQUIREMENTS_st {
+ ///< Total required memory size
+ pub size: usize,
+ ///< alignment requirement
+ pub alignment: usize,
+ pub reserved: [::core::ffi::c_uint; 4usize],
+}
+/// CUDA array memory requirements
+pub type CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 = CUDA_ARRAY_MEMORY_REQUIREMENTS_st;
+/// CUDA array memory requirements
+pub type CUDA_ARRAY_MEMORY_REQUIREMENTS = CUDA_ARRAY_MEMORY_REQUIREMENTS_v1;
+/// CUDA Resource descriptor
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CUDA_RESOURCE_DESC_st {
+ ///< Resource type
+ pub resType: CUresourcetype,
+ pub res: CUDA_RESOURCE_DESC_st__bindgen_ty_1,
+ ///< Flags (must be zero)
+ pub flags: ::core::ffi::c_uint,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union CUDA_RESOURCE_DESC_st__bindgen_ty_1 {
+ pub array: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_1,
+ pub mipmap: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_2,
+ pub linear: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_3,
+ pub pitch2D: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_4,
+ pub reserved: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_5,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_1 {
+ ///< CUDA array
+ pub hArray: CUarray,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_2 {
+ ///< CUDA mipmapped array
+ pub hMipmappedArray: CUmipmappedArray,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_3 {
+ ///< Device pointer
+ pub devPtr: CUdeviceptr,
+ ///< Array format
+ pub format: CUarray_format,
+ ///< Channels per array element
+ pub numChannels: ::core::ffi::c_uint,
+ ///< Size in bytes
+ pub sizeInBytes: usize,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_4 {
+ ///< Device pointer
+ pub devPtr: CUdeviceptr,
+ ///< Array format
+ pub format: CUarray_format,
+ ///< Channels per array element
+ pub numChannels: ::core::ffi::c_uint,
+ ///< Width of the array in elements
+ pub width: usize,
+ ///< Height of the array in elements
+ pub height: usize,
+ ///< Pitch between two rows in bytes
+ pub pitchInBytes: usize,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_5 {
+ pub reserved: [::core::ffi::c_int; 32usize],
+}
+/// CUDA Resource descriptor
+pub type CUDA_RESOURCE_DESC_v1 = CUDA_RESOURCE_DESC_st;
+/// CUDA Resource descriptor
+pub type CUDA_RESOURCE_DESC = CUDA_RESOURCE_DESC_v1;
+/// Texture descriptor
+#[repr(C)]
+#[derive(Debug, Copy, Clone, PartialEq)]
+pub struct CUDA_TEXTURE_DESC_st {
+ ///< Address modes
+ pub addressMode: [CUaddress_mode; 3usize],
+ ///< Filter mode
+ pub filterMode: CUfilter_mode,
+ ///< Flags
+ pub flags: ::core::ffi::c_uint,
+ ///< Maximum anisotropy ratio
+ pub maxAnisotropy: ::core::ffi::c_uint,
+ ///< Mipmap filter mode
+ pub mipmapFilterMode: CUfilter_mode,
+ ///< Mipmap level bias
+ pub mipmapLevelBias: f32,
+ ///< Mipmap minimum level clamp
+ pub minMipmapLevelClamp: f32,
+ ///< Mipmap maximum level clamp
+ pub maxMipmapLevelClamp: f32,
+ ///< Border Color
+ pub borderColor: [f32; 4usize],
+ pub reserved: [::core::ffi::c_int; 12usize],
+}
+/// Texture descriptor
+pub type CUDA_TEXTURE_DESC_v1 = CUDA_TEXTURE_DESC_st;
+/// Texture descriptor
+pub type CUDA_TEXTURE_DESC = CUDA_TEXTURE_DESC_v1;
+impl CUresourceViewFormat_enum {
+ ///< No resource view format (use underlying resource format)
+ pub const CU_RES_VIEW_FORMAT_NONE: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 0,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 1 channel unsigned 8-bit integers
+ pub const CU_RES_VIEW_FORMAT_UINT_1X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 1,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 2 channel unsigned 8-bit integers
+ pub const CU_RES_VIEW_FORMAT_UINT_2X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 2,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 4 channel unsigned 8-bit integers
+ pub const CU_RES_VIEW_FORMAT_UINT_4X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 3,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 1 channel signed 8-bit integers
+ pub const CU_RES_VIEW_FORMAT_SINT_1X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 4,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 2 channel signed 8-bit integers
+ pub const CU_RES_VIEW_FORMAT_SINT_2X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 5,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 4 channel signed 8-bit integers
+ pub const CU_RES_VIEW_FORMAT_SINT_4X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 6,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 1 channel unsigned 16-bit integers
+ pub const CU_RES_VIEW_FORMAT_UINT_1X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 7,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 2 channel unsigned 16-bit integers
+ pub const CU_RES_VIEW_FORMAT_UINT_2X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 8,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 4 channel unsigned 16-bit integers
+ pub const CU_RES_VIEW_FORMAT_UINT_4X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 9,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 1 channel signed 16-bit integers
+ pub const CU_RES_VIEW_FORMAT_SINT_1X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 10,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 2 channel signed 16-bit integers
+ pub const CU_RES_VIEW_FORMAT_SINT_2X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 11,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 4 channel signed 16-bit integers
+ pub const CU_RES_VIEW_FORMAT_SINT_4X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 12,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 1 channel unsigned 32-bit integers
+ pub const CU_RES_VIEW_FORMAT_UINT_1X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 13,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 2 channel unsigned 32-bit integers
+ pub const CU_RES_VIEW_FORMAT_UINT_2X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 14,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 4 channel unsigned 32-bit integers
+ pub const CU_RES_VIEW_FORMAT_UINT_4X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 15,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 1 channel signed 32-bit integers
+ pub const CU_RES_VIEW_FORMAT_SINT_1X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 16,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 2 channel signed 32-bit integers
+ pub const CU_RES_VIEW_FORMAT_SINT_2X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 17,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 4 channel signed 32-bit integers
+ pub const CU_RES_VIEW_FORMAT_SINT_4X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 18,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 1 channel 16-bit floating point
+ pub const CU_RES_VIEW_FORMAT_FLOAT_1X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 19,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 2 channel 16-bit floating point
+ pub const CU_RES_VIEW_FORMAT_FLOAT_2X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 20,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 4 channel 16-bit floating point
+ pub const CU_RES_VIEW_FORMAT_FLOAT_4X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 21,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 1 channel 32-bit floating point
+ pub const CU_RES_VIEW_FORMAT_FLOAT_1X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 22,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 2 channel 32-bit floating point
+ pub const CU_RES_VIEW_FORMAT_FLOAT_2X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 23,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< 4 channel 32-bit floating point
+ pub const CU_RES_VIEW_FORMAT_FLOAT_4X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 24,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< Block compressed 1
+ pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC1: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 25,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< Block compressed 2
+ pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC2: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 26,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< Block compressed 3
+ pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC3: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 27,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< Block compressed 4 unsigned
+ pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC4: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 28,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< Block compressed 4 signed
+ pub const CU_RES_VIEW_FORMAT_SIGNED_BC4: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 29,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< Block compressed 5 unsigned
+ pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC5: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 30,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< Block compressed 5 signed
+ pub const CU_RES_VIEW_FORMAT_SIGNED_BC5: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 31,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< Block compressed 6 unsigned half-float
+ pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC6H: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 32,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< Block compressed 6 signed half-float
+ pub const CU_RES_VIEW_FORMAT_SIGNED_BC6H: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 33,
+ );
+}
+impl CUresourceViewFormat_enum {
+ ///< Block compressed 7
+ pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC7: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
+ 34,
+ );
+}
+#[repr(transparent)]
+/// Resource view format
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUresourceViewFormat_enum(pub ::core::ffi::c_uint);
+/// Resource view format
+pub use self::CUresourceViewFormat_enum as CUresourceViewFormat;
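// Illustrative sketch, not part of the generated bindings: these "enums" are
// transparent newtypes over c_uint, so the variants are associated constants
// and values are inspected by comparison rather than by exhaustive `match`.
fn float_channel_count(format: CUresourceViewFormat) -> Option<u32> {
    if format == CUresourceViewFormat::CU_RES_VIEW_FORMAT_FLOAT_1X32 {
        Some(1)
    } else if format == CUresourceViewFormat::CU_RES_VIEW_FORMAT_FLOAT_2X32 {
        Some(2)
    } else if format == CUresourceViewFormat::CU_RES_VIEW_FORMAT_FLOAT_4X32 {
        Some(4)
    } else {
        None
    }
}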
+/// Resource view descriptor
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_RESOURCE_VIEW_DESC_st {
+ ///< Resource view format
+ pub format: CUresourceViewFormat,
+ ///< Width of the resource view
+ pub width: usize,
+ ///< Height of the resource view
+ pub height: usize,
+ ///< Depth of the resource view
+ pub depth: usize,
+ ///< First defined mipmap level
+ pub firstMipmapLevel: ::core::ffi::c_uint,
+ ///< Last defined mipmap level
+ pub lastMipmapLevel: ::core::ffi::c_uint,
+ ///< First layer index
+ pub firstLayer: ::core::ffi::c_uint,
+ ///< Last layer index
+ pub lastLayer: ::core::ffi::c_uint,
+ pub reserved: [::core::ffi::c_uint; 16usize],
+}
+/// Resource view descriptor
+pub type CUDA_RESOURCE_VIEW_DESC_v1 = CUDA_RESOURCE_VIEW_DESC_st;
+/// Resource view descriptor
+pub type CUDA_RESOURCE_VIEW_DESC = CUDA_RESOURCE_VIEW_DESC_v1;
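// Illustrative sketch, not part of the generated bindings: a view that
// reinterprets the first mip level of a 1024 x 1024 resource as 4-channel
// 32-bit floats, with the layer and mip ranges collapsed to the first entry.
fn example_resource_view() -> CUDA_RESOURCE_VIEW_DESC {
    CUDA_RESOURCE_VIEW_DESC_st {
        format: CUresourceViewFormat::CU_RES_VIEW_FORMAT_FLOAT_4X32,
        width: 1024,
        height: 1024,
        depth: 0,
        firstMipmapLevel: 0,
        lastMipmapLevel: 0,
        firstLayer: 0,
        lastLayer: 0,
        reserved: [0; 16],
    }
}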
+/// Tensor map descriptor. Requires compiler support for aligning to 64 bytes.
+#[repr(C)]
+#[repr(align(64))]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUtensorMap_st {
+ pub opaque: [cuuint64_t; 16usize],
+}
+/// Tensor map descriptor. Requires compiler support for aligning to 64 bytes.
+pub type CUtensorMap = CUtensorMap_st;
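// Illustrative sketch, not part of the generated bindings: the tensor map is
// an opaque block of sixteen 64-bit words with 64-byte alignment; these const
// assertions make the resulting layout explicit.
const _: () = assert!(::core::mem::size_of::<CUtensorMap>() == 128);
const _: () = assert!(::core::mem::align_of::<CUtensorMap>() == 64);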
+impl CUtensorMapDataType_enum {
+ pub const CU_TENSOR_MAP_DATA_TYPE_UINT8: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
+ 0,
+ );
+}
+impl CUtensorMapDataType_enum {
+ pub const CU_TENSOR_MAP_DATA_TYPE_UINT16: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
+ 1,
+ );
+}
+impl CUtensorMapDataType_enum {
+ pub const CU_TENSOR_MAP_DATA_TYPE_UINT32: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
+ 2,
+ );
+}
+impl CUtensorMapDataType_enum {
+ pub const CU_TENSOR_MAP_DATA_TYPE_INT32: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
+ 3,
+ );
+}
+impl CUtensorMapDataType_enum {
+ pub const CU_TENSOR_MAP_DATA_TYPE_UINT64: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
+ 4,
+ );
+}
+impl CUtensorMapDataType_enum {
+ pub const CU_TENSOR_MAP_DATA_TYPE_INT64: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
+ 5,
+ );
+}
+impl CUtensorMapDataType_enum {
+ pub const CU_TENSOR_MAP_DATA_TYPE_FLOAT16: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
+ 6,
+ );
+}
+impl CUtensorMapDataType_enum {
+ pub const CU_TENSOR_MAP_DATA_TYPE_FLOAT32: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
+ 7,
+ );
+}
+impl CUtensorMapDataType_enum {
+ pub const CU_TENSOR_MAP_DATA_TYPE_FLOAT64: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
+ 8,
+ );
+}
+impl CUtensorMapDataType_enum {
+ pub const CU_TENSOR_MAP_DATA_TYPE_BFLOAT16: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
+ 9,
+ );
+}
+impl CUtensorMapDataType_enum {
+ pub const CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
+ 10,
+ );
+}
+impl CUtensorMapDataType_enum {
+ pub const CU_TENSOR_MAP_DATA_TYPE_TFLOAT32: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
+ 11,
+ );
+}
+impl CUtensorMapDataType_enum {
+ pub const CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
+ 12,
+ );
+}
+#[repr(transparent)]
+/// Tensor map data type
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUtensorMapDataType_enum(pub ::core::ffi::c_uint);
+/// Tensor map data type
+pub use self::CUtensorMapDataType_enum as CUtensorMapDataType;
+impl CUtensorMapInterleave_enum {
+ pub const CU_TENSOR_MAP_INTERLEAVE_NONE: CUtensorMapInterleave_enum = CUtensorMapInterleave_enum(
+ 0,
+ );
+}
+impl CUtensorMapInterleave_enum {
+ pub const CU_TENSOR_MAP_INTERLEAVE_16B: CUtensorMapInterleave_enum = CUtensorMapInterleave_enum(
+ 1,
+ );
+}
+impl CUtensorMapInterleave_enum {
+ pub const CU_TENSOR_MAP_INTERLEAVE_32B: CUtensorMapInterleave_enum = CUtensorMapInterleave_enum(
+ 2,
+ );
+}
+#[repr(transparent)]
+/// Tensor map interleave layout type
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUtensorMapInterleave_enum(pub ::core::ffi::c_uint);
+/// Tensor map interleave layout type
+pub use self::CUtensorMapInterleave_enum as CUtensorMapInterleave;
+impl CUtensorMapSwizzle_enum {
+ pub const CU_TENSOR_MAP_SWIZZLE_NONE: CUtensorMapSwizzle_enum = CUtensorMapSwizzle_enum(
+ 0,
+ );
+}
+impl CUtensorMapSwizzle_enum {
+ pub const CU_TENSOR_MAP_SWIZZLE_32B: CUtensorMapSwizzle_enum = CUtensorMapSwizzle_enum(
+ 1,
+ );
+}
+impl CUtensorMapSwizzle_enum {
+ pub const CU_TENSOR_MAP_SWIZZLE_64B: CUtensorMapSwizzle_enum = CUtensorMapSwizzle_enum(
+ 2,
+ );
+}
+impl CUtensorMapSwizzle_enum {
+ pub const CU_TENSOR_MAP_SWIZZLE_128B: CUtensorMapSwizzle_enum = CUtensorMapSwizzle_enum(
+ 3,
+ );
+}
+#[repr(transparent)]
+/// Tensor map swizzling mode of shared memory banks
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUtensorMapSwizzle_enum(pub ::core::ffi::c_uint);
+/// Tensor map swizzling mode of shared memory banks
+pub use self::CUtensorMapSwizzle_enum as CUtensorMapSwizzle;
+impl CUtensorMapL2promotion_enum {
+ pub const CU_TENSOR_MAP_L2_PROMOTION_NONE: CUtensorMapL2promotion_enum = CUtensorMapL2promotion_enum(
+ 0,
+ );
+}
+impl CUtensorMapL2promotion_enum {
+ pub const CU_TENSOR_MAP_L2_PROMOTION_L2_64B: CUtensorMapL2promotion_enum = CUtensorMapL2promotion_enum(
+ 1,
+ );
+}
+impl CUtensorMapL2promotion_enum {
+ pub const CU_TENSOR_MAP_L2_PROMOTION_L2_128B: CUtensorMapL2promotion_enum = CUtensorMapL2promotion_enum(
+ 2,
+ );
+}
+impl CUtensorMapL2promotion_enum {
+ pub const CU_TENSOR_MAP_L2_PROMOTION_L2_256B: CUtensorMapL2promotion_enum = CUtensorMapL2promotion_enum(
+ 3,
+ );
+}
+#[repr(transparent)]
+/// Tensor map L2 promotion type
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUtensorMapL2promotion_enum(pub ::core::ffi::c_uint);
+/// Tensor map L2 promotion type
+pub use self::CUtensorMapL2promotion_enum as CUtensorMapL2promotion;
+impl CUtensorMapFloatOOBfill_enum {
+ pub const CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE: CUtensorMapFloatOOBfill_enum = CUtensorMapFloatOOBfill_enum(
+ 0,
+ );
+}
+impl CUtensorMapFloatOOBfill_enum {
+ pub const CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA: CUtensorMapFloatOOBfill_enum = CUtensorMapFloatOOBfill_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Tensor map out-of-bounds fill type
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUtensorMapFloatOOBfill_enum(pub ::core::ffi::c_uint);
+/// Tensor map out-of-bounds fill type
+pub use self::CUtensorMapFloatOOBfill_enum as CUtensorMapFloatOOBfill;
+/// GPU Direct v3 tokens
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st {
+ pub p2pToken: ::core::ffi::c_ulonglong,
+ pub vaSpaceToken: ::core::ffi::c_uint,
+}
+/// GPU Direct v3 tokens
+pub type CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1 = CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st;
+/// GPU Direct v3 tokens
+pub type CUDA_POINTER_ATTRIBUTE_P2P_TOKENS = CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1;
+impl CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum {
+    ///< No access, meaning the device cannot access this memory at all; data must therefore be staged through accessible memory in order to complete certain operations
+ pub const CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE: CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum = CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum(
+ 0,
+ );
+}
+impl CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum {
+    ///< Read-only access, meaning writes to this memory are considered invalid accesses and thus return an error in that case.
+ pub const CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ: CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum = CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum(
+ 1,
+ );
+}
+impl CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum {
+ ///< Read-write access, the device has full read-write access to the memory
+ pub const CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE: CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum = CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum(
+ 3,
+ );
+}
+#[repr(transparent)]
+/** Access flags that specify the level of access the current context's device has
+ on the memory referenced.*/
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum(pub ::core::ffi::c_uint);
+/** Access flags that specify the level of access the current context's device has
+ on the memory referenced.*/
+pub use self::CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum as CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS;
+/// Kernel launch parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_LAUNCH_PARAMS_st {
+ ///< Kernel to launch
+ pub function: CUfunction,
+ ///< Width of grid in blocks
+ pub gridDimX: ::core::ffi::c_uint,
+ ///< Height of grid in blocks
+ pub gridDimY: ::core::ffi::c_uint,
+ ///< Depth of grid in blocks
+ pub gridDimZ: ::core::ffi::c_uint,
+ ///< X dimension of each thread block
+ pub blockDimX: ::core::ffi::c_uint,
+ ///< Y dimension of each thread block
+ pub blockDimY: ::core::ffi::c_uint,
+ ///< Z dimension of each thread block
+ pub blockDimZ: ::core::ffi::c_uint,
+ ///< Dynamic shared-memory size per thread block in bytes
+ pub sharedMemBytes: ::core::ffi::c_uint,
+ ///< Stream identifier
+ pub hStream: CUstream,
+ ///< Array of pointers to kernel parameters
+ pub kernelParams: *mut *mut ::core::ffi::c_void,
+}
+/// Kernel launch parameters
+pub type CUDA_LAUNCH_PARAMS_v1 = CUDA_LAUNCH_PARAMS_st;
+/// Kernel launch parameters
+pub type CUDA_LAUNCH_PARAMS = CUDA_LAUNCH_PARAMS_v1;
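// Illustrative sketch, not part of the generated bindings: packaging a 1-D
// launch of kernel `f` on `stream` for driver APIs that take an array of
// CUDA_LAUNCH_PARAMS (e.g. cuLaunchCooperativeKernelMultiDevice). Uses the
// CUfunction and CUstream handle types defined earlier in this module; the
// caller must keep the pointed-to kernel-argument array alive for the launch.
fn example_launch_params(
    f: CUfunction,
    stream: CUstream,
    kernel_args: *mut *mut ::core::ffi::c_void,
) -> CUDA_LAUNCH_PARAMS {
    CUDA_LAUNCH_PARAMS_st {
        function: f,
        gridDimX: 256,
        gridDimY: 1,
        gridDimZ: 1,
        blockDimX: 128,
        blockDimY: 1,
        blockDimZ: 1,
        sharedMemBytes: 0,
        hStream: stream,
        kernelParams: kernel_args,
    }
}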
+impl CUexternalMemoryHandleType_enum {
+ /// Handle is an opaque file descriptor
+ pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum(
+ 1,
+ );
+}
+impl CUexternalMemoryHandleType_enum {
+ /// Handle is an opaque shared NT handle
+ pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum(
+ 2,
+ );
+}
+impl CUexternalMemoryHandleType_enum {
+ /// Handle is an opaque, globally shared handle
+ pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum(
+ 3,
+ );
+}
+impl CUexternalMemoryHandleType_enum {
+ /// Handle is a D3D12 heap object
+ pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum(
+ 4,
+ );
+}
+impl CUexternalMemoryHandleType_enum {
+ /// Handle is a D3D12 committed resource
+ pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum(
+ 5,
+ );
+}
+impl CUexternalMemoryHandleType_enum {
+ /// Handle is a shared NT handle to a D3D11 resource
+ pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum(
+ 6,
+ );
+}
+impl CUexternalMemoryHandleType_enum {
+ /// Handle is a globally shared handle to a D3D11 resource
+ pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum(
+ 7,
+ );
+}
+impl CUexternalMemoryHandleType_enum {
+ /// Handle is an NvSciBuf object
+ pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum(
+ 8,
+ );
+}
+#[repr(transparent)]
+/// External memory handle types
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUexternalMemoryHandleType_enum(pub ::core::ffi::c_uint);
+/// External memory handle types
+pub use self::CUexternalMemoryHandleType_enum as CUexternalMemoryHandleType;
+/// External memory handle descriptor
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st {
+ /// Type of the handle
+ pub type_: CUexternalMemoryHandleType,
+ pub handle: CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1,
+ /// Size of the memory allocation
+ pub size: ::core::ffi::c_ulonglong,
+ /// Flags must either be zero or ::CUDA_EXTERNAL_MEMORY_DEDICATED
+ pub flags: ::core::ffi::c_uint,
+ pub reserved: [::core::ffi::c_uint; 16usize],
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1 {
+ /** File descriptor referencing the memory object. Valid
+ when type is
+ ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD*/
+ pub fd: ::core::ffi::c_int,
+ pub win32: CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1,
+ /** A handle representing an NvSciBuf Object. Valid when type
+ is ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF*/
+ pub nvSciBufObject: *const ::core::ffi::c_void,
+}
+/** Win32 handle referencing the memory object. Valid when
+ type is one of the following:
+ - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32
+ - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT
+ - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP
+ - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE
+ - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE
+ - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT
+ Exactly one of 'handle' and 'name' must be non-NULL. If
+ type is one of the following:
+ ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT
+ ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT
+ then 'name' must be NULL.*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1 {
+ /// Valid NT handle. Must be NULL if 'name' is non-NULL
+ pub handle: *mut ::core::ffi::c_void,
+ /** Name of a valid memory object.
+ Must be NULL if 'handle' is non-NULL.*/
+ pub name: *const ::core::ffi::c_void,
+}
+/// External memory handle descriptor
+pub type CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1 = CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st;
+/// External memory handle descriptor
+pub type CUDA_EXTERNAL_MEMORY_HANDLE_DESC = CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1;
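// Illustrative sketch, not part of the generated bindings: importing external
// memory through a POSIX file descriptor. The handle field is a C union, so
// the descriptor is zero-initialized and only the `fd` member is written
// (writing a Copy union field is safe; reading one back would be unsafe).
fn example_external_memory_desc(
    fd: ::core::ffi::c_int,
    size: ::core::ffi::c_ulonglong,
) -> CUDA_EXTERNAL_MEMORY_HANDLE_DESC {
    let mut desc: CUDA_EXTERNAL_MEMORY_HANDLE_DESC = unsafe { ::core::mem::zeroed() };
    desc.type_ = CUexternalMemoryHandleType::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD;
    desc.handle.fd = fd;
    desc.size = size;
    desc
}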
+/// External memory buffer descriptor
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st {
+ /// Offset into the memory object where the buffer's base is
+ pub offset: ::core::ffi::c_ulonglong,
+ /// Size of the buffer
+ pub size: ::core::ffi::c_ulonglong,
+ /// Flags reserved for future use. Must be zero.
+ pub flags: ::core::ffi::c_uint,
+ pub reserved: [::core::ffi::c_uint; 16usize],
+}
+/// External memory buffer descriptor
+pub type CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1 = CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st;
+/// External memory buffer descriptor
+pub type CUDA_EXTERNAL_MEMORY_BUFFER_DESC = CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1;
+/// External memory mipmap descriptor
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st {
+ /** Offset into the memory object where the base level of the
+ mipmap chain is.*/
+ pub offset: ::core::ffi::c_ulonglong,
+ /// Format, dimension and type of base level of the mipmap chain
+ pub arrayDesc: CUDA_ARRAY3D_DESCRIPTOR,
+ /// Total number of levels in the mipmap chain
+ pub numLevels: ::core::ffi::c_uint,
+ pub reserved: [::core::ffi::c_uint; 16usize],
+}
+/// External memory mipmap descriptor
+pub type CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1 = CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st;
+/// External memory mipmap descriptor
+pub type CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC = CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1;
+impl CUexternalSemaphoreHandleType_enum {
+ /// Handle is an opaque file descriptor
+ pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(
+ 1,
+ );
+}
+impl CUexternalSemaphoreHandleType_enum {
+ /// Handle is an opaque shared NT handle
+ pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(
+ 2,
+ );
+}
+impl CUexternalSemaphoreHandleType_enum {
+ /// Handle is an opaque, globally shared handle
+ pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(
+ 3,
+ );
+}
+impl CUexternalSemaphoreHandleType_enum {
+ /// Handle is a shared NT handle referencing a D3D12 fence object
+ pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(
+ 4,
+ );
+}
+impl CUexternalSemaphoreHandleType_enum {
+ /// Handle is a shared NT handle referencing a D3D11 fence object
+ pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(
+ 5,
+ );
+}
+impl CUexternalSemaphoreHandleType_enum {
+ /// Opaque handle to NvSciSync Object
+ pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(
+ 6,
+ );
+}
+impl CUexternalSemaphoreHandleType_enum {
+ /// Handle is a shared NT handle referencing a D3D11 keyed mutex object
+ pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(
+ 7,
+ );
+}
+impl CUexternalSemaphoreHandleType_enum {
+ /// Handle is a globally shared handle referencing a D3D11 keyed mutex object
+ pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(
+ 8,
+ );
+}
+impl CUexternalSemaphoreHandleType_enum {
+ /// Handle is an opaque file descriptor referencing a timeline semaphore
+ pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(
+ 9,
+ );
+}
+impl CUexternalSemaphoreHandleType_enum {
+ /// Handle is an opaque shared NT handle referencing a timeline semaphore
+ pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(
+ 10,
+ );
+}
+#[repr(transparent)]
+/// External semaphore handle types
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUexternalSemaphoreHandleType_enum(pub ::core::ffi::c_uint);
+/// External semaphore handle types
+pub use self::CUexternalSemaphoreHandleType_enum as CUexternalSemaphoreHandleType;
+/// External semaphore handle descriptor
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st {
+ /// Type of the handle
+ pub type_: CUexternalSemaphoreHandleType,
+ pub handle: CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1,
+ /// Flags reserved for the future. Must be zero.
+ pub flags: ::core::ffi::c_uint,
+ pub reserved: [::core::ffi::c_uint; 16usize],
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1 {
+ /** File descriptor referencing the semaphore object. Valid
+ when type is one of the following:
+ - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD
+ - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD*/
+ pub fd: ::core::ffi::c_int,
+ pub win32: CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1,
+ /// Valid NvSciSyncObj. Must be non NULL
+ pub nvSciSyncObj: *const ::core::ffi::c_void,
+}
+/** Win32 handle referencing the semaphore object. Valid when
+ type is one of the following:
+ - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32
+ - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT
+ - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE
+ - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE
+ - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX
+ - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32
+ Exactly one of 'handle' and 'name' must be non-NULL. If
+ type is one of the following:
+ - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT
+ - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT
+ then 'name' must be NULL.*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1 {
+ /// Valid NT handle. Must be NULL if 'name' is non-NULL
+ pub handle: *mut ::core::ffi::c_void,
+ /** Name of a valid synchronization primitive.
+ Must be NULL if 'handle' is non-NULL.*/
+ pub name: *const ::core::ffi::c_void,
+}
+/// External semaphore handle descriptor
+pub type CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1 = CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st;
+/// External semaphore handle descriptor
+pub type CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC = CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1;
+/// External semaphore signal parameters
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st {
+ pub params: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1,
+ /** Only when ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS is used to
+ signal a ::CUexternalSemaphore of type
+ ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, the valid flag is
+ ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC which indicates
+ that while signaling the ::CUexternalSemaphore, no memory synchronization
+ operations should be performed for any external memory object imported
+ as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF.
+ For all other types of ::CUexternalSemaphore, flags must be zero.*/
+ pub flags: ::core::ffi::c_uint,
+ pub reserved: [::core::ffi::c_uint; 16usize],
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1 {
+ pub fence: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_1,
+ pub nvSciSync: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_2,
+ pub keyedMutex: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_3,
+ pub reserved: [::core::ffi::c_uint; 12usize],
+}
+/// Parameters for fence objects
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_1 {
+ /// Value of fence to be signaled
+ pub value: ::core::ffi::c_ulonglong,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_2 {
+ /** Pointer to NvSciSyncFence. Valid if ::CUexternalSemaphoreHandleType
+ is of type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC.*/
+ pub fence: *mut ::core::ffi::c_void,
+ pub reserved: ::core::ffi::c_ulonglong,
+}
+/// Parameters for keyed mutex objects
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_3 {
+ /// Value of key to release the mutex with
+ pub key: ::core::ffi::c_ulonglong,
+}
+/// External semaphore signal parameters
+pub type CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1 = CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st;
+/// External semaphore signal parameters
+pub type CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS = CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1;
+/// External semaphore wait parameters
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st {
+ pub params: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1,
+ /** Only when ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS is used to wait on
+ a ::CUexternalSemaphore of type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC,
+ the valid flag is ::CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC
+ which indicates that while waiting for the ::CUexternalSemaphore, no memory
+ synchronization operations should be performed for any external memory
+ object imported as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF.
+ For all other types of ::CUexternalSemaphore, flags must be zero.*/
+ pub flags: ::core::ffi::c_uint,
+ pub reserved: [::core::ffi::c_uint; 16usize],
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1 {
+ pub fence: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_1,
+ pub nvSciSync: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_2,
+ pub keyedMutex: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_3,
+ pub reserved: [::core::ffi::c_uint; 10usize],
+}
+/// Parameters for fence objects
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_1 {
+ /// Value of fence to be waited on
+ pub value: ::core::ffi::c_ulonglong,
+}
+/** Pointer to NvSciSyncFence. Valid if CUexternalSemaphoreHandleType
+ is of type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC.*/
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_2 {
+ pub fence: *mut ::core::ffi::c_void,
+ pub reserved: ::core::ffi::c_ulonglong,
+}
+/// Parameters for keyed mutex objects
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_3 {
+ /// Value of key to acquire the mutex with
+ pub key: ::core::ffi::c_ulonglong,
+ /// Timeout in milliseconds to wait to acquire the mutex
+ pub timeoutMs: ::core::ffi::c_uint,
+}
+/// External semaphore wait parameters
+pub type CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1 = CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st;
+/// External semaphore wait parameters
+pub type CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS = CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1;
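// Illustrative sketch, not part of the generated bindings: waiting until a
// fence-style external semaphore (e.g. a D3D12 fence or timeline semaphore)
// reaches `value`. Only the fence member of the inner parameter block is
// meaningful for those handle types; everything else stays zeroed.
fn example_wait_params(value: ::core::ffi::c_ulonglong) -> CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS {
    let mut params: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS = unsafe { ::core::mem::zeroed() };
    params.params.fence.value = value;
    params
}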
+/// Semaphore signal node parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st {
+ ///< Array of external semaphore handles.
+ pub extSemArray: *mut CUexternalSemaphore,
+ ///< Array of external semaphore signal parameters.
+ pub paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,
+ ///< Number of handles and parameters supplied in extSemArray and paramsArray.
+ pub numExtSems: ::core::ffi::c_uint,
+}
+/// Semaphore signal node parameters
+pub type CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 = CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st;
+/// Semaphore signal node parameters
+pub type CUDA_EXT_SEM_SIGNAL_NODE_PARAMS = CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1;
+/// Semaphore signal node parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st {
+ ///< Array of external semaphore handles.
+ pub extSemArray: *mut CUexternalSemaphore,
+ ///< Array of external semaphore signal parameters.
+ pub paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,
+ ///< Number of handles and parameters supplied in extSemArray and paramsArray.
+ pub numExtSems: ::core::ffi::c_uint,
+}
+/// Semaphore signal node parameters
+pub type CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2 = CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st;
+/// Semaphore wait node parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_EXT_SEM_WAIT_NODE_PARAMS_st {
+ ///< Array of external semaphore handles.
+ pub extSemArray: *mut CUexternalSemaphore,
+ ///< Array of external semaphore wait parameters.
+ pub paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,
+ ///< Number of handles and parameters supplied in extSemArray and paramsArray.
+ pub numExtSems: ::core::ffi::c_uint,
+}
+/// Semaphore wait node parameters
+pub type CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 = CUDA_EXT_SEM_WAIT_NODE_PARAMS_st;
+/// Semaphore wait node parameters
+pub type CUDA_EXT_SEM_WAIT_NODE_PARAMS = CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1;
+/// Semaphore wait node parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st {
+ ///< Array of external semaphore handles.
+ pub extSemArray: *mut CUexternalSemaphore,
+ ///< Array of external semaphore wait parameters.
+ pub paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,
+ ///< Number of handles and parameters supplied in extSemArray and paramsArray.
+ pub numExtSems: ::core::ffi::c_uint,
+}
+/// Semaphore wait node parameters
+pub type CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2 = CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st;
+pub type CUmemGenericAllocationHandle_v1 = ::core::ffi::c_ulonglong;
+pub type CUmemGenericAllocationHandle = CUmemGenericAllocationHandle_v1;
+impl CUmemAllocationHandleType_enum {
+    ///< Does not allow any export mechanism.
+ pub const CU_MEM_HANDLE_TYPE_NONE: CUmemAllocationHandleType_enum = CUmemAllocationHandleType_enum(
+ 0,
+ );
+}
+impl CUmemAllocationHandleType_enum {
+ ///< Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int)
+ pub const CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR: CUmemAllocationHandleType_enum = CUmemAllocationHandleType_enum(
+ 1,
+ );
+}
+impl CUmemAllocationHandleType_enum {
+ ///< Allows a Win32 NT handle to be used for exporting. (HANDLE)
+ pub const CU_MEM_HANDLE_TYPE_WIN32: CUmemAllocationHandleType_enum = CUmemAllocationHandleType_enum(
+ 2,
+ );
+}
+impl CUmemAllocationHandleType_enum {
+ ///< Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE)
+ pub const CU_MEM_HANDLE_TYPE_WIN32_KMT: CUmemAllocationHandleType_enum = CUmemAllocationHandleType_enum(
+ 4,
+ );
+}
+impl CUmemAllocationHandleType_enum {
+ ///< Allows a fabric handle to be used for exporting. (CUmemFabricHandle)
+ pub const CU_MEM_HANDLE_TYPE_FABRIC: CUmemAllocationHandleType_enum = CUmemAllocationHandleType_enum(
+ 8,
+ );
+}
+impl CUmemAllocationHandleType_enum {
+ pub const CU_MEM_HANDLE_TYPE_MAX: CUmemAllocationHandleType_enum = CUmemAllocationHandleType_enum(
+ 2147483647,
+ );
+}
+#[repr(transparent)]
+/// Flags for specifying particular handle types
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmemAllocationHandleType_enum(pub ::core::ffi::c_uint);
+/// Flags for specifying particular handle types
+pub use self::CUmemAllocationHandleType_enum as CUmemAllocationHandleType;
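// Illustrative sketch, not part of the generated bindings: the handle-type
// values are powers of two, so a set of requested exporters can be expressed
// as a bitmask, e.g. for the `handleTypes` field of CUmulticastObjectProp
// defined further below.
fn example_handle_type_mask() -> ::core::ffi::c_ulonglong {
    (CUmemAllocationHandleType::CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR.0
        | CUmemAllocationHandleType::CU_MEM_HANDLE_TYPE_FABRIC.0)
        as ::core::ffi::c_ulonglong
}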
+impl CUmemAccess_flags_enum {
+ ///< Default, make the address range not accessible
+ pub const CU_MEM_ACCESS_FLAGS_PROT_NONE: CUmemAccess_flags_enum = CUmemAccess_flags_enum(
+ 0,
+ );
+}
+impl CUmemAccess_flags_enum {
+ ///< Make the address range read accessible
+ pub const CU_MEM_ACCESS_FLAGS_PROT_READ: CUmemAccess_flags_enum = CUmemAccess_flags_enum(
+ 1,
+ );
+}
+impl CUmemAccess_flags_enum {
+ ///< Make the address range read-write accessible
+ pub const CU_MEM_ACCESS_FLAGS_PROT_READWRITE: CUmemAccess_flags_enum = CUmemAccess_flags_enum(
+ 3,
+ );
+}
+impl CUmemAccess_flags_enum {
+ pub const CU_MEM_ACCESS_FLAGS_PROT_MAX: CUmemAccess_flags_enum = CUmemAccess_flags_enum(
+ 2147483647,
+ );
+}
+#[repr(transparent)]
+/// Specifies the memory protection flags for mapping.
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmemAccess_flags_enum(pub ::core::ffi::c_uint);
+/// Specifies the memory protection flags for mapping.
+pub use self::CUmemAccess_flags_enum as CUmemAccess_flags;
+impl CUmemLocationType_enum {
+ pub const CU_MEM_LOCATION_TYPE_INVALID: CUmemLocationType_enum = CUmemLocationType_enum(
+ 0,
+ );
+}
+impl CUmemLocationType_enum {
+ ///< Location is a device location, thus id is a device ordinal
+ pub const CU_MEM_LOCATION_TYPE_DEVICE: CUmemLocationType_enum = CUmemLocationType_enum(
+ 1,
+ );
+}
+impl CUmemLocationType_enum {
+ ///< Location is host, id is ignored
+ pub const CU_MEM_LOCATION_TYPE_HOST: CUmemLocationType_enum = CUmemLocationType_enum(
+ 2,
+ );
+}
+impl CUmemLocationType_enum {
+ ///< Location is a host NUMA node, thus id is a host NUMA node id
+ pub const CU_MEM_LOCATION_TYPE_HOST_NUMA: CUmemLocationType_enum = CUmemLocationType_enum(
+ 3,
+ );
+}
+impl CUmemLocationType_enum {
+ ///< Location is a host NUMA node of the current thread, id is ignored
+ pub const CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT: CUmemLocationType_enum = CUmemLocationType_enum(
+ 4,
+ );
+}
+impl CUmemLocationType_enum {
+ pub const CU_MEM_LOCATION_TYPE_MAX: CUmemLocationType_enum = CUmemLocationType_enum(
+ 2147483647,
+ );
+}
+#[repr(transparent)]
+/// Specifies the type of location
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmemLocationType_enum(pub ::core::ffi::c_uint);
+/// Specifies the type of location
+pub use self::CUmemLocationType_enum as CUmemLocationType;
+impl CUmemAllocationType_enum {
+ pub const CU_MEM_ALLOCATION_TYPE_INVALID: CUmemAllocationType_enum = CUmemAllocationType_enum(
+ 0,
+ );
+}
+impl CUmemAllocationType_enum {
+ /** This allocation type is 'pinned', i.e. cannot migrate from its current
+ location while the application is actively using it*/
+ pub const CU_MEM_ALLOCATION_TYPE_PINNED: CUmemAllocationType_enum = CUmemAllocationType_enum(
+ 1,
+ );
+}
+impl CUmemAllocationType_enum {
+ pub const CU_MEM_ALLOCATION_TYPE_MAX: CUmemAllocationType_enum = CUmemAllocationType_enum(
+ 2147483647,
+ );
+}
+#[repr(transparent)]
+/// Defines the allocation types available
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmemAllocationType_enum(pub ::core::ffi::c_uint);
+/// Defines the allocation types available
+pub use self::CUmemAllocationType_enum as CUmemAllocationType;
+impl CUmemAllocationGranularity_flags_enum {
+ ///< Minimum required granularity for allocation
+ pub const CU_MEM_ALLOC_GRANULARITY_MINIMUM: CUmemAllocationGranularity_flags_enum = CUmemAllocationGranularity_flags_enum(
+ 0,
+ );
+}
+impl CUmemAllocationGranularity_flags_enum {
+ ///< Recommended granularity for allocation for best performance
+ pub const CU_MEM_ALLOC_GRANULARITY_RECOMMENDED: CUmemAllocationGranularity_flags_enum = CUmemAllocationGranularity_flags_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Flag for requesting different optimal and required granularities for an allocation.
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmemAllocationGranularity_flags_enum(pub ::core::ffi::c_uint);
+/// Flag for requesting different optimal and required granularities for an allocation.
+pub use self::CUmemAllocationGranularity_flags_enum as CUmemAllocationGranularity_flags;
+impl CUmemRangeHandleType_enum {
+ pub const CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD: CUmemRangeHandleType_enum = CUmemRangeHandleType_enum(
+ 1,
+ );
+}
+impl CUmemRangeHandleType_enum {
+ pub const CU_MEM_RANGE_HANDLE_TYPE_MAX: CUmemRangeHandleType_enum = CUmemRangeHandleType_enum(
+ 2147483647,
+ );
+}
+#[repr(transparent)]
+/// Specifies the handle type for address range
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmemRangeHandleType_enum(pub ::core::ffi::c_uint);
+/// Specifies the handle type for address range
+pub use self::CUmemRangeHandleType_enum as CUmemRangeHandleType;
+impl CUarraySparseSubresourceType_enum {
+ pub const CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL: CUarraySparseSubresourceType_enum = CUarraySparseSubresourceType_enum(
+ 0,
+ );
+}
+impl CUarraySparseSubresourceType_enum {
+ pub const CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL: CUarraySparseSubresourceType_enum = CUarraySparseSubresourceType_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Sparse subresource types
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUarraySparseSubresourceType_enum(pub ::core::ffi::c_uint);
+/// Sparse subresource types
+pub use self::CUarraySparseSubresourceType_enum as CUarraySparseSubresourceType;
+impl CUmemOperationType_enum {
+ pub const CU_MEM_OPERATION_TYPE_MAP: CUmemOperationType_enum = CUmemOperationType_enum(
+ 1,
+ );
+}
+impl CUmemOperationType_enum {
+ pub const CU_MEM_OPERATION_TYPE_UNMAP: CUmemOperationType_enum = CUmemOperationType_enum(
+ 2,
+ );
+}
+#[repr(transparent)]
+/// Memory operation types
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmemOperationType_enum(pub ::core::ffi::c_uint);
+/// Memory operation types
+pub use self::CUmemOperationType_enum as CUmemOperationType;
+impl CUmemHandleType_enum {
+ pub const CU_MEM_HANDLE_TYPE_GENERIC: CUmemHandleType_enum = CUmemHandleType_enum(0);
+}
+#[repr(transparent)]
+/// Memory handle types
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmemHandleType_enum(pub ::core::ffi::c_uint);
+/// Memory handle types
+pub use self::CUmemHandleType_enum as CUmemHandleType;
+/// Specifies the CUDA array or CUDA mipmapped array memory mapping information
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CUarrayMapInfo_st {
+ ///< Resource type
+ pub resourceType: CUresourcetype,
+ pub resource: CUarrayMapInfo_st__bindgen_ty_1,
+ ///< Sparse subresource type
+ pub subresourceType: CUarraySparseSubresourceType,
+ pub subresource: CUarrayMapInfo_st__bindgen_ty_2,
+ ///< Memory operation type
+ pub memOperationType: CUmemOperationType,
+ ///< Memory handle type
+ pub memHandleType: CUmemHandleType,
+ pub memHandle: CUarrayMapInfo_st__bindgen_ty_3,
+ ///< Offset within the memory
+ pub offset: ::core::ffi::c_ulonglong,
+ ///< Device ordinal bit mask
+ pub deviceBitMask: ::core::ffi::c_uint,
+ ///< flags for future use, must be zero now.
+ pub flags: ::core::ffi::c_uint,
+ ///< Reserved for future use, must be zero now.
+ pub reserved: [::core::ffi::c_uint; 2usize],
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union CUarrayMapInfo_st__bindgen_ty_1 {
+ pub mipmap: CUmipmappedArray,
+ pub array: CUarray,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union CUarrayMapInfo_st__bindgen_ty_2 {
+ pub sparseLevel: CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_1,
+ pub miptail: CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_2,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_1 {
+    ///< For CUDA mipmapped arrays, must be a valid mipmap level. For CUDA arrays, must be zero
+ pub level: ::core::ffi::c_uint,
+ ///< For CUDA layered arrays must be a valid layer index. Otherwise, must be zero
+ pub layer: ::core::ffi::c_uint,
+ ///< Starting X offset in elements
+ pub offsetX: ::core::ffi::c_uint,
+ ///< Starting Y offset in elements
+ pub offsetY: ::core::ffi::c_uint,
+ ///< Starting Z offset in elements
+ pub offsetZ: ::core::ffi::c_uint,
+ ///< Width in elements
+ pub extentWidth: ::core::ffi::c_uint,
+ ///< Height in elements
+ pub extentHeight: ::core::ffi::c_uint,
+ ///< Depth in elements
+ pub extentDepth: ::core::ffi::c_uint,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_2 {
+ ///< For CUDA layered arrays must be a valid layer index. Otherwise, must be zero
+ pub layer: ::core::ffi::c_uint,
+ ///< Offset within mip tail
+ pub offset: ::core::ffi::c_ulonglong,
+ ///< Extent in bytes
+ pub size: ::core::ffi::c_ulonglong,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union CUarrayMapInfo_st__bindgen_ty_3 {
+ pub memHandle: CUmemGenericAllocationHandle,
+}
+/// Specifies the CUDA array or CUDA mipmapped array memory mapping information
+pub type CUarrayMapInfo_v1 = CUarrayMapInfo_st;
+/// Specifies the CUDA array or CUDA mipmapped array memory mapping information
+pub type CUarrayMapInfo = CUarrayMapInfo_v1;
+/// Specifies a memory location.
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmemLocation_st {
+ ///< Specifies the location type, which modifies the meaning of id.
+ pub type_: CUmemLocationType,
+    ///< Identifier for the location; its meaning depends on ::CUmemLocationType.
+ pub id: ::core::ffi::c_int,
+}
+/// Specifies a memory location.
+pub type CUmemLocation_v1 = CUmemLocation_st;
+/// Specifies a memory location.
+pub type CUmemLocation = CUmemLocation_v1;
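// Illustrative sketch, not part of the generated bindings: a location naming
// a device ordinal; for CU_MEM_LOCATION_TYPE_HOST_NUMA the id would instead
// be a host NUMA node id, and for the HOST variants it is ignored.
fn example_device_location(device_ordinal: ::core::ffi::c_int) -> CUmemLocation {
    CUmemLocation_st {
        type_: CUmemLocationType::CU_MEM_LOCATION_TYPE_DEVICE,
        id: device_ordinal,
    }
}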
+impl CUmemAllocationCompType_enum {
+ ///< Allocating non-compressible memory
+ pub const CU_MEM_ALLOCATION_COMP_NONE: CUmemAllocationCompType_enum = CUmemAllocationCompType_enum(
+ 0,
+ );
+}
+impl CUmemAllocationCompType_enum {
+ ///< Allocating compressible memory
+ pub const CU_MEM_ALLOCATION_COMP_GENERIC: CUmemAllocationCompType_enum = CUmemAllocationCompType_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Specifies compression attribute for an allocation.
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmemAllocationCompType_enum(pub ::core::ffi::c_uint);
+/// Specifies compression attribute for an allocation.
+pub use self::CUmemAllocationCompType_enum as CUmemAllocationCompType;
+/// Specifies the allocation properties for an allocation.
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmemAllocationProp_st {
+ /// Allocation type
+ pub type_: CUmemAllocationType,
+ /// requested ::CUmemAllocationHandleType
+ pub requestedHandleTypes: CUmemAllocationHandleType,
+ /// Location of allocation
+ pub location: CUmemLocation,
+ /** Windows-specific POBJECT_ATTRIBUTES required when
+ ::CU_MEM_HANDLE_TYPE_WIN32 is specified. This object attributes structure
+ includes security attributes that define
+ the scope of which exported allocations may be transferred to other
+ processes. In all other cases, this field is required to be zero.*/
+ pub win32HandleMetaData: *mut ::core::ffi::c_void,
+ pub allocFlags: CUmemAllocationProp_st__bindgen_ty_1,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmemAllocationProp_st__bindgen_ty_1 {
+ /** Allocation hint for requesting compressible memory.
+ On devices that support Compute Data Compression, compressible
+ memory can be used to accelerate accesses to data with unstructured
+ sparsity and other compressible data patterns. Applications are
+ expected to query allocation property of the handle obtained with
+ ::cuMemCreate using ::cuMemGetAllocationPropertiesFromHandle to
+ validate if the obtained allocation is compressible or not. Note that
+ compressed memory may not be mappable on all devices.*/
+ pub compressionType: ::core::ffi::c_uchar,
+ pub gpuDirectRDMACapable: ::core::ffi::c_uchar,
+ /// Bitmask indicating intended usage for this allocation
+ pub usage: ::core::ffi::c_ushort,
+ pub reserved: [::core::ffi::c_uchar; 4usize],
+}
+/// Specifies the allocation properties for an allocation.
+pub type CUmemAllocationProp_v1 = CUmemAllocationProp_st;
+/// Specifies the allocation properties for an allocation.
+pub type CUmemAllocationProp = CUmemAllocationProp_v1;
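// Illustrative sketch, not part of the generated bindings: properties for a
// pinned device allocation exportable as a POSIX file descriptor, of the kind
// that would be handed to cuMemCreate in the virtual memory management API.
fn example_allocation_prop(device_ordinal: ::core::ffi::c_int) -> CUmemAllocationProp {
    let mut prop: CUmemAllocationProp = unsafe { ::core::mem::zeroed() };
    prop.type_ = CUmemAllocationType::CU_MEM_ALLOCATION_TYPE_PINNED;
    prop.requestedHandleTypes =
        CUmemAllocationHandleType::CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR;
    prop.location.type_ = CUmemLocationType::CU_MEM_LOCATION_TYPE_DEVICE;
    prop.location.id = device_ordinal;
    prop
}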
+impl CUmulticastGranularity_flags_enum {
+ ///< Minimum required granularity
+ pub const CU_MULTICAST_GRANULARITY_MINIMUM: CUmulticastGranularity_flags_enum = CUmulticastGranularity_flags_enum(
+ 0,
+ );
+}
+impl CUmulticastGranularity_flags_enum {
+ ///< Recommended granularity for best performance
+ pub const CU_MULTICAST_GRANULARITY_RECOMMENDED: CUmulticastGranularity_flags_enum = CUmulticastGranularity_flags_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Flags for querying different granularities for a multicast object
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmulticastGranularity_flags_enum(pub ::core::ffi::c_uint);
+/// Flags for querying different granularities for a multicast object
+pub use self::CUmulticastGranularity_flags_enum as CUmulticastGranularity_flags;
+/// Specifies the properties for a multicast object.
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmulticastObjectProp_st {
+ /** The number of devices in the multicast team that will bind memory to this
+ object*/
+ pub numDevices: ::core::ffi::c_uint,
+ /** The maximum amount of memory that can be bound to this multicast object
+ per device*/
+ pub size: usize,
+ /** Bitmask of exportable handle types (see ::CUmemAllocationHandleType) for
+ this object*/
+ pub handleTypes: ::core::ffi::c_ulonglong,
+ /// Flags for future use, must be zero now
+ pub flags: ::core::ffi::c_ulonglong,
+}
+/// Specifies the properties for a multicast object.
+pub type CUmulticastObjectProp_v1 = CUmulticastObjectProp_st;
+/// Specifies the properties for a multicast object.
+pub type CUmulticastObjectProp = CUmulticastObjectProp_v1;
+/// Memory access descriptor
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmemAccessDesc_st {
+    ///< Location on which the request is to change its accessibility
+ pub location: CUmemLocation,
+    ///< ::CUmemAccess_flags accessibility flags to set on the request
+ pub flags: CUmemAccess_flags,
+}
+/// Memory access descriptor
+pub type CUmemAccessDesc_v1 = CUmemAccessDesc_st;
+/// Memory access descriptor
+pub type CUmemAccessDesc = CUmemAccessDesc_v1;
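// Illustrative sketch, not part of the generated bindings: a descriptor that
// grants a device read/write access to a mapped range, as consumed by
// cuMemSetAccess.
fn example_access_desc(device_ordinal: ::core::ffi::c_int) -> CUmemAccessDesc {
    CUmemAccessDesc_st {
        location: CUmemLocation_st {
            type_: CUmemLocationType::CU_MEM_LOCATION_TYPE_DEVICE,
            id: device_ordinal,
        },
        flags: CUmemAccess_flags::CU_MEM_ACCESS_FLAGS_PROT_READWRITE,
    }
}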
+impl CUgraphExecUpdateResult_enum {
+ ///< The update succeeded
+ pub const CU_GRAPH_EXEC_UPDATE_SUCCESS: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum(
+ 0,
+ );
+}
+impl CUgraphExecUpdateResult_enum {
+ ///< The update failed for an unexpected reason which is described in the return value of the function
+ pub const CU_GRAPH_EXEC_UPDATE_ERROR: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum(
+ 1,
+ );
+}
+impl CUgraphExecUpdateResult_enum {
+ ///< The update failed because the topology changed
+ pub const CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum(
+ 2,
+ );
+}
+impl CUgraphExecUpdateResult_enum {
+ ///< The update failed because a node type changed
+ pub const CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum(
+ 3,
+ );
+}
+impl CUgraphExecUpdateResult_enum {
+ ///< The update failed because the function of a kernel node changed (CUDA driver < 11.2)
+ pub const CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum(
+ 4,
+ );
+}
+impl CUgraphExecUpdateResult_enum {
+ ///< The update failed because the parameters changed in a way that is not supported
+ pub const CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum(
+ 5,
+ );
+}
+impl CUgraphExecUpdateResult_enum {
+ ///< The update failed because something about the node is not supported
+ pub const CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum(
+ 6,
+ );
+}
+impl CUgraphExecUpdateResult_enum {
+ ///< The update failed because the function of a kernel node changed in an unsupported way
+ pub const CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum(
+ 7,
+ );
+}
+impl CUgraphExecUpdateResult_enum {
+ ///< The update failed because the node attributes changed in a way that is not supported
+ pub const CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum(
+ 8,
+ );
+}
+#[repr(transparent)]
+/// CUDA Graph Update error types
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUgraphExecUpdateResult_enum(pub ::core::ffi::c_uint);
+/// CUDA Graph Update error types
+pub use self::CUgraphExecUpdateResult_enum as CUgraphExecUpdateResult;
+/// Result information returned by cuGraphExecUpdate
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUgraphExecUpdateResultInfo_st {
+ /// Gives more specific detail when a cuda graph update fails.
+ pub result: CUgraphExecUpdateResult,
+ /** The "to node" of the error edge when the topologies do not match.
+ The error node when the error is associated with a specific node.
+ NULL when the error is generic.*/
+ pub errorNode: CUgraphNode,
+ /// The from node of error edge when the topologies do not match. Otherwise NULL.
+ pub errorFromNode: CUgraphNode,
+}
+/// Result information returned by cuGraphExecUpdate
+pub type CUgraphExecUpdateResultInfo_v1 = CUgraphExecUpdateResultInfo_st;
+/// Result information returned by cuGraphExecUpdate
+pub type CUgraphExecUpdateResultInfo = CUgraphExecUpdateResultInfo_v1;
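// Illustrative sketch, not part of the generated bindings: interpreting the
// result info after a graph executable update attempt.
fn describe_update_result(info: &CUgraphExecUpdateResultInfo) -> &'static str {
    if info.result == CUgraphExecUpdateResult::CU_GRAPH_EXEC_UPDATE_SUCCESS {
        "update succeeded"
    } else if info.result == CUgraphExecUpdateResult::CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED {
        "topology changed; errorFromNode and errorNode identify the offending edge"
    } else {
        "update failed; errorNode, when non-null, identifies the offending node"
    }
}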
+impl CUmemPool_attribute_enum {
+ /** (value type = int)
+ Allow cuMemAllocAsync to use memory asynchronously freed
+     in another stream as long as a stream ordering dependency
+     of the allocating stream on the free action exists.
+     CUDA events and null stream interactions can create the required
+     stream-ordered dependencies. (default enabled)*/
+ pub const CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES: CUmemPool_attribute_enum = CUmemPool_attribute_enum(
+ 1,
+ );
+}
+impl CUmemPool_attribute_enum {
+ /** (value type = int)
+ Allow reuse of already completed frees when there is no dependency
+ between the free and allocation. (default enabled)*/
+ pub const CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC: CUmemPool_attribute_enum = CUmemPool_attribute_enum(
+ 2,
+ );
+}
+impl CUmemPool_attribute_enum {
+ /** (value type = int)
+ Allow cuMemAllocAsync to insert new stream dependencies
+ in order to establish the stream ordering required to reuse
+ a piece of memory released by cuFreeAsync (default enabled).*/
+ pub const CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES: CUmemPool_attribute_enum = CUmemPool_attribute_enum(
+ 3,
+ );
+}
+impl CUmemPool_attribute_enum {
+ /** (value type = cuuint64_t)
+ Amount of reserved memory in bytes to hold onto before trying
+ to release memory back to the OS. When more than the release
+ threshold bytes of memory are held by the memory pool, the
+ allocator will try to release memory back to the OS on the
+ next call to stream, event or context synchronize. (default 0)*/
+ pub const CU_MEMPOOL_ATTR_RELEASE_THRESHOLD: CUmemPool_attribute_enum = CUmemPool_attribute_enum(
+ 4,
+ );
+}
+impl CUmemPool_attribute_enum {
+ /** (value type = cuuint64_t)
+ Amount of backing memory currently allocated for the mempool.*/
+ pub const CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT: CUmemPool_attribute_enum = CUmemPool_attribute_enum(
+ 5,
+ );
+}
+impl CUmemPool_attribute_enum {
+ /** (value type = cuuint64_t)
+ High watermark of backing memory allocated for the mempool since the
+ last time it was reset. High watermark can only be reset to zero.*/
+ pub const CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH: CUmemPool_attribute_enum = CUmemPool_attribute_enum(
+ 6,
+ );
+}
+impl CUmemPool_attribute_enum {
+ /** (value type = cuuint64_t)
+ Amount of memory from the pool that is currently in use by the application.*/
+ pub const CU_MEMPOOL_ATTR_USED_MEM_CURRENT: CUmemPool_attribute_enum = CUmemPool_attribute_enum(
+ 7,
+ );
+}
+impl CUmemPool_attribute_enum {
+ /** (value type = cuuint64_t)
+ High watermark of the amount of memory from the pool that was in use by the application since
+ the last time it was reset. High watermark can only be reset to zero.*/
+ pub const CU_MEMPOOL_ATTR_USED_MEM_HIGH: CUmemPool_attribute_enum = CUmemPool_attribute_enum(
+ 8,
+ );
+}
+#[repr(transparent)]
+/// CUDA memory pool attributes
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmemPool_attribute_enum(pub ::core::ffi::c_uint);
+/// CUDA memory pool attributes
+pub use self::CUmemPool_attribute_enum as CUmemPool_attribute;
+/// Specifies the properties of allocations made from the pool.
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmemPoolProps_st {
+ ///< Allocation type. Currently must be specified as CU_MEM_ALLOCATION_TYPE_PINNED
+ pub allocType: CUmemAllocationType,
+ ///< Handle types that will be supported by allocations from the pool.
+ pub handleTypes: CUmemAllocationHandleType,
+ ///< Location where allocations should reside.
+ pub location: CUmemLocation,
+ /** Windows-specific LPSECURITYATTRIBUTES required when
+ ::CU_MEM_HANDLE_TYPE_WIN32 is specified. This security attribute defines
+ the scope of which exported allocations may be transferred to other
+ processes. In all other cases, this field is required to be zero.*/
+ pub win32SecurityAttributes: *mut ::core::ffi::c_void,
+ ///< Maximum pool size. When set to 0, defaults to a system dependent value.
+ pub maxSize: usize,
+ ///< reserved for future use, must be 0
+ pub reserved: [::core::ffi::c_uchar; 56usize],
+}
+/// Specifies the properties of allocations made from the pool.
+pub type CUmemPoolProps_v1 = CUmemPoolProps_st;
+/// Specifies the properties of allocations made from the pool.
+pub type CUmemPoolProps = CUmemPoolProps_v1;
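CUmemPoolProps is a typical C property struct: a few meaningful fields followed by a reserved tail that must stay zero. A hedged sketch (not part of this patch) of the usual way such a struct is filled in, assuming only the types already referenced above (CUmemAllocationType, CUmemLocation):

// Illustrative only: zero-initialize so the 56 reserved bytes (and
// win32SecurityAttributes) are zero, then set the fields of interest.
fn pool_props(alloc_type: CUmemAllocationType, location: CUmemLocation) -> CUmemPoolProps {
    let mut props: CUmemPoolProps = unsafe { std::mem::zeroed() };
    props.allocType = alloc_type; // per the doc comment above, currently CU_MEM_ALLOCATION_TYPE_PINNED
    props.location = location;
    props.maxSize = 0; // 0 = system-dependent default
    props
}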
+/// Opaque data for exporting a pool allocation
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmemPoolPtrExportData_st {
+ pub reserved: [::core::ffi::c_uchar; 64usize],
+}
+/// Opaque data for exporting a pool allocation
+pub type CUmemPoolPtrExportData_v1 = CUmemPoolPtrExportData_st;
+/// Opaque data for exporting a pool allocation
+pub type CUmemPoolPtrExportData = CUmemPoolPtrExportData_v1;
+/// Memory allocation node parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_MEM_ALLOC_NODE_PARAMS_v1_st {
+ /** in: location where the allocation should reside (specified in ::location).
+ ::handleTypes must be ::CU_MEM_HANDLE_TYPE_NONE. IPC is not supported.*/
+ pub poolProps: CUmemPoolProps,
+ ///< in: array of memory access descriptors. Used to describe peer GPU access
+ pub accessDescs: *const CUmemAccessDesc,
+ ///< in: number of memory access descriptors. Must not exceed the number of GPUs.
+ pub accessDescCount: usize,
+ ///< in: size in bytes of the requested allocation
+ pub bytesize: usize,
+ ///< out: address of the allocation returned by CUDA
+ pub dptr: CUdeviceptr,
+}
+/// Memory allocation node parameters
+pub type CUDA_MEM_ALLOC_NODE_PARAMS_v1 = CUDA_MEM_ALLOC_NODE_PARAMS_v1_st;
+/// Memory allocation node parameters
+pub type CUDA_MEM_ALLOC_NODE_PARAMS = CUDA_MEM_ALLOC_NODE_PARAMS_v1;
+/// Memory allocation node parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_MEM_ALLOC_NODE_PARAMS_v2_st {
+ /** in: location where the allocation should reside (specified in ::location).
+ ::handleTypes must be ::CU_MEM_HANDLE_TYPE_NONE. IPC is not supported.*/
+ pub poolProps: CUmemPoolProps,
+ ///< in: array of memory access descriptors. Used to describe peer GPU access
+ pub accessDescs: *const CUmemAccessDesc,
+ ///< in: number of memory access descriptors. Must not exceed the number of GPUs.
+ pub accessDescCount: usize,
+ ///< in: size in bytes of the requested allocation
+ pub bytesize: usize,
+ ///< out: address of the allocation returned by CUDA
+ pub dptr: CUdeviceptr,
+}
+/// Memory allocation node parameters
+pub type CUDA_MEM_ALLOC_NODE_PARAMS_v2 = CUDA_MEM_ALLOC_NODE_PARAMS_v2_st;
+/// Memory free node parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_MEM_FREE_NODE_PARAMS_st {
+ ///< in: the pointer to free
+ pub dptr: CUdeviceptr,
+}
+/// Memory free node parameters
+pub type CUDA_MEM_FREE_NODE_PARAMS = CUDA_MEM_FREE_NODE_PARAMS_st;
+impl CUgraphMem_attribute_enum {
+ /** (value type = cuuint64_t)
+ Amount of memory, in bytes, currently associated with graphs*/
+ pub const CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT: CUgraphMem_attribute_enum = CUgraphMem_attribute_enum(
+ 0,
+ );
+}
+impl CUgraphMem_attribute_enum {
+ /** (value type = cuuint64_t)
+ High watermark of memory, in bytes, associated with graphs since the
+ last time it was reset. High watermark can only be reset to zero.*/
+ pub const CU_GRAPH_MEM_ATTR_USED_MEM_HIGH: CUgraphMem_attribute_enum = CUgraphMem_attribute_enum(
+ 1,
+ );
+}
+impl CUgraphMem_attribute_enum {
+ /** (value type = cuuint64_t)
+ Amount of memory, in bytes, currently allocated for use by
+ the CUDA graphs asynchronous allocator.*/
+ pub const CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT: CUgraphMem_attribute_enum = CUgraphMem_attribute_enum(
+ 2,
+ );
+}
+impl CUgraphMem_attribute_enum {
+ /** (value type = cuuint64_t)
+ High watermark of memory, in bytes, currently allocated for use by
+ the CUDA graphs asynchronous allocator.*/
+ pub const CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH: CUgraphMem_attribute_enum = CUgraphMem_attribute_enum(
+ 3,
+ );
+}
+#[repr(transparent)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUgraphMem_attribute_enum(pub ::core::ffi::c_uint);
+pub use self::CUgraphMem_attribute_enum as CUgraphMem_attribute;
+/// Child graph node parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_CHILD_GRAPH_NODE_PARAMS_st {
+ /**< The child graph to clone into the node for node creation, or
+a handle to the graph owned by the node for node query*/
+ pub graph: CUgraph,
+}
+/// Child graph node parameters
+pub type CUDA_CHILD_GRAPH_NODE_PARAMS = CUDA_CHILD_GRAPH_NODE_PARAMS_st;
+/// Event record node parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_EVENT_RECORD_NODE_PARAMS_st {
+ ///< The event to record when the node executes
+ pub event: CUevent,
+}
+/// Event record node parameters
+pub type CUDA_EVENT_RECORD_NODE_PARAMS = CUDA_EVENT_RECORD_NODE_PARAMS_st;
+/// Event wait node parameters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_EVENT_WAIT_NODE_PARAMS_st {
+ ///< The event to wait on from the node
+ pub event: CUevent,
+}
+/// Event wait node parameters
+pub type CUDA_EVENT_WAIT_NODE_PARAMS = CUDA_EVENT_WAIT_NODE_PARAMS_st;
+/// Graph node parameters. See ::cuGraphAddNode.
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CUgraphNodeParams_st {
+ ///< Type of the node
+ pub type_: CUgraphNodeType,
+ ///< Reserved. Must be zero.
+ pub reserved0: [::core::ffi::c_int; 3usize],
+ pub __bindgen_anon_1: CUgraphNodeParams_st__bindgen_ty_1,
+ ///< Reserved bytes. Must be zero.
+ pub reserved2: ::core::ffi::c_longlong,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union CUgraphNodeParams_st__bindgen_ty_1 {
+ ///< Padding. Unused bytes must be zero.
+ pub reserved1: [::core::ffi::c_longlong; 29usize],
+ ///< Kernel node parameters.
+ pub kernel: CUDA_KERNEL_NODE_PARAMS_v3,
+ ///< Memcpy node parameters.
+ pub memcpy: CUDA_MEMCPY_NODE_PARAMS,
+ ///< Memset node parameters.
+ pub memset: CUDA_MEMSET_NODE_PARAMS_v2,
+ ///< Host node parameters.
+ pub host: CUDA_HOST_NODE_PARAMS_v2,
+ ///< Child graph node parameters.
+ pub graph: CUDA_CHILD_GRAPH_NODE_PARAMS,
+ ///< Event wait node parameters.
+ pub eventWait: CUDA_EVENT_WAIT_NODE_PARAMS,
+ ///< Event record node parameters.
+ pub eventRecord: CUDA_EVENT_RECORD_NODE_PARAMS,
+ ///< External semaphore signal node parameters.
+ pub extSemSignal: CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2,
+ ///< External semaphore wait node parameters.
+ pub extSemWait: CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2,
+ ///< Memory allocation node parameters.
+ pub alloc: CUDA_MEM_ALLOC_NODE_PARAMS_v2,
+ ///< Memory free node parameters.
+ pub free: CUDA_MEM_FREE_NODE_PARAMS,
+ ///< MemOp node parameters.
+ pub memOp: CUDA_BATCH_MEM_OP_NODE_PARAMS_v2,
+ ///< Conditional node parameters.
+ pub conditional: CUDA_CONDITIONAL_NODE_PARAMS,
+}
+/// Graph node parameters. See ::cuGraphAddNode.
+pub type CUgraphNodeParams = CUgraphNodeParams_st;
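CUgraphNodeParams_st is the C-style tagged union used by ::cuGraphAddNode: type_ selects which arm of __bindgen_anon_1 is meaningful, and the reserved fields must be zero. A hedged sketch (not part of this patch) of building one; the node-type constant is taken as a parameter because that enum is defined elsewhere in this file:

// Illustrative only: zero-initialize (reserved0/reserved1/reserved2 must be
// zero), pick the node type, then write the matching union arm.
fn event_record_node(node_type: CUgraphNodeType, event: CUevent) -> CUgraphNodeParams {
    let mut params: CUgraphNodeParams = unsafe { std::mem::zeroed() };
    params.type_ = node_type; // e.g. the event-record node type constant
    params.__bindgen_anon_1.eventRecord = CUDA_EVENT_RECORD_NODE_PARAMS_st { event };
    params
}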
+impl CUflushGPUDirectRDMAWritesOptions_enum {
+ ///< ::cuFlushGPUDirectRDMAWrites() and its CUDA Runtime API counterpart are supported on the device.
+ pub const CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST: CUflushGPUDirectRDMAWritesOptions_enum = CUflushGPUDirectRDMAWritesOptions_enum(
+ 1,
+ );
+}
+impl CUflushGPUDirectRDMAWritesOptions_enum {
+ ///< The ::CU_STREAM_WAIT_VALUE_FLUSH flag and the ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the device.
+ pub const CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS: CUflushGPUDirectRDMAWritesOptions_enum = CUflushGPUDirectRDMAWritesOptions_enum(
+ 2,
+ );
+}
+#[repr(transparent)]
+/// Bitmasks for ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUflushGPUDirectRDMAWritesOptions_enum(pub ::core::ffi::c_uint);
+/// Bitmasks for ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS
+pub use self::CUflushGPUDirectRDMAWritesOptions_enum as CUflushGPUDirectRDMAWritesOptions;
+impl CUGPUDirectRDMAWritesOrdering_enum {
+ ///< The device does not natively support ordering of remote writes. ::cuFlushGPUDirectRDMAWrites() can be leveraged if supported.
+ pub const CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE: CUGPUDirectRDMAWritesOrdering_enum = CUGPUDirectRDMAWritesOrdering_enum(
+ 0,
+ );
+}
+impl CUGPUDirectRDMAWritesOrdering_enum {
+ ///< Natively, the device can consistently consume remote writes, although other CUDA devices may not.
+ pub const CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER: CUGPUDirectRDMAWritesOrdering_enum = CUGPUDirectRDMAWritesOrdering_enum(
+ 100,
+ );
+}
+impl CUGPUDirectRDMAWritesOrdering_enum {
+ ///< Any CUDA device in the system can consistently consume remote writes to this device.
+ pub const CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES: CUGPUDirectRDMAWritesOrdering_enum = CUGPUDirectRDMAWritesOrdering_enum(
+ 200,
+ );
+}
+#[repr(transparent)]
+/// Platform native ordering for GPUDirect RDMA writes
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUGPUDirectRDMAWritesOrdering_enum(pub ::core::ffi::c_uint);
+/// Platform native ordering for GPUDirect RDMA writes
+pub use self::CUGPUDirectRDMAWritesOrdering_enum as CUGPUDirectRDMAWritesOrdering;
+impl CUflushGPUDirectRDMAWritesScope_enum {
+ ///< Blocks until remote writes are visible to the CUDA device context owning the data.
+ pub const CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER: CUflushGPUDirectRDMAWritesScope_enum = CUflushGPUDirectRDMAWritesScope_enum(
+ 100,
+ );
+}
+impl CUflushGPUDirectRDMAWritesScope_enum {
+ ///< Blocks until remote writes are visible to all CUDA device contexts.
+ pub const CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES: CUflushGPUDirectRDMAWritesScope_enum = CUflushGPUDirectRDMAWritesScope_enum(
+ 200,
+ );
+}
+#[repr(transparent)]
+/// The scopes for ::cuFlushGPUDirectRDMAWrites
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUflushGPUDirectRDMAWritesScope_enum(pub ::core::ffi::c_uint);
+/// The scopes for ::cuFlushGPUDirectRDMAWrites
+pub use self::CUflushGPUDirectRDMAWritesScope_enum as CUflushGPUDirectRDMAWritesScope;
+impl CUflushGPUDirectRDMAWritesTarget_enum {
+ ///< Sets the target for ::cuFlushGPUDirectRDMAWrites() to the currently active CUDA device context.
+ pub const CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX: CUflushGPUDirectRDMAWritesTarget_enum = CUflushGPUDirectRDMAWritesTarget_enum(
+ 0,
+ );
+}
+#[repr(transparent)]
+/// The targets for ::cuFlushGPUDirectRDMAWrites
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUflushGPUDirectRDMAWritesTarget_enum(pub ::core::ffi::c_uint);
+/// The targets for ::cuFlushGPUDirectRDMAWrites
+pub use self::CUflushGPUDirectRDMAWritesTarget_enum as CUflushGPUDirectRDMAWritesTarget;
+impl CUgraphDebugDot_flags_enum {
+ ///< Output all debug data as if every debug flag is enabled
+ pub const CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
+ 1,
+ );
+}
+impl CUgraphDebugDot_flags_enum {
+ ///< Use CUDA Runtime structures for output
+ pub const CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
+ 2,
+ );
+}
+impl CUgraphDebugDot_flags_enum {
+ ///< Adds CUDA_KERNEL_NODE_PARAMS values to output
+ pub const CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
+ 4,
+ );
+}
+impl CUgraphDebugDot_flags_enum {
+ ///< Adds CUDA_MEMCPY3D values to output
+ pub const CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
+ 8,
+ );
+}
+impl CUgraphDebugDot_flags_enum {
+ ///< Adds CUDA_MEMSET_NODE_PARAMS values to output
+ pub const CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
+ 16,
+ );
+}
+impl CUgraphDebugDot_flags_enum {
+ ///< Adds CUDA_HOST_NODE_PARAMS values to output
+ pub const CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
+ 32,
+ );
+}
+impl CUgraphDebugDot_flags_enum {
+ ///< Adds CUevent handle from record and wait nodes to output
+ pub const CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
+ 64,
+ );
+}
+impl CUgraphDebugDot_flags_enum {
+ ///< Adds CUDA_EXT_SEM_SIGNAL_NODE_PARAMS values to output
+ pub const CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
+ 128,
+ );
+}
+impl CUgraphDebugDot_flags_enum {
+ ///< Adds CUDA_EXT_SEM_WAIT_NODE_PARAMS values to output
+ pub const CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
+ 256,
+ );
+}
+impl CUgraphDebugDot_flags_enum {
+ ///< Adds CUkernelNodeAttrValue values to output
+ pub const CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
+ 512,
+ );
+}
+impl CUgraphDebugDot_flags_enum {
+ ///< Adds node handles and every kernel function handle to output
+ pub const CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
+ 1024,
+ );
+}
+impl CUgraphDebugDot_flags_enum {
+ ///< Adds memory alloc node parameters to output
+ pub const CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
+ 2048,
+ );
+}
+impl CUgraphDebugDot_flags_enum {
+ ///< Adds memory free node parameters to output
+ pub const CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
+ 4096,
+ );
+}
+impl CUgraphDebugDot_flags_enum {
+ ///< Adds batch mem op node parameters to output
+ pub const CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
+ 8192,
+ );
+}
+impl CUgraphDebugDot_flags_enum {
+ ///< Adds edge numbering information
+ pub const CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
+ 16384,
+ );
+}
+impl CUgraphDebugDot_flags_enum {
+ ///< Adds conditional node parameters to output
+ pub const CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
+ 32768,
+ );
+}
+#[repr(transparent)]
+/// The additional write options for ::cuGraphDebugDotPrint
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUgraphDebugDot_flags_enum(pub ::core::ffi::c_uint);
+/// The additional write options for ::cuGraphDebugDotPrint
+pub use self::CUgraphDebugDot_flags_enum as CUgraphDebugDot_flags;
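The debug-dot flags above are plain bit values wrapped in the same transparent newtype, and the generated code shown here derives no bit operators for it, so combining flags means OR-ing the inner c_uint values. A small illustrative sketch (not part of this patch):

// Illustrative only: combine two flag constants into one value of the kind
// described above as the write options for cuGraphDebugDotPrint.
fn kernel_debug_dot_flags() -> CUgraphDebugDot_flags {
    CUgraphDebugDot_flags(
        CUgraphDebugDot_flags::CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS.0
            | CUgraphDebugDot_flags::CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES.0,
    )
}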
+impl CUuserObject_flags_enum {
+ ///< Indicates the destructor execution is not synchronized by any CUDA handle.
+ pub const CU_USER_OBJECT_NO_DESTRUCTOR_SYNC: CUuserObject_flags_enum = CUuserObject_flags_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Flags for user objects for graphs
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUuserObject_flags_enum(pub ::core::ffi::c_uint);
+/// Flags for user objects for graphs
+pub use self::CUuserObject_flags_enum as CUuserObject_flags;
+impl CUuserObjectRetain_flags_enum {
+ ///< Transfer references from the caller rather than creating new references.
+ pub const CU_GRAPH_USER_OBJECT_MOVE: CUuserObjectRetain_flags_enum = CUuserObjectRetain_flags_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Flags for retaining user object references for graphs
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUuserObjectRetain_flags_enum(pub ::core::ffi::c_uint);
+/// Flags for retaining user object references for graphs
+pub use self::CUuserObjectRetain_flags_enum as CUuserObjectRetain_flags;
+impl CUgraphInstantiate_flags_enum {
+ ///< Automatically free memory allocated in a graph before relaunching.
+ pub const CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH: CUgraphInstantiate_flags_enum = CUgraphInstantiate_flags_enum(
+ 1,
+ );
+}
+impl CUgraphInstantiate_flags_enum {
+ /**< Automatically upload the graph after instantiation. Only supported by
+::cuGraphInstantiateWithParams. The upload will be performed using the
+stream provided in \p instantiateParams.*/
+ pub const CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD: CUgraphInstantiate_flags_enum = CUgraphInstantiate_flags_enum(
+ 2,
+ );
+}
+impl CUgraphInstantiate_flags_enum {
+ /**< Instantiate the graph to be launchable from the device. This flag can only
+be used on platforms which support unified addressing. This flag cannot be
+used in conjunction with CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH.*/
+ pub const CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH: CUgraphInstantiate_flags_enum = CUgraphInstantiate_flags_enum(
+ 4,
+ );
+}
+impl CUgraphInstantiate_flags_enum {
+ /**< Run the graph using the per-node priority attributes rather than the
+priority of the stream it is launched into.*/
+ pub const CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY: CUgraphInstantiate_flags_enum = CUgraphInstantiate_flags_enum(
+ 8,
+ );
+}
+#[repr(transparent)]
+/// Flags for instantiating a graph
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUgraphInstantiate_flags_enum(pub ::core::ffi::c_uint);
+/// Flags for instantiating a graph
+pub use self::CUgraphInstantiate_flags_enum as CUgraphInstantiate_flags;
+impl CUdeviceNumaConfig_enum {
+ ///< The GPU is not a NUMA node
+ pub const CU_DEVICE_NUMA_CONFIG_NONE: CUdeviceNumaConfig_enum = CUdeviceNumaConfig_enum(
+ 0,
+ );
+}
+impl CUdeviceNumaConfig_enum {
+ ///< The GPU is a NUMA node, CU_DEVICE_ATTRIBUTE_NUMA_ID contains its NUMA ID
+ pub const CU_DEVICE_NUMA_CONFIG_NUMA_NODE: CUdeviceNumaConfig_enum = CUdeviceNumaConfig_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUdeviceNumaConfig_enum(pub ::core::ffi::c_uint);
+pub use self::CUdeviceNumaConfig_enum as CUdeviceNumaConfig;
+impl CUmoduleLoadingMode_enum {
+ ///< Lazy Kernel Loading is not enabled
+ pub const CU_MODULE_EAGER_LOADING: CUmoduleLoadingMode_enum = CUmoduleLoadingMode_enum(
+ 1,
+ );
+}
+impl CUmoduleLoadingMode_enum {
+ ///< Lazy Kernel Loading is enabled
+ pub const CU_MODULE_LAZY_LOADING: CUmoduleLoadingMode_enum = CUmoduleLoadingMode_enum(
+ 2,
+ );
+}
+#[repr(transparent)]
+/// CUDA Lazy Loading status
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUmoduleLoadingMode_enum(pub ::core::ffi::c_uint);
+/// CUDA Lazy Loading status
+pub use self::CUmoduleLoadingMode_enum as CUmoduleLoadingMode;
+impl CUfunctionLoadingState_enum {
+ pub const CU_FUNCTION_LOADING_STATE_UNLOADED: CUfunctionLoadingState_enum = CUfunctionLoadingState_enum(
+ 0,
+ );
+}
+impl CUfunctionLoadingState_enum {
+ pub const CU_FUNCTION_LOADING_STATE_LOADED: CUfunctionLoadingState_enum = CUfunctionLoadingState_enum(
+ 1,
+ );
+}
+impl CUfunctionLoadingState_enum {
+ pub const CU_FUNCTION_LOADING_STATE_MAX: CUfunctionLoadingState_enum = CUfunctionLoadingState_enum(
+ 2,
+ );
+}
+#[repr(transparent)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUfunctionLoadingState_enum(pub ::core::ffi::c_uint);
+pub use self::CUfunctionLoadingState_enum as CUfunctionLoadingState;
+impl CUcoredumpSettings_enum {
+ pub const CU_COREDUMP_ENABLE_ON_EXCEPTION: CUcoredumpSettings_enum = CUcoredumpSettings_enum(
+ 1,
+ );
+}
+impl CUcoredumpSettings_enum {
+ pub const CU_COREDUMP_TRIGGER_HOST: CUcoredumpSettings_enum = CUcoredumpSettings_enum(
+ 2,
+ );
+}
+impl CUcoredumpSettings_enum {
+ pub const CU_COREDUMP_LIGHTWEIGHT: CUcoredumpSettings_enum = CUcoredumpSettings_enum(
+ 3,
+ );
+}
+impl CUcoredumpSettings_enum {
+ pub const CU_COREDUMP_ENABLE_USER_TRIGGER: CUcoredumpSettings_enum = CUcoredumpSettings_enum(
+ 4,
+ );
+}
+impl CUcoredumpSettings_enum {
+ pub const CU_COREDUMP_FILE: CUcoredumpSettings_enum = CUcoredumpSettings_enum(5);
+}
+impl CUcoredumpSettings_enum {
+ pub const CU_COREDUMP_PIPE: CUcoredumpSettings_enum = CUcoredumpSettings_enum(6);
+}
+impl CUcoredumpSettings_enum {
+ pub const CU_COREDUMP_MAX: CUcoredumpSettings_enum = CUcoredumpSettings_enum(7);
+}
+#[repr(transparent)]
+/// Flags for choosing a coredump attribute to get/set
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUcoredumpSettings_enum(pub ::core::ffi::c_uint);
+/// Flags for choosing a coredump attribute to get/set
+pub use self::CUcoredumpSettings_enum as CUcoredumpSettings;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUgreenCtx_st {
+ _unused: [u8; 0],
+}
+/** \typedef struct CUgreenCtx_st* CUgreenCtx
+ A green context handle. This handle can be used safely from only one CPU thread at a time.
+ Created via ::cuGreenCtxCreate*/
+pub type CUgreenCtx = *mut CUgreenCtx_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUdevResourceDesc_st {
+ _unused: [u8; 0],
+}
+/** \typedef struct CUdevResourceDesc_st* CUdevResourceDesc;
+ An opaque descriptor handle. The descriptor encapsulates multiple created and configured resources.
+ Created via ::cuDevResourceGenerateDesc*/
+pub type CUdevResourceDesc = *mut CUdevResourceDesc_st;
+impl CUgreenCtxCreate_flags {
+ ///< Required. Creates a default stream to use inside the green context
+ pub const CU_GREEN_CTX_DEFAULT_STREAM: CUgreenCtxCreate_flags = CUgreenCtxCreate_flags(
+ 1,
+ );
+}
+#[repr(transparent)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUgreenCtxCreate_flags(pub ::core::ffi::c_uint);
+impl CUdevResourceType {
+ pub const CU_DEV_RESOURCE_TYPE_INVALID: CUdevResourceType = CUdevResourceType(0);
+}
+impl CUdevResourceType {
+ ///< Streaming multiprocessors related information
+ pub const CU_DEV_RESOURCE_TYPE_SM: CUdevResourceType = CUdevResourceType(1);
+}
+impl CUdevResourceType {
+ pub const CU_DEV_RESOURCE_TYPE_MAX: CUdevResourceType = CUdevResourceType(2);
+}
+#[repr(transparent)]
+/** \typedef enum CUdevResourceType
+ Type of resource*/
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUdevResourceType(pub ::core::ffi::c_uint);
+/** \struct CUdevSmResource
+ Data for SM-related resources*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUdevSmResource_st {
+ ///< The number of streaming multiprocessors available in this resource. This is an output parameter only; do not write to this field.
+ pub smCount: ::core::ffi::c_uint,
+}
+/** \struct CUdevSmResource
+ Data for SM-related resources*/
+pub type CUdevSmResource = CUdevSmResource_st;
+/** \struct CUdevResource
+ A tagged union describing different resources identified by the type field. This structure should not be directly modified outside of the API that created it.
+ \code
+ struct {
+ CUdevResourceType type;
+ union {
+ CUdevSmResource sm;
+ };
+ };
+ \endcode
+ - If \p type is \p CU_DEV_RESOURCE_TYPE_INVALID, this resource is not valid and cannot be further accessed.
+ - If \p type is \p CU_DEV_RESOURCE_TYPE_SM, the ::CUdevSmResource structure \p sm is filled in. For example,
+ \p sm.smCount will reflect the number of streaming multiprocessors available in this resource.*/
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CUdevResource_st {
+ ///< Type of resource, dictates which union field was last set
+ pub type_: CUdevResourceType,
+ pub _internal_padding: [::core::ffi::c_uchar; 92usize],
+ pub __bindgen_anon_1: CUdevResource_st__bindgen_ty_1,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union CUdevResource_st__bindgen_ty_1 {
+ ///< Resource corresponding to CU_DEV_RESOURCE_TYPE_SM \p type.
+ pub sm: CUdevSmResource,
+ pub _oversize: [::core::ffi::c_uchar; 48usize],
+}
+/** \struct CUdevResource
+ A tagged union describing different resources identified by the type field. This structure should not be directly modified outside of the API that created it.
+ \code
+ struct {
+ CUdevResourceType type;
+ union {
+ CUdevSmResource sm;
+ };
+ };
+ \endcode
+ - If \p type is \p CU_DEV_RESOURCE_TYPE_INVALID, this resource is not valid and cannot be further accessed.
+ - If \p type is \p CU_DEV_RESOURCE_TYPE_SM, the ::CUdevSmResource structure \p sm is filled in. For example,
+ \p sm.smCount will reflect the number of streaming multiprocessors available in this resource.*/
+pub type CUdevResource_v1 = CUdevResource_st;
+/** \struct CUdevResource
+ A tagged union describing different resources identified by the type field. This structure should not be directly modified outside of the API that created it.
+ \code
+ struct {
+ CUdevResourceType type;
+ union {
+ CUdevSmResource sm;
+ };
+ };
+ \endcode
+ - If \p type is \p CU_DEV_RESOURCE_TYPE_INVALID, this resource is not valid and cannot be further accessed.
+ - If \p type is \p CU_DEV_RESOURCE_TYPE_SM, the ::CUdevSmResource structure \p sm is filled in. For example,
+ \p sm.smCount will reflect the number of streaming multiprocessors available in this resource.*/
+pub type CUdevResource = CUdevResource_v1;
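CUdevResource pairs the type_ discriminant with a union whose arms are only valid when the matching type is set; reading a union arm is unsafe in Rust, so callers check type_ first. A hedged sketch (not part of this patch), using only the types and constants defined above:

// Illustrative only: read the SM arm of the tagged union only after checking
// the discriminant; reading a union field requires unsafe.
fn sm_count(res: &CUdevResource) -> Option<::core::ffi::c_uint> {
    if res.type_ == CUdevResourceType::CU_DEV_RESOURCE_TYPE_SM {
        // Safety: type_ says the `sm` arm is the one that was filled in.
        Some(unsafe { res.__bindgen_anon_1.sm }.smCount)
    } else {
        None
    }
}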
+#[repr(transparent)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUdeviceptr_v1(pub ::core::ffi::c_uint);
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_MEMCPY2D_v1_st {
+ ///< Source X in bytes
+ pub srcXInBytes: ::core::ffi::c_uint,
+ ///< Source Y
+ pub srcY: ::core::ffi::c_uint,
+ ///< Source memory type (host, device, array)
+ pub srcMemoryType: CUmemorytype,
+ ///< Source host pointer
+ pub srcHost: *const ::core::ffi::c_void,
+ ///< Source device pointer
+ pub srcDevice: CUdeviceptr_v1,
+ ///< Source array reference
+ pub srcArray: CUarray,
+ ///< Source pitch (ignored when src is array)
+ pub srcPitch: ::core::ffi::c_uint,
+ ///< Destination X in bytes
+ pub dstXInBytes: ::core::ffi::c_uint,
+ ///< Destination Y
+ pub dstY: ::core::ffi::c_uint,
+ ///< Destination memory type (host, device, array)
+ pub dstMemoryType: CUmemorytype,
+ ///< Destination host pointer
+ pub dstHost: *mut ::core::ffi::c_void,
+ ///< Destination device pointer
+ pub dstDevice: CUdeviceptr_v1,
+ ///< Destination array reference
+ pub dstArray: CUarray,
+ ///< Destination pitch (ignored when dst is array)
+ pub dstPitch: ::core::ffi::c_uint,
+ ///< Width of 2D memory copy in bytes
+ pub WidthInBytes: ::core::ffi::c_uint,
+ ///< Height of 2D memory copy
+ pub Height: ::core::ffi::c_uint,
+}
+pub type CUDA_MEMCPY2D_v1 = CUDA_MEMCPY2D_v1_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_MEMCPY3D_v1_st {
+ ///< Source X in bytes
+ pub srcXInBytes: ::core::ffi::c_uint,
+ ///< Source Y
+ pub srcY: ::core::ffi::c_uint,
+ ///< Source Z
+ pub srcZ: ::core::ffi::c_uint,
+ ///< Source LOD
+ pub srcLOD: ::core::ffi::c_uint,
+ ///< Source memory type (host, device, array)
+ pub srcMemoryType: CUmemorytype,
+ ///< Source host pointer
+ pub srcHost: *const ::core::ffi::c_void,
+ ///< Source device pointer
+ pub srcDevice: CUdeviceptr_v1,
+ ///< Source array reference
+ pub srcArray: CUarray,
+ ///< Must be NULL
+ pub reserved0: *mut ::core::ffi::c_void,
+ ///< Source pitch (ignored when src is array)
+ pub srcPitch: ::core::ffi::c_uint,
+ ///< Source height (ignored when src is array; may be 0 if Depth==1)
+ pub srcHeight: ::core::ffi::c_uint,
+ ///< Destination X in bytes
+ pub dstXInBytes: ::core::ffi::c_uint,
+ ///< Destination Y
+ pub dstY: ::core::ffi::c_uint,
+ ///< Destination Z
+ pub dstZ: ::core::ffi::c_uint,
+ ///< Destination LOD
+ pub dstLOD: ::core::ffi::c_uint,
+ ///< Destination memory type (host, device, array)
+ pub dstMemoryType: CUmemorytype,
+ ///< Destination host pointer
+ pub dstHost: *mut ::core::ffi::c_void,
+ ///< Destination device pointer
+ pub dstDevice: CUdeviceptr_v1,
+ ///< Destination array reference
+ pub dstArray: CUarray,
+ ///< Must be NULL
+ pub reserved1: *mut ::core::ffi::c_void,
+ ///< Destination pitch (ignored when dst is array)
+ pub dstPitch: ::core::ffi::c_uint,
+ ///< Destination height (ignored when dst is array; may be 0 if Depth==1)
+ pub dstHeight: ::core::ffi::c_uint,
+ ///< Width of 3D memory copy in bytes
+ pub WidthInBytes: ::core::ffi::c_uint,
+ ///< Height of 3D memory copy
+ pub Height: ::core::ffi::c_uint,
+ ///< Depth of 3D memory copy
+ pub Depth: ::core::ffi::c_uint,
+}
+pub type CUDA_MEMCPY3D_v1 = CUDA_MEMCPY3D_v1_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_ARRAY_DESCRIPTOR_v1_st {
+ ///< Width of array
+ pub Width: ::core::ffi::c_uint,
+ ///< Height of array
+ pub Height: ::core::ffi::c_uint,
+ ///< Array format
+ pub Format: CUarray_format,
+ ///< Channels per array element
+ pub NumChannels: ::core::ffi::c_uint,
+}
+pub type CUDA_ARRAY_DESCRIPTOR_v1 = CUDA_ARRAY_DESCRIPTOR_v1_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUDA_ARRAY3D_DESCRIPTOR_v1_st {
+ ///< Width of 3D array
+ pub Width: ::core::ffi::c_uint,
+ ///< Height of 3D array
+ pub Height: ::core::ffi::c_uint,
+ ///< Depth of 3D array
+ pub Depth: ::core::ffi::c_uint,
+ ///< Array format
+ pub Format: CUarray_format,
+ ///< Channels per array element
+ pub NumChannels: ::core::ffi::c_uint,
+ ///< Flags
+ pub Flags: ::core::ffi::c_uint,
+}
+pub type CUDA_ARRAY3D_DESCRIPTOR_v1 = CUDA_ARRAY3D_DESCRIPTOR_v1_st;
+impl CUoutput_mode_enum {
+ ///< Output mode Key-Value pair format.
+ pub const CU_OUT_KEY_VALUE_PAIR: CUoutput_mode_enum = CUoutput_mode_enum(0);
+}
+impl CUoutput_mode_enum {
+ ///< Output mode Comma separated values format.
+ pub const CU_OUT_CSV: CUoutput_mode_enum = CUoutput_mode_enum(1);
+}
+#[repr(transparent)]
+/// Profiler Output Modes
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUoutput_mode_enum(pub ::core::ffi::c_uint);
+/// Profiler Output Modes
+pub use self::CUoutput_mode_enum as CUoutput_mode;
+pub type GLenum = ::core::ffi::c_uint;
+pub type GLuint = ::core::ffi::c_uint;
+pub type khronos_int32_t = i32;
+impl CUGLDeviceList_enum {
+ ///< The CUDA devices for all GPUs used by the current OpenGL context
+ pub const CU_GL_DEVICE_LIST_ALL: CUGLDeviceList_enum = CUGLDeviceList_enum(1);
+}
+impl CUGLDeviceList_enum {
+ ///< The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame
+ pub const CU_GL_DEVICE_LIST_CURRENT_FRAME: CUGLDeviceList_enum = CUGLDeviceList_enum(
+ 2,
+ );
+}
+impl CUGLDeviceList_enum {
+ ///< The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame
+ pub const CU_GL_DEVICE_LIST_NEXT_FRAME: CUGLDeviceList_enum = CUGLDeviceList_enum(3);
+}
+#[repr(transparent)]
+/// CUDA devices corresponding to an OpenGL device
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUGLDeviceList_enum(pub ::core::ffi::c_uint);
+/// CUDA devices corresponding to an OpenGL device
+pub use self::CUGLDeviceList_enum as CUGLDeviceList;
+impl CUGLmap_flags_enum {
+ pub const CU_GL_MAP_RESOURCE_FLAGS_NONE: CUGLmap_flags_enum = CUGLmap_flags_enum(0);
+}
+impl CUGLmap_flags_enum {
+ pub const CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY: CUGLmap_flags_enum = CUGLmap_flags_enum(
+ 1,
+ );
+}
+impl CUGLmap_flags_enum {
+ pub const CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD: CUGLmap_flags_enum = CUGLmap_flags_enum(
+ 2,
+ );
+}
+#[repr(transparent)]
+/// Flags to map or unmap a resource
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUGLmap_flags_enum(pub ::core::ffi::c_uint);
+/// Flags to map or unmap a resource
+pub use self::CUGLmap_flags_enum as CUGLmap_flags;
+pub type EGLint = khronos_int32_t;
+pub type EGLSyncKHR = *mut ::core::ffi::c_void;
+pub type EGLImageKHR = *mut ::core::ffi::c_void;
+pub type EGLStreamKHR = *mut ::core::ffi::c_void;
+impl CUeglFrameType_enum {
+ ///< Frame type CUDA array
+ pub const CU_EGL_FRAME_TYPE_ARRAY: CUeglFrameType_enum = CUeglFrameType_enum(0);
+}
+impl CUeglFrameType_enum {
+ ///< Frame type pointer
+ pub const CU_EGL_FRAME_TYPE_PITCH: CUeglFrameType_enum = CUeglFrameType_enum(1);
+}
+#[repr(transparent)]
+/// CUDA EglFrame type - array or pointer
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUeglFrameType_enum(pub ::core::ffi::c_uint);
+/// CUDA EglFrame type - array or pointer
+pub use self::CUeglFrameType_enum as CUeglFrameType;
+impl CUeglResourceLocationFlags_enum {
+ ///< Resource location sysmem
+ pub const CU_EGL_RESOURCE_LOCATION_SYSMEM: CUeglResourceLocationFlags_enum = CUeglResourceLocationFlags_enum(
+ 0,
+ );
+}
+impl CUeglResourceLocationFlags_enum {
+ ///< Resource location vidmem
+ pub const CU_EGL_RESOURCE_LOCATION_VIDMEM: CUeglResourceLocationFlags_enum = CUeglResourceLocationFlags_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/** Resource location flags - sysmem or vidmem
+
+ For CUDA context on iGPU, since video and system memory are equivalent -
+ these flags will not have an effect on the execution.
+
+ For CUDA context on dGPU, applications can use the flag ::CUeglResourceLocationFlags
+ to give a hint about the desired location.
+
+ ::CU_EGL_RESOURCE_LOCATION_SYSMEM - the frame data is made resident on the system memory
+ to be accessed by CUDA.
+
+ ::CU_EGL_RESOURCE_LOCATION_VIDMEM - the frame data is made resident on the dedicated
+ video memory to be accessed by CUDA.
+
+ There may be an additional latency due to new allocation and data migration,
+ if the frame is produced on a different memory.
+*/
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUeglResourceLocationFlags_enum(pub ::core::ffi::c_uint);
+/** Resource location flags - sysmem or vidmem
+
+ For CUDA context on iGPU, since video and system memory are equivalent -
+ these flags will not have an effect on the execution.
+
+ For CUDA context on dGPU, applications can use the flag ::CUeglResourceLocationFlags
+ to give a hint about the desired location.
+
+ ::CU_EGL_RESOURCE_LOCATION_SYSMEM - the frame data is made resident on the system memory
+ to be accessed by CUDA.
+
+ ::CU_EGL_RESOURCE_LOCATION_VIDMEM - the frame data is made resident on the dedicated
+ video memory to be accessed by CUDA.
+
+ There may be an additional latency due to new allocation and data migration,
+ if the frame is produced on a different memory.
+*/
+pub use self::CUeglResourceLocationFlags_enum as CUeglResourceLocationFlags;
+impl CUeglColorFormat_enum {
+ ///< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_YUV420_PLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 0,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar.
+ pub const CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 1,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_YUV422_PLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 2,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar.
+ pub const CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 3,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< R/G/B three channels in one surface with BGR byte ordering. Only pitch linear format supported.
+ pub const CU_EGL_COLOR_FORMAT_RGB: CUeglColorFormat_enum = CUeglColorFormat_enum(4);
+}
+impl CUeglColorFormat_enum {
+ ///< R/G/B three channels in one surface with RGB byte ordering. Only pitch linear format supported.
+ pub const CU_EGL_COLOR_FORMAT_BGR: CUeglColorFormat_enum = CUeglColorFormat_enum(5);
+}
+impl CUeglColorFormat_enum {
+ ///< R/G/B/A four channels in one surface with BGRA byte ordering.
+ pub const CU_EGL_COLOR_FORMAT_ARGB: CUeglColorFormat_enum = CUeglColorFormat_enum(6);
+}
+impl CUeglColorFormat_enum {
+ ///< R/G/B/A four channels in one surface with ABGR byte ordering.
+ pub const CU_EGL_COLOR_FORMAT_RGBA: CUeglColorFormat_enum = CUeglColorFormat_enum(7);
+}
+impl CUeglColorFormat_enum {
+ ///< single luminance channel in one surface.
+ pub const CU_EGL_COLOR_FORMAT_L: CUeglColorFormat_enum = CUeglColorFormat_enum(8);
+}
+impl CUeglColorFormat_enum {
+ ///< single color channel in one surface.
+ pub const CU_EGL_COLOR_FORMAT_R: CUeglColorFormat_enum = CUeglColorFormat_enum(9);
+}
+impl CUeglColorFormat_enum {
+ ///< Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_YUV444_PLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 10,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar.
+ pub const CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 11,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, U, V in one surface, interleaved as UYVY in one channel.
+ pub const CU_EGL_COLOR_FORMAT_YUYV_422: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 12,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, U, V in one surface, interleaved as YUYV in one channel.
+ pub const CU_EGL_COLOR_FORMAT_UYVY_422: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 13,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< R/G/B/A four channels in one surface with RGBA byte ordering.
+ pub const CU_EGL_COLOR_FORMAT_ABGR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 14,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< R/G/B/A four channels in one surface with ARGB byte ordering.
+ pub const CU_EGL_COLOR_FORMAT_BGRA: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 15,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Alpha color format - one channel in one surface.
+ pub const CU_EGL_COLOR_FORMAT_A: CUeglColorFormat_enum = CUeglColorFormat_enum(16);
+}
+impl CUeglColorFormat_enum {
+ ///< R/G color format - two channels in one surface with GR byte ordering
+ pub const CU_EGL_COLOR_FORMAT_RG: CUeglColorFormat_enum = CUeglColorFormat_enum(17);
+}
+impl CUeglColorFormat_enum {
+ ///< Y, U, V, A four channels in one surface, interleaved as VUYA.
+ pub const CU_EGL_COLOR_FORMAT_AYUV: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 18,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 19,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 20,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 21,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 22,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 23,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 24,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 25,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y, U, V in one surface, interleaved as YVYU in one channel.
+ pub const CU_EGL_COLOR_FORMAT_VYUY_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 26,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y, U, V in one surface, interleaved as YUYV in one channel.
+ pub const CU_EGL_COLOR_FORMAT_UYVY_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 27,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y, U, V in one surface, interleaved as UYVY in one channel.
+ pub const CU_EGL_COLOR_FORMAT_YUYV_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 28,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y, U, V in one surface, interleaved as VYUY in one channel.
+ pub const CU_EGL_COLOR_FORMAT_YVYU_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 29,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported.
+ pub const CU_EGL_COLOR_FORMAT_YUV_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 30,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY.
+ pub const CU_EGL_COLOR_FORMAT_YUVA_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 31,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA.
+ pub const CU_EGL_COLOR_FORMAT_AYUV_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 32,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 33,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 34,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 35,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 36,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 37,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 38,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 39,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 40,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 41,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 42,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 43,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 44,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer format - one channel in one surface with interleaved RGGB ordering.
+ pub const CU_EGL_COLOR_FORMAT_BAYER_RGGB: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 45,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer format - one channel in one surface with interleaved BGGR ordering.
+ pub const CU_EGL_COLOR_FORMAT_BAYER_BGGR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 46,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer format - one channel in one surface with interleaved GRBG ordering.
+ pub const CU_EGL_COLOR_FORMAT_BAYER_GRBG: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 47,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer format - one channel in one surface with interleaved GBRG ordering.
+ pub const CU_EGL_COLOR_FORMAT_BAYER_GBRG: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 48,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER10_RGGB: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 49,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER10_BGGR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 50,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER10_GRBG: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 51,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER10_GBRG: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 52,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER12_RGGB: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 53,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER12_BGGR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 54,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER12_GRBG: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 55,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER12_GBRG: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 56,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER14_RGGB: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 57,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER14_BGGR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 58,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER14_GRBG: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 59,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER14_GBRG: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 60,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER20_RGGB: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 61,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER20_BGGR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 62,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER20_GRBG: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 63,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER20_GBRG: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 64,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_YVU444_PLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 65,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_YVU422_PLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 66,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_YVU420_PLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 67,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype.
+ pub const CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 68,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype.
+ pub const CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 69,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype.
+ pub const CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 70,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype.
+ pub const CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 71,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer format - one channel in one surface with interleaved BCCR ordering.
+ pub const CU_EGL_COLOR_FORMAT_BAYER_BCCR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 72,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer format - one channel in one surface with interleaved RCCB ordering.
+ pub const CU_EGL_COLOR_FORMAT_BAYER_RCCB: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 73,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer format - one channel in one surface with interleaved CRBC ordering.
+ pub const CU_EGL_COLOR_FORMAT_BAYER_CRBC: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 74,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer format - one channel in one surface with interleaved CBRC ordering.
+ pub const CU_EGL_COLOR_FORMAT_BAYER_CBRC: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 75,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER10_CCCC: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 76,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER12_BCCR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 77,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER12_RCCB: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 78,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER12_CRBC: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 79,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER12_CBRC: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 80,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op.
+ pub const CU_EGL_COLOR_FORMAT_BAYER12_CCCC: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 81,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Color format for single Y plane.
+ pub const CU_EGL_COLOR_FORMAT_Y: CUeglColorFormat_enum = CUeglColorFormat_enum(82);
+}
+impl CUeglColorFormat_enum {
+ ///< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 83,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 84,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height= 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 85,
+ );
+}
+impl CUeglColorFormat_enum {
+    ///< Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 86,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 87,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 88,
+ );
+}
+impl CUeglColorFormat_enum {
+ /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height
+= 1/2 Y height.*/
+ pub const CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 89,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 90,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 91,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 92,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 93,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 94,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 95,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Color format for single Y plane.
+ pub const CU_EGL_COLOR_FORMAT_Y_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 96,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Color format for single Y plane.
+ pub const CU_EGL_COLOR_FORMAT_Y_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 97,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Color format for single Y10 plane.
+ pub const CU_EGL_COLOR_FORMAT_Y10_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 98,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Color format for single Y10 plane.
+ pub const CU_EGL_COLOR_FORMAT_Y10_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 99,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Color format for single Y12 plane.
+ pub const CU_EGL_COLOR_FORMAT_Y12_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 100,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Color format for single Y12 plane.
+ pub const CU_EGL_COLOR_FORMAT_Y12_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 101,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, U, V, A four channels in one surface, interleaved as AVUY.
+ pub const CU_EGL_COLOR_FORMAT_YUVA: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 102,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported.
+ pub const CU_EGL_COLOR_FORMAT_YUV: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 103,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, U, V in one surface, interleaved as YVYU in one channel.
+ pub const CU_EGL_COLOR_FORMAT_YVYU: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 104,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Y, U, V in one surface, interleaved as VYUY in one channel.
+ pub const CU_EGL_COLOR_FORMAT_VYUY: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 105,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 106,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 107,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 108,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 109,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 110,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
+ pub const CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 111,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 112,
+ );
+}
+impl CUeglColorFormat_enum {
+ ///< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height.
+ pub const CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 113,
+ );
+}
+impl CUeglColorFormat_enum {
+ pub const CU_EGL_COLOR_FORMAT_MAX: CUeglColorFormat_enum = CUeglColorFormat_enum(
+ 114,
+ );
+}
+#[repr(transparent)]
+/** CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA_EGL interops.
+ Three channel formats are currently not supported for ::CU_EGL_FRAME_TYPE_ARRAY*/
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct CUeglColorFormat_enum(pub ::core::ffi::c_uint);
+/** CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA_EGL interops.
+ Three channel formats are currently not supported for ::CU_EGL_FRAME_TYPE_ARRAY*/
+pub use self::CUeglColorFormat_enum as CUeglColorFormat;
+/** CUDA EGLFrame structure Descriptor - structure defining one frame of EGL.
+
+ Each frame may contain one or more planes depending on whether the surface is Multiplanar or not.*/
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CUeglFrame_st {
+ pub frame: CUeglFrame_st__bindgen_ty_1,
+ ///< Width of first plane
+ pub width: ::core::ffi::c_uint,
+ ///< Height of first plane
+ pub height: ::core::ffi::c_uint,
+ ///< Depth of first plane
+ pub depth: ::core::ffi::c_uint,
+ ///< Pitch of first plane
+ pub pitch: ::core::ffi::c_uint,
+ ///< Number of planes
+ pub planeCount: ::core::ffi::c_uint,
+ ///< Number of channels for the plane
+ pub numChannels: ::core::ffi::c_uint,
+ ///< Array or Pitch
+ pub frameType: CUeglFrameType,
+ ///< CUDA EGL Color Format
+ pub eglColorFormat: CUeglColorFormat,
+ ///< CUDA Array Format
+ pub cuFormat: CUarray_format,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union CUeglFrame_st__bindgen_ty_1 {
+ ///< Array of CUarray corresponding to each plane
+ pub pArray: [CUarray; 3usize],
+ ///< Array of Pointers corresponding to each plane
+ pub pPitch: [*mut ::core::ffi::c_void; 3usize],
+}
+/** CUDA EGLFrame structure Descriptor - structure defining one frame of EGL.
+
+ Each frame may contain one or more planes depending on whether the surface is Multiplanar or not.*/
+pub type CUeglFrame_v1 = CUeglFrame_st;
+/** CUDA EGLFrame structure Descriptor - structure defining one frame of EGL.
+
+ Each frame may contain one or more planes depending on whether the surface is Multiplanar or not.*/
+pub type CUeglFrame = CUeglFrame_v1;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct CUeglStreamConnection_st {
+ _unused: [u8; 0],
+}
+/// CUDA EGLStream Connection
+pub type CUeglStreamConnection = *mut CUeglStreamConnection_st;
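A hedged sketch of how a consumer might read the CUeglFrame_st union above: frameType selects which member is active, with pArray used for array frames and pPitch for pitch-linear frames. The CU_EGL_FRAME_TYPE_ARRAY / CU_EGL_FRAME_TYPE_PITCH constants are assumed to come from the CUeglFrameType bindings emitted elsewhere in this generated file; the helper itself is illustrative, not part of the patch.

fn plane_pointer(frame: &CUeglFrame, plane: usize) -> *const ::core::ffi::c_void {
    assert!((plane as ::core::ffi::c_uint) < frame.planeCount);
    // Union access is unsafe; frameType tells us which member is meaningful.
    unsafe {
        if frame.frameType == CUeglFrameType::CU_EGL_FRAME_TYPE_ARRAY {
            frame.frame.pArray[plane] as *const ::core::ffi::c_void
        } else {
            frame.frame.pPitch[plane] as *const ::core::ffi::c_void
        }
    }
}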
+impl VdpStatus {
+ pub const VDP_STATUS_OK: VdpStatus = VdpStatus(0);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_NO_IMPLEMENTATION: VdpStatus = VdpStatus(1);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_DISPLAY_PREEMPTED: VdpStatus = VdpStatus(2);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_INVALID_HANDLE: VdpStatus = VdpStatus(3);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_INVALID_POINTER: VdpStatus = VdpStatus(4);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_INVALID_CHROMA_TYPE: VdpStatus = VdpStatus(5);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_INVALID_Y_CB_CR_FORMAT: VdpStatus = VdpStatus(6);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_INVALID_RGBA_FORMAT: VdpStatus = VdpStatus(7);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_INVALID_INDEXED_FORMAT: VdpStatus = VdpStatus(8);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_INVALID_COLOR_STANDARD: VdpStatus = VdpStatus(9);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_INVALID_COLOR_TABLE_FORMAT: VdpStatus = VdpStatus(10);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_INVALID_BLEND_FACTOR: VdpStatus = VdpStatus(11);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_INVALID_BLEND_EQUATION: VdpStatus = VdpStatus(12);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_INVALID_FLAG: VdpStatus = VdpStatus(13);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_INVALID_DECODER_PROFILE: VdpStatus = VdpStatus(14);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_INVALID_VIDEO_MIXER_FEATURE: VdpStatus = VdpStatus(15);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_INVALID_VIDEO_MIXER_PARAMETER: VdpStatus = VdpStatus(16);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_INVALID_VIDEO_MIXER_ATTRIBUTE: VdpStatus = VdpStatus(17);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_INVALID_VIDEO_MIXER_PICTURE_STRUCTURE: VdpStatus = VdpStatus(
+ 18,
+ );
+}
+impl VdpStatus {
+ pub const VDP_STATUS_INVALID_FUNC_ID: VdpStatus = VdpStatus(19);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_INVALID_SIZE: VdpStatus = VdpStatus(20);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_INVALID_VALUE: VdpStatus = VdpStatus(21);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_INVALID_STRUCT_VERSION: VdpStatus = VdpStatus(22);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_RESOURCES: VdpStatus = VdpStatus(23);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_HANDLE_DEVICE_MISMATCH: VdpStatus = VdpStatus(24);
+}
+impl VdpStatus {
+ pub const VDP_STATUS_ERROR: VdpStatus = VdpStatus(25);
+}
+#[repr(transparent)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct VdpStatus(pub ::core::ffi::c_uint);
+pub type VdpDevice = u32;
+pub type VdpVideoSurface = u32;
+pub type VdpOutputSurface = u32;
+pub type VdpFuncId = u32;
+pub type VdpGetProcAddress = ::core::option::Option<
+ unsafe extern "system" fn(
+ device: VdpDevice,
+ function_id: VdpFuncId,
+ function_pointer: *mut *mut ::core::ffi::c_void,
+ ) -> VdpStatus,
+>;
+impl CUerror {
+ pub const INVALID_VALUE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(1)
+ });
+ pub const OUT_OF_MEMORY: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(2)
+ });
+ pub const NOT_INITIALIZED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(3)
+ });
+ pub const DEINITIALIZED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(4)
+ });
+ pub const PROFILER_DISABLED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(5)
+ });
+ pub const PROFILER_NOT_INITIALIZED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(6)
+ });
+ pub const PROFILER_ALREADY_STARTED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(7)
+ });
+ pub const PROFILER_ALREADY_STOPPED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(8)
+ });
+ pub const STUB_LIBRARY: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(34)
+ });
+ pub const DEVICE_UNAVAILABLE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(46)
+ });
+ pub const NO_DEVICE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(100)
+ });
+ pub const INVALID_DEVICE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(101)
+ });
+ pub const DEVICE_NOT_LICENSED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(102)
+ });
+ pub const INVALID_IMAGE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(200)
+ });
+ pub const INVALID_CONTEXT: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(201)
+ });
+ pub const CONTEXT_ALREADY_CURRENT: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(202)
+ });
+ pub const MAP_FAILED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(205)
+ });
+ pub const UNMAP_FAILED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(206)
+ });
+ pub const ARRAY_IS_MAPPED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(207)
+ });
+ pub const ALREADY_MAPPED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(208)
+ });
+ pub const NO_BINARY_FOR_GPU: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(209)
+ });
+ pub const ALREADY_ACQUIRED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(210)
+ });
+ pub const NOT_MAPPED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(211)
+ });
+ pub const NOT_MAPPED_AS_ARRAY: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(212)
+ });
+ pub const NOT_MAPPED_AS_POINTER: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(213)
+ });
+ pub const ECC_UNCORRECTABLE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(214)
+ });
+ pub const UNSUPPORTED_LIMIT: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(215)
+ });
+ pub const CONTEXT_ALREADY_IN_USE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(216)
+ });
+ pub const PEER_ACCESS_UNSUPPORTED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(217)
+ });
+ pub const INVALID_PTX: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(218)
+ });
+ pub const INVALID_GRAPHICS_CONTEXT: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(219)
+ });
+ pub const NVLINK_UNCORRECTABLE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(220)
+ });
+ pub const JIT_COMPILER_NOT_FOUND: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(221)
+ });
+ pub const UNSUPPORTED_PTX_VERSION: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(222)
+ });
+ pub const JIT_COMPILATION_DISABLED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(223)
+ });
+ pub const UNSUPPORTED_EXEC_AFFINITY: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(224)
+ });
+ pub const UNSUPPORTED_DEVSIDE_SYNC: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(225)
+ });
+ pub const INVALID_SOURCE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(300)
+ });
+ pub const FILE_NOT_FOUND: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(301)
+ });
+ pub const SHARED_OBJECT_SYMBOL_NOT_FOUND: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(302)
+ });
+ pub const SHARED_OBJECT_INIT_FAILED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(303)
+ });
+ pub const OPERATING_SYSTEM: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(304)
+ });
+ pub const INVALID_HANDLE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(400)
+ });
+ pub const ILLEGAL_STATE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(401)
+ });
+ pub const LOSSY_QUERY: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(402)
+ });
+ pub const NOT_FOUND: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(500)
+ });
+ pub const NOT_READY: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(600)
+ });
+ pub const ILLEGAL_ADDRESS: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(700)
+ });
+ pub const LAUNCH_OUT_OF_RESOURCES: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(701)
+ });
+ pub const LAUNCH_TIMEOUT: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(702)
+ });
+ pub const LAUNCH_INCOMPATIBLE_TEXTURING: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(703)
+ });
+ pub const PEER_ACCESS_ALREADY_ENABLED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(704)
+ });
+ pub const PEER_ACCESS_NOT_ENABLED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(705)
+ });
+ pub const PRIMARY_CONTEXT_ACTIVE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(708)
+ });
+ pub const CONTEXT_IS_DESTROYED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(709)
+ });
+ pub const ASSERT: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(710)
+ });
+ pub const TOO_MANY_PEERS: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(711)
+ });
+ pub const HOST_MEMORY_ALREADY_REGISTERED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(712)
+ });
+ pub const HOST_MEMORY_NOT_REGISTERED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(713)
+ });
+ pub const HARDWARE_STACK_ERROR: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(714)
+ });
+ pub const ILLEGAL_INSTRUCTION: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(715)
+ });
+ pub const MISALIGNED_ADDRESS: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(716)
+ });
+ pub const INVALID_ADDRESS_SPACE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(717)
+ });
+ pub const INVALID_PC: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(718)
+ });
+ pub const LAUNCH_FAILED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(719)
+ });
+ pub const COOPERATIVE_LAUNCH_TOO_LARGE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(720)
+ });
+ pub const NOT_PERMITTED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(800)
+ });
+ pub const NOT_SUPPORTED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(801)
+ });
+ pub const SYSTEM_NOT_READY: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(802)
+ });
+ pub const SYSTEM_DRIVER_MISMATCH: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(803)
+ });
+ pub const COMPAT_NOT_SUPPORTED_ON_DEVICE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(804)
+ });
+ pub const MPS_CONNECTION_FAILED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(805)
+ });
+ pub const MPS_RPC_FAILURE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(806)
+ });
+ pub const MPS_SERVER_NOT_READY: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(807)
+ });
+ pub const MPS_MAX_CLIENTS_REACHED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(808)
+ });
+ pub const MPS_MAX_CONNECTIONS_REACHED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(809)
+ });
+ pub const MPS_CLIENT_TERMINATED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(810)
+ });
+ pub const CDP_NOT_SUPPORTED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(811)
+ });
+ pub const CDP_VERSION_MISMATCH: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(812)
+ });
+ pub const STREAM_CAPTURE_UNSUPPORTED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(900)
+ });
+ pub const STREAM_CAPTURE_INVALIDATED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(901)
+ });
+ pub const STREAM_CAPTURE_MERGE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(902)
+ });
+ pub const STREAM_CAPTURE_UNMATCHED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(903)
+ });
+ pub const STREAM_CAPTURE_UNJOINED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(904)
+ });
+ pub const STREAM_CAPTURE_ISOLATION: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(905)
+ });
+ pub const STREAM_CAPTURE_IMPLICIT: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(906)
+ });
+ pub const CAPTURED_EVENT: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(907)
+ });
+ pub const STREAM_CAPTURE_WRONG_THREAD: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(908)
+ });
+ pub const TIMEOUT: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(909)
+ });
+ pub const GRAPH_EXEC_UPDATE_FAILURE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(910)
+ });
+ pub const EXTERNAL_DEVICE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(911)
+ });
+ pub const INVALID_CLUSTER_SIZE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(912)
+ });
+ pub const FUNCTION_NOT_LOADED: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(913)
+ });
+ pub const INVALID_RESOURCE_TYPE: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(914)
+ });
+ pub const INVALID_RESOURCE_CONFIGURATION: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(915)
+ });
+ pub const UNKNOWN: CUerror = CUerror(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(999)
+ });
+}
+#[repr(transparent)]
+#[derive(Debug, Hash, Copy, Clone, PartialEq, Eq)]
+pub struct CUerror(pub ::core::num::NonZeroU32);
+pub trait CUresultConsts {
+ const SUCCESS: CUresult = CUresult::Ok(());
+ const ERROR_INVALID_VALUE: CUresult = CUresult::Err(CUerror::INVALID_VALUE);
+ const ERROR_OUT_OF_MEMORY: CUresult = CUresult::Err(CUerror::OUT_OF_MEMORY);
+ const ERROR_NOT_INITIALIZED: CUresult = CUresult::Err(CUerror::NOT_INITIALIZED);
+ const ERROR_DEINITIALIZED: CUresult = CUresult::Err(CUerror::DEINITIALIZED);
+ const ERROR_PROFILER_DISABLED: CUresult = CUresult::Err(CUerror::PROFILER_DISABLED);
+ const ERROR_PROFILER_NOT_INITIALIZED: CUresult = CUresult::Err(
+ CUerror::PROFILER_NOT_INITIALIZED,
+ );
+ const ERROR_PROFILER_ALREADY_STARTED: CUresult = CUresult::Err(
+ CUerror::PROFILER_ALREADY_STARTED,
+ );
+ const ERROR_PROFILER_ALREADY_STOPPED: CUresult = CUresult::Err(
+ CUerror::PROFILER_ALREADY_STOPPED,
+ );
+ const ERROR_STUB_LIBRARY: CUresult = CUresult::Err(CUerror::STUB_LIBRARY);
+ const ERROR_DEVICE_UNAVAILABLE: CUresult = CUresult::Err(
+ CUerror::DEVICE_UNAVAILABLE,
+ );
+ const ERROR_NO_DEVICE: CUresult = CUresult::Err(CUerror::NO_DEVICE);
+ const ERROR_INVALID_DEVICE: CUresult = CUresult::Err(CUerror::INVALID_DEVICE);
+ const ERROR_DEVICE_NOT_LICENSED: CUresult = CUresult::Err(
+ CUerror::DEVICE_NOT_LICENSED,
+ );
+ const ERROR_INVALID_IMAGE: CUresult = CUresult::Err(CUerror::INVALID_IMAGE);
+ const ERROR_INVALID_CONTEXT: CUresult = CUresult::Err(CUerror::INVALID_CONTEXT);
+ const ERROR_CONTEXT_ALREADY_CURRENT: CUresult = CUresult::Err(
+ CUerror::CONTEXT_ALREADY_CURRENT,
+ );
+ const ERROR_MAP_FAILED: CUresult = CUresult::Err(CUerror::MAP_FAILED);
+ const ERROR_UNMAP_FAILED: CUresult = CUresult::Err(CUerror::UNMAP_FAILED);
+ const ERROR_ARRAY_IS_MAPPED: CUresult = CUresult::Err(CUerror::ARRAY_IS_MAPPED);
+ const ERROR_ALREADY_MAPPED: CUresult = CUresult::Err(CUerror::ALREADY_MAPPED);
+ const ERROR_NO_BINARY_FOR_GPU: CUresult = CUresult::Err(CUerror::NO_BINARY_FOR_GPU);
+ const ERROR_ALREADY_ACQUIRED: CUresult = CUresult::Err(CUerror::ALREADY_ACQUIRED);
+ const ERROR_NOT_MAPPED: CUresult = CUresult::Err(CUerror::NOT_MAPPED);
+ const ERROR_NOT_MAPPED_AS_ARRAY: CUresult = CUresult::Err(
+ CUerror::NOT_MAPPED_AS_ARRAY,
+ );
+ const ERROR_NOT_MAPPED_AS_POINTER: CUresult = CUresult::Err(
+ CUerror::NOT_MAPPED_AS_POINTER,
+ );
+ const ERROR_ECC_UNCORRECTABLE: CUresult = CUresult::Err(CUerror::ECC_UNCORRECTABLE);
+ const ERROR_UNSUPPORTED_LIMIT: CUresult = CUresult::Err(CUerror::UNSUPPORTED_LIMIT);
+ const ERROR_CONTEXT_ALREADY_IN_USE: CUresult = CUresult::Err(
+ CUerror::CONTEXT_ALREADY_IN_USE,
+ );
+ const ERROR_PEER_ACCESS_UNSUPPORTED: CUresult = CUresult::Err(
+ CUerror::PEER_ACCESS_UNSUPPORTED,
+ );
+ const ERROR_INVALID_PTX: CUresult = CUresult::Err(CUerror::INVALID_PTX);
+ const ERROR_INVALID_GRAPHICS_CONTEXT: CUresult = CUresult::Err(
+ CUerror::INVALID_GRAPHICS_CONTEXT,
+ );
+ const ERROR_NVLINK_UNCORRECTABLE: CUresult = CUresult::Err(
+ CUerror::NVLINK_UNCORRECTABLE,
+ );
+ const ERROR_JIT_COMPILER_NOT_FOUND: CUresult = CUresult::Err(
+ CUerror::JIT_COMPILER_NOT_FOUND,
+ );
+ const ERROR_UNSUPPORTED_PTX_VERSION: CUresult = CUresult::Err(
+ CUerror::UNSUPPORTED_PTX_VERSION,
+ );
+ const ERROR_JIT_COMPILATION_DISABLED: CUresult = CUresult::Err(
+ CUerror::JIT_COMPILATION_DISABLED,
+ );
+ const ERROR_UNSUPPORTED_EXEC_AFFINITY: CUresult = CUresult::Err(
+ CUerror::UNSUPPORTED_EXEC_AFFINITY,
+ );
+ const ERROR_UNSUPPORTED_DEVSIDE_SYNC: CUresult = CUresult::Err(
+ CUerror::UNSUPPORTED_DEVSIDE_SYNC,
+ );
+ const ERROR_INVALID_SOURCE: CUresult = CUresult::Err(CUerror::INVALID_SOURCE);
+ const ERROR_FILE_NOT_FOUND: CUresult = CUresult::Err(CUerror::FILE_NOT_FOUND);
+ const ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND: CUresult = CUresult::Err(
+ CUerror::SHARED_OBJECT_SYMBOL_NOT_FOUND,
+ );
+ const ERROR_SHARED_OBJECT_INIT_FAILED: CUresult = CUresult::Err(
+ CUerror::SHARED_OBJECT_INIT_FAILED,
+ );
+ const ERROR_OPERATING_SYSTEM: CUresult = CUresult::Err(CUerror::OPERATING_SYSTEM);
+ const ERROR_INVALID_HANDLE: CUresult = CUresult::Err(CUerror::INVALID_HANDLE);
+ const ERROR_ILLEGAL_STATE: CUresult = CUresult::Err(CUerror::ILLEGAL_STATE);
+ const ERROR_LOSSY_QUERY: CUresult = CUresult::Err(CUerror::LOSSY_QUERY);
+ const ERROR_NOT_FOUND: CUresult = CUresult::Err(CUerror::NOT_FOUND);
+ const ERROR_NOT_READY: CUresult = CUresult::Err(CUerror::NOT_READY);
+ const ERROR_ILLEGAL_ADDRESS: CUresult = CUresult::Err(CUerror::ILLEGAL_ADDRESS);
+ const ERROR_LAUNCH_OUT_OF_RESOURCES: CUresult = CUresult::Err(
+ CUerror::LAUNCH_OUT_OF_RESOURCES,
+ );
+ const ERROR_LAUNCH_TIMEOUT: CUresult = CUresult::Err(CUerror::LAUNCH_TIMEOUT);
+ const ERROR_LAUNCH_INCOMPATIBLE_TEXTURING: CUresult = CUresult::Err(
+ CUerror::LAUNCH_INCOMPATIBLE_TEXTURING,
+ );
+ const ERROR_PEER_ACCESS_ALREADY_ENABLED: CUresult = CUresult::Err(
+ CUerror::PEER_ACCESS_ALREADY_ENABLED,
+ );
+ const ERROR_PEER_ACCESS_NOT_ENABLED: CUresult = CUresult::Err(
+ CUerror::PEER_ACCESS_NOT_ENABLED,
+ );
+ const ERROR_PRIMARY_CONTEXT_ACTIVE: CUresult = CUresult::Err(
+ CUerror::PRIMARY_CONTEXT_ACTIVE,
+ );
+ const ERROR_CONTEXT_IS_DESTROYED: CUresult = CUresult::Err(
+ CUerror::CONTEXT_IS_DESTROYED,
+ );
+ const ERROR_ASSERT: CUresult = CUresult::Err(CUerror::ASSERT);
+ const ERROR_TOO_MANY_PEERS: CUresult = CUresult::Err(CUerror::TOO_MANY_PEERS);
+ const ERROR_HOST_MEMORY_ALREADY_REGISTERED: CUresult = CUresult::Err(
+ CUerror::HOST_MEMORY_ALREADY_REGISTERED,
+ );
+ const ERROR_HOST_MEMORY_NOT_REGISTERED: CUresult = CUresult::Err(
+ CUerror::HOST_MEMORY_NOT_REGISTERED,
+ );
+ const ERROR_HARDWARE_STACK_ERROR: CUresult = CUresult::Err(
+ CUerror::HARDWARE_STACK_ERROR,
+ );
+ const ERROR_ILLEGAL_INSTRUCTION: CUresult = CUresult::Err(
+ CUerror::ILLEGAL_INSTRUCTION,
+ );
+ const ERROR_MISALIGNED_ADDRESS: CUresult = CUresult::Err(
+ CUerror::MISALIGNED_ADDRESS,
+ );
+ const ERROR_INVALID_ADDRESS_SPACE: CUresult = CUresult::Err(
+ CUerror::INVALID_ADDRESS_SPACE,
+ );
+ const ERROR_INVALID_PC: CUresult = CUresult::Err(CUerror::INVALID_PC);
+ const ERROR_LAUNCH_FAILED: CUresult = CUresult::Err(CUerror::LAUNCH_FAILED);
+ const ERROR_COOPERATIVE_LAUNCH_TOO_LARGE: CUresult = CUresult::Err(
+ CUerror::COOPERATIVE_LAUNCH_TOO_LARGE,
+ );
+ const ERROR_NOT_PERMITTED: CUresult = CUresult::Err(CUerror::NOT_PERMITTED);
+ const ERROR_NOT_SUPPORTED: CUresult = CUresult::Err(CUerror::NOT_SUPPORTED);
+ const ERROR_SYSTEM_NOT_READY: CUresult = CUresult::Err(CUerror::SYSTEM_NOT_READY);
+ const ERROR_SYSTEM_DRIVER_MISMATCH: CUresult = CUresult::Err(
+ CUerror::SYSTEM_DRIVER_MISMATCH,
+ );
+ const ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE: CUresult = CUresult::Err(
+ CUerror::COMPAT_NOT_SUPPORTED_ON_DEVICE,
+ );
+ const ERROR_MPS_CONNECTION_FAILED: CUresult = CUresult::Err(
+ CUerror::MPS_CONNECTION_FAILED,
+ );
+ const ERROR_MPS_RPC_FAILURE: CUresult = CUresult::Err(CUerror::MPS_RPC_FAILURE);
+ const ERROR_MPS_SERVER_NOT_READY: CUresult = CUresult::Err(
+ CUerror::MPS_SERVER_NOT_READY,
+ );
+ const ERROR_MPS_MAX_CLIENTS_REACHED: CUresult = CUresult::Err(
+ CUerror::MPS_MAX_CLIENTS_REACHED,
+ );
+ const ERROR_MPS_MAX_CONNECTIONS_REACHED: CUresult = CUresult::Err(
+ CUerror::MPS_MAX_CONNECTIONS_REACHED,
+ );
+ const ERROR_MPS_CLIENT_TERMINATED: CUresult = CUresult::Err(
+ CUerror::MPS_CLIENT_TERMINATED,
+ );
+ const ERROR_CDP_NOT_SUPPORTED: CUresult = CUresult::Err(CUerror::CDP_NOT_SUPPORTED);
+ const ERROR_CDP_VERSION_MISMATCH: CUresult = CUresult::Err(
+ CUerror::CDP_VERSION_MISMATCH,
+ );
+ const ERROR_STREAM_CAPTURE_UNSUPPORTED: CUresult = CUresult::Err(
+ CUerror::STREAM_CAPTURE_UNSUPPORTED,
+ );
+ const ERROR_STREAM_CAPTURE_INVALIDATED: CUresult = CUresult::Err(
+ CUerror::STREAM_CAPTURE_INVALIDATED,
+ );
+ const ERROR_STREAM_CAPTURE_MERGE: CUresult = CUresult::Err(
+ CUerror::STREAM_CAPTURE_MERGE,
+ );
+ const ERROR_STREAM_CAPTURE_UNMATCHED: CUresult = CUresult::Err(
+ CUerror::STREAM_CAPTURE_UNMATCHED,
+ );
+ const ERROR_STREAM_CAPTURE_UNJOINED: CUresult = CUresult::Err(
+ CUerror::STREAM_CAPTURE_UNJOINED,
+ );
+ const ERROR_STREAM_CAPTURE_ISOLATION: CUresult = CUresult::Err(
+ CUerror::STREAM_CAPTURE_ISOLATION,
+ );
+ const ERROR_STREAM_CAPTURE_IMPLICIT: CUresult = CUresult::Err(
+ CUerror::STREAM_CAPTURE_IMPLICIT,
+ );
+ const ERROR_CAPTURED_EVENT: CUresult = CUresult::Err(CUerror::CAPTURED_EVENT);
+ const ERROR_STREAM_CAPTURE_WRONG_THREAD: CUresult = CUresult::Err(
+ CUerror::STREAM_CAPTURE_WRONG_THREAD,
+ );
+ const ERROR_TIMEOUT: CUresult = CUresult::Err(CUerror::TIMEOUT);
+ const ERROR_GRAPH_EXEC_UPDATE_FAILURE: CUresult = CUresult::Err(
+ CUerror::GRAPH_EXEC_UPDATE_FAILURE,
+ );
+ const ERROR_EXTERNAL_DEVICE: CUresult = CUresult::Err(CUerror::EXTERNAL_DEVICE);
+ const ERROR_INVALID_CLUSTER_SIZE: CUresult = CUresult::Err(
+ CUerror::INVALID_CLUSTER_SIZE,
+ );
+ const ERROR_FUNCTION_NOT_LOADED: CUresult = CUresult::Err(
+ CUerror::FUNCTION_NOT_LOADED,
+ );
+ const ERROR_INVALID_RESOURCE_TYPE: CUresult = CUresult::Err(
+ CUerror::INVALID_RESOURCE_TYPE,
+ );
+ const ERROR_INVALID_RESOURCE_CONFIGURATION: CUresult = CUresult::Err(
+ CUerror::INVALID_RESOURCE_CONFIGURATION,
+ );
+ const ERROR_UNKNOWN: CUresult = CUresult::Err(CUerror::UNKNOWN);
+}
+impl CUresultConsts for CUresult {}
+#[must_use]
+pub type CUresult = ::core::result::Result<(), CUerror>;
+const _: fn() = || {
+ let _ = std::mem::transmute::<CUresult, u32>;
+};
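The compile-time check above passes only because Result<(), CUerror> and u32 share a layout: CUerror wraps a NonZeroU32, so Ok(()) can occupy the zero niche while Err carries the non-zero code. A minimal sketch of turning a raw status into this representation without any transmute (cu_result_from_raw is illustrative, not part of the patch):

fn cu_result_from_raw(raw: u32) -> CUresult {
    // 0 means CUDA_SUCCESS; every non-zero value is carried as a CUerror.
    match ::core::num::NonZeroU32::new(raw) {
        None => Ok(()),
        Some(code) => Err(CUerror(code)),
    }
}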
+impl From<hip_runtime_sys::hipErrorCode_t> for CUerror {
+ fn from(error: hip_runtime_sys::hipErrorCode_t) -> Self {
+ Self(error.0)
+ }
+}
+unsafe impl Send for CUdeviceptr {}
+unsafe impl Sync for CUdeviceptr {}
+unsafe impl Send for CUcontext {}
+unsafe impl Sync for CUcontext {}
+unsafe impl Send for CUstream {}
+unsafe impl Sync for CUstream {}
+unsafe impl Send for CUmodule {}
+unsafe impl Sync for CUmodule {}
+unsafe impl Send for CUfunction {}
+unsafe impl Sync for CUfunction {}
+unsafe impl Send for CUlibrary {}
+unsafe impl Sync for CUlibrary {}
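Taken together, the From<hip_runtime_sys::hipErrorCode_t> impl and the CUresultConsts trait above let the ZLUDA side propagate HIP failures with the ? operator and compare results against the familiar CUDA_ERROR_* names. A hedged usage sketch, assuming hip_runtime_sys reports status in the same Result-based style (forward_status is illustrative, not part of the patch):

fn forward_status(hip_status: Result<(), hip_runtime_sys::hipErrorCode_t>) -> CUresult {
    // From<hipErrorCode_t> for CUerror reuses the numeric error code unchanged.
    hip_status.map_err(CUerror::from)
}

With CUresultConsts in scope, a caller can then write checks such as result == CUresult::ERROR_OUT_OF_MEMORY instead of matching on raw integers.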
diff --git a/cuda_types/src/lib.rs b/cuda_types/src/lib.rs
index 2c2716a..cd8ce24 100644
--- a/cuda_types/src/lib.rs
+++ b/cuda_types/src/lib.rs
@@ -1,8110 +1,2 @@
-// Generated automatically by zluda_bindgen
-// DO NOT EDIT MANUALLY
-#![allow(warnings)]
-pub const CUDA_VERSION: u32 = 12040;
-pub const CU_IPC_HANDLE_SIZE: u32 = 64;
-pub const CU_COMPUTE_ACCELERATED_TARGET_BASE: u32 = 65536;
-pub const CU_GRAPH_COND_ASSIGN_DEFAULT: u32 = 1;
-pub const CU_GRAPH_KERNEL_NODE_PORT_DEFAULT: u32 = 0;
-pub const CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC: u32 = 1;
-pub const CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER: u32 = 2;
-pub const CU_MEMHOSTALLOC_PORTABLE: u32 = 1;
-pub const CU_MEMHOSTALLOC_DEVICEMAP: u32 = 2;
-pub const CU_MEMHOSTALLOC_WRITECOMBINED: u32 = 4;
-pub const CU_MEMHOSTREGISTER_PORTABLE: u32 = 1;
-pub const CU_MEMHOSTREGISTER_DEVICEMAP: u32 = 2;
-pub const CU_MEMHOSTREGISTER_IOMEMORY: u32 = 4;
-pub const CU_MEMHOSTREGISTER_READ_ONLY: u32 = 8;
-pub const CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL: u32 = 1;
-pub const CU_TENSOR_MAP_NUM_QWORDS: u32 = 16;
-pub const CUDA_EXTERNAL_MEMORY_DEDICATED: u32 = 1;
-pub const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC: u32 = 1;
-pub const CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC: u32 = 2;
-pub const CUDA_NVSCISYNC_ATTR_SIGNAL: u32 = 1;
-pub const CUDA_NVSCISYNC_ATTR_WAIT: u32 = 2;
-pub const CU_MEM_CREATE_USAGE_TILE_POOL: u32 = 1;
-pub const CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC: u32 = 1;
-pub const CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC: u32 = 2;
-pub const CUDA_ARRAY3D_LAYERED: u32 = 1;
-pub const CUDA_ARRAY3D_2DARRAY: u32 = 1;
-pub const CUDA_ARRAY3D_SURFACE_LDST: u32 = 2;
-pub const CUDA_ARRAY3D_CUBEMAP: u32 = 4;
-pub const CUDA_ARRAY3D_TEXTURE_GATHER: u32 = 8;
-pub const CUDA_ARRAY3D_DEPTH_TEXTURE: u32 = 16;
-pub const CUDA_ARRAY3D_COLOR_ATTACHMENT: u32 = 32;
-pub const CUDA_ARRAY3D_SPARSE: u32 = 64;
-pub const CUDA_ARRAY3D_DEFERRED_MAPPING: u32 = 128;
-pub const CU_TRSA_OVERRIDE_FORMAT: u32 = 1;
-pub const CU_TRSF_READ_AS_INTEGER: u32 = 1;
-pub const CU_TRSF_NORMALIZED_COORDINATES: u32 = 2;
-pub const CU_TRSF_SRGB: u32 = 16;
-pub const CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION: u32 = 32;
-pub const CU_TRSF_SEAMLESS_CUBEMAP: u32 = 64;
-pub const CU_LAUNCH_PARAM_END_AS_INT: u32 = 0;
-pub const CU_LAUNCH_PARAM_BUFFER_POINTER_AS_INT: u32 = 1;
-pub const CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT: u32 = 2;
-pub const CU_PARAM_TR_DEFAULT: i32 = -1;
-pub const CUDA_EGL_INFINITE_TIMEOUT: u32 = 4294967295;
-pub type cuuint32_t = u32;
-pub type cuuint64_t = u64;
-#[repr(transparent)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUdeviceptr_v2(pub *mut ::core::ffi::c_void);
-pub type CUdeviceptr = CUdeviceptr_v2;
-pub type CUdevice_v1 = ::core::ffi::c_int;
-pub type CUdevice = CUdevice_v1;
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUctx_st {
- _unused: [u8; 0],
-}
-#[repr(transparent)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUcontext(pub *mut CUctx_st);
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUmod_st {
- _unused: [u8; 0],
-}
-#[repr(transparent)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmodule(pub *mut CUmod_st);
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUfunc_st {
- _unused: [u8; 0],
-}
-#[repr(transparent)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUfunction(pub *mut CUfunc_st);
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUlib_st {
- _unused: [u8; 0],
-}
-#[repr(transparent)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUlibrary(pub *mut CUlib_st);
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUkern_st {
- _unused: [u8; 0],
-}
-pub type CUkernel = *mut CUkern_st;
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUarray_st {
- _unused: [u8; 0],
-}
-pub type CUarray = *mut CUarray_st;
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUmipmappedArray_st {
- _unused: [u8; 0],
-}
-pub type CUmipmappedArray = *mut CUmipmappedArray_st;
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUtexref_st {
- _unused: [u8; 0],
-}
-pub type CUtexref = *mut CUtexref_st;
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUsurfref_st {
- _unused: [u8; 0],
-}
-pub type CUsurfref = *mut CUsurfref_st;
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUevent_st {
- _unused: [u8; 0],
-}
-pub type CUevent = *mut CUevent_st;
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUstream_st {
- _unused: [u8; 0],
-}
-#[repr(transparent)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUstream(pub *mut CUstream_st);
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUgraphicsResource_st {
- _unused: [u8; 0],
-}
-pub type CUgraphicsResource = *mut CUgraphicsResource_st;
-pub type CUtexObject_v1 = ::core::ffi::c_ulonglong;
-pub type CUtexObject = CUtexObject_v1;
-pub type CUsurfObject_v1 = ::core::ffi::c_ulonglong;
-pub type CUsurfObject = CUsurfObject_v1;
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUextMemory_st {
- _unused: [u8; 0],
-}
-pub type CUexternalMemory = *mut CUextMemory_st;
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUextSemaphore_st {
- _unused: [u8; 0],
-}
-pub type CUexternalSemaphore = *mut CUextSemaphore_st;
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUgraph_st {
- _unused: [u8; 0],
-}
-pub type CUgraph = *mut CUgraph_st;
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUgraphNode_st {
- _unused: [u8; 0],
-}
-pub type CUgraphNode = *mut CUgraphNode_st;
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUgraphExec_st {
- _unused: [u8; 0],
-}
-pub type CUgraphExec = *mut CUgraphExec_st;
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUmemPoolHandle_st {
- _unused: [u8; 0],
-}
-pub type CUmemoryPool = *mut CUmemPoolHandle_st;
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUuserObject_st {
- _unused: [u8; 0],
-}
-pub type CUuserObject = *mut CUuserObject_st;
-pub type CUgraphConditionalHandle = cuuint64_t;
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUgraphDeviceUpdatableNode_st {
- _unused: [u8; 0],
-}
-pub type CUgraphDeviceNode = *mut CUgraphDeviceUpdatableNode_st;
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUasyncCallbackEntry_st {
- _unused: [u8; 0],
-}
-pub type CUasyncCallbackHandle = *mut CUasyncCallbackEntry_st;
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUuuid_st {
- pub bytes: [::core::ffi::c_uchar; 16usize],
-}
-pub type CUuuid = CUuuid_st;
-/** Fabric handle - An opaque handle representing a memory allocation
- that can be exported to processes in same or different nodes. For IPC
- between processes on different nodes they must be connected via the
- NVSwitch fabric.*/
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmemFabricHandle_st {
- pub data: [::core::ffi::c_uchar; 64usize],
-}
-/** Fabric handle - An opaque handle representing a memory allocation
- that can be exported to processes in same or different nodes. For IPC
- between processes on different nodes they must be connected via the
- NVSwitch fabric.*/
-pub type CUmemFabricHandle_v1 = CUmemFabricHandle_st;
-/** Fabric handle - An opaque handle representing a memory allocation
- that can be exported to processes in same or different nodes. For IPC
- between processes on different nodes they must be connected via the
- NVSwitch fabric.*/
-pub type CUmemFabricHandle = CUmemFabricHandle_v1;
-/// CUDA IPC event handle
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUipcEventHandle_st {
- pub reserved: [::core::ffi::c_char; 64usize],
-}
-/// CUDA IPC event handle
-pub type CUipcEventHandle_v1 = CUipcEventHandle_st;
-/// CUDA IPC event handle
-pub type CUipcEventHandle = CUipcEventHandle_v1;
-/// CUDA IPC mem handle
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUipcMemHandle_st {
- pub reserved: [::core::ffi::c_char; 64usize],
-}
-/// CUDA IPC mem handle
-pub type CUipcMemHandle_v1 = CUipcMemHandle_st;
-/// CUDA IPC mem handle
-pub type CUipcMemHandle = CUipcMemHandle_v1;
-impl CUipcMem_flags_enum {
- ///< Automatically enable peer access between remote devices as needed
- pub const CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS: CUipcMem_flags_enum = CUipcMem_flags_enum(
- 1,
- );
-}
-#[repr(transparent)]
-/// CUDA Ipc Mem Flags
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUipcMem_flags_enum(pub ::core::ffi::c_uint);
-/// CUDA Ipc Mem Flags
-pub use self::CUipcMem_flags_enum as CUipcMem_flags;
-impl CUmemAttach_flags_enum {
- ///< Memory can be accessed by any stream on any device
- pub const CU_MEM_ATTACH_GLOBAL: CUmemAttach_flags_enum = CUmemAttach_flags_enum(1);
-}
-impl CUmemAttach_flags_enum {
- ///< Memory cannot be accessed by any stream on any device
- pub const CU_MEM_ATTACH_HOST: CUmemAttach_flags_enum = CUmemAttach_flags_enum(2);
-}
-impl CUmemAttach_flags_enum {
- ///< Memory can only be accessed by a single stream on the associated device
- pub const CU_MEM_ATTACH_SINGLE: CUmemAttach_flags_enum = CUmemAttach_flags_enum(4);
-}
-#[repr(transparent)]
-/// CUDA Mem Attach Flags
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmemAttach_flags_enum(pub ::core::ffi::c_uint);
-/// CUDA Mem Attach Flags
-pub use self::CUmemAttach_flags_enum as CUmemAttach_flags;
-impl CUctx_flags_enum {
- ///< Automatic scheduling
- pub const CU_CTX_SCHED_AUTO: CUctx_flags_enum = CUctx_flags_enum(0);
-}
-impl CUctx_flags_enum {
- ///< Set spin as default scheduling
- pub const CU_CTX_SCHED_SPIN: CUctx_flags_enum = CUctx_flags_enum(1);
-}
-impl CUctx_flags_enum {
- ///< Set yield as default scheduling
- pub const CU_CTX_SCHED_YIELD: CUctx_flags_enum = CUctx_flags_enum(2);
-}
-impl CUctx_flags_enum {
- ///< Set blocking synchronization as default scheduling
- pub const CU_CTX_SCHED_BLOCKING_SYNC: CUctx_flags_enum = CUctx_flags_enum(4);
-}
-impl CUctx_flags_enum {
- /**< Set blocking synchronization as default scheduling
- \deprecated This flag was deprecated as of CUDA 4.0
- and was replaced with ::CU_CTX_SCHED_BLOCKING_SYNC.*/
- pub const CU_CTX_BLOCKING_SYNC: CUctx_flags_enum = CUctx_flags_enum(4);
-}
-impl CUctx_flags_enum {
- pub const CU_CTX_SCHED_MASK: CUctx_flags_enum = CUctx_flags_enum(7);
-}
-impl CUctx_flags_enum {
- /**< \deprecated This flag was deprecated as of CUDA 11.0
- and it no longer has any effect. All contexts
- as of CUDA 3.2 behave as though the flag is enabled.*/
- pub const CU_CTX_MAP_HOST: CUctx_flags_enum = CUctx_flags_enum(8);
-}
-impl CUctx_flags_enum {
- ///< Keep local memory allocation after launch
- pub const CU_CTX_LMEM_RESIZE_TO_MAX: CUctx_flags_enum = CUctx_flags_enum(16);
-}
-impl CUctx_flags_enum {
- ///< Trigger coredumps from exceptions in this context
- pub const CU_CTX_COREDUMP_ENABLE: CUctx_flags_enum = CUctx_flags_enum(32);
-}
-impl CUctx_flags_enum {
- ///< Enable user pipe to trigger coredumps in this context
- pub const CU_CTX_USER_COREDUMP_ENABLE: CUctx_flags_enum = CUctx_flags_enum(64);
-}
-impl CUctx_flags_enum {
- ///< Ensure synchronous memory operations on this context will synchronize
- pub const CU_CTX_SYNC_MEMOPS: CUctx_flags_enum = CUctx_flags_enum(128);
-}
-impl CUctx_flags_enum {
- pub const CU_CTX_FLAGS_MASK: CUctx_flags_enum = CUctx_flags_enum(255);
-}
-#[repr(transparent)]
-/// Context creation flags
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUctx_flags_enum(pub ::core::ffi::c_uint);
-/// Context creation flags
-pub use self::CUctx_flags_enum as CUctx_flags;
-impl CUevent_sched_flags_enum {
- ///< Automatic scheduling
- pub const CU_EVENT_SCHED_AUTO: CUevent_sched_flags_enum = CUevent_sched_flags_enum(
- 0,
- );
-}
-impl CUevent_sched_flags_enum {
- ///< Set spin as default scheduling
- pub const CU_EVENT_SCHED_SPIN: CUevent_sched_flags_enum = CUevent_sched_flags_enum(
- 1,
- );
-}
-impl CUevent_sched_flags_enum {
- ///< Set yield as default scheduling
- pub const CU_EVENT_SCHED_YIELD: CUevent_sched_flags_enum = CUevent_sched_flags_enum(
- 2,
- );
-}
-impl CUevent_sched_flags_enum {
- ///< Set blocking synchronization as default scheduling
- pub const CU_EVENT_SCHED_BLOCKING_SYNC: CUevent_sched_flags_enum = CUevent_sched_flags_enum(
- 4,
- );
-}
-#[repr(transparent)]
-/// Event sched flags
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUevent_sched_flags_enum(pub ::core::ffi::c_uint);
-/// Event sched flags
-pub use self::CUevent_sched_flags_enum as CUevent_sched_flags;
-impl CUstream_flags_enum {
- ///< Default stream flag
- pub const CU_STREAM_DEFAULT: CUstream_flags_enum = CUstream_flags_enum(0);
-}
-impl CUstream_flags_enum {
- ///< Stream does not synchronize with stream 0 (the NULL stream)
- pub const CU_STREAM_NON_BLOCKING: CUstream_flags_enum = CUstream_flags_enum(1);
-}
-#[repr(transparent)]
-/// Stream creation flags
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUstream_flags_enum(pub ::core::ffi::c_uint);
-/// Stream creation flags
-pub use self::CUstream_flags_enum as CUstream_flags;
-impl CUevent_flags_enum {
- ///< Default event flag
- pub const CU_EVENT_DEFAULT: CUevent_flags_enum = CUevent_flags_enum(0);
-}
-impl CUevent_flags_enum {
- ///< Event uses blocking synchronization
- pub const CU_EVENT_BLOCKING_SYNC: CUevent_flags_enum = CUevent_flags_enum(1);
-}
-impl CUevent_flags_enum {
- ///< Event will not record timing data
- pub const CU_EVENT_DISABLE_TIMING: CUevent_flags_enum = CUevent_flags_enum(2);
-}
-impl CUevent_flags_enum {
- ///< Event is suitable for interprocess use. CU_EVENT_DISABLE_TIMING must be set
- pub const CU_EVENT_INTERPROCESS: CUevent_flags_enum = CUevent_flags_enum(4);
-}
-#[repr(transparent)]
-/// Event creation flags
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUevent_flags_enum(pub ::core::ffi::c_uint);
-/// Event creation flags
-pub use self::CUevent_flags_enum as CUevent_flags;
-impl CUevent_record_flags_enum {
- ///< Default event record flag
- pub const CU_EVENT_RECORD_DEFAULT: CUevent_record_flags_enum = CUevent_record_flags_enum(
- 0,
- );
-}
-impl CUevent_record_flags_enum {
- /**< When using stream capture, create an event record node
- instead of the default behavior. This flag is invalid
- when used outside of capture.*/
- pub const CU_EVENT_RECORD_EXTERNAL: CUevent_record_flags_enum = CUevent_record_flags_enum(
- 1,
- );
-}
-#[repr(transparent)]
-/// Event record flags
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUevent_record_flags_enum(pub ::core::ffi::c_uint);
-/// Event record flags
-pub use self::CUevent_record_flags_enum as CUevent_record_flags;
-impl CUevent_wait_flags_enum {
- ///< Default event wait flag
- pub const CU_EVENT_WAIT_DEFAULT: CUevent_wait_flags_enum = CUevent_wait_flags_enum(
- 0,
- );
-}
-impl CUevent_wait_flags_enum {
- /**< When using stream capture, create an event wait node
- instead of the default behavior. This flag is invalid
- when used outside of capture.*/
- pub const CU_EVENT_WAIT_EXTERNAL: CUevent_wait_flags_enum = CUevent_wait_flags_enum(
- 1,
- );
-}
-#[repr(transparent)]
-/// Event wait flags
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUevent_wait_flags_enum(pub ::core::ffi::c_uint);
-/// Event wait flags
-pub use self::CUevent_wait_flags_enum as CUevent_wait_flags;
-impl CUstreamWaitValue_flags_enum {
- /**< Wait until (int32_t)(*addr - value) >= 0 (or int64_t for 64 bit
-values). Note this is a cyclic comparison which ignores wraparound.
-(Default behavior.)*/
- pub const CU_STREAM_WAIT_VALUE_GEQ: CUstreamWaitValue_flags_enum = CUstreamWaitValue_flags_enum(
- 0,
- );
-}
-impl CUstreamWaitValue_flags_enum {
- ///< Wait until *addr == value.
- pub const CU_STREAM_WAIT_VALUE_EQ: CUstreamWaitValue_flags_enum = CUstreamWaitValue_flags_enum(
- 1,
- );
-}
-impl CUstreamWaitValue_flags_enum {
- ///< Wait until (*addr & value) != 0.
- pub const CU_STREAM_WAIT_VALUE_AND: CUstreamWaitValue_flags_enum = CUstreamWaitValue_flags_enum(
- 2,
- );
-}
-impl CUstreamWaitValue_flags_enum {
- /**< Wait until ~(*addr | value) != 0. Support for this operation can be
-queried with ::cuDeviceGetAttribute() and
-::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR.*/
- pub const CU_STREAM_WAIT_VALUE_NOR: CUstreamWaitValue_flags_enum = CUstreamWaitValue_flags_enum(
- 3,
- );
-}
-impl CUstreamWaitValue_flags_enum {
- /**< Follow the wait operation with a flush of outstanding remote writes. This
-means that, if a remote write operation is guaranteed to have reached the
-device before the wait can be satisfied, that write is guaranteed to be
-visible to downstream device work. The device is permitted to reorder
-remote writes internally. For example, this flag would be required if
-two remote writes arrive in a defined order, the wait is satisfied by the
-second write, and downstream work needs to observe the first write.
-Support for this operation is restricted to selected platforms and can be
-queried with ::CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES.*/
- pub const CU_STREAM_WAIT_VALUE_FLUSH: CUstreamWaitValue_flags_enum = CUstreamWaitValue_flags_enum(
- 1073741824,
- );
-}
-#[repr(transparent)]
-/// Flags for ::cuStreamWaitValue32 and ::cuStreamWaitValue64
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUstreamWaitValue_flags_enum(pub ::core::ffi::c_uint);
-/// Flags for ::cuStreamWaitValue32 and ::cuStreamWaitValue64
-pub use self::CUstreamWaitValue_flags_enum as CUstreamWaitValue_flags;
-impl CUstreamWriteValue_flags_enum {
- ///< Default behavior
- pub const CU_STREAM_WRITE_VALUE_DEFAULT: CUstreamWriteValue_flags_enum = CUstreamWriteValue_flags_enum(
- 0,
- );
-}
-impl CUstreamWriteValue_flags_enum {
- /**< Permits the write to be reordered with writes which were issued
-before it, as a performance optimization. Normally,
-::cuStreamWriteValue32 will provide a memory fence before the
-write, which has similar semantics to
-__threadfence_system() but is scoped to the stream
-rather than a CUDA thread.
-This flag is not supported in the v2 API.*/
- pub const CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER: CUstreamWriteValue_flags_enum = CUstreamWriteValue_flags_enum(
- 1,
- );
-}
-#[repr(transparent)]
-/// Flags for ::cuStreamWriteValue32
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUstreamWriteValue_flags_enum(pub ::core::ffi::c_uint);
-/// Flags for ::cuStreamWriteValue32
-pub use self::CUstreamWriteValue_flags_enum as CUstreamWriteValue_flags;
-impl CUstreamBatchMemOpType_enum {
- ///< Represents a ::cuStreamWaitValue32 operation
- pub const CU_STREAM_MEM_OP_WAIT_VALUE_32: CUstreamBatchMemOpType_enum = CUstreamBatchMemOpType_enum(
- 1,
- );
-}
-impl CUstreamBatchMemOpType_enum {
- ///< Represents a ::cuStreamWriteValue32 operation
- pub const CU_STREAM_MEM_OP_WRITE_VALUE_32: CUstreamBatchMemOpType_enum = CUstreamBatchMemOpType_enum(
- 2,
- );
-}
-impl CUstreamBatchMemOpType_enum {
- ///< Represents a ::cuStreamWaitValue64 operation
- pub const CU_STREAM_MEM_OP_WAIT_VALUE_64: CUstreamBatchMemOpType_enum = CUstreamBatchMemOpType_enum(
- 4,
- );
-}
-impl CUstreamBatchMemOpType_enum {
- ///< Represents a ::cuStreamWriteValue64 operation
- pub const CU_STREAM_MEM_OP_WRITE_VALUE_64: CUstreamBatchMemOpType_enum = CUstreamBatchMemOpType_enum(
- 5,
- );
-}
-impl CUstreamBatchMemOpType_enum {
- ///< Insert a memory barrier of the specified type
- pub const CU_STREAM_MEM_OP_BARRIER: CUstreamBatchMemOpType_enum = CUstreamBatchMemOpType_enum(
- 6,
- );
-}
-impl CUstreamBatchMemOpType_enum {
- /**< This has the same effect as ::CU_STREAM_WAIT_VALUE_FLUSH, but as a
-standalone operation.*/
- pub const CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES: CUstreamBatchMemOpType_enum = CUstreamBatchMemOpType_enum(
- 3,
- );
-}
-#[repr(transparent)]
-/// Operations for ::cuStreamBatchMemOp
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUstreamBatchMemOpType_enum(pub ::core::ffi::c_uint);
-/// Operations for ::cuStreamBatchMemOp
-pub use self::CUstreamBatchMemOpType_enum as CUstreamBatchMemOpType;
-impl CUstreamMemoryBarrier_flags_enum {
- ///< System-wide memory barrier.
- pub const CU_STREAM_MEMORY_BARRIER_TYPE_SYS: CUstreamMemoryBarrier_flags_enum = CUstreamMemoryBarrier_flags_enum(
- 0,
- );
-}
-impl CUstreamMemoryBarrier_flags_enum {
- ///< Limit memory barrier scope to the GPU.
- pub const CU_STREAM_MEMORY_BARRIER_TYPE_GPU: CUstreamMemoryBarrier_flags_enum = CUstreamMemoryBarrier_flags_enum(
- 1,
- );
-}
-#[repr(transparent)]
-/// Flags for ::cuStreamMemoryBarrier
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUstreamMemoryBarrier_flags_enum(pub ::core::ffi::c_uint);
-/// Flags for ::cuStreamMemoryBarrier
-pub use self::CUstreamMemoryBarrier_flags_enum as CUstreamMemoryBarrier_flags;
-/// Per-operation parameters for ::cuStreamBatchMemOp
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub union CUstreamBatchMemOpParams_union {
- pub operation: CUstreamBatchMemOpType,
- pub waitValue: CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st,
- pub writeValue: CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st,
- pub flushRemoteWrites: CUstreamBatchMemOpParams_union_CUstreamMemOpFlushRemoteWritesParams_st,
- pub memoryBarrier: CUstreamBatchMemOpParams_union_CUstreamMemOpMemoryBarrierParams_st,
- pub pad: [cuuint64_t; 6usize],
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st {
- pub operation: CUstreamBatchMemOpType,
- pub address: CUdeviceptr,
- pub __bindgen_anon_1: CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st__bindgen_ty_1,
- pub flags: ::core::ffi::c_uint,
- ///< For driver internal use. Initial value is unimportant.
- pub alias: CUdeviceptr,
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub union CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st__bindgen_ty_1 {
- pub value: cuuint32_t,
- pub value64: cuuint64_t,
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st {
- pub operation: CUstreamBatchMemOpType,
- pub address: CUdeviceptr,
- pub __bindgen_anon_1: CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st__bindgen_ty_1,
- pub flags: ::core::ffi::c_uint,
- ///< For driver internal use. Initial value is unimportant.
- pub alias: CUdeviceptr,
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub union CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st__bindgen_ty_1 {
- pub value: cuuint32_t,
- pub value64: cuuint64_t,
-}
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpFlushRemoteWritesParams_st {
- pub operation: CUstreamBatchMemOpType,
- pub flags: ::core::ffi::c_uint,
-}
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpMemoryBarrierParams_st {
- pub operation: CUstreamBatchMemOpType,
- pub flags: ::core::ffi::c_uint,
-}
-/// Per-operation parameters for ::cuStreamBatchMemOp
-pub type CUstreamBatchMemOpParams_v1 = CUstreamBatchMemOpParams_union;
-/// Per-operation parameters for ::cuStreamBatchMemOp
-pub type CUstreamBatchMemOpParams = CUstreamBatchMemOpParams_v1;
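// Editorial sketch, not part of the generated bindings or of this commit: how a
// caller might fill one entry of the batch passed to ::cuStreamBatchMemOp.
// `CUdeviceptr` and `cuuint64_t` are defined earlier in this generated file; the
// helper name below is invented for illustration. Initializing a union through a
// single named field is safe Rust, and the unused `pad` bytes are simply left as-is.
fn write_value_64_entry(address: CUdeviceptr, value: cuuint64_t) -> CUstreamBatchMemOpParams {
    CUstreamBatchMemOpParams {
        writeValue: CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st {
            operation: CUstreamBatchMemOpType::CU_STREAM_MEM_OP_WRITE_VALUE_64,
            address,
            __bindgen_anon_1: CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st__bindgen_ty_1 {
                value64: value,
            },
            flags: 0,
            // Documented above as "for driver internal use"; the initial value is unimportant.
            alias: address,
        },
    }
}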
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st {
- pub ctx: CUcontext,
- pub count: ::core::ffi::c_uint,
- pub paramArray: *mut CUstreamBatchMemOpParams,
- pub flags: ::core::ffi::c_uint,
-}
-pub type CUDA_BATCH_MEM_OP_NODE_PARAMS_v1 = CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st;
-pub type CUDA_BATCH_MEM_OP_NODE_PARAMS = CUDA_BATCH_MEM_OP_NODE_PARAMS_v1;
-/// Batch memory operation node parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st {
- ///< Context to use for the operations.
- pub ctx: CUcontext,
- ///< Number of operations in paramArray.
- pub count: ::core::ffi::c_uint,
- ///< Array of batch memory operations.
- pub paramArray: *mut CUstreamBatchMemOpParams,
- ///< Flags to control the node.
- pub flags: ::core::ffi::c_uint,
-}
-/// Batch memory operation node parameters
-pub type CUDA_BATCH_MEM_OP_NODE_PARAMS_v2 = CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st;
-impl CUoccupancy_flags_enum {
- ///< Default behavior
- pub const CU_OCCUPANCY_DEFAULT: CUoccupancy_flags_enum = CUoccupancy_flags_enum(0);
-}
-impl CUoccupancy_flags_enum {
- ///< Assume global caching is enabled and cannot be automatically turned off
- pub const CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE: CUoccupancy_flags_enum = CUoccupancy_flags_enum(
- 1,
- );
-}
-#[repr(transparent)]
-/// Occupancy calculator flag
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUoccupancy_flags_enum(pub ::core::ffi::c_uint);
-/// Occupancy calculator flag
-pub use self::CUoccupancy_flags_enum as CUoccupancy_flags;
-impl CUstreamUpdateCaptureDependencies_flags_enum {
- ///< Add new nodes to the dependency set
- pub const CU_STREAM_ADD_CAPTURE_DEPENDENCIES: CUstreamUpdateCaptureDependencies_flags_enum = CUstreamUpdateCaptureDependencies_flags_enum(
- 0,
- );
-}
-impl CUstreamUpdateCaptureDependencies_flags_enum {
- ///< Replace the dependency set with the new nodes
- pub const CU_STREAM_SET_CAPTURE_DEPENDENCIES: CUstreamUpdateCaptureDependencies_flags_enum = CUstreamUpdateCaptureDependencies_flags_enum(
- 1,
- );
-}
-#[repr(transparent)]
-/// Flags for ::cuStreamUpdateCaptureDependencies
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUstreamUpdateCaptureDependencies_flags_enum(pub ::core::ffi::c_uint);
-/// Flags for ::cuStreamUpdateCaptureDependencies
-pub use self::CUstreamUpdateCaptureDependencies_flags_enum as CUstreamUpdateCaptureDependencies_flags;
-impl CUasyncNotificationType_enum {
- pub const CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET: CUasyncNotificationType_enum = CUasyncNotificationType_enum(
- 1,
- );
-}
-#[repr(transparent)]
-/// Types of async notification that can be sent
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUasyncNotificationType_enum(pub ::core::ffi::c_uint);
-/// Types of async notification that can be sent
-pub use self::CUasyncNotificationType_enum as CUasyncNotificationType;
-/// Information passed to the user via the async notification callback
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct CUasyncNotificationInfo_st {
- pub type_: CUasyncNotificationType,
- pub info: CUasyncNotificationInfo_st__bindgen_ty_1,
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub union CUasyncNotificationInfo_st__bindgen_ty_1 {
- pub overBudget: CUasyncNotificationInfo_st__bindgen_ty_1__bindgen_ty_1,
-}
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUasyncNotificationInfo_st__bindgen_ty_1__bindgen_ty_1 {
- pub bytesOverBudget: ::core::ffi::c_ulonglong,
-}
-/// Information passed to the user via the async notification callback
-pub type CUasyncNotificationInfo = CUasyncNotificationInfo_st;
-/** CUDA async notification callback
- \param info Information describing what actions to take as a result of this trim notification.
- \param userData Pointer to user defined data provided at registration.
- \param callback The callback handle associated with this specific callback.*/
-pub type CUasyncCallback = ::core::option::Option<
- unsafe extern "system" fn(
- info: *mut CUasyncNotificationInfo,
- userData: *mut ::core::ffi::c_void,
- callback: CUasyncCallbackHandle,
- ),
->;
-impl CUarray_format_enum {
- ///< Unsigned 8-bit integers
- pub const CU_AD_FORMAT_UNSIGNED_INT8: CUarray_format_enum = CUarray_format_enum(1);
-}
-impl CUarray_format_enum {
- ///< Unsigned 16-bit integers
- pub const CU_AD_FORMAT_UNSIGNED_INT16: CUarray_format_enum = CUarray_format_enum(2);
-}
-impl CUarray_format_enum {
- ///< Unsigned 32-bit integers
- pub const CU_AD_FORMAT_UNSIGNED_INT32: CUarray_format_enum = CUarray_format_enum(3);
-}
-impl CUarray_format_enum {
- ///< Signed 8-bit integers
- pub const CU_AD_FORMAT_SIGNED_INT8: CUarray_format_enum = CUarray_format_enum(8);
-}
-impl CUarray_format_enum {
- ///< Signed 16-bit integers
- pub const CU_AD_FORMAT_SIGNED_INT16: CUarray_format_enum = CUarray_format_enum(9);
-}
-impl CUarray_format_enum {
- ///< Signed 32-bit integers
- pub const CU_AD_FORMAT_SIGNED_INT32: CUarray_format_enum = CUarray_format_enum(10);
-}
-impl CUarray_format_enum {
- ///< 16-bit floating point
- pub const CU_AD_FORMAT_HALF: CUarray_format_enum = CUarray_format_enum(16);
-}
-impl CUarray_format_enum {
- ///< 32-bit floating point
- pub const CU_AD_FORMAT_FLOAT: CUarray_format_enum = CUarray_format_enum(32);
-}
-impl CUarray_format_enum {
- ///< 8-bit YUV planar format, with 4:2:0 sampling
- pub const CU_AD_FORMAT_NV12: CUarray_format_enum = CUarray_format_enum(176);
-}
-impl CUarray_format_enum {
- ///< 1 channel unsigned 8-bit normalized integer
- pub const CU_AD_FORMAT_UNORM_INT8X1: CUarray_format_enum = CUarray_format_enum(192);
-}
-impl CUarray_format_enum {
- ///< 2 channel unsigned 8-bit normalized integer
- pub const CU_AD_FORMAT_UNORM_INT8X2: CUarray_format_enum = CUarray_format_enum(193);
-}
-impl CUarray_format_enum {
- ///< 4 channel unsigned 8-bit normalized integer
- pub const CU_AD_FORMAT_UNORM_INT8X4: CUarray_format_enum = CUarray_format_enum(194);
-}
-impl CUarray_format_enum {
- ///< 1 channel unsigned 16-bit normalized integer
- pub const CU_AD_FORMAT_UNORM_INT16X1: CUarray_format_enum = CUarray_format_enum(195);
-}
-impl CUarray_format_enum {
- ///< 2 channel unsigned 16-bit normalized integer
- pub const CU_AD_FORMAT_UNORM_INT16X2: CUarray_format_enum = CUarray_format_enum(196);
-}
-impl CUarray_format_enum {
- ///< 4 channel unsigned 16-bit normalized integer
- pub const CU_AD_FORMAT_UNORM_INT16X4: CUarray_format_enum = CUarray_format_enum(197);
-}
-impl CUarray_format_enum {
- ///< 1 channel signed 8-bit normalized integer
- pub const CU_AD_FORMAT_SNORM_INT8X1: CUarray_format_enum = CUarray_format_enum(198);
-}
-impl CUarray_format_enum {
- ///< 2 channel signed 8-bit normalized integer
- pub const CU_AD_FORMAT_SNORM_INT8X2: CUarray_format_enum = CUarray_format_enum(199);
-}
-impl CUarray_format_enum {
- ///< 4 channel signed 8-bit normalized integer
- pub const CU_AD_FORMAT_SNORM_INT8X4: CUarray_format_enum = CUarray_format_enum(200);
-}
-impl CUarray_format_enum {
- ///< 1 channel signed 16-bit normalized integer
- pub const CU_AD_FORMAT_SNORM_INT16X1: CUarray_format_enum = CUarray_format_enum(201);
-}
-impl CUarray_format_enum {
- ///< 2 channel signed 16-bit normalized integer
- pub const CU_AD_FORMAT_SNORM_INT16X2: CUarray_format_enum = CUarray_format_enum(202);
-}
-impl CUarray_format_enum {
- ///< 4 channel signed 16-bit normalized integer
- pub const CU_AD_FORMAT_SNORM_INT16X4: CUarray_format_enum = CUarray_format_enum(203);
-}
-impl CUarray_format_enum {
- ///< 4 channel unsigned normalized block-compressed (BC1 compression) format
- pub const CU_AD_FORMAT_BC1_UNORM: CUarray_format_enum = CUarray_format_enum(145);
-}
-impl CUarray_format_enum {
- ///< 4 channel unsigned normalized block-compressed (BC1 compression) format with sRGB encoding
- pub const CU_AD_FORMAT_BC1_UNORM_SRGB: CUarray_format_enum = CUarray_format_enum(
- 146,
- );
-}
-impl CUarray_format_enum {
- ///< 4 channel unsigned normalized block-compressed (BC2 compression) format
- pub const CU_AD_FORMAT_BC2_UNORM: CUarray_format_enum = CUarray_format_enum(147);
-}
-impl CUarray_format_enum {
- ///< 4 channel unsigned normalized block-compressed (BC2 compression) format with sRGB encoding
- pub const CU_AD_FORMAT_BC2_UNORM_SRGB: CUarray_format_enum = CUarray_format_enum(
- 148,
- );
-}
-impl CUarray_format_enum {
- ///< 4 channel unsigned normalized block-compressed (BC3 compression) format
- pub const CU_AD_FORMAT_BC3_UNORM: CUarray_format_enum = CUarray_format_enum(149);
-}
-impl CUarray_format_enum {
- ///< 4 channel unsigned normalized block-compressed (BC3 compression) format with sRGB encoding
- pub const CU_AD_FORMAT_BC3_UNORM_SRGB: CUarray_format_enum = CUarray_format_enum(
- 150,
- );
-}
-impl CUarray_format_enum {
- ///< 1 channel unsigned normalized block-compressed (BC4 compression) format
- pub const CU_AD_FORMAT_BC4_UNORM: CUarray_format_enum = CUarray_format_enum(151);
-}
-impl CUarray_format_enum {
- ///< 1 channel signed normalized block-compressed (BC4 compression) format
- pub const CU_AD_FORMAT_BC4_SNORM: CUarray_format_enum = CUarray_format_enum(152);
-}
-impl CUarray_format_enum {
- ///< 2 channel unsigned normalized block-compressed (BC5 compression) format
- pub const CU_AD_FORMAT_BC5_UNORM: CUarray_format_enum = CUarray_format_enum(153);
-}
-impl CUarray_format_enum {
- ///< 2 channel signed normalized block-compressed (BC5 compression) format
- pub const CU_AD_FORMAT_BC5_SNORM: CUarray_format_enum = CUarray_format_enum(154);
-}
-impl CUarray_format_enum {
- ///< 3 channel unsigned half-float block-compressed (BC6H compression) format
- pub const CU_AD_FORMAT_BC6H_UF16: CUarray_format_enum = CUarray_format_enum(155);
-}
-impl CUarray_format_enum {
- ///< 3 channel signed half-float block-compressed (BC6H compression) format
- pub const CU_AD_FORMAT_BC6H_SF16: CUarray_format_enum = CUarray_format_enum(156);
-}
-impl CUarray_format_enum {
- ///< 4 channel unsigned normalized block-compressed (BC7 compression) format
- pub const CU_AD_FORMAT_BC7_UNORM: CUarray_format_enum = CUarray_format_enum(157);
-}
-impl CUarray_format_enum {
- ///< 4 channel unsigned normalized block-compressed (BC7 compression) format with sRGB encoding
- pub const CU_AD_FORMAT_BC7_UNORM_SRGB: CUarray_format_enum = CUarray_format_enum(
- 158,
- );
-}
-#[repr(transparent)]
-/// Array formats
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUarray_format_enum(pub ::core::ffi::c_uint);
-/// Array formats
-pub use self::CUarray_format_enum as CUarray_format;
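// Editorial sketch (helper name invented, not part of the generated file): bytes per
// component for a few of the basic formats above, taken directly from their doc
// comments. Packed and block-compressed formats are intentionally left out.
fn component_size(format: CUarray_format) -> Option<usize> {
    Some(match format {
        CUarray_format::CU_AD_FORMAT_UNSIGNED_INT8 | CUarray_format::CU_AD_FORMAT_SIGNED_INT8 => 1,
        CUarray_format::CU_AD_FORMAT_UNSIGNED_INT16
        | CUarray_format::CU_AD_FORMAT_SIGNED_INT16
        | CUarray_format::CU_AD_FORMAT_HALF => 2,
        CUarray_format::CU_AD_FORMAT_UNSIGNED_INT32
        | CUarray_format::CU_AD_FORMAT_SIGNED_INT32
        | CUarray_format::CU_AD_FORMAT_FLOAT => 4,
        _ => return None,
    })
}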
-impl CUaddress_mode_enum {
- ///< Wrapping address mode
- pub const CU_TR_ADDRESS_MODE_WRAP: CUaddress_mode_enum = CUaddress_mode_enum(0);
-}
-impl CUaddress_mode_enum {
- ///< Clamp to edge address mode
- pub const CU_TR_ADDRESS_MODE_CLAMP: CUaddress_mode_enum = CUaddress_mode_enum(1);
-}
-impl CUaddress_mode_enum {
- ///< Mirror address mode
- pub const CU_TR_ADDRESS_MODE_MIRROR: CUaddress_mode_enum = CUaddress_mode_enum(2);
-}
-impl CUaddress_mode_enum {
- ///< Border address mode
- pub const CU_TR_ADDRESS_MODE_BORDER: CUaddress_mode_enum = CUaddress_mode_enum(3);
-}
-#[repr(transparent)]
-/// Texture reference addressing modes
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUaddress_mode_enum(pub ::core::ffi::c_uint);
-/// Texture reference addressing modes
-pub use self::CUaddress_mode_enum as CUaddress_mode;
-impl CUfilter_mode_enum {
- ///< Point filter mode
- pub const CU_TR_FILTER_MODE_POINT: CUfilter_mode_enum = CUfilter_mode_enum(0);
-}
-impl CUfilter_mode_enum {
- ///< Linear filter mode
- pub const CU_TR_FILTER_MODE_LINEAR: CUfilter_mode_enum = CUfilter_mode_enum(1);
-}
-#[repr(transparent)]
-/// Texture reference filtering modes
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUfilter_mode_enum(pub ::core::ffi::c_uint);
-/// Texture reference filtering modes
-pub use self::CUfilter_mode_enum as CUfilter_mode;
-impl CUdevice_attribute_enum {
- ///< Maximum number of threads per block
- pub const CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 1,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum block dimension X
- pub const CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 2,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum block dimension Y
- pub const CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 3,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum block dimension Z
- pub const CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 4,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum grid dimension X
- pub const CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 5,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum grid dimension Y
- pub const CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 6,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum grid dimension Z
- pub const CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 7,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum shared memory available per block in bytes
- pub const CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 8,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK
- pub const CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 8,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Memory available on device for __constant__ variables in a CUDA C kernel in bytes
- pub const CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 9,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Warp size in threads
- pub const CU_DEVICE_ATTRIBUTE_WARP_SIZE: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 10,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum pitch in bytes allowed by memory copies
- pub const CU_DEVICE_ATTRIBUTE_MAX_PITCH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 11,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum number of 32-bit registers available per block
- pub const CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 12,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK
- pub const CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 12,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Typical clock frequency in kilohertz
- pub const CU_DEVICE_ATTRIBUTE_CLOCK_RATE: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 13,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Alignment requirement for textures
- pub const CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 14,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device can possibly copy memory and execute a kernel concurrently. Deprecated. Use instead CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT.
- pub const CU_DEVICE_ATTRIBUTE_GPU_OVERLAP: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 15,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Number of multiprocessors on device
- pub const CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 16,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Specifies whether there is a run time limit on kernels
- pub const CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 17,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device is integrated with host memory
- pub const CU_DEVICE_ATTRIBUTE_INTEGRATED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 18,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device can map host memory into CUDA address space
- pub const CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 19,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Compute mode (See ::CUcomputemode for details)
- pub const CU_DEVICE_ATTRIBUTE_COMPUTE_MODE: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 20,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 1D texture width
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 21,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 2D texture width
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 22,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 2D texture height
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 23,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 3D texture width
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 24,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 3D texture height
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 25,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 3D texture depth
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 26,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 2D layered texture width
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 27,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 2D layered texture height
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 28,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum layers in a 2D layered texture
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 29,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 27,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 28,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 29,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Alignment requirement for surfaces
- pub const CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 30,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device can possibly execute multiple kernels concurrently
- pub const CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 31,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device has ECC support enabled
- pub const CU_DEVICE_ATTRIBUTE_ECC_ENABLED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 32,
- );
-}
-impl CUdevice_attribute_enum {
- ///< PCI bus ID of the device
- pub const CU_DEVICE_ATTRIBUTE_PCI_BUS_ID: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 33,
- );
-}
-impl CUdevice_attribute_enum {
- ///< PCI device ID of the device
- pub const CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 34,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device is using TCC driver model
- pub const CU_DEVICE_ATTRIBUTE_TCC_DRIVER: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 35,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Peak memory clock frequency in kilohertz
- pub const CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 36,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Global memory bus width in bits
- pub const CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 37,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Size of L2 cache in bytes
- pub const CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 38,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum resident threads per multiprocessor
- pub const CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 39,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Number of asynchronous engines
- pub const CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 40,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device shares a unified address space with the host
- pub const CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 41,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 1D layered texture width
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 42,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum layers in a 1D layered texture
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 43,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Deprecated, do not use.
- pub const CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 44,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 2D texture width if CUDA_ARRAY3D_TEXTURE_GATHER is set
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 45,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 2D texture height if CUDA_ARRAY3D_TEXTURE_GATHER is set
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 46,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Alternate maximum 3D texture width
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 47,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Alternate maximum 3D texture height
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 48,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Alternate maximum 3D texture depth
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 49,
- );
-}
-impl CUdevice_attribute_enum {
- ///< PCI domain ID of the device
- pub const CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 50,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Pitch alignment requirement for textures
- pub const CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 51,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum cubemap texture width/height
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 52,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum cubemap layered texture width/height
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 53,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum layers in a cubemap layered texture
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 54,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 1D surface width
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 55,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 2D surface width
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 56,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 2D surface height
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 57,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 3D surface width
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 58,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 3D surface height
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 59,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 3D surface depth
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 60,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 1D layered surface width
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 61,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum layers in a 1D layered surface
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 62,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 2D layered surface width
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 63,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 2D layered surface height
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 64,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum layers in a 2D layered surface
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 65,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum cubemap surface width
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 66,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum cubemap layered surface width
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 67,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum layers in a cubemap layered surface
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 68,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Deprecated, do not use. Use cudaDeviceGetTexture1DLinearMaxWidth() or cuDeviceGetTexture1DLinearMaxWidth() instead.
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 69,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 2D linear texture width
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 70,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 2D linear texture height
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 71,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum 2D linear texture pitch in bytes
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 72,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum mipmapped 2D texture width
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 73,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum mipmapped 2D texture height
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 74,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Major compute capability version number
- pub const CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 75,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Minor compute capability version number
- pub const CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 76,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum mipmapped 1D texture width
- pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 77,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports stream priorities
- pub const CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 78,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports caching globals in L1
- pub const CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 79,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports caching locals in L1
- pub const CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 80,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum shared memory available per multiprocessor in bytes
- pub const CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 81,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum number of 32-bit registers available per multiprocessor
- pub const CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 82,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device can allocate managed memory on this system
- pub const CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 83,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device is on a multi-GPU board
- pub const CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 84,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Unique id for a group of devices on the same multi-GPU board
- pub const CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 85,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Link between the device and the host supports native atomic operations (this is a placeholder attribute, and is not supported on any current hardware)
- pub const CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 86,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Ratio of single precision performance (in floating-point operations per second) to double precision performance
- pub const CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 87,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports coherently accessing pageable memory without calling cudaHostRegister on it
- pub const CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 88,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device can coherently access managed memory concurrently with the CPU
- pub const CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 89,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports compute preemption.
- pub const CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 90,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device can access host registered memory at the same virtual address as the CPU
- pub const CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 91,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Deprecated, along with v1 MemOps API, ::cuStreamBatchMemOp and related APIs are supported.
- pub const CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 92,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Deprecated, along with v1 MemOps API, 64-bit operations are supported in ::cuStreamBatchMemOp and related APIs.
- pub const CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 93,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Deprecated, along with v1 MemOps API, ::CU_STREAM_WAIT_VALUE_NOR is supported.
- pub const CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 94,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports launching cooperative kernels via ::cuLaunchCooperativeKernel
- pub const CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 95,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Deprecated, ::cuLaunchCooperativeKernelMultiDevice is deprecated.
- pub const CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 96,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum optin shared memory per block
- pub const CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 97,
- );
-}
-impl CUdevice_attribute_enum {
- ///< The ::CU_STREAM_WAIT_VALUE_FLUSH flag and the ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the device. See \ref CUDA_MEMOP for additional details.
- pub const CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 98,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports host memory registration via ::cudaHostRegister.
- pub const CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 99,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device accesses pageable memory via the host's page tables.
- pub const CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 100,
- );
-}
-impl CUdevice_attribute_enum {
- ///< The host can directly access managed memory on the device without migration.
- pub const CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 101,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Deprecated, Use CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED
- pub const CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 102,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports virtual memory management APIs like ::cuMemAddressReserve, ::cuMemCreate, ::cuMemMap and related APIs
- pub const CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 102,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports exporting memory to a posix file descriptor with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate
- pub const CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 103,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports exporting memory to a Win32 NT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate
- pub const CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 104,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports exporting memory to a Win32 KMT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate
- pub const CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 105,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum number of blocks per multiprocessor
- pub const CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 106,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports compression of memory
- pub const CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 107,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum L2 persisting lines capacity setting in bytes.
- pub const CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 108,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Maximum value of CUaccessPolicyWindow::num_bytes.
- pub const CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 109,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports specifying the GPUDirect RDMA flag with ::cuMemCreate
- pub const CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 110,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Shared memory reserved by CUDA driver per block in bytes
- pub const CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 111,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays
- pub const CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 112,
- );
-}
-impl CUdevice_attribute_enum {
-    ///< Device supports using the ::cuMemHostRegister flag ::CU_MEMHOSTREGISTER_READ_ONLY to register memory that must be mapped as read-only to the GPU
- pub const CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 113,
- );
-}
-impl CUdevice_attribute_enum {
- ///< External timeline semaphore interop is supported on the device
- pub const CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 114,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports using the ::cuMemAllocAsync and ::cuMemPool family of APIs
- pub const CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 115,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information)
- pub const CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 116,
- );
-}
-impl CUdevice_attribute_enum {
- ///< The returned attribute shall be interpreted as a bitmask, where the individual bits are described by the ::CUflushGPUDirectRDMAWritesOptions enum
- pub const CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 117,
- );
-}
-impl CUdevice_attribute_enum {
- ///< GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See ::CUGPUDirectRDMAWritesOrdering for the numerical values returned here.
- pub const CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 118,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Handle types supported with mempool based IPC
- pub const CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 119,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Indicates device supports cluster launch
- pub const CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 120,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays
- pub const CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 121,
- );
-}
-impl CUdevice_attribute_enum {
- ///< 64-bit operations are supported in ::cuStreamBatchMemOp and related MemOp APIs.
- pub const CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 122,
- );
-}
-impl CUdevice_attribute_enum {
- ///< ::CU_STREAM_WAIT_VALUE_NOR is supported by MemOp APIs.
- pub const CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 123,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports buffer sharing with dma_buf mechanism.
- pub const CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 124,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports IPC Events.
- pub const CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 125,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Number of memory domains the device supports.
- pub const CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 126,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports accessing memory using Tensor Map.
- pub const CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 127,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports exporting memory to a fabric handle with cuMemExportToShareableHandle() or requested with cuMemCreate()
- pub const CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 128,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports unified function pointers.
- pub const CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 129,
- );
-}
-impl CUdevice_attribute_enum {
- pub const CU_DEVICE_ATTRIBUTE_NUMA_CONFIG: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 130,
- );
-}
-impl CUdevice_attribute_enum {
- pub const CU_DEVICE_ATTRIBUTE_NUMA_ID: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 131,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Device supports switch multicast and reduction operations.
- pub const CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 132,
- );
-}
-impl CUdevice_attribute_enum {
- ///< Indicates if contexts created on this device will be shared via MPS
- pub const CU_DEVICE_ATTRIBUTE_MPS_ENABLED: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 133,
- );
-}
-impl CUdevice_attribute_enum {
- ///< NUMA ID of the host node closest to the device. Returns -1 when system does not support NUMA.
- pub const CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 134,
- );
-}
-impl CUdevice_attribute_enum {
- pub const CU_DEVICE_ATTRIBUTE_MAX: CUdevice_attribute_enum = CUdevice_attribute_enum(
- 135,
- );
-}
-#[repr(transparent)]
-/// Device properties
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUdevice_attribute_enum(pub ::core::ffi::c_uint);
-/// Device properties
-pub use self::CUdevice_attribute_enum as CUdevice_attribute;
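// Editorial sketch, not taken from ZLUDA's implementation: the shape of a lookup an
// implementation might use when answering attribute queries. The numbers are
// placeholders for illustration only; real values must come from the underlying device.
// Note that the deprecated aliases above (e.g. CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK)
// reuse the numeric value of their replacements, so only one of each pair can appear
// as a match arm.
fn illustrative_attribute(attrib: CUdevice_attribute) -> Option<::core::ffi::c_int> {
    Some(match attrib {
        CUdevice_attribute::CU_DEVICE_ATTRIBUTE_WARP_SIZE => 32,
        CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK => 1024,
        CUdevice_attribute::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR => 8,
        CUdevice_attribute::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR => 6,
        _ => return None,
    })
}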
-/// Legacy device properties
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUdevprop_st {
- ///< Maximum number of threads per block
- pub maxThreadsPerBlock: ::core::ffi::c_int,
- ///< Maximum size of each dimension of a block
- pub maxThreadsDim: [::core::ffi::c_int; 3usize],
- ///< Maximum size of each dimension of a grid
- pub maxGridSize: [::core::ffi::c_int; 3usize],
- ///< Shared memory available per block in bytes
- pub sharedMemPerBlock: ::core::ffi::c_int,
- ///< Constant memory available on device in bytes
- pub totalConstantMemory: ::core::ffi::c_int,
- ///< Warp size in threads
- pub SIMDWidth: ::core::ffi::c_int,
- ///< Maximum pitch in bytes allowed by memory copies
- pub memPitch: ::core::ffi::c_int,
- ///< 32-bit registers available per block
- pub regsPerBlock: ::core::ffi::c_int,
- ///< Clock frequency in kilohertz
- pub clockRate: ::core::ffi::c_int,
- ///< Alignment requirement for textures
- pub textureAlign: ::core::ffi::c_int,
-}
-/// Legacy device properties
-pub type CUdevprop_v1 = CUdevprop_st;
-/// Legacy device properties
-pub type CUdevprop = CUdevprop_v1;
-impl CUpointer_attribute_enum {
- ///< The ::CUcontext on which a pointer was allocated or registered
- pub const CU_POINTER_ATTRIBUTE_CONTEXT: CUpointer_attribute_enum = CUpointer_attribute_enum(
- 1,
- );
-}
-impl CUpointer_attribute_enum {
- ///< The ::CUmemorytype describing the physical location of a pointer
- pub const CU_POINTER_ATTRIBUTE_MEMORY_TYPE: CUpointer_attribute_enum = CUpointer_attribute_enum(
- 2,
- );
-}
-impl CUpointer_attribute_enum {
- ///< The address at which a pointer's memory may be accessed on the device
- pub const CU_POINTER_ATTRIBUTE_DEVICE_POINTER: CUpointer_attribute_enum = CUpointer_attribute_enum(
- 3,
- );
-}
-impl CUpointer_attribute_enum {
- ///< The address at which a pointer's memory may be accessed on the host
- pub const CU_POINTER_ATTRIBUTE_HOST_POINTER: CUpointer_attribute_enum = CUpointer_attribute_enum(
- 4,
- );
-}
-impl CUpointer_attribute_enum {
- ///< A pair of tokens for use with the nv-p2p.h Linux kernel interface
- pub const CU_POINTER_ATTRIBUTE_P2P_TOKENS: CUpointer_attribute_enum = CUpointer_attribute_enum(
- 5,
- );
-}
-impl CUpointer_attribute_enum {
- ///< Synchronize every synchronous memory operation initiated on this region
- pub const CU_POINTER_ATTRIBUTE_SYNC_MEMOPS: CUpointer_attribute_enum = CUpointer_attribute_enum(
- 6,
- );
-}
-impl CUpointer_attribute_enum {
- ///< A process-wide unique ID for an allocated memory region
- pub const CU_POINTER_ATTRIBUTE_BUFFER_ID: CUpointer_attribute_enum = CUpointer_attribute_enum(
- 7,
- );
-}
-impl CUpointer_attribute_enum {
- ///< Indicates if the pointer points to managed memory
- pub const CU_POINTER_ATTRIBUTE_IS_MANAGED: CUpointer_attribute_enum = CUpointer_attribute_enum(
- 8,
- );
-}
-impl CUpointer_attribute_enum {
- ///< A device ordinal of a device on which a pointer was allocated or registered
- pub const CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL: CUpointer_attribute_enum = CUpointer_attribute_enum(
- 9,
- );
-}
-impl CUpointer_attribute_enum {
- ///< 1 if this pointer maps to an allocation that is suitable for ::cudaIpcGetMemHandle, 0 otherwise
- pub const CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE: CUpointer_attribute_enum = CUpointer_attribute_enum(
- 10,
- );
-}
-impl CUpointer_attribute_enum {
- ///< Starting address for this requested pointer
- pub const CU_POINTER_ATTRIBUTE_RANGE_START_ADDR: CUpointer_attribute_enum = CUpointer_attribute_enum(
- 11,
- );
-}
-impl CUpointer_attribute_enum {
- ///< Size of the address range for this requested pointer
- pub const CU_POINTER_ATTRIBUTE_RANGE_SIZE: CUpointer_attribute_enum = CUpointer_attribute_enum(
- 12,
- );
-}
-impl CUpointer_attribute_enum {
- ///< 1 if this pointer is in a valid address range that is mapped to a backing allocation, 0 otherwise
- pub const CU_POINTER_ATTRIBUTE_MAPPED: CUpointer_attribute_enum = CUpointer_attribute_enum(
- 13,
- );
-}
-impl CUpointer_attribute_enum {
- ///< Bitmask of allowed ::CUmemAllocationHandleType for this allocation
- pub const CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES: CUpointer_attribute_enum = CUpointer_attribute_enum(
- 14,
- );
-}
-impl CUpointer_attribute_enum {
- ///< 1 if the memory this pointer is referencing can be used with the GPUDirect RDMA API
- pub const CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE: CUpointer_attribute_enum = CUpointer_attribute_enum(
- 15,
- );
-}
-impl CUpointer_attribute_enum {
- ///< Returns the access flags the device associated with the current context has on the corresponding memory referenced by the pointer given
- pub const CU_POINTER_ATTRIBUTE_ACCESS_FLAGS: CUpointer_attribute_enum = CUpointer_attribute_enum(
- 16,
- );
-}
-impl CUpointer_attribute_enum {
- ///< Returns the mempool handle for the allocation if it was allocated from a mempool. Otherwise returns NULL.
- pub const CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE: CUpointer_attribute_enum = CUpointer_attribute_enum(
- 17,
- );
-}
-impl CUpointer_attribute_enum {
- ///< Size of the actual underlying mapping that the pointer belongs to
- pub const CU_POINTER_ATTRIBUTE_MAPPING_SIZE: CUpointer_attribute_enum = CUpointer_attribute_enum(
- 18,
- );
-}
-impl CUpointer_attribute_enum {
- ///< The start address of the mapping that the pointer belongs to
- pub const CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR: CUpointer_attribute_enum = CUpointer_attribute_enum(
- 19,
- );
-}
-impl CUpointer_attribute_enum {
- ///< A process-wide unique id corresponding to the physical allocation the pointer belongs to
- pub const CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID: CUpointer_attribute_enum = CUpointer_attribute_enum(
- 20,
- );
-}
-#[repr(transparent)]
-/// Pointer information
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUpointer_attribute_enum(pub ::core::ffi::c_uint);
-/// Pointer information
-pub use self::CUpointer_attribute_enum as CUpointer_attribute;
-impl CUfunction_attribute_enum {
- /** The maximum number of threads per block, beyond which a launch of the
- function would fail. This number depends on both the function and the
- device on which the function is currently loaded.*/
- pub const CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK: CUfunction_attribute_enum = CUfunction_attribute_enum(
- 0,
- );
-}
-impl CUfunction_attribute_enum {
- /** The size in bytes of statically-allocated shared memory required by
- this function. This does not include dynamically-allocated shared
- memory requested by the user at runtime.*/
- pub const CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES: CUfunction_attribute_enum = CUfunction_attribute_enum(
- 1,
- );
-}
-impl CUfunction_attribute_enum {
- /** The size in bytes of user-allocated constant memory required by this
- function.*/
- pub const CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES: CUfunction_attribute_enum = CUfunction_attribute_enum(
- 2,
- );
-}
-impl CUfunction_attribute_enum {
- /// The size in bytes of local memory used by each thread of this function.
- pub const CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES: CUfunction_attribute_enum = CUfunction_attribute_enum(
- 3,
- );
-}
-impl CUfunction_attribute_enum {
- /// The number of registers used by each thread of this function.
- pub const CU_FUNC_ATTRIBUTE_NUM_REGS: CUfunction_attribute_enum = CUfunction_attribute_enum(
- 4,
- );
-}
-impl CUfunction_attribute_enum {
- /** The PTX virtual architecture version for which the function was
- compiled. This value is the major PTX version * 10 + the minor PTX
- version, so a PTX version 1.3 function would return the value 13.
- Note that this may return the undefined value of 0 for cubins
- compiled prior to CUDA 3.0.*/
- pub const CU_FUNC_ATTRIBUTE_PTX_VERSION: CUfunction_attribute_enum = CUfunction_attribute_enum(
- 5,
- );
-}
-impl CUfunction_attribute_enum {
- /** The binary architecture version for which the function was compiled.
- This value is the major binary version * 10 + the minor binary version,
- so a binary version 1.3 function would return the value 13. Note that
- this will return a value of 10 for legacy cubins that do not have a
- properly-encoded binary architecture version.*/
- pub const CU_FUNC_ATTRIBUTE_BINARY_VERSION: CUfunction_attribute_enum = CUfunction_attribute_enum(
- 6,
- );
-}
-impl CUfunction_attribute_enum {
- /** The attribute to indicate whether the function has been compiled with
- user-specified option "-Xptxas --dlcm=ca" set.*/
- pub const CU_FUNC_ATTRIBUTE_CACHE_MODE_CA: CUfunction_attribute_enum = CUfunction_attribute_enum(
- 7,
- );
-}
-impl CUfunction_attribute_enum {
- /** The maximum size in bytes of dynamically-allocated shared memory that can be used by
- this function. If the user-specified dynamic shared memory size is larger than this
- value, the launch will fail.
- See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/
- pub const CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: CUfunction_attribute_enum = CUfunction_attribute_enum(
- 8,
- );
-}
-impl CUfunction_attribute_enum {
- /** On devices where the L1 cache and shared memory use the same hardware resources,
- this sets the shared memory carveout preference, in percent of the total shared memory.
- Refer to ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR.
- This is only a hint, and the driver can choose a different ratio if required to execute the function.
- See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/
- pub const CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: CUfunction_attribute_enum = CUfunction_attribute_enum(
- 9,
- );
-}
-impl CUfunction_attribute_enum {
- /** If this attribute is set, the kernel must launch with a valid cluster
- size specified.
- See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/
- pub const CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET: CUfunction_attribute_enum = CUfunction_attribute_enum(
- 10,
- );
-}
-impl CUfunction_attribute_enum {
- /** The required cluster width in blocks. The values must either all be 0 or
- all be positive. The validity of the cluster dimensions is otherwise
- checked at launch time.
-
- If the value is set during compile time, it cannot be set at runtime.
- Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED.
- See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/
- pub const CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH: CUfunction_attribute_enum = CUfunction_attribute_enum(
- 11,
- );
-}
-impl CUfunction_attribute_enum {
- /** The required cluster height in blocks. The values must either all be 0 or
- all be positive. The validity of the cluster dimensions is otherwise
- checked at launch time.
-
- If the value is set during compile time, it cannot be set at runtime.
- Setting it at runtime should return CUDA_ERROR_NOT_PERMITTED.
- See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/
- pub const CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT: CUfunction_attribute_enum = CUfunction_attribute_enum(
- 12,
- );
-}
-impl CUfunction_attribute_enum {
- /** The required cluster depth in blocks. The values must either all be 0 or
- all be positive. The validity of the cluster dimensions is otherwise
- checked at launch time.
-
- If the value is set during compile time, it cannot be set at runtime.
- Setting it at runtime should return CUDA_ERROR_NOT_PERMITTED.
- See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/
- pub const CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH: CUfunction_attribute_enum = CUfunction_attribute_enum(
- 13,
- );
-}
-impl CUfunction_attribute_enum {
- /** Whether the function can be launched with non-portable cluster size. 1 is
- allowed, 0 is disallowed. A non-portable cluster size may only function
- on the specific SKUs the program is tested on. The launch might fail if
- the program is run on a different hardware platform.
-
- CUDA API provides cudaOccupancyMaxActiveClusters to assist with checking
- whether the desired size can be launched on the current device.
-
- Portable Cluster Size
-
- A portable cluster size is guaranteed to be functional on all compute
- capabilities higher than the target compute capability. The portable
- cluster size for sm_90 is 8 blocks per cluster. This value may increase
- for future compute capabilities.
-
- The specific hardware unit may support higher cluster sizes that are not
- guaranteed to be portable.
- See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/
- pub const CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED: CUfunction_attribute_enum = CUfunction_attribute_enum(
- 14,
- );
-}
-impl CUfunction_attribute_enum {
- /** The block scheduling policy of a function. The value type is
- CUclusterSchedulingPolicy / cudaClusterSchedulingPolicy.
- See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/
- pub const CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: CUfunction_attribute_enum = CUfunction_attribute_enum(
- 15,
- );
-}
-impl CUfunction_attribute_enum {
-    /// Sentinel value; one past the last valid ::CUfunction_attribute (not itself a queryable attribute).
- pub const CU_FUNC_ATTRIBUTE_MAX: CUfunction_attribute_enum = CUfunction_attribute_enum(
- 16,
- );
-}
-#[repr(transparent)]
-/// Function properties
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUfunction_attribute_enum(pub ::core::ffi::c_uint);
-/// Function properties
-pub use self::CUfunction_attribute_enum as CUfunction_attribute;
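// Editorial sketch based on the doc comments for CU_FUNC_ATTRIBUTE_PTX_VERSION and
// CU_FUNC_ATTRIBUTE_BINARY_VERSION above: the queried value encodes major * 10 + minor,
// so 13 means version 1.3 and 86 means 8.6. The helper name is invented.
fn split_encoded_version(value: ::core::ffi::c_int) -> (::core::ffi::c_int, ::core::ffi::c_int) {
    (value / 10, value % 10)
}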
-impl CUfunc_cache_enum {
- ///< no preference for shared memory or L1 (default)
- pub const CU_FUNC_CACHE_PREFER_NONE: CUfunc_cache_enum = CUfunc_cache_enum(0);
-}
-impl CUfunc_cache_enum {
- ///< prefer larger shared memory and smaller L1 cache
- pub const CU_FUNC_CACHE_PREFER_SHARED: CUfunc_cache_enum = CUfunc_cache_enum(1);
-}
-impl CUfunc_cache_enum {
- ///< prefer larger L1 cache and smaller shared memory
- pub const CU_FUNC_CACHE_PREFER_L1: CUfunc_cache_enum = CUfunc_cache_enum(2);
-}
-impl CUfunc_cache_enum {
- ///< prefer equal sized L1 cache and shared memory
- pub const CU_FUNC_CACHE_PREFER_EQUAL: CUfunc_cache_enum = CUfunc_cache_enum(3);
-}
-#[repr(transparent)]
-/// Function cache configurations
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUfunc_cache_enum(pub ::core::ffi::c_uint);
-/// Function cache configurations
-pub use self::CUfunc_cache_enum as CUfunc_cache;
-impl CUsharedconfig_enum {
- ///< set default shared memory bank size
- pub const CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE: CUsharedconfig_enum = CUsharedconfig_enum(
- 0,
- );
-}
-impl CUsharedconfig_enum {
- ///< set shared memory bank width to four bytes
- pub const CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: CUsharedconfig_enum = CUsharedconfig_enum(
- 1,
- );
-}
-impl CUsharedconfig_enum {
- ///< set shared memory bank width to eight bytes
- pub const CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: CUsharedconfig_enum = CUsharedconfig_enum(
- 2,
- );
-}
-#[repr(transparent)]
-/** \deprecated
-
- Shared memory configurations*/
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUsharedconfig_enum(pub ::core::ffi::c_uint);
-/** \deprecated
-
- Shared memory configurations*/
-pub use self::CUsharedconfig_enum as CUsharedconfig;
-impl CUshared_carveout_enum {
- ///< No preference for shared memory or L1 (default)
- pub const CU_SHAREDMEM_CARVEOUT_DEFAULT: CUshared_carveout_enum = CUshared_carveout_enum(
- -1,
- );
-}
-impl CUshared_carveout_enum {
- ///< Prefer maximum available shared memory, minimum L1 cache
- pub const CU_SHAREDMEM_CARVEOUT_MAX_SHARED: CUshared_carveout_enum = CUshared_carveout_enum(
- 100,
- );
-}
-impl CUshared_carveout_enum {
- ///< Prefer maximum available L1 cache, minimum shared memory
- pub const CU_SHAREDMEM_CARVEOUT_MAX_L1: CUshared_carveout_enum = CUshared_carveout_enum(
- 0,
- );
-}
-#[repr(transparent)]
-/// Shared memory carveout configurations. These may be passed to ::cuFuncSetAttribute or ::cuKernelSetAttribute
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUshared_carveout_enum(pub ::core::ffi::c_int);
-/// Shared memory carveout configurations. These may be passed to ::cuFuncSetAttribute or ::cuKernelSetAttribute
-pub use self::CUshared_carveout_enum as CUshared_carveout;
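For the carveout hint above, intermediate percentages between the two named extremes are also accepted. A small hypothetical helper, assuming the resulting value is later passed through cuFuncSetAttribute with the preferred-carveout function attribute defined earlier in this file:

// Sketch only: map an optional percentage onto a carveout value.
fn carveout_hint(percent: Option<i32>) -> CUshared_carveout {
    match percent {
        None => CUshared_carveout::CU_SHAREDMEM_CARVEOUT_DEFAULT,
        Some(p) if p >= 100 => CUshared_carveout::CU_SHAREDMEM_CARVEOUT_MAX_SHARED,
        Some(0) => CUshared_carveout::CU_SHAREDMEM_CARVEOUT_MAX_L1,
        // Any value in 1..=99 is an explicit percentage hint.
        Some(p) => CUshared_carveout(p),
    }
}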
-impl CUmemorytype_enum {
- ///< Host memory
- pub const CU_MEMORYTYPE_HOST: CUmemorytype_enum = CUmemorytype_enum(1);
-}
-impl CUmemorytype_enum {
- ///< Device memory
- pub const CU_MEMORYTYPE_DEVICE: CUmemorytype_enum = CUmemorytype_enum(2);
-}
-impl CUmemorytype_enum {
- ///< Array memory
- pub const CU_MEMORYTYPE_ARRAY: CUmemorytype_enum = CUmemorytype_enum(3);
-}
-impl CUmemorytype_enum {
- ///< Unified device or host memory
- pub const CU_MEMORYTYPE_UNIFIED: CUmemorytype_enum = CUmemorytype_enum(4);
-}
-#[repr(transparent)]
-/// Memory types
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmemorytype_enum(pub ::core::ffi::c_uint);
-/// Memory types
-pub use self::CUmemorytype_enum as CUmemorytype;
-impl CUcomputemode_enum {
- ///< Default compute mode (Multiple contexts allowed per device)
- pub const CU_COMPUTEMODE_DEFAULT: CUcomputemode_enum = CUcomputemode_enum(0);
-}
-impl CUcomputemode_enum {
- ///< Compute-prohibited mode (No contexts can be created on this device at this time)
- pub const CU_COMPUTEMODE_PROHIBITED: CUcomputemode_enum = CUcomputemode_enum(2);
-}
-impl CUcomputemode_enum {
- ///< Compute-exclusive-process mode (Only one context used by a single process can be present on this device at a time)
- pub const CU_COMPUTEMODE_EXCLUSIVE_PROCESS: CUcomputemode_enum = CUcomputemode_enum(
- 3,
- );
-}
-#[repr(transparent)]
-/// Compute Modes
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUcomputemode_enum(pub ::core::ffi::c_uint);
-/// Compute Modes
-pub use self::CUcomputemode_enum as CUcomputemode;
-impl CUmem_advise_enum {
- ///< Data will mostly be read and only occasionally be written to
- pub const CU_MEM_ADVISE_SET_READ_MOSTLY: CUmem_advise_enum = CUmem_advise_enum(1);
-}
-impl CUmem_advise_enum {
- ///< Undo the effect of ::CU_MEM_ADVISE_SET_READ_MOSTLY
- pub const CU_MEM_ADVISE_UNSET_READ_MOSTLY: CUmem_advise_enum = CUmem_advise_enum(2);
-}
-impl CUmem_advise_enum {
- ///< Set the preferred location for the data as the specified device
- pub const CU_MEM_ADVISE_SET_PREFERRED_LOCATION: CUmem_advise_enum = CUmem_advise_enum(
- 3,
- );
-}
-impl CUmem_advise_enum {
- ///< Clear the preferred location for the data
- pub const CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION: CUmem_advise_enum = CUmem_advise_enum(
- 4,
- );
-}
-impl CUmem_advise_enum {
- ///< Data will be accessed by the specified device, so prevent page faults as much as possible
- pub const CU_MEM_ADVISE_SET_ACCESSED_BY: CUmem_advise_enum = CUmem_advise_enum(5);
-}
-impl CUmem_advise_enum {
- ///< Let the Unified Memory subsystem decide on the page faulting policy for the specified device
- pub const CU_MEM_ADVISE_UNSET_ACCESSED_BY: CUmem_advise_enum = CUmem_advise_enum(6);
-}
-#[repr(transparent)]
-/// Memory advise values
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmem_advise_enum(pub ::core::ffi::c_uint);
-/// Memory advise values
-pub use self::CUmem_advise_enum as CUmem_advise;
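The set/unset pairs above are meant to be passed to cuMemAdvise (declared elsewhere in these bindings) for a given device pointer range. A minimal, hypothetical sketch:

// Illustration only: choose the advice constant for toggling the read-mostly hint.
fn read_mostly_advice(enable: bool) -> CUmem_advise {
    if enable {
        CUmem_advise::CU_MEM_ADVISE_SET_READ_MOSTLY
    } else {
        CUmem_advise::CU_MEM_ADVISE_UNSET_READ_MOSTLY
    }
}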
-impl CUmem_range_attribute_enum {
- ///< Whether the range will mostly be read and only occasionally be written to
- pub const CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY: CUmem_range_attribute_enum = CUmem_range_attribute_enum(
- 1,
- );
-}
-impl CUmem_range_attribute_enum {
- ///< The preferred location of the range
- pub const CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION: CUmem_range_attribute_enum = CUmem_range_attribute_enum(
- 2,
- );
-}
-impl CUmem_range_attribute_enum {
- ///< Memory range has ::CU_MEM_ADVISE_SET_ACCESSED_BY set for specified device
- pub const CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY: CUmem_range_attribute_enum = CUmem_range_attribute_enum(
- 3,
- );
-}
-impl CUmem_range_attribute_enum {
- ///< The last location to which the range was prefetched
- pub const CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION: CUmem_range_attribute_enum = CUmem_range_attribute_enum(
- 4,
- );
-}
-impl CUmem_range_attribute_enum {
- ///< The preferred location type of the range
- pub const CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE: CUmem_range_attribute_enum = CUmem_range_attribute_enum(
- 5,
- );
-}
-impl CUmem_range_attribute_enum {
- ///< The preferred location id of the range
- pub const CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID: CUmem_range_attribute_enum = CUmem_range_attribute_enum(
- 6,
- );
-}
-impl CUmem_range_attribute_enum {
- ///< The last location type to which the range was prefetched
- pub const CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE: CUmem_range_attribute_enum = CUmem_range_attribute_enum(
- 7,
- );
-}
-impl CUmem_range_attribute_enum {
- ///< The last location id to which the range was prefetched
- pub const CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID: CUmem_range_attribute_enum = CUmem_range_attribute_enum(
- 8,
- );
-}
-#[repr(transparent)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmem_range_attribute_enum(pub ::core::ffi::c_uint);
-pub use self::CUmem_range_attribute_enum as CUmem_range_attribute;
-impl CUjit_option_enum {
- /** Max number of registers that a thread may use.\n
- Option type: unsigned int\n
- Applies to: compiler only*/
- pub const CU_JIT_MAX_REGISTERS: CUjit_option_enum = CUjit_option_enum(0);
-}
-impl CUjit_option_enum {
- /** IN: Specifies minimum number of threads per block to target compilation
- for\n
- OUT: Returns the number of threads the compiler actually targeted.
- This restricts the resource utilization of the compiler (e.g. max
- registers) such that a block with the given number of threads should be
- able to launch based on register limitations. Note, this option does not
- currently take into account any other resource limitations, such as
- shared memory utilization.\n
- Cannot be combined with ::CU_JIT_TARGET.\n
- Option type: unsigned int\n
- Applies to: compiler only*/
- pub const CU_JIT_THREADS_PER_BLOCK: CUjit_option_enum = CUjit_option_enum(1);
-}
-impl CUjit_option_enum {
- /** Overwrites the option value with the total wall clock time, in
- milliseconds, spent in the compiler and linker\n
- Option type: float\n
- Applies to: compiler and linker*/
- pub const CU_JIT_WALL_TIME: CUjit_option_enum = CUjit_option_enum(2);
-}
-impl CUjit_option_enum {
- /** Pointer to a buffer in which to print any log messages
- that are informational in nature (the buffer size is specified via
- option ::CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES)\n
- Option type: char *\n
- Applies to: compiler and linker*/
- pub const CU_JIT_INFO_LOG_BUFFER: CUjit_option_enum = CUjit_option_enum(3);
-}
-impl CUjit_option_enum {
- /** IN: Log buffer size in bytes. Log messages will be capped at this size
- (including null terminator)\n
- OUT: Amount of log buffer filled with messages\n
- Option type: unsigned int\n
- Applies to: compiler and linker*/
- pub const CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES: CUjit_option_enum = CUjit_option_enum(
- 4,
- );
-}
-impl CUjit_option_enum {
- /** Pointer to a buffer in which to print any log messages that
- reflect errors (the buffer size is specified via option
- ::CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES)\n
- Option type: char *\n
- Applies to: compiler and linker*/
- pub const CU_JIT_ERROR_LOG_BUFFER: CUjit_option_enum = CUjit_option_enum(5);
-}
-impl CUjit_option_enum {
- /** IN: Log buffer size in bytes. Log messages will be capped at this size
- (including null terminator)\n
- OUT: Amount of log buffer filled with messages\n
- Option type: unsigned int\n
- Applies to: compiler and linker*/
- pub const CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES: CUjit_option_enum = CUjit_option_enum(
- 6,
- );
-}
-impl CUjit_option_enum {
- /** Level of optimizations to apply to generated code (0 - 4), with 4
- being the default and highest level of optimizations.\n
- Option type: unsigned int\n
- Applies to: compiler only*/
- pub const CU_JIT_OPTIMIZATION_LEVEL: CUjit_option_enum = CUjit_option_enum(7);
-}
-impl CUjit_option_enum {
- /** No option value required. Determines the target based on the current
- attached context (default)\n
- Option type: No option value needed\n
- Applies to: compiler and linker*/
- pub const CU_JIT_TARGET_FROM_CUCONTEXT: CUjit_option_enum = CUjit_option_enum(8);
-}
-impl CUjit_option_enum {
- /** Target is chosen based on supplied ::CUjit_target. Cannot be
- combined with ::CU_JIT_THREADS_PER_BLOCK.\n
- Option type: unsigned int for enumerated type ::CUjit_target\n
- Applies to: compiler and linker*/
- pub const CU_JIT_TARGET: CUjit_option_enum = CUjit_option_enum(9);
-}
-impl CUjit_option_enum {
- /** Specifies choice of fallback strategy if matching cubin is not found.
- Choice is based on supplied ::CUjit_fallback. This option cannot be
- used with cuLink* APIs as the linker requires exact matches.\n
- Option type: unsigned int for enumerated type ::CUjit_fallback\n
- Applies to: compiler only*/
- pub const CU_JIT_FALLBACK_STRATEGY: CUjit_option_enum = CUjit_option_enum(10);
-}
-impl CUjit_option_enum {
- /** Specifies whether to create debug information in output (-g)
- (0: false, default)\n
- Option type: int\n
- Applies to: compiler and linker*/
- pub const CU_JIT_GENERATE_DEBUG_INFO: CUjit_option_enum = CUjit_option_enum(11);
-}
-impl CUjit_option_enum {
- /** Generate verbose log messages (0: false, default)\n
- Option type: int\n
- Applies to: compiler and linker*/
- pub const CU_JIT_LOG_VERBOSE: CUjit_option_enum = CUjit_option_enum(12);
-}
-impl CUjit_option_enum {
- /** Generate line number information (-lineinfo) (0: false, default)\n
- Option type: int\n
- Applies to: compiler only*/
- pub const CU_JIT_GENERATE_LINE_INFO: CUjit_option_enum = CUjit_option_enum(13);
-}
-impl CUjit_option_enum {
- /** Specifies whether to enable caching explicitly (-dlcm) \n
- Choice is based on supplied ::CUjit_cacheMode_enum.\n
- Option type: unsigned int for enumerated type ::CUjit_cacheMode_enum\n
- Applies to: compiler only*/
- pub const CU_JIT_CACHE_MODE: CUjit_option_enum = CUjit_option_enum(14);
-}
-impl CUjit_option_enum {
- /** \deprecated
- This jit option is deprecated and should not be used.*/
- pub const CU_JIT_NEW_SM3X_OPT: CUjit_option_enum = CUjit_option_enum(15);
-}
-impl CUjit_option_enum {
- /// This jit option is used for internal purpose only.
- pub const CU_JIT_FAST_COMPILE: CUjit_option_enum = CUjit_option_enum(16);
-}
-impl CUjit_option_enum {
- /** Array of device symbol names that will be relocated to the corresponding
- host addresses stored in ::CU_JIT_GLOBAL_SYMBOL_ADDRESSES.\n
- Must contain ::CU_JIT_GLOBAL_SYMBOL_COUNT entries.\n
- When loading a device module, driver will relocate all encountered
- unresolved symbols to the host addresses.\n
- It is only allowed to register symbols that correspond to unresolved
- global variables.\n
- It is illegal to register the same device symbol at multiple addresses.\n
- Option type: const char **\n
- Applies to: dynamic linker only*/
- pub const CU_JIT_GLOBAL_SYMBOL_NAMES: CUjit_option_enum = CUjit_option_enum(17);
-}
-impl CUjit_option_enum {
- /** Array of host addresses that will be used to relocate corresponding
- device symbols stored in ::CU_JIT_GLOBAL_SYMBOL_NAMES.\n
- Must contain ::CU_JIT_GLOBAL_SYMBOL_COUNT entries.\n
- Option type: void **\n
- Applies to: dynamic linker only*/
- pub const CU_JIT_GLOBAL_SYMBOL_ADDRESSES: CUjit_option_enum = CUjit_option_enum(18);
-}
-impl CUjit_option_enum {
- /** Number of entries in ::CU_JIT_GLOBAL_SYMBOL_NAMES and
- ::CU_JIT_GLOBAL_SYMBOL_ADDRESSES arrays.\n
- Option type: unsigned int\n
- Applies to: dynamic linker only*/
- pub const CU_JIT_GLOBAL_SYMBOL_COUNT: CUjit_option_enum = CUjit_option_enum(19);
-}
-impl CUjit_option_enum {
- /** \deprecated
- Enable link-time optimization (-dlto) for device code (Disabled by default).\n
- This option is not supported on 32-bit platforms.\n
- Option type: int\n
- Applies to: compiler and linker
-
- Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
- pub const CU_JIT_LTO: CUjit_option_enum = CUjit_option_enum(20);
-}
-impl CUjit_option_enum {
- /** \deprecated
- Control single-precision denormals (-ftz) support (0: false, default).
- 1 : flushes denormal values to zero
- 0 : preserves denormal values
- Option type: int\n
- Applies to: link-time optimization specified with CU_JIT_LTO
-
- Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
- pub const CU_JIT_FTZ: CUjit_option_enum = CUjit_option_enum(21);
-}
-impl CUjit_option_enum {
- /** \deprecated
- Control single-precision floating-point division and reciprocals
- (-prec-div) support (1: true, default).
- 1 : Enables the IEEE round-to-nearest mode
- 0 : Enables the fast approximation mode
- Option type: int\n
- Applies to: link-time optimization specified with CU_JIT_LTO
-
- Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
- pub const CU_JIT_PREC_DIV: CUjit_option_enum = CUjit_option_enum(22);
-}
-impl CUjit_option_enum {
- /** \deprecated
- Control single-precision floating-point square root
- (-prec-sqrt) support (1: true, default).
- 1 : Enables the IEEE round-to-nearest mode
- 0 : Enables the fast approximation mode
- Option type: int\n
- Applies to: link-time optimization specified with CU_JIT_LTO
-
- Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
- pub const CU_JIT_PREC_SQRT: CUjit_option_enum = CUjit_option_enum(23);
-}
-impl CUjit_option_enum {
- /** \deprecated
- Enable/Disable the contraction of floating-point multiplies
- and adds/subtracts into floating-point multiply-add (-fma)
- operations (1: Enable, default; 0: Disable).
- Option type: int\n
- Applies to: link-time optimization specified with CU_JIT_LTO
-
- Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
- pub const CU_JIT_FMA: CUjit_option_enum = CUjit_option_enum(24);
-}
-impl CUjit_option_enum {
- /** \deprecated
- Array of kernel names that should be preserved at link time while others
- can be removed.\n
- Must contain ::CU_JIT_REFERENCED_KERNEL_COUNT entries.\n
- Note that kernel names can be mangled by the compiler in which case the
- mangled name needs to be specified.\n
- Wildcard "*" can be used to represent zero or more characters instead of
- specifying the full or mangled name.\n
- It is important to note that the wildcard "*" is also added implicitly.
- For example, specifying "foo" will match "foobaz", "barfoo", "barfoobaz" and
- thus preserve all kernels with those names. This can be avoided by providing
- a more specific name like "barfoobaz".\n
- Option type: const char **\n
- Applies to: dynamic linker only
-
- Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
- pub const CU_JIT_REFERENCED_KERNEL_NAMES: CUjit_option_enum = CUjit_option_enum(25);
-}
-impl CUjit_option_enum {
- /** \deprecated
- Number of entries in ::CU_JIT_REFERENCED_KERNEL_NAMES array.\n
- Option type: unsigned int\n
- Applies to: dynamic linker only
-
- Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
- pub const CU_JIT_REFERENCED_KERNEL_COUNT: CUjit_option_enum = CUjit_option_enum(26);
-}
-impl CUjit_option_enum {
- /** \deprecated
- Array of variable names (__device__ and/or __constant__) that should be
- preserved at link time while others can be removed.\n
- Must contain ::CU_JIT_REFERENCED_VARIABLE_COUNT entries.\n
- Note that variable names can be mangled by the compiler in which case the
- mangled name needs to be specified.\n
- Wildcard "*" can be used to represent zero or more characters instead of
- specifying the full or mangled name.\n
- It is important to note that the wildcard "*" is also added implicitly.
- For example, specifying "foo" will match "foobaz", "barfoo", "barfoobaz" and
- thus preserve all variables with those names. This can be avoided by providing
- a more specific name like "barfoobaz".\n
- Option type: const char **\n
- Applies to: link-time optimization specified with CU_JIT_LTO
-
- Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
- pub const CU_JIT_REFERENCED_VARIABLE_NAMES: CUjit_option_enum = CUjit_option_enum(
- 27,
- );
-}
-impl CUjit_option_enum {
- /** \deprecated
- Number of entries in ::CU_JIT_REFERENCED_VARIABLE_NAMES array.\n
- Option type: unsigned int\n
- Applies to: link-time optimization specified with CU_JIT_LTO
-
- Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
- pub const CU_JIT_REFERENCED_VARIABLE_COUNT: CUjit_option_enum = CUjit_option_enum(
- 28,
- );
-}
-impl CUjit_option_enum {
- /** \deprecated
- This option serves as a hint to enable the JIT compiler/linker
- to remove constant (__constant__) and device (__device__) variables
- unreferenced in device code (Disabled by default).\n
- Note that host references to constant and device variables using APIs like
- ::cuModuleGetGlobal() with this option specified may result in undefined behavior unless
- the variables are explicitly specified using ::CU_JIT_REFERENCED_VARIABLE_NAMES.\n
- Option type: int\n
- Applies to: link-time optimization specified with CU_JIT_LTO
-
- Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
- pub const CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES: CUjit_option_enum = CUjit_option_enum(
- 29,
- );
-}
-impl CUjit_option_enum {
- /** Generate position independent code (0: false)\n
- Option type: int\n
- Applies to: compiler only*/
- pub const CU_JIT_POSITION_INDEPENDENT_CODE: CUjit_option_enum = CUjit_option_enum(
- 30,
- );
-}
-impl CUjit_option_enum {
- /** This option hints to the JIT compiler the minimum number of CTAs from the
- kernel’s grid to be mapped to a SM. This option is ignored when used together
- with ::CU_JIT_MAX_REGISTERS or ::CU_JIT_THREADS_PER_BLOCK.
- Optimizations based on this option need ::CU_JIT_MAX_THREADS_PER_BLOCK to
- be specified as well. For kernels already using PTX directive .minnctapersm,
- this option will be ignored by default. Use ::CU_JIT_OVERRIDE_DIRECTIVE_VALUES
- to let this option take precedence over the PTX directive.
- Option type: unsigned int\n
- Applies to: compiler only*/
- pub const CU_JIT_MIN_CTA_PER_SM: CUjit_option_enum = CUjit_option_enum(31);
-}
-impl CUjit_option_enum {
-    /** Maximum number of threads in a thread block, computed as the product of
-    the maximum extent specified for each dimension of the block. This limit
-    is guaranteed not to be exceeded in any invocation of the kernel. Exceeding
-    the maximum number of threads results in a runtime error or kernel launch
- failure. For kernels already using PTX directive .maxntid, this option will
- be ignored by default. Use ::CU_JIT_OVERRIDE_DIRECTIVE_VALUES to let this
- option take precedence over the PTX directive.
- Option type: int\n
- Applies to: compiler only*/
- pub const CU_JIT_MAX_THREADS_PER_BLOCK: CUjit_option_enum = CUjit_option_enum(32);
-}
-impl CUjit_option_enum {
- /** This option lets the values specified using ::CU_JIT_MAX_REGISTERS,
- ::CU_JIT_THREADS_PER_BLOCK, ::CU_JIT_MAX_THREADS_PER_BLOCK and
- ::CU_JIT_MIN_CTA_PER_SM take precedence over any PTX directives.
- (0: Disable, default; 1: Enable)
- Option type: int\n
- Applies to: compiler only*/
- pub const CU_JIT_OVERRIDE_DIRECTIVE_VALUES: CUjit_option_enum = CUjit_option_enum(
- 33,
- );
-}
-impl CUjit_option_enum {
-    /// Sentinel value marking the number of JIT options.
- pub const CU_JIT_NUM_OPTIONS: CUjit_option_enum = CUjit_option_enum(34);
-}
-#[repr(transparent)]
-/// Online compiler and linker options
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUjit_option_enum(pub ::core::ffi::c_uint);
-/// Online compiler and linker options
-pub use self::CUjit_option_enum as CUjit_option;
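As the option descriptions above hint, these constants are consumed as one half of two parallel arrays (options and their void* values) by calls such as cuModuleLoadDataEx, which are declared elsewhere in these bindings. A hedged sketch of wiring up the info-log buffer pair:

// Sketch only: build parallel option/value arrays for an info-log buffer.
// Scalar values (here the buffer size) are smuggled through the void* slot,
// and both arrays must outlive the driver call that consumes them.
fn build_jit_log_options(
    log: &mut [u8],
) -> ([CUjit_option; 2], [*mut ::core::ffi::c_void; 2]) {
    let options = [
        CUjit_option::CU_JIT_INFO_LOG_BUFFER,
        CUjit_option::CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES,
    ];
    let values = [
        log.as_mut_ptr() as *mut ::core::ffi::c_void,
        log.len() as *mut ::core::ffi::c_void,
    ];
    (options, values)
}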
-impl CUjit_target_enum {
- ///< Compute device class 3.0
- pub const CU_TARGET_COMPUTE_30: CUjit_target_enum = CUjit_target_enum(30);
-}
-impl CUjit_target_enum {
- ///< Compute device class 3.2
- pub const CU_TARGET_COMPUTE_32: CUjit_target_enum = CUjit_target_enum(32);
-}
-impl CUjit_target_enum {
- ///< Compute device class 3.5
- pub const CU_TARGET_COMPUTE_35: CUjit_target_enum = CUjit_target_enum(35);
-}
-impl CUjit_target_enum {
- ///< Compute device class 3.7
- pub const CU_TARGET_COMPUTE_37: CUjit_target_enum = CUjit_target_enum(37);
-}
-impl CUjit_target_enum {
- ///< Compute device class 5.0
- pub const CU_TARGET_COMPUTE_50: CUjit_target_enum = CUjit_target_enum(50);
-}
-impl CUjit_target_enum {
- ///< Compute device class 5.2
- pub const CU_TARGET_COMPUTE_52: CUjit_target_enum = CUjit_target_enum(52);
-}
-impl CUjit_target_enum {
- ///< Compute device class 5.3
- pub const CU_TARGET_COMPUTE_53: CUjit_target_enum = CUjit_target_enum(53);
-}
-impl CUjit_target_enum {
- ///< Compute device class 6.0.
- pub const CU_TARGET_COMPUTE_60: CUjit_target_enum = CUjit_target_enum(60);
-}
-impl CUjit_target_enum {
- ///< Compute device class 6.1.
- pub const CU_TARGET_COMPUTE_61: CUjit_target_enum = CUjit_target_enum(61);
-}
-impl CUjit_target_enum {
- ///< Compute device class 6.2.
- pub const CU_TARGET_COMPUTE_62: CUjit_target_enum = CUjit_target_enum(62);
-}
-impl CUjit_target_enum {
- ///< Compute device class 7.0.
- pub const CU_TARGET_COMPUTE_70: CUjit_target_enum = CUjit_target_enum(70);
-}
-impl CUjit_target_enum {
- ///< Compute device class 7.2.
- pub const CU_TARGET_COMPUTE_72: CUjit_target_enum = CUjit_target_enum(72);
-}
-impl CUjit_target_enum {
- ///< Compute device class 7.5.
- pub const CU_TARGET_COMPUTE_75: CUjit_target_enum = CUjit_target_enum(75);
-}
-impl CUjit_target_enum {
- ///< Compute device class 8.0.
- pub const CU_TARGET_COMPUTE_80: CUjit_target_enum = CUjit_target_enum(80);
-}
-impl CUjit_target_enum {
- ///< Compute device class 8.6.
- pub const CU_TARGET_COMPUTE_86: CUjit_target_enum = CUjit_target_enum(86);
-}
-impl CUjit_target_enum {
- ///< Compute device class 8.7.
- pub const CU_TARGET_COMPUTE_87: CUjit_target_enum = CUjit_target_enum(87);
-}
-impl CUjit_target_enum {
- ///< Compute device class 8.9.
- pub const CU_TARGET_COMPUTE_89: CUjit_target_enum = CUjit_target_enum(89);
-}
-impl CUjit_target_enum {
- ///< Compute device class 9.0.
- pub const CU_TARGET_COMPUTE_90: CUjit_target_enum = CUjit_target_enum(90);
-}
-impl CUjit_target_enum {
- pub const CU_TARGET_COMPUTE_90A: CUjit_target_enum = CUjit_target_enum(65626);
-}
-#[repr(transparent)]
-/// Online compilation targets
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUjit_target_enum(pub ::core::ffi::c_uint);
-/// Online compilation targets
-pub use self::CUjit_target_enum as CUjit_target;
-impl CUjit_fallback_enum {
- ///< Prefer to compile ptx if exact binary match not found
- pub const CU_PREFER_PTX: CUjit_fallback_enum = CUjit_fallback_enum(0);
-}
-impl CUjit_fallback_enum {
- ///< Prefer to fall back to compatible binary code if exact match not found
- pub const CU_PREFER_BINARY: CUjit_fallback_enum = CUjit_fallback_enum(1);
-}
-#[repr(transparent)]
-/// Cubin matching fallback strategies
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUjit_fallback_enum(pub ::core::ffi::c_uint);
-/// Cubin matching fallback strategies
-pub use self::CUjit_fallback_enum as CUjit_fallback;
-impl CUjit_cacheMode_enum {
- ///< Compile with no -dlcm flag specified
- pub const CU_JIT_CACHE_OPTION_NONE: CUjit_cacheMode_enum = CUjit_cacheMode_enum(0);
-}
-impl CUjit_cacheMode_enum {
- ///< Compile with L1 cache disabled
- pub const CU_JIT_CACHE_OPTION_CG: CUjit_cacheMode_enum = CUjit_cacheMode_enum(1);
-}
-impl CUjit_cacheMode_enum {
- ///< Compile with L1 cache enabled
- pub const CU_JIT_CACHE_OPTION_CA: CUjit_cacheMode_enum = CUjit_cacheMode_enum(2);
-}
-#[repr(transparent)]
-/// Caching modes for dlcm
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUjit_cacheMode_enum(pub ::core::ffi::c_uint);
-/// Caching modes for dlcm
-pub use self::CUjit_cacheMode_enum as CUjit_cacheMode;
-impl CUjitInputType_enum {
- /** Compiled device-class-specific device code\n
- Applicable options: none*/
- pub const CU_JIT_INPUT_CUBIN: CUjitInputType_enum = CUjitInputType_enum(0);
-}
-impl CUjitInputType_enum {
- /** PTX source code\n
- Applicable options: PTX compiler options*/
- pub const CU_JIT_INPUT_PTX: CUjitInputType_enum = CUjitInputType_enum(1);
-}
-impl CUjitInputType_enum {
- /** Bundle of multiple cubins and/or PTX of some device code\n
- Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY*/
- pub const CU_JIT_INPUT_FATBINARY: CUjitInputType_enum = CUjitInputType_enum(2);
-}
-impl CUjitInputType_enum {
- /** Host object with embedded device code\n
- Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY*/
- pub const CU_JIT_INPUT_OBJECT: CUjitInputType_enum = CUjitInputType_enum(3);
-}
-impl CUjitInputType_enum {
- /** Archive of host objects with embedded device code\n
- Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY*/
- pub const CU_JIT_INPUT_LIBRARY: CUjitInputType_enum = CUjitInputType_enum(4);
-}
-impl CUjitInputType_enum {
- /** \deprecated
- High-level intermediate code for link-time optimization\n
- Applicable options: NVVM compiler options, PTX compiler options
-
- Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/
- pub const CU_JIT_INPUT_NVVM: CUjitInputType_enum = CUjitInputType_enum(5);
-}
-impl CUjitInputType_enum {
-    /// Sentinel value marking the number of JIT input types.
- pub const CU_JIT_NUM_INPUT_TYPES: CUjitInputType_enum = CUjitInputType_enum(6);
-}
-#[repr(transparent)]
-/// Device code formats
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUjitInputType_enum(pub ::core::ffi::c_uint);
-/// Device code formats
-pub use self::CUjitInputType_enum as CUjitInputType;
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUlinkState_st {
- _unused: [u8; 0],
-}
-pub type CUlinkState = *mut CUlinkState_st;
-impl CUgraphicsRegisterFlags_enum {
- pub const CU_GRAPHICS_REGISTER_FLAGS_NONE: CUgraphicsRegisterFlags_enum = CUgraphicsRegisterFlags_enum(
- 0,
- );
-}
-impl CUgraphicsRegisterFlags_enum {
- pub const CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY: CUgraphicsRegisterFlags_enum = CUgraphicsRegisterFlags_enum(
- 1,
- );
-}
-impl CUgraphicsRegisterFlags_enum {
- pub const CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD: CUgraphicsRegisterFlags_enum = CUgraphicsRegisterFlags_enum(
- 2,
- );
-}
-impl CUgraphicsRegisterFlags_enum {
- pub const CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST: CUgraphicsRegisterFlags_enum = CUgraphicsRegisterFlags_enum(
- 4,
- );
-}
-impl CUgraphicsRegisterFlags_enum {
- pub const CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER: CUgraphicsRegisterFlags_enum = CUgraphicsRegisterFlags_enum(
- 8,
- );
-}
-#[repr(transparent)]
-/// Flags to register a graphics resource
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUgraphicsRegisterFlags_enum(pub ::core::ffi::c_uint);
-/// Flags to register a graphics resource
-pub use self::CUgraphicsRegisterFlags_enum as CUgraphicsRegisterFlags;
-impl CUgraphicsMapResourceFlags_enum {
- pub const CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: CUgraphicsMapResourceFlags_enum = CUgraphicsMapResourceFlags_enum(
- 0,
- );
-}
-impl CUgraphicsMapResourceFlags_enum {
- pub const CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: CUgraphicsMapResourceFlags_enum = CUgraphicsMapResourceFlags_enum(
- 1,
- );
-}
-impl CUgraphicsMapResourceFlags_enum {
- pub const CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: CUgraphicsMapResourceFlags_enum = CUgraphicsMapResourceFlags_enum(
- 2,
- );
-}
-#[repr(transparent)]
-/// Flags for mapping and unmapping interop resources
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUgraphicsMapResourceFlags_enum(pub ::core::ffi::c_uint);
-/// Flags for mapping and unmapping interop resources
-pub use self::CUgraphicsMapResourceFlags_enum as CUgraphicsMapResourceFlags;
-impl CUarray_cubemap_face_enum {
- ///< Positive X face of cubemap
- pub const CU_CUBEMAP_FACE_POSITIVE_X: CUarray_cubemap_face_enum = CUarray_cubemap_face_enum(
- 0,
- );
-}
-impl CUarray_cubemap_face_enum {
- ///< Negative X face of cubemap
- pub const CU_CUBEMAP_FACE_NEGATIVE_X: CUarray_cubemap_face_enum = CUarray_cubemap_face_enum(
- 1,
- );
-}
-impl CUarray_cubemap_face_enum {
- ///< Positive Y face of cubemap
- pub const CU_CUBEMAP_FACE_POSITIVE_Y: CUarray_cubemap_face_enum = CUarray_cubemap_face_enum(
- 2,
- );
-}
-impl CUarray_cubemap_face_enum {
- ///< Negative Y face of cubemap
- pub const CU_CUBEMAP_FACE_NEGATIVE_Y: CUarray_cubemap_face_enum = CUarray_cubemap_face_enum(
- 3,
- );
-}
-impl CUarray_cubemap_face_enum {
- ///< Positive Z face of cubemap
- pub const CU_CUBEMAP_FACE_POSITIVE_Z: CUarray_cubemap_face_enum = CUarray_cubemap_face_enum(
- 4,
- );
-}
-impl CUarray_cubemap_face_enum {
- ///< Negative Z face of cubemap
- pub const CU_CUBEMAP_FACE_NEGATIVE_Z: CUarray_cubemap_face_enum = CUarray_cubemap_face_enum(
- 5,
- );
-}
-#[repr(transparent)]
-/// Array indices for cube faces
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUarray_cubemap_face_enum(pub ::core::ffi::c_uint);
-/// Array indices for cube faces
-pub use self::CUarray_cubemap_face_enum as CUarray_cubemap_face;
-impl CUlimit_enum {
- ///< GPU thread stack size
- pub const CU_LIMIT_STACK_SIZE: CUlimit_enum = CUlimit_enum(0);
-}
-impl CUlimit_enum {
- ///< GPU printf FIFO size
- pub const CU_LIMIT_PRINTF_FIFO_SIZE: CUlimit_enum = CUlimit_enum(1);
-}
-impl CUlimit_enum {
- ///< GPU malloc heap size
- pub const CU_LIMIT_MALLOC_HEAP_SIZE: CUlimit_enum = CUlimit_enum(2);
-}
-impl CUlimit_enum {
- ///< GPU device runtime launch synchronize depth
- pub const CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH: CUlimit_enum = CUlimit_enum(3);
-}
-impl CUlimit_enum {
- ///< GPU device runtime pending launch count
- pub const CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT: CUlimit_enum = CUlimit_enum(4);
-}
-impl CUlimit_enum {
- ///< A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in Bytes). This is a hint
- pub const CU_LIMIT_MAX_L2_FETCH_GRANULARITY: CUlimit_enum = CUlimit_enum(5);
-}
-impl CUlimit_enum {
- ///< A size in bytes for L2 persisting lines cache size
- pub const CU_LIMIT_PERSISTING_L2_CACHE_SIZE: CUlimit_enum = CUlimit_enum(6);
-}
-impl CUlimit_enum {
- pub const CU_LIMIT_MAX: CUlimit_enum = CUlimit_enum(7);
-}
-#[repr(transparent)]
-/// Limits
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUlimit_enum(pub ::core::ffi::c_uint);
-/// Limits
-pub use self::CUlimit_enum as CUlimit;
-impl CUresourcetype_enum {
- ///< Array resource
- pub const CU_RESOURCE_TYPE_ARRAY: CUresourcetype_enum = CUresourcetype_enum(0);
-}
-impl CUresourcetype_enum {
- ///< Mipmapped array resource
- pub const CU_RESOURCE_TYPE_MIPMAPPED_ARRAY: CUresourcetype_enum = CUresourcetype_enum(
- 1,
- );
-}
-impl CUresourcetype_enum {
- ///< Linear resource
- pub const CU_RESOURCE_TYPE_LINEAR: CUresourcetype_enum = CUresourcetype_enum(2);
-}
-impl CUresourcetype_enum {
- ///< Pitch 2D resource
- pub const CU_RESOURCE_TYPE_PITCH2D: CUresourcetype_enum = CUresourcetype_enum(3);
-}
-#[repr(transparent)]
-/// Resource types
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUresourcetype_enum(pub ::core::ffi::c_uint);
-/// Resource types
-pub use self::CUresourcetype_enum as CUresourcetype;
-/** CUDA host function
- \param userData Argument value passed to the function*/
-pub type CUhostFn = ::core::option::Option<
- unsafe extern "system" fn(userData: *mut ::core::ffi::c_void),
->;
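A minimal sketch of a callback matching this signature; the cuLaunchHostFunc / host-node machinery that would receive it is declared elsewhere in these bindings:

// Illustration only: a host callback whose userData carries a plain integer tag.
unsafe extern "system" fn log_completion(user_data: *mut ::core::ffi::c_void) {
    let tag = user_data as usize;
    eprintln!("host callback fired, tag = {tag}");
}

// Wrapped in Option to match CUhostFn.
const LOG_COMPLETION: CUhostFn = Some(log_completion);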
-impl CUaccessProperty_enum {
- ///< Normal cache persistence.
- pub const CU_ACCESS_PROPERTY_NORMAL: CUaccessProperty_enum = CUaccessProperty_enum(
- 0,
- );
-}
-impl CUaccessProperty_enum {
-    ///< Streaming access is less likely to persist in cache.
- pub const CU_ACCESS_PROPERTY_STREAMING: CUaccessProperty_enum = CUaccessProperty_enum(
- 1,
- );
-}
-impl CUaccessProperty_enum {
- ///< Persisting access is more likely to persist in cache.
- pub const CU_ACCESS_PROPERTY_PERSISTING: CUaccessProperty_enum = CUaccessProperty_enum(
- 2,
- );
-}
-#[repr(transparent)]
-/// Specifies performance hint with ::CUaccessPolicyWindow for hitProp and missProp members.
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUaccessProperty_enum(pub ::core::ffi::c_uint);
-/// Specifies performance hint with ::CUaccessPolicyWindow for hitProp and missProp members.
-pub use self::CUaccessProperty_enum as CUaccessProperty;
-/** Specifies an access policy for a window, a contiguous extent of memory
- beginning at base_ptr and ending at base_ptr + num_bytes.
- num_bytes is limited by CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE.
- Partition into many segments and assign segments such that:
- sum of "hit segments" / window == approx. ratio.
- sum of "miss segments" / window == approx 1-ratio.
- Segments and ratio specifications are fitted to the capabilities of
- the architecture.
- Accesses in a hit segment apply the hitProp access policy.
- Accesses in a miss segment apply the missProp access policy.*/
-#[repr(C)]
-#[derive(Debug, Copy, Clone, PartialEq)]
-pub struct CUaccessPolicyWindow_st {
- ///< Starting address of the access policy window. CUDA driver may align it.
- pub base_ptr: *mut ::core::ffi::c_void,
- ///< Size in bytes of the window policy. CUDA driver may restrict the maximum size and alignment.
- pub num_bytes: usize,
- ///< hitRatio specifies percentage of lines assigned hitProp, rest are assigned missProp.
- pub hitRatio: f32,
- ///< ::CUaccessProperty set for hit.
- pub hitProp: CUaccessProperty,
- ///< ::CUaccessProperty set for miss. Must be either NORMAL or STREAMING
- pub missProp: CUaccessProperty,
-}
-/** Specifies an access policy for a window, a contiguous extent of memory
- beginning at base_ptr and ending at base_ptr + num_bytes.
- num_bytes is limited by CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE.
- Partition into many segments and assign segments such that:
- sum of "hit segments" / window == approx. ratio.
- sum of "miss segments" / window == approx 1-ratio.
- Segments and ratio specifications are fitted to the capabilities of
- the architecture.
- Accesses in a hit segment apply the hitProp access policy.
- Accesses in a miss segment apply the missProp access policy.*/
-pub type CUaccessPolicyWindow_v1 = CUaccessPolicyWindow_st;
-/// Access policy window
-pub type CUaccessPolicyWindow = CUaccessPolicyWindow_v1;
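A hypothetical construction of the window described above: roughly 60% of the lines in the range receive the persisting hint, the rest stream, and the struct is then attached via CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW or a stream attribute (both handled elsewhere in these bindings):

// Illustration only.
fn persisting_window(
    base: *mut ::core::ffi::c_void,
    len: usize,
) -> CUaccessPolicyWindow {
    CUaccessPolicyWindow_st {
        base_ptr: base,
        num_bytes: len, // limited by CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE
        hitRatio: 0.6,  // fraction of lines that receive hitProp
        hitProp: CUaccessProperty::CU_ACCESS_PROPERTY_PERSISTING,
        missProp: CUaccessProperty::CU_ACCESS_PROPERTY_STREAMING,
    }
}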
-/// GPU kernel node parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_KERNEL_NODE_PARAMS_st {
- ///< Kernel to launch
- pub func: CUfunction,
- ///< Width of grid in blocks
- pub gridDimX: ::core::ffi::c_uint,
- ///< Height of grid in blocks
- pub gridDimY: ::core::ffi::c_uint,
- ///< Depth of grid in blocks
- pub gridDimZ: ::core::ffi::c_uint,
- ///< X dimension of each thread block
- pub blockDimX: ::core::ffi::c_uint,
- ///< Y dimension of each thread block
- pub blockDimY: ::core::ffi::c_uint,
- ///< Z dimension of each thread block
- pub blockDimZ: ::core::ffi::c_uint,
- ///< Dynamic shared-memory size per thread block in bytes
- pub sharedMemBytes: ::core::ffi::c_uint,
- ///< Array of pointers to kernel parameters
- pub kernelParams: *mut *mut ::core::ffi::c_void,
- ///< Extra options
- pub extra: *mut *mut ::core::ffi::c_void,
-}
-/// GPU kernel node parameters
-pub type CUDA_KERNEL_NODE_PARAMS_v1 = CUDA_KERNEL_NODE_PARAMS_st;
-/// GPU kernel node parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_KERNEL_NODE_PARAMS_v2_st {
- ///< Kernel to launch
- pub func: CUfunction,
- ///< Width of grid in blocks
- pub gridDimX: ::core::ffi::c_uint,
- ///< Height of grid in blocks
- pub gridDimY: ::core::ffi::c_uint,
- ///< Depth of grid in blocks
- pub gridDimZ: ::core::ffi::c_uint,
- ///< X dimension of each thread block
- pub blockDimX: ::core::ffi::c_uint,
- ///< Y dimension of each thread block
- pub blockDimY: ::core::ffi::c_uint,
- ///< Z dimension of each thread block
- pub blockDimZ: ::core::ffi::c_uint,
- ///< Dynamic shared-memory size per thread block in bytes
- pub sharedMemBytes: ::core::ffi::c_uint,
- ///< Array of pointers to kernel parameters
- pub kernelParams: *mut *mut ::core::ffi::c_void,
- ///< Extra options
- pub extra: *mut *mut ::core::ffi::c_void,
- ///< Kernel to launch, will only be referenced if func is NULL
- pub kern: CUkernel,
- ///< Context for the kernel task to run in. The value NULL will indicate the current context should be used by the api. This field is ignored if func is set.
- pub ctx: CUcontext,
-}
-/// GPU kernel node parameters
-pub type CUDA_KERNEL_NODE_PARAMS_v2 = CUDA_KERNEL_NODE_PARAMS_v2_st;
-/// GPU kernel node parameters
-pub type CUDA_KERNEL_NODE_PARAMS = CUDA_KERNEL_NODE_PARAMS_v2;
-/// GPU kernel node parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_KERNEL_NODE_PARAMS_v3_st {
- ///< Kernel to launch
- pub func: CUfunction,
- ///< Width of grid in blocks
- pub gridDimX: ::core::ffi::c_uint,
- ///< Height of grid in blocks
- pub gridDimY: ::core::ffi::c_uint,
- ///< Depth of grid in blocks
- pub gridDimZ: ::core::ffi::c_uint,
- ///< X dimension of each thread block
- pub blockDimX: ::core::ffi::c_uint,
- ///< Y dimension of each thread block
- pub blockDimY: ::core::ffi::c_uint,
- ///< Z dimension of each thread block
- pub blockDimZ: ::core::ffi::c_uint,
- ///< Dynamic shared-memory size per thread block in bytes
- pub sharedMemBytes: ::core::ffi::c_uint,
- ///< Array of pointers to kernel parameters
- pub kernelParams: *mut *mut ::core::ffi::c_void,
- ///< Extra options
- pub extra: *mut *mut ::core::ffi::c_void,
- ///< Kernel to launch, will only be referenced if func is NULL
- pub kern: CUkernel,
- ///< Context for the kernel task to run in. The value NULL will indicate the current context should be used by the api. This field is ignored if func is set.
- pub ctx: CUcontext,
-}
-/// GPU kernel node parameters
-pub type CUDA_KERNEL_NODE_PARAMS_v3 = CUDA_KERNEL_NODE_PARAMS_v3_st;
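A hedged sketch of filling the current CUDA_KERNEL_NODE_PARAMS (the _v2 layout) for a simple 1-D launch; since func is set, the kern and ctx fields are ignored and can stay null (CUfunction, CUkernel and CUcontext are pointer typedefs declared elsewhere in these bindings):

// Illustration only.
fn kernel_node_params(
    func: CUfunction,
    blocks: u32,
    threads: u32,
    params: *mut *mut ::core::ffi::c_void,
) -> CUDA_KERNEL_NODE_PARAMS {
    CUDA_KERNEL_NODE_PARAMS_v2_st {
        func,
        gridDimX: blocks,
        gridDimY: 1,
        gridDimZ: 1,
        blockDimX: threads,
        blockDimY: 1,
        blockDimZ: 1,
        sharedMemBytes: 0,
        kernelParams: params,
        extra: ::core::ptr::null_mut(),
        kern: ::core::ptr::null_mut(),
        ctx: ::core::ptr::null_mut(),
    }
}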
-/// Memset node parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_MEMSET_NODE_PARAMS_st {
- ///< Destination device pointer
- pub dst: CUdeviceptr,
- ///< Pitch of destination device pointer. Unused if height is 1
- pub pitch: usize,
- ///< Value to be set
- pub value: ::core::ffi::c_uint,
- ///< Size of each element in bytes. Must be 1, 2, or 4.
- pub elementSize: ::core::ffi::c_uint,
- ///< Width of the row in elements
- pub width: usize,
- ///< Number of rows
- pub height: usize,
-}
-/// Memset node parameters
-pub type CUDA_MEMSET_NODE_PARAMS_v1 = CUDA_MEMSET_NODE_PARAMS_st;
-/// Memset node parameters
-pub type CUDA_MEMSET_NODE_PARAMS = CUDA_MEMSET_NODE_PARAMS_v1;
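A hypothetical fill-in for a pitched 2-D clear, matching the field comments above (elementSize must be 1, 2, or 4; pitch is ignored when height is 1):

// Illustration only: zero `width` 4-byte elements in each of `height` rows.
fn clear_pitched_2d(
    dst: CUdeviceptr,
    pitch: usize,
    width: usize,
    height: usize,
) -> CUDA_MEMSET_NODE_PARAMS {
    CUDA_MEMSET_NODE_PARAMS_st {
        dst,
        pitch,
        value: 0,
        elementSize: 4,
        width,
        height,
    }
}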
-/// Memset node parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_MEMSET_NODE_PARAMS_v2_st {
- ///< Destination device pointer
- pub dst: CUdeviceptr,
- ///< Pitch of destination device pointer. Unused if height is 1
- pub pitch: usize,
- ///< Value to be set
- pub value: ::core::ffi::c_uint,
- ///< Size of each element in bytes. Must be 1, 2, or 4.
- pub elementSize: ::core::ffi::c_uint,
- ///< Width of the row in elements
- pub width: usize,
- ///< Number of rows
- pub height: usize,
- ///< Context on which to run the node
- pub ctx: CUcontext,
-}
-/// Memset node parameters
-pub type CUDA_MEMSET_NODE_PARAMS_v2 = CUDA_MEMSET_NODE_PARAMS_v2_st;
-/// Host node parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash)]
-pub struct CUDA_HOST_NODE_PARAMS_st {
- ///< The function to call when the node executes
- pub fn_: CUhostFn,
- ///< Argument to pass to the function
- pub userData: *mut ::core::ffi::c_void,
-}
-/// Host node parameters
-pub type CUDA_HOST_NODE_PARAMS_v1 = CUDA_HOST_NODE_PARAMS_st;
-/// Host node parameters
-pub type CUDA_HOST_NODE_PARAMS = CUDA_HOST_NODE_PARAMS_v1;
-/// Host node parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_HOST_NODE_PARAMS_v2_st {
- ///< The function to call when the node executes
- pub fn_: CUhostFn,
- ///< Argument to pass to the function
- pub userData: *mut ::core::ffi::c_void,
-}
-/// Host node parameters
-pub type CUDA_HOST_NODE_PARAMS_v2 = CUDA_HOST_NODE_PARAMS_v2_st;
-impl CUgraphConditionalNodeType_enum {
- ///< Conditional 'if' Node. Body executed once if condition value is non-zero.
- pub const CU_GRAPH_COND_TYPE_IF: CUgraphConditionalNodeType_enum = CUgraphConditionalNodeType_enum(
- 0,
- );
-}
-impl CUgraphConditionalNodeType_enum {
- ///< Conditional 'while' Node. Body executed repeatedly while condition value is non-zero.
- pub const CU_GRAPH_COND_TYPE_WHILE: CUgraphConditionalNodeType_enum = CUgraphConditionalNodeType_enum(
- 1,
- );
-}
-#[repr(transparent)]
-/// Conditional node types
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUgraphConditionalNodeType_enum(pub ::core::ffi::c_uint);
-/// Conditional node types
-pub use self::CUgraphConditionalNodeType_enum as CUgraphConditionalNodeType;
-/// Conditional node parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_CONDITIONAL_NODE_PARAMS {
- /**< Conditional node handle.
-Handles must be created in advance of creating the node
-using ::cuGraphConditionalHandleCreate.*/
- pub handle: CUgraphConditionalHandle,
- ///< Type of conditional node.
- pub type_: CUgraphConditionalNodeType,
- ///< Size of graph output array. Must be 1.
- pub size: ::core::ffi::c_uint,
- /**< CUDA-owned array populated with conditional node child graphs during creation of the node.
-Valid for the lifetime of the conditional node.
-The contents of the graph(s) are subject to the following constraints:
-
-- Allowed node types are kernel nodes, empty nodes, child graphs, memsets,
-memcopies, and conditionals. This applies recursively to child graphs and conditional bodies.
-- All kernels, including kernels in nested conditionals or child graphs at any level,
-must belong to the same CUDA context.
-
-These graphs may be populated using graph node creation APIs or ::cuStreamBeginCaptureToGraph.*/
- pub phGraph_out: *mut CUgraph,
- ///< Context on which to run the node. Must match context used to create the handle and all body nodes.
- pub ctx: CUcontext,
-}
-impl CUgraphNodeType_enum {
- ///< GPU kernel node
- pub const CU_GRAPH_NODE_TYPE_KERNEL: CUgraphNodeType_enum = CUgraphNodeType_enum(0);
-}
-impl CUgraphNodeType_enum {
- ///< Memcpy node
- pub const CU_GRAPH_NODE_TYPE_MEMCPY: CUgraphNodeType_enum = CUgraphNodeType_enum(1);
-}
-impl CUgraphNodeType_enum {
- ///< Memset node
- pub const CU_GRAPH_NODE_TYPE_MEMSET: CUgraphNodeType_enum = CUgraphNodeType_enum(2);
-}
-impl CUgraphNodeType_enum {
- ///< Host (executable) node
- pub const CU_GRAPH_NODE_TYPE_HOST: CUgraphNodeType_enum = CUgraphNodeType_enum(3);
-}
-impl CUgraphNodeType_enum {
- ///< Node which executes an embedded graph
- pub const CU_GRAPH_NODE_TYPE_GRAPH: CUgraphNodeType_enum = CUgraphNodeType_enum(4);
-}
-impl CUgraphNodeType_enum {
- ///< Empty (no-op) node
- pub const CU_GRAPH_NODE_TYPE_EMPTY: CUgraphNodeType_enum = CUgraphNodeType_enum(5);
-}
-impl CUgraphNodeType_enum {
- ///< External event wait node
- pub const CU_GRAPH_NODE_TYPE_WAIT_EVENT: CUgraphNodeType_enum = CUgraphNodeType_enum(
- 6,
- );
-}
-impl CUgraphNodeType_enum {
- ///< External event record node
- pub const CU_GRAPH_NODE_TYPE_EVENT_RECORD: CUgraphNodeType_enum = CUgraphNodeType_enum(
- 7,
- );
-}
-impl CUgraphNodeType_enum {
- ///< External semaphore signal node
- pub const CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL: CUgraphNodeType_enum = CUgraphNodeType_enum(
- 8,
- );
-}
-impl CUgraphNodeType_enum {
- ///< External semaphore wait node
- pub const CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT: CUgraphNodeType_enum = CUgraphNodeType_enum(
- 9,
- );
-}
-impl CUgraphNodeType_enum {
- ///< Memory Allocation Node
- pub const CU_GRAPH_NODE_TYPE_MEM_ALLOC: CUgraphNodeType_enum = CUgraphNodeType_enum(
- 10,
- );
-}
-impl CUgraphNodeType_enum {
- ///< Memory Free Node
- pub const CU_GRAPH_NODE_TYPE_MEM_FREE: CUgraphNodeType_enum = CUgraphNodeType_enum(
- 11,
- );
-}
-impl CUgraphNodeType_enum {
- ///< Batch MemOp Node
- pub const CU_GRAPH_NODE_TYPE_BATCH_MEM_OP: CUgraphNodeType_enum = CUgraphNodeType_enum(
- 12,
- );
-}
-impl CUgraphNodeType_enum {
- /**< Conditional Node
-
-May be used to implement a conditional execution path or loop
-inside of a graph. The graph(s) contained within the body of the conditional node
-can be selectively executed or iterated upon based on the value of a conditional
-variable.
-
-Handles must be created in advance of creating the node
-using ::cuGraphConditionalHandleCreate.
-
-The following restrictions apply to graphs which contain conditional nodes:
-The graph cannot be used in a child node.
-Only one instantiation of the graph may exist at any point in time.
-The graph cannot be cloned.
-
-To set the control value, supply a default value when creating the handle and/or
-call ::cudaGraphSetConditional from device code.*/
- pub const CU_GRAPH_NODE_TYPE_CONDITIONAL: CUgraphNodeType_enum = CUgraphNodeType_enum(
- 13,
- );
-}
-#[repr(transparent)]
-/// Graph node types
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUgraphNodeType_enum(pub ::core::ffi::c_uint);
-/// Graph node types
-pub use self::CUgraphNodeType_enum as CUgraphNodeType;
-impl CUgraphDependencyType_enum {
- ///< This is an ordinary dependency.
- pub const CU_GRAPH_DEPENDENCY_TYPE_DEFAULT: CUgraphDependencyType_enum = CUgraphDependencyType_enum(
- 0,
- );
-}
-impl CUgraphDependencyType_enum {
- /**< This dependency type allows the downstream node to
-use \c cudaGridDependencySynchronize(). It may only be used
-between kernel nodes, and must be used with either the
-::CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC or
-::CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER outgoing port.*/
- pub const CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC: CUgraphDependencyType_enum = CUgraphDependencyType_enum(
- 1,
- );
-}
-#[repr(transparent)]
-/// Type annotations that can be applied to graph edges as part of ::CUgraphEdgeData.
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUgraphDependencyType_enum(pub ::core::ffi::c_uint);
-/// Type annotations that can be applied to graph edges as part of ::CUgraphEdgeData.
-pub use self::CUgraphDependencyType_enum as CUgraphDependencyType;
-/** Optional annotation for edges in a CUDA graph. Note, all edges implicitly have annotations and
- default to a zero-initialized value if not specified. A zero-initialized struct indicates a
- standard full serialization of two nodes with memory visibility.*/
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUgraphEdgeData_st {
- /**< This indicates when the dependency is triggered from the upstream
-node on the edge. The meaning is specific to the node type. A value
-of 0 in all cases means full completion of the upstream node, with
-memory visibility to the downstream node or portion thereof
-(indicated by \c to_port).
-<br>
-Only kernel nodes define non-zero ports. A kernel node
-can use the following output port types:
-::CU_GRAPH_KERNEL_NODE_PORT_DEFAULT, ::CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC,
-or ::CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER.*/
- pub from_port: ::core::ffi::c_uchar,
- /**< This indicates what portion of the downstream node is dependent on
-the upstream node or portion thereof (indicated by \c from_port). The
-meaning is specific to the node type. A value of 0 in all cases means
-the entirety of the downstream node is dependent on the upstream work.
-<br>
-Currently no node types define non-zero ports. Accordingly, this field
-must be set to zero.*/
- pub to_port: ::core::ffi::c_uchar,
- /**< This should be populated with a value from ::CUgraphDependencyType. (It
-is typed as char due to compiler-specific layout of bitfields.) See
-::CUgraphDependencyType.*/
- pub type_: ::core::ffi::c_uchar,
- /**< These bytes are unused and must be zeroed. This ensures
-compatibility if additional fields are added in the future.*/
- pub reserved: [::core::ffi::c_uchar; 5usize],
-}
-/** Optional annotation for edges in a CUDA graph. Note, all edges implicitly have annotations and
- default to a zero-initialized value if not specified. A zero-initialized struct indicates a
- standard full serialization of two nodes with memory visibility.*/
-pub type CUgraphEdgeData = CUgraphEdgeData_st;
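A minimal sketch of the programmatic-dependency annotation the comments above describe; to_port and reserved must stay zero, and the kernel-node port constants (e.g. CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC) are assumed to be defined elsewhere in these bindings:

// Illustration only.
fn programmatic_edge(from_port: u8) -> CUgraphEdgeData {
    CUgraphEdgeData_st {
        from_port, // one of the kernel-node output ports
        to_port: 0,
        type_: CUgraphDependencyType::CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC.0 as u8,
        reserved: [0; 5],
    }
}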
-impl CUgraphInstantiateResult_enum {
- ///< Instantiation succeeded
- pub const CUDA_GRAPH_INSTANTIATE_SUCCESS: CUgraphInstantiateResult_enum = CUgraphInstantiateResult_enum(
- 0,
- );
-}
-impl CUgraphInstantiateResult_enum {
- ///< Instantiation failed for an unexpected reason which is described in the return value of the function
- pub const CUDA_GRAPH_INSTANTIATE_ERROR: CUgraphInstantiateResult_enum = CUgraphInstantiateResult_enum(
- 1,
- );
-}
-impl CUgraphInstantiateResult_enum {
- ///< Instantiation failed due to invalid structure, such as cycles
- pub const CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE: CUgraphInstantiateResult_enum = CUgraphInstantiateResult_enum(
- 2,
- );
-}
-impl CUgraphInstantiateResult_enum {
- ///< Instantiation for device launch failed because the graph contained an unsupported operation
- pub const CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED: CUgraphInstantiateResult_enum = CUgraphInstantiateResult_enum(
- 3,
- );
-}
-impl CUgraphInstantiateResult_enum {
- ///< Instantiation for device launch failed due to the nodes belonging to different contexts
- pub const CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED: CUgraphInstantiateResult_enum = CUgraphInstantiateResult_enum(
- 4,
- );
-}
-#[repr(transparent)]
-/// Graph instantiation results
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUgraphInstantiateResult_enum(pub ::core::ffi::c_uint);
-/// Graph instantiation results
-pub use self::CUgraphInstantiateResult_enum as CUgraphInstantiateResult;
-/// Graph instantiation parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_GRAPH_INSTANTIATE_PARAMS_st {
- ///< Instantiation flags
- pub flags: cuuint64_t,
- ///< Upload stream
- pub hUploadStream: CUstream,
- ///< The node which caused instantiation to fail, if any
- pub hErrNode_out: CUgraphNode,
- ///< Whether instantiation was successful. If it failed, the reason why
- pub result_out: CUgraphInstantiateResult,
-}
-/// Graph instantiation parameters
-pub type CUDA_GRAPH_INSTANTIATE_PARAMS = CUDA_GRAPH_INSTANTIATE_PARAMS_st;
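A hedged sketch of filling these parameters before instantiation; the output fields (hErrNode_out, result_out) are written by the driver and only need benign initial values (cuuint64_t, CUstream and CUgraphNode are typedefs declared elsewhere in these bindings):

// Illustration only.
fn instantiate_params(flags: cuuint64_t, upload: CUstream) -> CUDA_GRAPH_INSTANTIATE_PARAMS {
    CUDA_GRAPH_INSTANTIATE_PARAMS_st {
        flags,
        hUploadStream: upload,
        hErrNode_out: ::core::ptr::null_mut(),
        result_out: CUgraphInstantiateResult::CUDA_GRAPH_INSTANTIATE_SUCCESS,
    }
}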
-impl CUsynchronizationPolicy_enum {
- pub const CU_SYNC_POLICY_AUTO: CUsynchronizationPolicy_enum = CUsynchronizationPolicy_enum(
- 1,
- );
-}
-impl CUsynchronizationPolicy_enum {
- pub const CU_SYNC_POLICY_SPIN: CUsynchronizationPolicy_enum = CUsynchronizationPolicy_enum(
- 2,
- );
-}
-impl CUsynchronizationPolicy_enum {
- pub const CU_SYNC_POLICY_YIELD: CUsynchronizationPolicy_enum = CUsynchronizationPolicy_enum(
- 3,
- );
-}
-impl CUsynchronizationPolicy_enum {
- pub const CU_SYNC_POLICY_BLOCKING_SYNC: CUsynchronizationPolicy_enum = CUsynchronizationPolicy_enum(
- 4,
- );
-}
-#[repr(transparent)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUsynchronizationPolicy_enum(pub ::core::ffi::c_uint);
-pub use self::CUsynchronizationPolicy_enum as CUsynchronizationPolicy;
-impl CUclusterSchedulingPolicy_enum {
- ///< the default policy
- pub const CU_CLUSTER_SCHEDULING_POLICY_DEFAULT: CUclusterSchedulingPolicy_enum = CUclusterSchedulingPolicy_enum(
- 0,
- );
-}
-impl CUclusterSchedulingPolicy_enum {
- ///< spread the blocks within a cluster to the SMs
- pub const CU_CLUSTER_SCHEDULING_POLICY_SPREAD: CUclusterSchedulingPolicy_enum = CUclusterSchedulingPolicy_enum(
- 1,
- );
-}
-impl CUclusterSchedulingPolicy_enum {
- ///< allow the hardware to load-balance the blocks in a cluster to the SMs
- pub const CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING: CUclusterSchedulingPolicy_enum = CUclusterSchedulingPolicy_enum(
- 2,
- );
-}
-#[repr(transparent)]
-/// Cluster scheduling policies. These may be passed to ::cuFuncSetAttribute or ::cuKernelSetAttribute
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUclusterSchedulingPolicy_enum(pub ::core::ffi::c_uint);
-/// Cluster scheduling policies. These may be passed to ::cuFuncSetAttribute or ::cuKernelSetAttribute
-pub use self::CUclusterSchedulingPolicy_enum as CUclusterSchedulingPolicy;
-impl CUlaunchMemSyncDomain_enum {
- ///< Launch kernels in the default domain
- pub const CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT: CUlaunchMemSyncDomain_enum = CUlaunchMemSyncDomain_enum(
- 0,
- );
-}
-impl CUlaunchMemSyncDomain_enum {
- ///< Launch kernels in the remote domain
- pub const CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE: CUlaunchMemSyncDomain_enum = CUlaunchMemSyncDomain_enum(
- 1,
- );
-}
-#[repr(transparent)]
-/** Memory Synchronization Domain
-
- A kernel can be launched in a specified memory synchronization domain that affects all memory operations issued by
- that kernel. A memory barrier issued in one domain will only order memory operations in that domain, thus eliminating
- latency increase from memory barriers ordering unrelated traffic.
-
- By default, kernels are launched in domain 0. Kernel launched with ::CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE will have a
- different domain ID. User may also alter the domain ID with ::CUlaunchMemSyncDomainMap for a specific stream /
- graph node / kernel launch. See ::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN, ::cuStreamSetAttribute, ::cuLaunchKernelEx,
- ::cuGraphKernelNodeSetAttribute.
-
- Memory operations done in kernels launched in different domains are considered system-scope distanced. In other
- words, a GPU scoped memory synchronization is not sufficient for memory order to be observed by kernels in another
- memory synchronization domain even if they are on the same GPU.*/
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUlaunchMemSyncDomain_enum(pub ::core::ffi::c_uint);
-/** Memory Synchronization Domain
-
- A kernel can be launched in a specified memory synchronization domain that affects all memory operations issued by
- that kernel. A memory barrier issued in one domain will only order memory operations in that domain, thus eliminating
- latency increase from memory barriers ordering unrelated traffic.
-
- By default, kernels are launched in domain 0. Kernel launched with ::CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE will have a
- different domain ID. User may also alter the domain ID with ::CUlaunchMemSyncDomainMap for a specific stream /
- graph node / kernel launch. See ::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN, ::cuStreamSetAttribute, ::cuLaunchKernelEx,
- ::cuGraphKernelNodeSetAttribute.
-
- Memory operations done in kernels launched in different domains are considered system-scope distanced. In other
- words, a GPU scoped memory synchronization is not sufficient for memory order to be observed by kernels in another
- memory synchronization domain even if they are on the same GPU.*/
-pub use self::CUlaunchMemSyncDomain_enum as CUlaunchMemSyncDomain;
-/** Memory Synchronization Domain map
-
- See ::cudaLaunchMemSyncDomain.
-
- By default, kernels are launched in domain 0. Kernel launched with ::CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE will have a
- different domain ID. User may also alter the domain ID with ::CUlaunchMemSyncDomainMap for a specific stream /
- graph node / kernel launch. See ::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP.
-
- Domain ID range is available through ::CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT.*/
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUlaunchMemSyncDomainMap_st {
- ///< The default domain ID to use for designated kernels
- pub default_: ::core::ffi::c_uchar,
- ///< The remote domain ID to use for designated kernels
- pub remote: ::core::ffi::c_uchar,
-}
-/** Memory Synchronization Domain map
-
- See ::cudaLaunchMemSyncDomain.
-
- By default, kernels are launched in domain 0. Kernel launched with ::CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE will have a
- different domain ID. User may also alter the domain ID with ::CUlaunchMemSyncDomainMap for a specific stream /
- graph node / kernel launch. See ::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP.
-
- Domain ID range is available through ::CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT.*/
-pub type CUlaunchMemSyncDomainMap = CUlaunchMemSyncDomainMap_st;
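A minimal illustrative sketch of how the map type just removed above is meant to be filled (the chosen domain IDs are assumptions; the struct is normally passed via the CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP launch attribute):

    // Illustrative only: default-domain kernels use domain 0,
    // remote-domain kernels use domain 1.
    let mem_sync_map = CUlaunchMemSyncDomainMap_st {
        default_: 0,
        remote: 1,
    };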
-impl CUlaunchAttributeID_enum {
- ///< Ignored entry, for convenient composition
- pub const CU_LAUNCH_ATTRIBUTE_IGNORE: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
- 0,
- );
-}
-impl CUlaunchAttributeID_enum {
- /**< Valid for streams, graph nodes, launches. See
-::CUlaunchAttributeValue::accessPolicyWindow.*/
- pub const CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
- 1,
- );
-}
-impl CUlaunchAttributeID_enum {
- /**< Valid for graph nodes, launches. See
-::CUlaunchAttributeValue::cooperative.*/
- pub const CU_LAUNCH_ATTRIBUTE_COOPERATIVE: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
- 2,
- );
-}
-impl CUlaunchAttributeID_enum {
- /**< Valid for streams. See
-::CUlaunchAttributeValue::syncPolicy.*/
- pub const CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
- 3,
- );
-}
-impl CUlaunchAttributeID_enum {
- ///< Valid for graph nodes, launches. See ::CUlaunchAttributeValue::clusterDim.
- pub const CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
- 4,
- );
-}
-impl CUlaunchAttributeID_enum {
- ///< Valid for graph nodes, launches. See ::CUlaunchAttributeValue::clusterSchedulingPolicyPreference.
- pub const CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
- 5,
- );
-}
-impl CUlaunchAttributeID_enum {
- /**< Valid for launches. Setting
-::CUlaunchAttributeValue::programmaticStreamSerializationAllowed
-to non-0 signals that the kernel will use programmatic
-means to resolve its stream dependency, so that the
-CUDA runtime should opportunistically allow the grid's
-execution to overlap with the previous kernel in the
-stream, if that kernel requests the overlap. The
-dependent launches can choose to wait on the
-dependency using the programmatic sync
-(cudaGridDependencySynchronize() or equivalent PTX
-instructions).*/
- pub const CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
- 6,
- );
-}
-impl CUlaunchAttributeID_enum {
- /**< Valid for launches. Set
-::CUlaunchAttributeValue::programmaticEvent to
-record the event. Event recorded through this
-launch attribute is guaranteed to only trigger
-after all blocks in the associated kernel trigger
-the event. A block can trigger the event through
-PTX launchdep.release or CUDA builtin function
-cudaTriggerProgrammaticLaunchCompletion(). A
-trigger can also be inserted at the beginning of
-each block's execution if triggerAtBlockStart is
-set to non-0. The dependent launches can choose to
-wait on the dependency using the programmatic sync
-(cudaGridDependencySynchronize() or equivalent PTX
-instructions). Note that dependents (including the
-CPU thread calling cuEventSynchronize()) are not
-guaranteed to observe the release precisely when
-it is released. For example, cuEventSynchronize()
-may only observe the event trigger long after the
-associated kernel has completed. This recording
-type is primarily meant for establishing
-programmatic dependency between device tasks. Note
-also this type of dependency allows, but does not
-guarantee, concurrent execution of tasks.
-<br>
-The event supplied must not be an interprocess or
-interop event. The event must disable timing (i.e.
-must be created with the ::CU_EVENT_DISABLE_TIMING
-flag set).*/
- pub const CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
- 7,
- );
-}
-impl CUlaunchAttributeID_enum {
- /**< Valid for streams, graph nodes, launches. See
-::CUlaunchAttributeValue::priority.*/
- pub const CU_LAUNCH_ATTRIBUTE_PRIORITY: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
- 8,
- );
-}
-impl CUlaunchAttributeID_enum {
- /**< Valid for streams, graph nodes, launches. See
-::CUlaunchAttributeValue::memSyncDomainMap.*/
- pub const CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
- 9,
- );
-}
-impl CUlaunchAttributeID_enum {
- /**< Valid for streams, graph nodes, launches. See
-::CUlaunchAttributeValue::memSyncDomain.*/
- pub const CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
- 10,
- );
-}
-impl CUlaunchAttributeID_enum {
- /**< Valid for launches. Set
-::CUlaunchAttributeValue::launchCompletionEvent to record the
-event.
-<br>
-Nominally, the event is triggered once all blocks of the kernel
-have begun execution. Currently this is a best effort. If a kernel
-B has a launch completion dependency on a kernel A, B may wait
-until A is complete. Alternatively, blocks of B may begin before
-all blocks of A have begun, for example if B can claim execution
-resources unavailable to A (e.g. they run on different GPUs) or
-if B is a higher priority than A.
-Exercise caution if such an ordering inversion could lead
-to deadlock.
-<br>
-A launch completion event is nominally similar to a programmatic
-event with \c triggerAtBlockStart set except that it is not
-visible to \c cudaGridDependencySynchronize() and can be used with
-compute capability less than 9.0.
-<br>
-The event supplied must not be an interprocess or interop
-event. The event must disable timing (i.e. must be created
-with the ::CU_EVENT_DISABLE_TIMING flag set).*/
- pub const CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
- 12,
- );
-}
-impl CUlaunchAttributeID_enum {
- /**< Valid for graph nodes, launches. This attribute is graphs-only,
-and passing it to a launch in a non-capturing stream will result
-in an error.
-<br>
-::CUlaunchAttributeValue::deviceUpdatableKernelNode::deviceUpdatable can
-only be set to 0 or 1. Setting the field to 1 indicates that the
-corresponding kernel node should be device-updatable. On success, a handle
-will be returned via
-::CUlaunchAttributeValue::deviceUpdatableKernelNode::devNode which can be
-passed to the various device-side update functions to update the node's
-kernel parameters from within another kernel. For more information on the
-types of device updates that can be made, as well as the relevant limitations
-thereof, see ::cudaGraphKernelNodeUpdatesApply.
-<br>
-Nodes which are device-updatable have additional restrictions compared to
-regular kernel nodes. Firstly, device-updatable nodes cannot be removed
-from their graph via ::cuGraphDestroyNode. Additionally, once opted-in
-to this functionality, a node cannot opt out, and any attempt to set the
-deviceUpdatable attribute to 0 will result in an error. Device-updatable
-kernel nodes also cannot have their attributes copied to/from another kernel
-node via ::cuGraphKernelNodeCopyAttributes. Graphs containing one or more
-device-updatable nodes also do not allow multiple instantiation, and neither
-the graph nor its instantiated version can be passed to ::cuGraphExecUpdate.
-<br>
-If a graph contains device-updatable nodes and updates those nodes from the device
-from within the graph, the graph must be uploaded with ::cuGraphUpload before it
-is launched. For such a graph, if host-side executable graph updates are made to the
-device-updatable nodes, the graph must be uploaded before it is launched again.*/
- pub const CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
- 13,
- );
-}
-impl CUlaunchAttributeID_enum {
- pub const CU_LAUNCH_ATTRIBUTE_MAX: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum(
- 14,
- );
-}
-#[repr(transparent)]
-/// Launch attributes enum; used as id field of ::CUlaunchAttribute
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUlaunchAttributeID_enum(pub ::core::ffi::c_uint);
-/// Launch attributes enum; used as id field of ::CUlaunchAttribute
-pub use self::CUlaunchAttributeID_enum as CUlaunchAttributeID;
-/// Launch attributes union; used as value field of ::CUlaunchAttribute
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub union CUlaunchAttributeValue_union {
- pub pad: [::core::ffi::c_char; 64usize],
- ///< Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW.
- pub accessPolicyWindow: CUaccessPolicyWindow,
- /**< Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_COOPERATIVE. Nonzero indicates a cooperative
-kernel (see ::cuLaunchCooperativeKernel).*/
- pub cooperative: ::core::ffi::c_int,
- /**< Value of launch attribute
-::CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY. ::CUsynchronizationPolicy for
-work queued up in this stream*/
- pub syncPolicy: CUsynchronizationPolicy,
- pub clusterDim: CUlaunchAttributeValue_union__bindgen_ty_1,
- /**< Value of launch attribute
-::CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE. Cluster
-scheduling policy preference for the kernel.*/
- pub clusterSchedulingPolicyPreference: CUclusterSchedulingPolicy,
- /**< Value of launch attribute
-::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION.*/
- pub programmaticStreamSerializationAllowed: ::core::ffi::c_int,
- ///< Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT.
- pub programmaticEvent: CUlaunchAttributeValue_union__bindgen_ty_2,
- ///< Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT.
- pub launchCompletionEvent: CUlaunchAttributeValue_union__bindgen_ty_3,
- ///< Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_PRIORITY. Execution priority of the kernel.
- pub priority: ::core::ffi::c_int,
- /**< Value of launch attribute
-::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP. See
-::CUlaunchMemSyncDomainMap.*/
- pub memSyncDomainMap: CUlaunchMemSyncDomainMap,
- /**< Value of launch attribute
-::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN. See ::CUlaunchMemSyncDomain*/
- pub memSyncDomain: CUlaunchMemSyncDomain,
- ///< Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE.
- pub deviceUpdatableKernelNode: CUlaunchAttributeValue_union__bindgen_ty_4,
-}
-/** Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION that
- represents the desired cluster dimensions for the kernel. Opaque type
- with the following fields:
- - \p x - The X dimension of the cluster, in blocks. Must be a divisor
- of the grid X dimension.
- - \p y - The Y dimension of the cluster, in blocks. Must be a divisor
- of the grid Y dimension.
- - \p z - The Z dimension of the cluster, in blocks. Must be a divisor
- of the grid Z dimension.*/
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUlaunchAttributeValue_union__bindgen_ty_1 {
- pub x: ::core::ffi::c_uint,
- pub y: ::core::ffi::c_uint,
- pub z: ::core::ffi::c_uint,
-}
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUlaunchAttributeValue_union__bindgen_ty_2 {
- ///< Event to fire when all blocks trigger it
- pub event: CUevent,
- /**< Event record flags, see ::cuEventRecordWithFlags. Does not accept
-::CU_EVENT_RECORD_EXTERNAL.*/
- pub flags: ::core::ffi::c_int,
- ///< If this is set to non-0, each block launch will automatically trigger the event
- pub triggerAtBlockStart: ::core::ffi::c_int,
-}
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUlaunchAttributeValue_union__bindgen_ty_3 {
- ///< Event to fire when the last block launches
- pub event: CUevent,
- ///< Event record flags, see ::cuEventRecordWithFlags. Does not accept ::CU_EVENT_RECORD_EXTERNAL.
- pub flags: ::core::ffi::c_int,
-}
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUlaunchAttributeValue_union__bindgen_ty_4 {
- ///< Whether or not the resulting kernel node should be device-updatable.
- pub deviceUpdatable: ::core::ffi::c_int,
- ///< Returns a handle to pass to the various device-side update functions.
- pub devNode: CUgraphDeviceNode,
-}
-/// Launch attributes union; used as value field of ::CUlaunchAttribute
-pub type CUlaunchAttributeValue = CUlaunchAttributeValue_union;
-/// Launch attribute
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct CUlaunchAttribute_st {
- ///< Attribute to set
- pub id: CUlaunchAttributeID,
- pub pad: [::core::ffi::c_char; 4usize],
- ///< Value of the attribute
- pub value: CUlaunchAttributeValue,
-}
-/// Launch attribute
-pub type CUlaunchAttribute = CUlaunchAttribute_st;
-/// CUDA extensible launch configuration
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUlaunchConfig_st {
- ///< Width of grid in blocks
- pub gridDimX: ::core::ffi::c_uint,
- ///< Height of grid in blocks
- pub gridDimY: ::core::ffi::c_uint,
- ///< Depth of grid in blocks
- pub gridDimZ: ::core::ffi::c_uint,
- ///< X dimension of each thread block
- pub blockDimX: ::core::ffi::c_uint,
- ///< Y dimension of each thread block
- pub blockDimY: ::core::ffi::c_uint,
- ///< Z dimension of each thread block
- pub blockDimZ: ::core::ffi::c_uint,
- ///< Dynamic shared-memory size per thread block in bytes
- pub sharedMemBytes: ::core::ffi::c_uint,
- ///< Stream identifier
- pub hStream: CUstream,
- ///< List of attributes; nullable if ::CUlaunchConfig::numAttrs == 0
- pub attrs: *mut CUlaunchAttribute,
- ///< Number of attributes populated in ::CUlaunchConfig::attrs
- pub numAttrs: ::core::ffi::c_uint,
-}
-/// CUDA extensible launch configuration
-pub type CUlaunchConfig = CUlaunchConfig_st;
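A rough usage sketch of the launch-attribute types above, illustrative only (the helper name, grid/block sizes, and cluster shape are assumptions): one CUlaunchAttribute is filled with a cluster dimension and a CUlaunchConfig points at it before being handed to ::cuLaunchKernelEx.

    // Sketch: build a CUlaunchConfig requesting 2x1x1 thread-block clusters for a
    // 64x1x1 grid of 256-thread blocks. `attrs` must outlive the launch call,
    // because CUlaunchConfig only stores a raw pointer to it.
    fn cluster_launch_config(
        attrs: &mut [CUlaunchAttribute; 1],
        stream: CUstream,
    ) -> CUlaunchConfig {
        attrs[0] = CUlaunchAttribute_st {
            id: CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION,
            pad: [0; 4],
            value: CUlaunchAttributeValue_union {
                clusterDim: CUlaunchAttributeValue_union__bindgen_ty_1 { x: 2, y: 1, z: 1 },
            },
        };
        CUlaunchConfig_st {
            gridDimX: 64,
            gridDimY: 1,
            gridDimZ: 1,
            blockDimX: 256,
            blockDimY: 1,
            blockDimZ: 1,
            sharedMemBytes: 0,
            hStream: stream,
            attrs: attrs.as_mut_ptr(),
            numAttrs: 1,
        }
    }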
-/// Launch attributes enum; used as id field of ::CUlaunchAttribute
-pub use self::CUlaunchAttributeID as CUkernelNodeAttrID;
-/// Launch attributes union; used as value field of ::CUlaunchAttribute
-pub type CUkernelNodeAttrValue_v1 = CUlaunchAttributeValue;
-/// Launch attributes union; used as value field of ::CUlaunchAttribute
-pub type CUkernelNodeAttrValue = CUkernelNodeAttrValue_v1;
-impl CUstreamCaptureStatus_enum {
- ///< Stream is not capturing
- pub const CU_STREAM_CAPTURE_STATUS_NONE: CUstreamCaptureStatus_enum = CUstreamCaptureStatus_enum(
- 0,
- );
-}
-impl CUstreamCaptureStatus_enum {
- ///< Stream is actively capturing
- pub const CU_STREAM_CAPTURE_STATUS_ACTIVE: CUstreamCaptureStatus_enum = CUstreamCaptureStatus_enum(
- 1,
- );
-}
-impl CUstreamCaptureStatus_enum {
- /**< Stream is part of a capture sequence that
-has been invalidated, but not terminated*/
- pub const CU_STREAM_CAPTURE_STATUS_INVALIDATED: CUstreamCaptureStatus_enum = CUstreamCaptureStatus_enum(
- 2,
- );
-}
-#[repr(transparent)]
-/// Possible stream capture statuses returned by ::cuStreamIsCapturing
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUstreamCaptureStatus_enum(pub ::core::ffi::c_uint);
-/// Possible stream capture statuses returned by ::cuStreamIsCapturing
-pub use self::CUstreamCaptureStatus_enum as CUstreamCaptureStatus;
-impl CUstreamCaptureMode_enum {
- pub const CU_STREAM_CAPTURE_MODE_GLOBAL: CUstreamCaptureMode_enum = CUstreamCaptureMode_enum(
- 0,
- );
-}
-impl CUstreamCaptureMode_enum {
- pub const CU_STREAM_CAPTURE_MODE_THREAD_LOCAL: CUstreamCaptureMode_enum = CUstreamCaptureMode_enum(
- 1,
- );
-}
-impl CUstreamCaptureMode_enum {
- pub const CU_STREAM_CAPTURE_MODE_RELAXED: CUstreamCaptureMode_enum = CUstreamCaptureMode_enum(
- 2,
- );
-}
-#[repr(transparent)]
-/** Possible modes for stream capture thread interactions. For more details see
- ::cuStreamBeginCapture and ::cuThreadExchangeStreamCaptureMode*/
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUstreamCaptureMode_enum(pub ::core::ffi::c_uint);
-/// Launch attributes enum; used as id field of ::CUlaunchAttribute
-pub use self::CUlaunchAttributeID as CUstreamAttrID;
-/** Possible modes for stream capture thread interactions. For more details see
- ::cuStreamBeginCapture and ::cuThreadExchangeStreamCaptureMode*/
-pub use self::CUstreamCaptureMode_enum as CUstreamCaptureMode;
-/// Launch attributes union; used as value field of ::CUlaunchAttribute
-pub type CUstreamAttrValue_v1 = CUlaunchAttributeValue;
-/// Launch attributes union; used as value field of ::CUlaunchAttribute
-pub type CUstreamAttrValue = CUstreamAttrValue_v1;
-impl CUdriverProcAddress_flags_enum {
- ///< Default search mode for driver symbols.
- pub const CU_GET_PROC_ADDRESS_DEFAULT: CUdriverProcAddress_flags_enum = CUdriverProcAddress_flags_enum(
- 0,
- );
-}
-impl CUdriverProcAddress_flags_enum {
- ///< Search for legacy versions of driver symbols.
- pub const CU_GET_PROC_ADDRESS_LEGACY_STREAM: CUdriverProcAddress_flags_enum = CUdriverProcAddress_flags_enum(
- 1,
- );
-}
-impl CUdriverProcAddress_flags_enum {
- ///< Search for per-thread versions of driver symbols.
- pub const CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM: CUdriverProcAddress_flags_enum = CUdriverProcAddress_flags_enum(
- 2,
- );
-}
-#[repr(transparent)]
-/// Flags to specify search options. For more details see ::cuGetProcAddress
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUdriverProcAddress_flags_enum(pub ::core::ffi::c_uint);
-/// Flags to specify search options. For more details see ::cuGetProcAddress
-pub use self::CUdriverProcAddress_flags_enum as CUdriverProcAddress_flags;
-impl CUdriverProcAddressQueryResult_enum {
- ///< Symbol was successfully found
- pub const CU_GET_PROC_ADDRESS_SUCCESS: CUdriverProcAddressQueryResult_enum = CUdriverProcAddressQueryResult_enum(
- 0,
- );
-}
-impl CUdriverProcAddressQueryResult_enum {
- ///< Symbol was not found in search
- pub const CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND: CUdriverProcAddressQueryResult_enum = CUdriverProcAddressQueryResult_enum(
- 1,
- );
-}
-impl CUdriverProcAddressQueryResult_enum {
- ///< Symbol was found but version supplied was not sufficient
- pub const CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT: CUdriverProcAddressQueryResult_enum = CUdriverProcAddressQueryResult_enum(
- 2,
- );
-}
-#[repr(transparent)]
-/// Flags to indicate search status. For more details see ::cuGetProcAddress
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUdriverProcAddressQueryResult_enum(pub ::core::ffi::c_uint);
-/// Flags to indicate search status. For more details see ::cuGetProcAddress
-pub use self::CUdriverProcAddressQueryResult_enum as CUdriverProcAddressQueryResult;
-impl CUexecAffinityType_enum {
- ///< Create a context with limited SMs.
- pub const CU_EXEC_AFFINITY_TYPE_SM_COUNT: CUexecAffinityType_enum = CUexecAffinityType_enum(
- 0,
- );
-}
-impl CUexecAffinityType_enum {
- pub const CU_EXEC_AFFINITY_TYPE_MAX: CUexecAffinityType_enum = CUexecAffinityType_enum(
- 1,
- );
-}
-#[repr(transparent)]
-/// Execution Affinity Types
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUexecAffinityType_enum(pub ::core::ffi::c_uint);
-/// Execution Affinity Types
-pub use self::CUexecAffinityType_enum as CUexecAffinityType;
-/// Value for ::CU_EXEC_AFFINITY_TYPE_SM_COUNT
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUexecAffinitySmCount_st {
- ///< The number of SMs the context is limited to use.
- pub val: ::core::ffi::c_uint,
-}
-/// Value for ::CU_EXEC_AFFINITY_TYPE_SM_COUNT
-pub type CUexecAffinitySmCount_v1 = CUexecAffinitySmCount_st;
-/// Value for ::CU_EXEC_AFFINITY_TYPE_SM_COUNT
-pub type CUexecAffinitySmCount = CUexecAffinitySmCount_v1;
-/// Execution Affinity Parameters
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct CUexecAffinityParam_st {
- pub type_: CUexecAffinityType,
- pub param: CUexecAffinityParam_st__bindgen_ty_1,
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub union CUexecAffinityParam_st__bindgen_ty_1 {
- pub smCount: CUexecAffinitySmCount,
-}
-/// Execution Affinity Parameters
-pub type CUexecAffinityParam_v1 = CUexecAffinityParam_st;
-/// Execution Affinity Parameters
-pub type CUexecAffinityParam = CUexecAffinityParam_v1;
-impl CUlibraryOption_enum {
- pub const CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE: CUlibraryOption_enum = CUlibraryOption_enum(
- 0,
- );
-}
-impl CUlibraryOption_enum {
- /** Specifies that the argument \p code passed to ::cuLibraryLoadData() will be preserved.
- Specifying this option will let the driver know that \p code can be accessed at any point
- until ::cuLibraryUnload(). The default behavior is for the driver to allocate and
- maintain its own copy of \p code. Note that this is only a memory usage optimization
- hint and the driver can choose to ignore it if required.
- Specifying this option with ::cuLibraryLoadFromFile() is invalid and
- will return ::CUDA_ERROR_INVALID_VALUE.*/
- pub const CU_LIBRARY_BINARY_IS_PRESERVED: CUlibraryOption_enum = CUlibraryOption_enum(
- 1,
- );
-}
-impl CUlibraryOption_enum {
- /// Sentinel value equal to the number of library options defined above.
- pub const CU_LIBRARY_NUM_OPTIONS: CUlibraryOption_enum = CUlibraryOption_enum(2);
-}
-#[repr(transparent)]
-/// Library options to be specified with ::cuLibraryLoadData() or ::cuLibraryLoadFromFile()
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUlibraryOption_enum(pub ::core::ffi::c_uint);
-/// Library options to be specified with ::cuLibraryLoadData() or ::cuLibraryLoadFromFile()
-pub use self::CUlibraryOption_enum as CUlibraryOption;
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUlibraryHostUniversalFunctionAndDataTable_st {
- pub functionTable: *mut ::core::ffi::c_void,
- pub functionWindowSize: usize,
- pub dataTable: *mut ::core::ffi::c_void,
- pub dataWindowSize: usize,
-}
-pub type CUlibraryHostUniversalFunctionAndDataTable = CUlibraryHostUniversalFunctionAndDataTable_st;
-/// Error codes
-#[must_use]
-pub type cudaError_enum = ::core::ffi::c_uint;
-impl CUdevice_P2PAttribute_enum {
- ///< A relative value indicating the performance of the link between two devices
- pub const CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK: CUdevice_P2PAttribute_enum = CUdevice_P2PAttribute_enum(
- 1,
- );
-}
-impl CUdevice_P2PAttribute_enum {
- ///< P2P Access is enabled
- pub const CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED: CUdevice_P2PAttribute_enum = CUdevice_P2PAttribute_enum(
- 2,
- );
-}
-impl CUdevice_P2PAttribute_enum {
- ///< Atomic operation over the link supported
- pub const CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED: CUdevice_P2PAttribute_enum = CUdevice_P2PAttribute_enum(
- 3,
- );
-}
-impl CUdevice_P2PAttribute_enum {
- ///< \deprecated use CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED instead
- pub const CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED: CUdevice_P2PAttribute_enum = CUdevice_P2PAttribute_enum(
- 4,
- );
-}
-impl CUdevice_P2PAttribute_enum {
- ///< Accessing CUDA arrays over the link supported
- pub const CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED: CUdevice_P2PAttribute_enum = CUdevice_P2PAttribute_enum(
- 4,
- );
-}
-#[repr(transparent)]
-/// P2P Attributes
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUdevice_P2PAttribute_enum(pub ::core::ffi::c_uint);
-/// P2P Attributes
-pub use self::CUdevice_P2PAttribute_enum as CUdevice_P2PAttribute;
-/** CUDA stream callback
- \param hStream The stream the callback was added to, as passed to ::cuStreamAddCallback. May be NULL.
- \param status ::CUDA_SUCCESS or any persistent error on the stream.
- \param userData User parameter provided at registration.*/
-pub type CUstreamCallback = ::core::option::Option<
- unsafe extern "system" fn(
- hStream: CUstream,
- status: CUresult,
- userData: *mut ::core::ffi::c_void,
- ),
->;
-/** Block size to per-block dynamic shared memory mapping for a certain
- kernel \param blockSize Block size of the kernel.
-
- \return The dynamic shared memory needed by a block.*/
-pub type CUoccupancyB2DSize = ::core::option::Option<
- unsafe extern "system" fn(blockSize: ::core::ffi::c_int) -> usize,
->;
-/// 2D memory copy parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_MEMCPY2D_st {
- ///< Source X in bytes
- pub srcXInBytes: usize,
- ///< Source Y
- pub srcY: usize,
- ///< Source memory type (host, device, array)
- pub srcMemoryType: CUmemorytype,
- ///< Source host pointer
- pub srcHost: *const ::core::ffi::c_void,
- ///< Source device pointer
- pub srcDevice: CUdeviceptr,
- ///< Source array reference
- pub srcArray: CUarray,
- ///< Source pitch (ignored when src is array)
- pub srcPitch: usize,
- ///< Destination X in bytes
- pub dstXInBytes: usize,
- ///< Destination Y
- pub dstY: usize,
- ///< Destination memory type (host, device, array)
- pub dstMemoryType: CUmemorytype,
- ///< Destination host pointer
- pub dstHost: *mut ::core::ffi::c_void,
- ///< Destination device pointer
- pub dstDevice: CUdeviceptr,
- ///< Destination array reference
- pub dstArray: CUarray,
- ///< Destination pitch (ignored when dst is array)
- pub dstPitch: usize,
- ///< Width of 2D memory copy in bytes
- pub WidthInBytes: usize,
- ///< Height of 2D memory copy
- pub Height: usize,
-}
-/// 2D memory copy parameters
-pub type CUDA_MEMCPY2D_v2 = CUDA_MEMCPY2D_st;
-/// 2D memory copy parameters
-pub type CUDA_MEMCPY2D = CUDA_MEMCPY2D_v2;
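A hedged illustration of how the CUDA_MEMCPY2D fields above fit together (the helper is an assumption, as is the use of the CUmemorytype constants generated elsewhere in this file): a host-to-device copy sets the source side to a host pointer plus pitch, the destination side to a device pointer plus pitch, and leaves the unused members zeroed.

    // Sketch: describe a pitched host-to-device 2D copy. `src` is assumed to hold
    // at least `src_pitch * height` bytes; fields that do not apply stay zero.
    fn host_to_device_2d(
        src: &[u8],
        src_pitch: usize,
        dst: CUdeviceptr,
        dst_pitch: usize,
        width_bytes: usize,
        height: usize,
    ) -> CUDA_MEMCPY2D {
        let mut copy: CUDA_MEMCPY2D = unsafe { std::mem::zeroed() };
        copy.srcMemoryType = CUmemorytype::CU_MEMORYTYPE_HOST;
        copy.srcHost = src.as_ptr() as *const std::ffi::c_void;
        copy.srcPitch = src_pitch;
        copy.dstMemoryType = CUmemorytype::CU_MEMORYTYPE_DEVICE;
        copy.dstDevice = dst;
        copy.dstPitch = dst_pitch;
        copy.WidthInBytes = width_bytes;
        copy.Height = height;
        copy
    }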
-/// 3D memory copy parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_MEMCPY3D_st {
- ///< Source X in bytes
- pub srcXInBytes: usize,
- ///< Source Y
- pub srcY: usize,
- ///< Source Z
- pub srcZ: usize,
- ///< Source LOD
- pub srcLOD: usize,
- ///< Source memory type (host, device, array)
- pub srcMemoryType: CUmemorytype,
- ///< Source host pointer
- pub srcHost: *const ::core::ffi::c_void,
- ///< Source device pointer
- pub srcDevice: CUdeviceptr,
- ///< Source array reference
- pub srcArray: CUarray,
- ///< Must be NULL
- pub reserved0: *mut ::core::ffi::c_void,
- ///< Source pitch (ignored when src is array)
- pub srcPitch: usize,
- ///< Source height (ignored when src is array; may be 0 if Depth==1)
- pub srcHeight: usize,
- ///< Destination X in bytes
- pub dstXInBytes: usize,
- ///< Destination Y
- pub dstY: usize,
- ///< Destination Z
- pub dstZ: usize,
- ///< Destination LOD
- pub dstLOD: usize,
- ///< Destination memory type (host, device, array)
- pub dstMemoryType: CUmemorytype,
- ///< Destination host pointer
- pub dstHost: *mut ::core::ffi::c_void,
- ///< Destination device pointer
- pub dstDevice: CUdeviceptr,
- ///< Destination array reference
- pub dstArray: CUarray,
- ///< Must be NULL
- pub reserved1: *mut ::core::ffi::c_void,
- ///< Destination pitch (ignored when dst is array)
- pub dstPitch: usize,
- ///< Destination height (ignored when dst is array; may be 0 if Depth==1)
- pub dstHeight: usize,
- ///< Width of 3D memory copy in bytes
- pub WidthInBytes: usize,
- ///< Height of 3D memory copy
- pub Height: usize,
- ///< Depth of 3D memory copy
- pub Depth: usize,
-}
-/// 3D memory copy parameters
-pub type CUDA_MEMCPY3D_v2 = CUDA_MEMCPY3D_st;
-/// 3D memory copy parameters
-pub type CUDA_MEMCPY3D = CUDA_MEMCPY3D_v2;
-/// 3D memory cross-context copy parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_MEMCPY3D_PEER_st {
- ///< Source X in bytes
- pub srcXInBytes: usize,
- ///< Source Y
- pub srcY: usize,
- ///< Source Z
- pub srcZ: usize,
- ///< Source LOD
- pub srcLOD: usize,
- ///< Source memory type (host, device, array)
- pub srcMemoryType: CUmemorytype,
- ///< Source host pointer
- pub srcHost: *const ::core::ffi::c_void,
- ///< Source device pointer
- pub srcDevice: CUdeviceptr,
- ///< Source array reference
- pub srcArray: CUarray,
- ///< Source context (ignored when srcMemoryType is ::CU_MEMORYTYPE_ARRAY)
- pub srcContext: CUcontext,
- ///< Source pitch (ignored when src is array)
- pub srcPitch: usize,
- ///< Source height (ignored when src is array; may be 0 if Depth==1)
- pub srcHeight: usize,
- ///< Destination X in bytes
- pub dstXInBytes: usize,
- ///< Destination Y
- pub dstY: usize,
- ///< Destination Z
- pub dstZ: usize,
- ///< Destination LOD
- pub dstLOD: usize,
- ///< Destination memory type (host, device, array)
- pub dstMemoryType: CUmemorytype,
- ///< Destination host pointer
- pub dstHost: *mut ::core::ffi::c_void,
- ///< Destination device pointer
- pub dstDevice: CUdeviceptr,
- ///< Destination array reference
- pub dstArray: CUarray,
- ///< Destination context (ignored when dstMemoryType is ::CU_MEMORYTYPE_ARRAY)
- pub dstContext: CUcontext,
- ///< Destination pitch (ignored when dst is array)
- pub dstPitch: usize,
- ///< Destination height (ignored when dst is array; may be 0 if Depth==1)
- pub dstHeight: usize,
- ///< Width of 3D memory copy in bytes
- pub WidthInBytes: usize,
- ///< Height of 3D memory copy
- pub Height: usize,
- ///< Depth of 3D memory copy
- pub Depth: usize,
-}
-/// 3D memory cross-context copy parameters
-pub type CUDA_MEMCPY3D_PEER_v1 = CUDA_MEMCPY3D_PEER_st;
-/// 3D memory cross-context copy parameters
-pub type CUDA_MEMCPY3D_PEER = CUDA_MEMCPY3D_PEER_v1;
-/// Memcpy node parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_MEMCPY_NODE_PARAMS_st {
- ///< Must be zero
- pub flags: ::core::ffi::c_int,
- ///< Must be zero
- pub reserved: ::core::ffi::c_int,
- ///< Context on which to run the node
- pub copyCtx: CUcontext,
- ///< Parameters for the memory copy
- pub copyParams: CUDA_MEMCPY3D,
-}
-/// Memcpy node parameters
-pub type CUDA_MEMCPY_NODE_PARAMS = CUDA_MEMCPY_NODE_PARAMS_st;
-/// Array descriptor
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_ARRAY_DESCRIPTOR_st {
- ///< Width of array
- pub Width: usize,
- ///< Height of array
- pub Height: usize,
- ///< Array format
- pub Format: CUarray_format,
- ///< Channels per array element
- pub NumChannels: ::core::ffi::c_uint,
-}
-/// Array descriptor
-pub type CUDA_ARRAY_DESCRIPTOR_v2 = CUDA_ARRAY_DESCRIPTOR_st;
-/// Array descriptor
-pub type CUDA_ARRAY_DESCRIPTOR = CUDA_ARRAY_DESCRIPTOR_v2;
-/// 3D array descriptor
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_ARRAY3D_DESCRIPTOR_st {
- ///< Width of 3D array
- pub Width: usize,
- ///< Height of 3D array
- pub Height: usize,
- ///< Depth of 3D array
- pub Depth: usize,
- ///< Array format
- pub Format: CUarray_format,
- ///< Channels per array element
- pub NumChannels: ::core::ffi::c_uint,
- ///< Flags
- pub Flags: ::core::ffi::c_uint,
-}
-/// 3D array descriptor
-pub type CUDA_ARRAY3D_DESCRIPTOR_v2 = CUDA_ARRAY3D_DESCRIPTOR_st;
-/// 3D array descriptor
-pub type CUDA_ARRAY3D_DESCRIPTOR = CUDA_ARRAY3D_DESCRIPTOR_v2;
-/// CUDA array sparse properties
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_ARRAY_SPARSE_PROPERTIES_st {
- pub tileExtent: CUDA_ARRAY_SPARSE_PROPERTIES_st__bindgen_ty_1,
- /// First mip level at which the mip tail begins.
- pub miptailFirstLevel: ::core::ffi::c_uint,
- /// Total size of the mip tail.
- pub miptailSize: ::core::ffi::c_ulonglong,
- /// Flags will either be zero or ::CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL
- pub flags: ::core::ffi::c_uint,
- pub reserved: [::core::ffi::c_uint; 4usize],
-}
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_ARRAY_SPARSE_PROPERTIES_st__bindgen_ty_1 {
- ///< Width of sparse tile in elements
- pub width: ::core::ffi::c_uint,
- ///< Height of sparse tile in elements
- pub height: ::core::ffi::c_uint,
- ///< Depth of sparse tile in elements
- pub depth: ::core::ffi::c_uint,
-}
-/// CUDA array sparse properties
-pub type CUDA_ARRAY_SPARSE_PROPERTIES_v1 = CUDA_ARRAY_SPARSE_PROPERTIES_st;
-/// CUDA array sparse properties
-pub type CUDA_ARRAY_SPARSE_PROPERTIES = CUDA_ARRAY_SPARSE_PROPERTIES_v1;
-/// CUDA array memory requirements
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_ARRAY_MEMORY_REQUIREMENTS_st {
- ///< Total required memory size
- pub size: usize,
- ///< alignment requirement
- pub alignment: usize,
- pub reserved: [::core::ffi::c_uint; 4usize],
-}
-/// CUDA array memory requirements
-pub type CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 = CUDA_ARRAY_MEMORY_REQUIREMENTS_st;
-/// CUDA array memory requirements
-pub type CUDA_ARRAY_MEMORY_REQUIREMENTS = CUDA_ARRAY_MEMORY_REQUIREMENTS_v1;
-/// CUDA Resource descriptor
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct CUDA_RESOURCE_DESC_st {
- ///< Resource type
- pub resType: CUresourcetype,
- pub res: CUDA_RESOURCE_DESC_st__bindgen_ty_1,
- ///< Flags (must be zero)
- pub flags: ::core::ffi::c_uint,
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub union CUDA_RESOURCE_DESC_st__bindgen_ty_1 {
- pub array: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_1,
- pub mipmap: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_2,
- pub linear: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_3,
- pub pitch2D: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_4,
- pub reserved: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_5,
-}
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_1 {
- ///< CUDA array
- pub hArray: CUarray,
-}
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_2 {
- ///< CUDA mipmapped array
- pub hMipmappedArray: CUmipmappedArray,
-}
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_3 {
- ///< Device pointer
- pub devPtr: CUdeviceptr,
- ///< Array format
- pub format: CUarray_format,
- ///< Channels per array element
- pub numChannels: ::core::ffi::c_uint,
- ///< Size in bytes
- pub sizeInBytes: usize,
-}
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_4 {
- ///< Device pointer
- pub devPtr: CUdeviceptr,
- ///< Array format
- pub format: CUarray_format,
- ///< Channels per array element
- pub numChannels: ::core::ffi::c_uint,
- ///< Width of the array in elements
- pub width: usize,
- ///< Height of the array in elements
- pub height: usize,
- ///< Pitch between two rows in bytes
- pub pitchInBytes: usize,
-}
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_5 {
- pub reserved: [::core::ffi::c_int; 32usize],
-}
-/// CUDA Resource descriptor
-pub type CUDA_RESOURCE_DESC_v1 = CUDA_RESOURCE_DESC_st;
-/// CUDA Resource descriptor
-pub type CUDA_RESOURCE_DESC = CUDA_RESOURCE_DESC_v1;
-/// Texture descriptor
-#[repr(C)]
-#[derive(Debug, Copy, Clone, PartialEq)]
-pub struct CUDA_TEXTURE_DESC_st {
- ///< Address modes
- pub addressMode: [CUaddress_mode; 3usize],
- ///< Filter mode
- pub filterMode: CUfilter_mode,
- ///< Flags
- pub flags: ::core::ffi::c_uint,
- ///< Maximum anisotropy ratio
- pub maxAnisotropy: ::core::ffi::c_uint,
- ///< Mipmap filter mode
- pub mipmapFilterMode: CUfilter_mode,
- ///< Mipmap level bias
- pub mipmapLevelBias: f32,
- ///< Mipmap minimum level clamp
- pub minMipmapLevelClamp: f32,
- ///< Mipmap maximum level clamp
- pub maxMipmapLevelClamp: f32,
- ///< Border Color
- pub borderColor: [f32; 4usize],
- pub reserved: [::core::ffi::c_int; 12usize],
-}
-/// Texture descriptor
-pub type CUDA_TEXTURE_DESC_v1 = CUDA_TEXTURE_DESC_st;
-/// Texture descriptor
-pub type CUDA_TEXTURE_DESC = CUDA_TEXTURE_DESC_v1;
-impl CUresourceViewFormat_enum {
- ///< No resource view format (use underlying resource format)
- pub const CU_RES_VIEW_FORMAT_NONE: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 0,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 1 channel unsigned 8-bit integers
- pub const CU_RES_VIEW_FORMAT_UINT_1X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 1,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 2 channel unsigned 8-bit integers
- pub const CU_RES_VIEW_FORMAT_UINT_2X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 2,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 4 channel unsigned 8-bit integers
- pub const CU_RES_VIEW_FORMAT_UINT_4X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 3,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 1 channel signed 8-bit integers
- pub const CU_RES_VIEW_FORMAT_SINT_1X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 4,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 2 channel signed 8-bit integers
- pub const CU_RES_VIEW_FORMAT_SINT_2X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 5,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 4 channel signed 8-bit integers
- pub const CU_RES_VIEW_FORMAT_SINT_4X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 6,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 1 channel unsigned 16-bit integers
- pub const CU_RES_VIEW_FORMAT_UINT_1X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 7,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 2 channel unsigned 16-bit integers
- pub const CU_RES_VIEW_FORMAT_UINT_2X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 8,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 4 channel unsigned 16-bit integers
- pub const CU_RES_VIEW_FORMAT_UINT_4X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 9,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 1 channel signed 16-bit integers
- pub const CU_RES_VIEW_FORMAT_SINT_1X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 10,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 2 channel signed 16-bit integers
- pub const CU_RES_VIEW_FORMAT_SINT_2X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 11,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 4 channel signed 16-bit integers
- pub const CU_RES_VIEW_FORMAT_SINT_4X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 12,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 1 channel unsigned 32-bit integers
- pub const CU_RES_VIEW_FORMAT_UINT_1X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 13,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 2 channel unsigned 32-bit integers
- pub const CU_RES_VIEW_FORMAT_UINT_2X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 14,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 4 channel unsigned 32-bit integers
- pub const CU_RES_VIEW_FORMAT_UINT_4X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 15,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 1 channel signed 32-bit integers
- pub const CU_RES_VIEW_FORMAT_SINT_1X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 16,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 2 channel signed 32-bit integers
- pub const CU_RES_VIEW_FORMAT_SINT_2X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 17,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 4 channel signed 32-bit integers
- pub const CU_RES_VIEW_FORMAT_SINT_4X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 18,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 1 channel 16-bit floating point
- pub const CU_RES_VIEW_FORMAT_FLOAT_1X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 19,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 2 channel 16-bit floating point
- pub const CU_RES_VIEW_FORMAT_FLOAT_2X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 20,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 4 channel 16-bit floating point
- pub const CU_RES_VIEW_FORMAT_FLOAT_4X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 21,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 1 channel 32-bit floating point
- pub const CU_RES_VIEW_FORMAT_FLOAT_1X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 22,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 2 channel 32-bit floating point
- pub const CU_RES_VIEW_FORMAT_FLOAT_2X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 23,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< 4 channel 32-bit floating point
- pub const CU_RES_VIEW_FORMAT_FLOAT_4X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 24,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< Block compressed 1
- pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC1: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 25,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< Block compressed 2
- pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC2: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 26,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< Block compressed 3
- pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC3: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 27,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< Block compressed 4 unsigned
- pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC4: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 28,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< Block compressed 4 signed
- pub const CU_RES_VIEW_FORMAT_SIGNED_BC4: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 29,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< Block compressed 5 unsigned
- pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC5: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 30,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< Block compressed 5 signed
- pub const CU_RES_VIEW_FORMAT_SIGNED_BC5: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 31,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< Block compressed 6 unsigned half-float
- pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC6H: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 32,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< Block compressed 6 signed half-float
- pub const CU_RES_VIEW_FORMAT_SIGNED_BC6H: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 33,
- );
-}
-impl CUresourceViewFormat_enum {
- ///< Block compressed 7
- pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC7: CUresourceViewFormat_enum = CUresourceViewFormat_enum(
- 34,
- );
-}
-#[repr(transparent)]
-/// Resource view format
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUresourceViewFormat_enum(pub ::core::ffi::c_uint);
-/// Resource view format
-pub use self::CUresourceViewFormat_enum as CUresourceViewFormat;
-/// Resource view descriptor
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_RESOURCE_VIEW_DESC_st {
- ///< Resource view format
- pub format: CUresourceViewFormat,
- ///< Width of the resource view
- pub width: usize,
- ///< Height of the resource view
- pub height: usize,
- ///< Depth of the resource view
- pub depth: usize,
- ///< First defined mipmap level
- pub firstMipmapLevel: ::core::ffi::c_uint,
- ///< Last defined mipmap level
- pub lastMipmapLevel: ::core::ffi::c_uint,
- ///< First layer index
- pub firstLayer: ::core::ffi::c_uint,
- ///< Last layer index
- pub lastLayer: ::core::ffi::c_uint,
- pub reserved: [::core::ffi::c_uint; 16usize],
-}
-/// Resource view descriptor
-pub type CUDA_RESOURCE_VIEW_DESC_v1 = CUDA_RESOURCE_VIEW_DESC_st;
-/// Resource view descriptor
-pub type CUDA_RESOURCE_VIEW_DESC = CUDA_RESOURCE_VIEW_DESC_v1;
-/// Tensor map descriptor. Requires compiler support for aligning to 64 bytes.
-#[repr(C)]
-#[repr(align(64))]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUtensorMap_st {
- pub opaque: [cuuint64_t; 16usize],
-}
-/// Tensor map descriptor. Requires compiler support for aligning to 64 bytes.
-pub type CUtensorMap = CUtensorMap_st;
-impl CUtensorMapDataType_enum {
- pub const CU_TENSOR_MAP_DATA_TYPE_UINT8: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
- 0,
- );
-}
-impl CUtensorMapDataType_enum {
- pub const CU_TENSOR_MAP_DATA_TYPE_UINT16: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
- 1,
- );
-}
-impl CUtensorMapDataType_enum {
- pub const CU_TENSOR_MAP_DATA_TYPE_UINT32: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
- 2,
- );
-}
-impl CUtensorMapDataType_enum {
- pub const CU_TENSOR_MAP_DATA_TYPE_INT32: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
- 3,
- );
-}
-impl CUtensorMapDataType_enum {
- pub const CU_TENSOR_MAP_DATA_TYPE_UINT64: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
- 4,
- );
-}
-impl CUtensorMapDataType_enum {
- pub const CU_TENSOR_MAP_DATA_TYPE_INT64: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
- 5,
- );
-}
-impl CUtensorMapDataType_enum {
- pub const CU_TENSOR_MAP_DATA_TYPE_FLOAT16: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
- 6,
- );
-}
-impl CUtensorMapDataType_enum {
- pub const CU_TENSOR_MAP_DATA_TYPE_FLOAT32: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
- 7,
- );
-}
-impl CUtensorMapDataType_enum {
- pub const CU_TENSOR_MAP_DATA_TYPE_FLOAT64: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
- 8,
- );
-}
-impl CUtensorMapDataType_enum {
- pub const CU_TENSOR_MAP_DATA_TYPE_BFLOAT16: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
- 9,
- );
-}
-impl CUtensorMapDataType_enum {
- pub const CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
- 10,
- );
-}
-impl CUtensorMapDataType_enum {
- pub const CU_TENSOR_MAP_DATA_TYPE_TFLOAT32: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
- 11,
- );
-}
-impl CUtensorMapDataType_enum {
- pub const CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ: CUtensorMapDataType_enum = CUtensorMapDataType_enum(
- 12,
- );
-}
-#[repr(transparent)]
-/// Tensor map data type
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUtensorMapDataType_enum(pub ::core::ffi::c_uint);
-/// Tensor map data type
-pub use self::CUtensorMapDataType_enum as CUtensorMapDataType;
-impl CUtensorMapInterleave_enum {
- pub const CU_TENSOR_MAP_INTERLEAVE_NONE: CUtensorMapInterleave_enum = CUtensorMapInterleave_enum(
- 0,
- );
-}
-impl CUtensorMapInterleave_enum {
- pub const CU_TENSOR_MAP_INTERLEAVE_16B: CUtensorMapInterleave_enum = CUtensorMapInterleave_enum(
- 1,
- );
-}
-impl CUtensorMapInterleave_enum {
- pub const CU_TENSOR_MAP_INTERLEAVE_32B: CUtensorMapInterleave_enum = CUtensorMapInterleave_enum(
- 2,
- );
-}
-#[repr(transparent)]
-/// Tensor map interleave layout type
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUtensorMapInterleave_enum(pub ::core::ffi::c_uint);
-/// Tensor map interleave layout type
-pub use self::CUtensorMapInterleave_enum as CUtensorMapInterleave;
-impl CUtensorMapSwizzle_enum {
- pub const CU_TENSOR_MAP_SWIZZLE_NONE: CUtensorMapSwizzle_enum = CUtensorMapSwizzle_enum(
- 0,
- );
-}
-impl CUtensorMapSwizzle_enum {
- pub const CU_TENSOR_MAP_SWIZZLE_32B: CUtensorMapSwizzle_enum = CUtensorMapSwizzle_enum(
- 1,
- );
-}
-impl CUtensorMapSwizzle_enum {
- pub const CU_TENSOR_MAP_SWIZZLE_64B: CUtensorMapSwizzle_enum = CUtensorMapSwizzle_enum(
- 2,
- );
-}
-impl CUtensorMapSwizzle_enum {
- pub const CU_TENSOR_MAP_SWIZZLE_128B: CUtensorMapSwizzle_enum = CUtensorMapSwizzle_enum(
- 3,
- );
-}
-#[repr(transparent)]
-/// Tensor map swizzling mode of shared memory banks
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUtensorMapSwizzle_enum(pub ::core::ffi::c_uint);
-/// Tensor map swizzling mode of shared memory banks
-pub use self::CUtensorMapSwizzle_enum as CUtensorMapSwizzle;
-impl CUtensorMapL2promotion_enum {
- pub const CU_TENSOR_MAP_L2_PROMOTION_NONE: CUtensorMapL2promotion_enum = CUtensorMapL2promotion_enum(
- 0,
- );
-}
-impl CUtensorMapL2promotion_enum {
- pub const CU_TENSOR_MAP_L2_PROMOTION_L2_64B: CUtensorMapL2promotion_enum = CUtensorMapL2promotion_enum(
- 1,
- );
-}
-impl CUtensorMapL2promotion_enum {
- pub const CU_TENSOR_MAP_L2_PROMOTION_L2_128B: CUtensorMapL2promotion_enum = CUtensorMapL2promotion_enum(
- 2,
- );
-}
-impl CUtensorMapL2promotion_enum {
- pub const CU_TENSOR_MAP_L2_PROMOTION_L2_256B: CUtensorMapL2promotion_enum = CUtensorMapL2promotion_enum(
- 3,
- );
-}
-#[repr(transparent)]
-/// Tensor map L2 promotion type
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUtensorMapL2promotion_enum(pub ::core::ffi::c_uint);
-/// Tensor map L2 promotion type
-pub use self::CUtensorMapL2promotion_enum as CUtensorMapL2promotion;
-impl CUtensorMapFloatOOBfill_enum {
- pub const CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE: CUtensorMapFloatOOBfill_enum = CUtensorMapFloatOOBfill_enum(
- 0,
- );
-}
-impl CUtensorMapFloatOOBfill_enum {
- pub const CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA: CUtensorMapFloatOOBfill_enum = CUtensorMapFloatOOBfill_enum(
- 1,
- );
-}
-#[repr(transparent)]
-/// Tensor map out-of-bounds fill type
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUtensorMapFloatOOBfill_enum(pub ::core::ffi::c_uint);
-/// Tensor map out-of-bounds fill type
-pub use self::CUtensorMapFloatOOBfill_enum as CUtensorMapFloatOOBfill;
-/// GPU Direct v3 tokens
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st {
- pub p2pToken: ::core::ffi::c_ulonglong,
- pub vaSpaceToken: ::core::ffi::c_uint,
-}
-/// GPU Direct v3 tokens
-pub type CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1 = CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st;
-/// GPU Direct v3 tokens
-pub type CUDA_POINTER_ATTRIBUTE_P2P_TOKENS = CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1;
-impl CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum {
- ///< No access, meaning the device cannot access this memory at all, thus must be staged through accessible memory in order to complete certain operations
- pub const CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE: CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum = CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum(
- 0,
- );
-}
-impl CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum {
- ///< Read-only access, meaning writes to this memory are considered invalid accesses and thus return error in that case.
- pub const CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ: CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum = CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum(
- 1,
- );
-}
-impl CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum {
- ///< Read-write access, the device has full read-write access to the memory
- pub const CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE: CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum = CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum(
- 3,
- );
-}
-#[repr(transparent)]
-/** Access flags that specify the level of access the current context's device has
- on the memory referenced.*/
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum(pub ::core::ffi::c_uint);
-/** Access flags that specify the level of access the current context's device has
- on the memory referenced.*/
-pub use self::CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum as CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS;
-/// Kernel launch parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_LAUNCH_PARAMS_st {
- ///< Kernel to launch
- pub function: CUfunction,
- ///< Width of grid in blocks
- pub gridDimX: ::core::ffi::c_uint,
- ///< Height of grid in blocks
- pub gridDimY: ::core::ffi::c_uint,
- ///< Depth of grid in blocks
- pub gridDimZ: ::core::ffi::c_uint,
- ///< X dimension of each thread block
- pub blockDimX: ::core::ffi::c_uint,
- ///< Y dimension of each thread block
- pub blockDimY: ::core::ffi::c_uint,
- ///< Z dimension of each thread block
- pub blockDimZ: ::core::ffi::c_uint,
- ///< Dynamic shared-memory size per thread block in bytes
- pub sharedMemBytes: ::core::ffi::c_uint,
- ///< Stream identifier
- pub hStream: CUstream,
- ///< Array of pointers to kernel parameters
- pub kernelParams: *mut *mut ::core::ffi::c_void,
-}
-/// Kernel launch parameters
-pub type CUDA_LAUNCH_PARAMS_v1 = CUDA_LAUNCH_PARAMS_st;
-/// Kernel launch parameters
-pub type CUDA_LAUNCH_PARAMS = CUDA_LAUNCH_PARAMS_v1;
-impl CUexternalMemoryHandleType_enum {
- /// Handle is an opaque file descriptor
- pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum(
- 1,
- );
-}
-impl CUexternalMemoryHandleType_enum {
- /// Handle is an opaque shared NT handle
- pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum(
- 2,
- );
-}
-impl CUexternalMemoryHandleType_enum {
- /// Handle is an opaque, globally shared handle
- pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum(
- 3,
- );
-}
-impl CUexternalMemoryHandleType_enum {
- /// Handle is a D3D12 heap object
- pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum(
- 4,
- );
-}
-impl CUexternalMemoryHandleType_enum {
- /// Handle is a D3D12 committed resource
- pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum(
- 5,
- );
-}
-impl CUexternalMemoryHandleType_enum {
- /// Handle is a shared NT handle to a D3D11 resource
- pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum(
- 6,
- );
-}
-impl CUexternalMemoryHandleType_enum {
- /// Handle is a globally shared handle to a D3D11 resource
- pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum(
- 7,
- );
-}
-impl CUexternalMemoryHandleType_enum {
- /// Handle is an NvSciBuf object
- pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum(
- 8,
- );
-}
-#[repr(transparent)]
-/// External memory handle types
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUexternalMemoryHandleType_enum(pub ::core::ffi::c_uint);
-/// External memory handle types
-pub use self::CUexternalMemoryHandleType_enum as CUexternalMemoryHandleType;
-/// External memory handle descriptor
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st {
- /// Type of the handle
- pub type_: CUexternalMemoryHandleType,
- pub handle: CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1,
- /// Size of the memory allocation
- pub size: ::core::ffi::c_ulonglong,
- /// Flags must either be zero or ::CUDA_EXTERNAL_MEMORY_DEDICATED
- pub flags: ::core::ffi::c_uint,
- pub reserved: [::core::ffi::c_uint; 16usize],
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub union CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1 {
- /** File descriptor referencing the memory object. Valid
- when type is
- ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD*/
- pub fd: ::core::ffi::c_int,
- pub win32: CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1,
- /** A handle representing an NvSciBuf Object. Valid when type
- is ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF*/
- pub nvSciBufObject: *const ::core::ffi::c_void,
-}
-/** Win32 handle referencing the semaphore object. Valid when
- type is one of the following:
- - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32
- - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT
- - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP
- - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE
- - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE
- - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT
- Exactly one of 'handle' and 'name' must be non-NULL. If
- type is one of the following:
- ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT
- ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT
- then 'name' must be NULL.*/
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1 {
- /// Valid NT handle. Must be NULL if 'name' is non-NULL
- pub handle: *mut ::core::ffi::c_void,
- /** Name of a valid memory object.
- Must be NULL if 'handle' is non-NULL.*/
- pub name: *const ::core::ffi::c_void,
-}
-/// External memory handle descriptor
-pub type CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1 = CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st;
-/// External memory handle descriptor
-pub type CUDA_EXTERNAL_MEMORY_HANDLE_DESC = CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1;
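// Editor's note: an illustrative, hypothetical sketch (not part of the generated bindings)
// showing how a caller might fill CUDA_EXTERNAL_MEMORY_HANDLE_DESC for the OPAQUE_FD case
// documented above. The function name and the `raw_fd`/`size_bytes` parameters are
// assumptions for the example only.
fn example_external_memory_fd_desc(
    raw_fd: ::core::ffi::c_int,
    size_bytes: ::core::ffi::c_ulonglong,
) -> CUDA_EXTERNAL_MEMORY_HANDLE_DESC {
    // All-zero is a valid initial state for this repr(C) struct: unused union members,
    // flags and the reserved words stay zero, as the field comments require.
    let mut desc: CUDA_EXTERNAL_MEMORY_HANDLE_DESC = unsafe { ::core::mem::zeroed() };
    desc.type_ = CUexternalMemoryHandleType::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD;
    desc.handle.fd = raw_fd; // the `fd` union member is the one valid for OPAQUE_FD
    desc.size = size_bytes; // size of the underlying allocation in bytes
    desc
}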
-/// External memory buffer descriptor
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st {
- /// Offset into the memory object where the buffer's base is
- pub offset: ::core::ffi::c_ulonglong,
- /// Size of the buffer
- pub size: ::core::ffi::c_ulonglong,
- /// Flags reserved for future use. Must be zero.
- pub flags: ::core::ffi::c_uint,
- pub reserved: [::core::ffi::c_uint; 16usize],
-}
-/// External memory buffer descriptor
-pub type CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1 = CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st;
-/// External memory buffer descriptor
-pub type CUDA_EXTERNAL_MEMORY_BUFFER_DESC = CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1;
-/// External memory mipmap descriptor
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st {
- /** Offset into the memory object where the base level of the
- mipmap chain is.*/
- pub offset: ::core::ffi::c_ulonglong,
- /// Format, dimension and type of base level of the mipmap chain
- pub arrayDesc: CUDA_ARRAY3D_DESCRIPTOR,
- /// Total number of levels in the mipmap chain
- pub numLevels: ::core::ffi::c_uint,
- pub reserved: [::core::ffi::c_uint; 16usize],
-}
-/// External memory mipmap descriptor
-pub type CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1 = CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st;
-/// External memory mipmap descriptor
-pub type CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC = CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1;
-impl CUexternalSemaphoreHandleType_enum {
- /// Handle is an opaque file descriptor
- pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(
- 1,
- );
-}
-impl CUexternalSemaphoreHandleType_enum {
- /// Handle is an opaque shared NT handle
- pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(
- 2,
- );
-}
-impl CUexternalSemaphoreHandleType_enum {
- /// Handle is an opaque, globally shared handle
- pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(
- 3,
- );
-}
-impl CUexternalSemaphoreHandleType_enum {
- /// Handle is a shared NT handle referencing a D3D12 fence object
- pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(
- 4,
- );
-}
-impl CUexternalSemaphoreHandleType_enum {
- /// Handle is a shared NT handle referencing a D3D11 fence object
- pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(
- 5,
- );
-}
-impl CUexternalSemaphoreHandleType_enum {
- /// Opaque handle to NvSciSync Object
- pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(
- 6,
- );
-}
-impl CUexternalSemaphoreHandleType_enum {
- /// Handle is a shared NT handle referencing a D3D11 keyed mutex object
- pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(
- 7,
- );
-}
-impl CUexternalSemaphoreHandleType_enum {
- /// Handle is a globally shared handle referencing a D3D11 keyed mutex object
- pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(
- 8,
- );
-}
-impl CUexternalSemaphoreHandleType_enum {
- /// Handle is an opaque file descriptor referencing a timeline semaphore
- pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(
- 9,
- );
-}
-impl CUexternalSemaphoreHandleType_enum {
- /// Handle is an opaque shared NT handle referencing a timeline semaphore
- pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(
- 10,
- );
-}
-#[repr(transparent)]
-/// External semaphore handle types
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUexternalSemaphoreHandleType_enum(pub ::core::ffi::c_uint);
-/// External semaphore handle types
-pub use self::CUexternalSemaphoreHandleType_enum as CUexternalSemaphoreHandleType;
-/// External semaphore handle descriptor
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st {
- /// Type of the handle
- pub type_: CUexternalSemaphoreHandleType,
- pub handle: CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1,
- /// Flags reserved for the future. Must be zero.
- pub flags: ::core::ffi::c_uint,
- pub reserved: [::core::ffi::c_uint; 16usize],
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub union CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1 {
- /** File descriptor referencing the semaphore object. Valid
- when type is one of the following:
- - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD
- - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD*/
- pub fd: ::core::ffi::c_int,
- pub win32: CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1,
- /// Valid NvSciSyncObj. Must be non-NULL.
- pub nvSciSyncObj: *const ::core::ffi::c_void,
-}
-/** Win32 handle referencing the semaphore object. Valid when
- type is one of the following:
- - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32
- - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT
- - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE
- - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE
- - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX
- - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32
- Exactly one of 'handle' and 'name' must be non-NULL. If
- type is one of the following:
- - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT
- - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT
- then 'name' must be NULL.*/
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1 {
- /// Valid NT handle. Must be NULL if 'name' is non-NULL
- pub handle: *mut ::core::ffi::c_void,
- /** Name of a valid synchronization primitive.
- Must be NULL if 'handle' is non-NULL.*/
- pub name: *const ::core::ffi::c_void,
-}
-/// External semaphore handle descriptor
-pub type CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1 = CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st;
-/// External semaphore handle descriptor
-pub type CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC = CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1;
-/// External semaphore signal parameters
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st {
- pub params: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1,
- /** Only when ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS is used to
- signal a ::CUexternalSemaphore of type
- ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, the valid flag is
- ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC which indicates
- that while signaling the ::CUexternalSemaphore, no memory synchronization
- operations should be performed for any external memory object imported
- as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF.
- For all other types of ::CUexternalSemaphore, flags must be zero.*/
- pub flags: ::core::ffi::c_uint,
- pub reserved: [::core::ffi::c_uint; 16usize],
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1 {
- pub fence: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_1,
- pub nvSciSync: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_2,
- pub keyedMutex: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_3,
- pub reserved: [::core::ffi::c_uint; 12usize],
-}
-/// Parameters for fence objects
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_1 {
- /// Value of fence to be signaled
- pub value: ::core::ffi::c_ulonglong,
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub union CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_2 {
- /** Pointer to NvSciSyncFence. Valid if ::CUexternalSemaphoreHandleType
- is of type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC.*/
- pub fence: *mut ::core::ffi::c_void,
- pub reserved: ::core::ffi::c_ulonglong,
-}
-/// Parameters for keyed mutex objects
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_3 {
- /// Value of key to release the mutex with
- pub key: ::core::ffi::c_ulonglong,
-}
-/// External semaphore signal parameters
-pub type CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1 = CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st;
-/// External semaphore signal parameters
-pub type CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS = CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1;
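// Editor's note: hypothetical usage sketch, not part of the generated bindings. It shows
// the fence member of the parameter block above being filled, as used for fence- and
// timeline-style semaphores; the function name and `fence_value` are assumptions.
fn example_signal_fence_params(
    fence_value: ::core::ffi::c_ulonglong,
) -> CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS {
    let mut p: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS = unsafe { ::core::mem::zeroed() };
    p.params.fence.value = fence_value; // which member is read depends on the semaphore's handle type
    p.flags = 0; // must be zero except for the NvSciSync case documented above
    p
}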
-/// External semaphore wait parameters
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st {
- pub params: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1,
- /** Only when ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS is used to wait on
- a ::CUexternalSemaphore of type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC,
- the valid flag is ::CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC
- which indicates that while waiting for the ::CUexternalSemaphore, no memory
- synchronization operations should be performed for any external memory
- object imported as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF.
- For all other types of ::CUexternalSemaphore, flags must be zero.*/
- pub flags: ::core::ffi::c_uint,
- pub reserved: [::core::ffi::c_uint; 16usize],
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1 {
- pub fence: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_1,
- pub nvSciSync: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_2,
- pub keyedMutex: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_3,
- pub reserved: [::core::ffi::c_uint; 10usize],
-}
-/// Parameters for fence objects
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_1 {
- /// Value of fence to be waited on
- pub value: ::core::ffi::c_ulonglong,
-}
-/** Pointer to NvSciSyncFence. Valid if CUexternalSemaphoreHandleType
- is of type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC.*/
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub union CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_2 {
- pub fence: *mut ::core::ffi::c_void,
- pub reserved: ::core::ffi::c_ulonglong,
-}
-/// Parameters for keyed mutex objects
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_3 {
- /// Value of key to acquire the mutex with
- pub key: ::core::ffi::c_ulonglong,
- /// Timeout in milliseconds to wait to acquire the mutex
- pub timeoutMs: ::core::ffi::c_uint,
-}
-/// External semaphore wait parameters
-pub type CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1 = CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st;
-/// External semaphore wait parameters
-pub type CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS = CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1;
-/// Semaphore signal node parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st {
- ///< Array of external semaphore handles.
- pub extSemArray: *mut CUexternalSemaphore,
- ///< Array of external semaphore signal parameters.
- pub paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,
- ///< Number of handles and parameters supplied in extSemArray and paramsArray.
- pub numExtSems: ::core::ffi::c_uint,
-}
-/// Semaphore signal node parameters
-pub type CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 = CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st;
-/// Semaphore signal node parameters
-pub type CUDA_EXT_SEM_SIGNAL_NODE_PARAMS = CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1;
-/// Semaphore signal node parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st {
- ///< Array of external semaphore handles.
- pub extSemArray: *mut CUexternalSemaphore,
- ///< Array of external semaphore signal parameters.
- pub paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,
- ///< Number of handles and parameters supplied in extSemArray and paramsArray.
- pub numExtSems: ::core::ffi::c_uint,
-}
-/// Semaphore signal node parameters
-pub type CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2 = CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st;
-/// Semaphore wait node parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_EXT_SEM_WAIT_NODE_PARAMS_st {
- ///< Array of external semaphore handles.
- pub extSemArray: *mut CUexternalSemaphore,
- ///< Array of external semaphore wait parameters.
- pub paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,
- ///< Number of handles and parameters supplied in extSemArray and paramsArray.
- pub numExtSems: ::core::ffi::c_uint,
-}
-/// Semaphore wait node parameters
-pub type CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 = CUDA_EXT_SEM_WAIT_NODE_PARAMS_st;
-/// Semaphore wait node parameters
-pub type CUDA_EXT_SEM_WAIT_NODE_PARAMS = CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1;
-/// Semaphore wait node parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st {
- ///< Array of external semaphore handles.
- pub extSemArray: *mut CUexternalSemaphore,
- ///< Array of external semaphore wait parameters.
- pub paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,
- ///< Number of handles and parameters supplied in extSemArray and paramsArray.
- pub numExtSems: ::core::ffi::c_uint,
-}
-/// Semaphore wait node parameters
-pub type CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2 = CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st;
-pub type CUmemGenericAllocationHandle_v1 = ::core::ffi::c_ulonglong;
-pub type CUmemGenericAllocationHandle = CUmemGenericAllocationHandle_v1;
-impl CUmemAllocationHandleType_enum {
- ///< Does not allow any export mechanism.
- pub const CU_MEM_HANDLE_TYPE_NONE: CUmemAllocationHandleType_enum = CUmemAllocationHandleType_enum(
- 0,
- );
-}
-impl CUmemAllocationHandleType_enum {
- ///< Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int)
- pub const CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR: CUmemAllocationHandleType_enum = CUmemAllocationHandleType_enum(
- 1,
- );
-}
-impl CUmemAllocationHandleType_enum {
- ///< Allows a Win32 NT handle to be used for exporting. (HANDLE)
- pub const CU_MEM_HANDLE_TYPE_WIN32: CUmemAllocationHandleType_enum = CUmemAllocationHandleType_enum(
- 2,
- );
-}
-impl CUmemAllocationHandleType_enum {
- ///< Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE)
- pub const CU_MEM_HANDLE_TYPE_WIN32_KMT: CUmemAllocationHandleType_enum = CUmemAllocationHandleType_enum(
- 4,
- );
-}
-impl CUmemAllocationHandleType_enum {
- ///< Allows a fabric handle to be used for exporting. (CUmemFabricHandle)
- pub const CU_MEM_HANDLE_TYPE_FABRIC: CUmemAllocationHandleType_enum = CUmemAllocationHandleType_enum(
- 8,
- );
-}
-impl CUmemAllocationHandleType_enum {
- pub const CU_MEM_HANDLE_TYPE_MAX: CUmemAllocationHandleType_enum = CUmemAllocationHandleType_enum(
- 2147483647,
- );
-}
-#[repr(transparent)]
-/// Flags for specifying particular handle types
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmemAllocationHandleType_enum(pub ::core::ffi::c_uint);
-/// Flags for specifying particular handle types
-pub use self::CUmemAllocationHandleType_enum as CUmemAllocationHandleType;
-impl CUmemAccess_flags_enum {
- ///< Default, make the address range not accessible
- pub const CU_MEM_ACCESS_FLAGS_PROT_NONE: CUmemAccess_flags_enum = CUmemAccess_flags_enum(
- 0,
- );
-}
-impl CUmemAccess_flags_enum {
- ///< Make the address range read accessible
- pub const CU_MEM_ACCESS_FLAGS_PROT_READ: CUmemAccess_flags_enum = CUmemAccess_flags_enum(
- 1,
- );
-}
-impl CUmemAccess_flags_enum {
- ///< Make the address range read-write accessible
- pub const CU_MEM_ACCESS_FLAGS_PROT_READWRITE: CUmemAccess_flags_enum = CUmemAccess_flags_enum(
- 3,
- );
-}
-impl CUmemAccess_flags_enum {
- pub const CU_MEM_ACCESS_FLAGS_PROT_MAX: CUmemAccess_flags_enum = CUmemAccess_flags_enum(
- 2147483647,
- );
-}
-#[repr(transparent)]
-/// Specifies the memory protection flags for mapping.
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmemAccess_flags_enum(pub ::core::ffi::c_uint);
-/// Specifies the memory protection flags for mapping.
-pub use self::CUmemAccess_flags_enum as CUmemAccess_flags;
-impl CUmemLocationType_enum {
- pub const CU_MEM_LOCATION_TYPE_INVALID: CUmemLocationType_enum = CUmemLocationType_enum(
- 0,
- );
-}
-impl CUmemLocationType_enum {
- ///< Location is a device location, thus id is a device ordinal
- pub const CU_MEM_LOCATION_TYPE_DEVICE: CUmemLocationType_enum = CUmemLocationType_enum(
- 1,
- );
-}
-impl CUmemLocationType_enum {
- ///< Location is host, id is ignored
- pub const CU_MEM_LOCATION_TYPE_HOST: CUmemLocationType_enum = CUmemLocationType_enum(
- 2,
- );
-}
-impl CUmemLocationType_enum {
- ///< Location is a host NUMA node, thus id is a host NUMA node id
- pub const CU_MEM_LOCATION_TYPE_HOST_NUMA: CUmemLocationType_enum = CUmemLocationType_enum(
- 3,
- );
-}
-impl CUmemLocationType_enum {
- ///< Location is a host NUMA node of the current thread, id is ignored
- pub const CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT: CUmemLocationType_enum = CUmemLocationType_enum(
- 4,
- );
-}
-impl CUmemLocationType_enum {
- pub const CU_MEM_LOCATION_TYPE_MAX: CUmemLocationType_enum = CUmemLocationType_enum(
- 2147483647,
- );
-}
-#[repr(transparent)]
-/// Specifies the type of location
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmemLocationType_enum(pub ::core::ffi::c_uint);
-/// Specifies the type of location
-pub use self::CUmemLocationType_enum as CUmemLocationType;
-impl CUmemAllocationType_enum {
- pub const CU_MEM_ALLOCATION_TYPE_INVALID: CUmemAllocationType_enum = CUmemAllocationType_enum(
- 0,
- );
-}
-impl CUmemAllocationType_enum {
- /** This allocation type is 'pinned', i.e. cannot migrate from its current
- location while the application is actively using it*/
- pub const CU_MEM_ALLOCATION_TYPE_PINNED: CUmemAllocationType_enum = CUmemAllocationType_enum(
- 1,
- );
-}
-impl CUmemAllocationType_enum {
- /// Sentinel value; not a valid allocation type.
- pub const CU_MEM_ALLOCATION_TYPE_MAX: CUmemAllocationType_enum = CUmemAllocationType_enum(
- 2147483647,
- );
-}
-#[repr(transparent)]
-/// Defines the allocation types available
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmemAllocationType_enum(pub ::core::ffi::c_uint);
-/// Defines the allocation types available
-pub use self::CUmemAllocationType_enum as CUmemAllocationType;
-impl CUmemAllocationGranularity_flags_enum {
- ///< Minimum required granularity for allocation
- pub const CU_MEM_ALLOC_GRANULARITY_MINIMUM: CUmemAllocationGranularity_flags_enum = CUmemAllocationGranularity_flags_enum(
- 0,
- );
-}
-impl CUmemAllocationGranularity_flags_enum {
- ///< Recommended granularity for allocation for best performance
- pub const CU_MEM_ALLOC_GRANULARITY_RECOMMENDED: CUmemAllocationGranularity_flags_enum = CUmemAllocationGranularity_flags_enum(
- 1,
- );
-}
-#[repr(transparent)]
-/// Flag for requesting different optimal and required granularities for an allocation.
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmemAllocationGranularity_flags_enum(pub ::core::ffi::c_uint);
-/// Flag for requesting different optimal and required granularities for an allocation.
-pub use self::CUmemAllocationGranularity_flags_enum as CUmemAllocationGranularity_flags;
-impl CUmemRangeHandleType_enum {
- pub const CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD: CUmemRangeHandleType_enum = CUmemRangeHandleType_enum(
- 1,
- );
-}
-impl CUmemRangeHandleType_enum {
- pub const CU_MEM_RANGE_HANDLE_TYPE_MAX: CUmemRangeHandleType_enum = CUmemRangeHandleType_enum(
- 2147483647,
- );
-}
-#[repr(transparent)]
-/// Specifies the handle type for address range
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmemRangeHandleType_enum(pub ::core::ffi::c_uint);
-/// Specifies the handle type for address range
-pub use self::CUmemRangeHandleType_enum as CUmemRangeHandleType;
-impl CUarraySparseSubresourceType_enum {
- pub const CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL: CUarraySparseSubresourceType_enum = CUarraySparseSubresourceType_enum(
- 0,
- );
-}
-impl CUarraySparseSubresourceType_enum {
- pub const CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL: CUarraySparseSubresourceType_enum = CUarraySparseSubresourceType_enum(
- 1,
- );
-}
-#[repr(transparent)]
-/// Sparse subresource types
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUarraySparseSubresourceType_enum(pub ::core::ffi::c_uint);
-/// Sparse subresource types
-pub use self::CUarraySparseSubresourceType_enum as CUarraySparseSubresourceType;
-impl CUmemOperationType_enum {
- pub const CU_MEM_OPERATION_TYPE_MAP: CUmemOperationType_enum = CUmemOperationType_enum(
- 1,
- );
-}
-impl CUmemOperationType_enum {
- pub const CU_MEM_OPERATION_TYPE_UNMAP: CUmemOperationType_enum = CUmemOperationType_enum(
- 2,
- );
-}
-#[repr(transparent)]
-/// Memory operation types
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmemOperationType_enum(pub ::core::ffi::c_uint);
-/// Memory operation types
-pub use self::CUmemOperationType_enum as CUmemOperationType;
-impl CUmemHandleType_enum {
- pub const CU_MEM_HANDLE_TYPE_GENERIC: CUmemHandleType_enum = CUmemHandleType_enum(0);
-}
-#[repr(transparent)]
-/// Memory handle types
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmemHandleType_enum(pub ::core::ffi::c_uint);
-/// Memory handle types
-pub use self::CUmemHandleType_enum as CUmemHandleType;
-/// Specifies the CUDA array or CUDA mipmapped array memory mapping information
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct CUarrayMapInfo_st {
- ///< Resource type
- pub resourceType: CUresourcetype,
- pub resource: CUarrayMapInfo_st__bindgen_ty_1,
- ///< Sparse subresource type
- pub subresourceType: CUarraySparseSubresourceType,
- pub subresource: CUarrayMapInfo_st__bindgen_ty_2,
- ///< Memory operation type
- pub memOperationType: CUmemOperationType,
- ///< Memory handle type
- pub memHandleType: CUmemHandleType,
- pub memHandle: CUarrayMapInfo_st__bindgen_ty_3,
- ///< Offset within the memory
- pub offset: ::core::ffi::c_ulonglong,
- ///< Device ordinal bit mask
- pub deviceBitMask: ::core::ffi::c_uint,
- ///< flags for future use, must be zero now.
- pub flags: ::core::ffi::c_uint,
- ///< Reserved for future use, must be zero now.
- pub reserved: [::core::ffi::c_uint; 2usize],
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub union CUarrayMapInfo_st__bindgen_ty_1 {
- pub mipmap: CUmipmappedArray,
- pub array: CUarray,
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub union CUarrayMapInfo_st__bindgen_ty_2 {
- pub sparseLevel: CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_1,
- pub miptail: CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_2,
-}
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_1 {
- ///< For CUDA mipmapped arrays must be a valid mipmap level. For CUDA arrays must be zero
- pub level: ::core::ffi::c_uint,
- ///< For CUDA layered arrays must be a valid layer index. Otherwise, must be zero
- pub layer: ::core::ffi::c_uint,
- ///< Starting X offset in elements
- pub offsetX: ::core::ffi::c_uint,
- ///< Starting Y offset in elements
- pub offsetY: ::core::ffi::c_uint,
- ///< Starting Z offset in elements
- pub offsetZ: ::core::ffi::c_uint,
- ///< Width in elements
- pub extentWidth: ::core::ffi::c_uint,
- ///< Height in elements
- pub extentHeight: ::core::ffi::c_uint,
- ///< Depth in elements
- pub extentDepth: ::core::ffi::c_uint,
-}
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_2 {
- ///< For CUDA layered arrays must be a valid layer index. Otherwise, must be zero
- pub layer: ::core::ffi::c_uint,
- ///< Offset within mip tail
- pub offset: ::core::ffi::c_ulonglong,
- ///< Extent in bytes
- pub size: ::core::ffi::c_ulonglong,
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub union CUarrayMapInfo_st__bindgen_ty_3 {
- pub memHandle: CUmemGenericAllocationHandle,
-}
-/// Specifies the CUDA array or CUDA mipmapped array memory mapping information
-pub type CUarrayMapInfo_v1 = CUarrayMapInfo_st;
-/// Specifies the CUDA array or CUDA mipmapped array memory mapping information
-pub type CUarrayMapInfo = CUarrayMapInfo_v1;
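// Editor's note: illustrative sketch only (not part of the generated bindings) of how the
// mapping descriptor above is typically assembled to map one sparse level of a CUDA array
// onto a physical allocation. The array/handle/extent parameters are assumptions, and the
// CUresourcetype constant referenced here is defined elsewhere in this module.
fn example_sparse_level_map(
    array: CUarray,
    mem: CUmemGenericAllocationHandle,
    width: ::core::ffi::c_uint,
    height: ::core::ffi::c_uint,
) -> CUarrayMapInfo {
    let mut info: CUarrayMapInfo = unsafe { ::core::mem::zeroed() };
    info.resourceType = CUresourcetype::CU_RESOURCE_TYPE_ARRAY;
    info.resource.array = array;
    info.subresourceType =
        CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL;
    // Assigning the whole union member at once keeps the write a safe union-field store.
    info.subresource.sparseLevel = CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_1 {
        level: 0,
        layer: 0,
        offsetX: 0,
        offsetY: 0,
        offsetZ: 0,
        extentWidth: width,
        extentHeight: height,
        extentDepth: 1,
    };
    info.memOperationType = CUmemOperationType::CU_MEM_OPERATION_TYPE_MAP;
    info.memHandleType = CUmemHandleType::CU_MEM_HANDLE_TYPE_GENERIC;
    info.memHandle.memHandle = mem;
    info.deviceBitMask = 1; // bit mask selecting device ordinal 0 (illustrative)
    info
}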
-/// Specifies a memory location.
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmemLocation_st {
- ///< Specifies the location type, which modifies the meaning of id.
- pub type_: CUmemLocationType,
- ///< Identifier whose meaning depends on this location's ::CUmemLocationType.
- pub id: ::core::ffi::c_int,
-}
-/// Specifies a memory location.
-pub type CUmemLocation_v1 = CUmemLocation_st;
-/// Specifies a memory location.
-pub type CUmemLocation = CUmemLocation_v1;
-impl CUmemAllocationCompType_enum {
- ///< Allocating non-compressible memory
- pub const CU_MEM_ALLOCATION_COMP_NONE: CUmemAllocationCompType_enum = CUmemAllocationCompType_enum(
- 0,
- );
-}
-impl CUmemAllocationCompType_enum {
- ///< Allocating compressible memory
- pub const CU_MEM_ALLOCATION_COMP_GENERIC: CUmemAllocationCompType_enum = CUmemAllocationCompType_enum(
- 1,
- );
-}
-#[repr(transparent)]
-/// Specifies compression attribute for an allocation.
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmemAllocationCompType_enum(pub ::core::ffi::c_uint);
-/// Specifies compression attribute for an allocation.
-pub use self::CUmemAllocationCompType_enum as CUmemAllocationCompType;
-/// Specifies the allocation properties for an allocation.
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmemAllocationProp_st {
- /// Allocation type
- pub type_: CUmemAllocationType,
- /// requested ::CUmemAllocationHandleType
- pub requestedHandleTypes: CUmemAllocationHandleType,
- /// Location of allocation
- pub location: CUmemLocation,
- /** Windows-specific POBJECT_ATTRIBUTES required when
- ::CU_MEM_HANDLE_TYPE_WIN32 is specified. This object attributes structure
- includes security attributes that define
- the scope of which exported allocations may be transferred to other
- processes. In all other cases, this field is required to be zero.*/
- pub win32HandleMetaData: *mut ::core::ffi::c_void,
- pub allocFlags: CUmemAllocationProp_st__bindgen_ty_1,
-}
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmemAllocationProp_st__bindgen_ty_1 {
- /** Allocation hint for requesting compressible memory.
- On devices that support Compute Data Compression, compressible
- memory can be used to accelerate accesses to data with unstructured
- sparsity and other compressible data patterns. Applications are
- expected to query allocation property of the handle obtained with
- ::cuMemCreate using ::cuMemGetAllocationPropertiesFromHandle to
- validate if the obtained allocation is compressible or not. Note that
- compressed memory may not be mappable on all devices.*/
- pub compressionType: ::core::ffi::c_uchar,
- pub gpuDirectRDMACapable: ::core::ffi::c_uchar,
- /// Bitmask indicating intended usage for this allocation
- pub usage: ::core::ffi::c_ushort,
- pub reserved: [::core::ffi::c_uchar; 4usize],
-}
-/// Specifies the allocation properties for an allocation.
-pub type CUmemAllocationProp_v1 = CUmemAllocationProp_st;
-/// Specifies the allocation properties for an allocation.
-pub type CUmemAllocationProp = CUmemAllocationProp_v1;
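// Editor's note: a minimal, hedged sketch (not part of the generated bindings) of the
// CUmemAllocationProp setup commonly passed to cuMemCreate for a pinned, device-resident,
// non-exportable allocation; `device_ordinal` is an assumed parameter.
fn example_pinned_device_alloc_prop(device_ordinal: ::core::ffi::c_int) -> CUmemAllocationProp {
    let mut prop: CUmemAllocationProp = unsafe { ::core::mem::zeroed() };
    prop.type_ = CUmemAllocationType::CU_MEM_ALLOCATION_TYPE_PINNED;
    prop.requestedHandleTypes = CUmemAllocationHandleType::CU_MEM_HANDLE_TYPE_NONE;
    prop.location = CUmemLocation {
        type_: CUmemLocationType::CU_MEM_LOCATION_TYPE_DEVICE,
        id: device_ordinal, // device ordinal, per CU_MEM_LOCATION_TYPE_DEVICE above
    };
    // win32HandleMetaData and allocFlags stay zeroed, as required when no Win32 export is requested.
    prop
}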
-impl CUmulticastGranularity_flags_enum {
- ///< Minimum required granularity
- pub const CU_MULTICAST_GRANULARITY_MINIMUM: CUmulticastGranularity_flags_enum = CUmulticastGranularity_flags_enum(
- 0,
- );
-}
-impl CUmulticastGranularity_flags_enum {
- ///< Recommended granularity for best performance
- pub const CU_MULTICAST_GRANULARITY_RECOMMENDED: CUmulticastGranularity_flags_enum = CUmulticastGranularity_flags_enum(
- 1,
- );
-}
-#[repr(transparent)]
-/// Flags for querying different granularities for a multicast object
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmulticastGranularity_flags_enum(pub ::core::ffi::c_uint);
-/// Flags for querying different granularities for a multicast object
-pub use self::CUmulticastGranularity_flags_enum as CUmulticastGranularity_flags;
-/// Specifies the properties for a multicast object.
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmulticastObjectProp_st {
- /** The number of devices in the multicast team that will bind memory to this
- object*/
- pub numDevices: ::core::ffi::c_uint,
- /** The maximum amount of memory that can be bound to this multicast object
- per device*/
- pub size: usize,
- /** Bitmask of exportable handle types (see ::CUmemAllocationHandleType) for
- this object*/
- pub handleTypes: ::core::ffi::c_ulonglong,
- /// Flags for future use, must be zero now
- pub flags: ::core::ffi::c_ulonglong,
-}
-/// Specifies the properties for a multicast object.
-pub type CUmulticastObjectProp_v1 = CUmulticastObjectProp_st;
-/// Specifies the properties for a multicast object.
-pub type CUmulticastObjectProp = CUmulticastObjectProp_v1;
-/// Memory access descriptor
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmemAccessDesc_st {
- ///< Location on which the request is to change its accessibility
- pub location: CUmemLocation,
- ///< ::CUmemProt accessibility flags to set on the request
- pub flags: CUmemAccess_flags,
-}
-/// Memory access descriptor
-pub type CUmemAccessDesc_v1 = CUmemAccessDesc_st;
-/// Memory access descriptor
-pub type CUmemAccessDesc = CUmemAccessDesc_v1;
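// Editor's note: illustrative only (not part of the generated bindings) — the access
// descriptor typically handed to cuMemSetAccess to grant a device read/write access to a
// mapped range; `device_ordinal` is an assumed parameter.
fn example_readwrite_access_desc(device_ordinal: ::core::ffi::c_int) -> CUmemAccessDesc {
    CUmemAccessDesc {
        location: CUmemLocation {
            type_: CUmemLocationType::CU_MEM_LOCATION_TYPE_DEVICE,
            id: device_ordinal,
        },
        flags: CUmemAccess_flags::CU_MEM_ACCESS_FLAGS_PROT_READWRITE,
    }
}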
-impl CUgraphExecUpdateResult_enum {
- ///< The update succeeded
- pub const CU_GRAPH_EXEC_UPDATE_SUCCESS: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum(
- 0,
- );
-}
-impl CUgraphExecUpdateResult_enum {
- ///< The update failed for an unexpected reason which is described in the return value of the function
- pub const CU_GRAPH_EXEC_UPDATE_ERROR: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum(
- 1,
- );
-}
-impl CUgraphExecUpdateResult_enum {
- ///< The update failed because the topology changed
- pub const CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum(
- 2,
- );
-}
-impl CUgraphExecUpdateResult_enum {
- ///< The update failed because a node type changed
- pub const CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum(
- 3,
- );
-}
-impl CUgraphExecUpdateResult_enum {
- ///< The update failed because the function of a kernel node changed (CUDA driver < 11.2)
- pub const CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum(
- 4,
- );
-}
-impl CUgraphExecUpdateResult_enum {
- ///< The update failed because the parameters changed in a way that is not supported
- pub const CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum(
- 5,
- );
-}
-impl CUgraphExecUpdateResult_enum {
- ///< The update failed because something about the node is not supported
- pub const CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum(
- 6,
- );
-}
-impl CUgraphExecUpdateResult_enum {
- ///< The update failed because the function of a kernel node changed in an unsupported way
- pub const CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum(
- 7,
- );
-}
-impl CUgraphExecUpdateResult_enum {
- ///< The update failed because the node attributes changed in a way that is not supported
- pub const CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum(
- 8,
- );
-}
-#[repr(transparent)]
-/// CUDA Graph Update error types
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUgraphExecUpdateResult_enum(pub ::core::ffi::c_uint);
-/// CUDA Graph Update error types
-pub use self::CUgraphExecUpdateResult_enum as CUgraphExecUpdateResult;
-/// Result information returned by cuGraphExecUpdate
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUgraphExecUpdateResultInfo_st {
- /// Gives more specific detail when a CUDA graph update fails.
- pub result: CUgraphExecUpdateResult,
- /** The "to node" of the error edge when the topologies do not match.
- The error node when the error is associated with a specific node.
- NULL when the error is generic.*/
- pub errorNode: CUgraphNode,
- /// The "from node" of the error edge when the topologies do not match. Otherwise NULL.
- pub errorFromNode: CUgraphNode,
-}
-/// Result information returned by cuGraphExecUpdate
-pub type CUgraphExecUpdateResultInfo_v1 = CUgraphExecUpdateResultInfo_st;
-/// Result information returned by cuGraphExecUpdate
-pub type CUgraphExecUpdateResultInfo = CUgraphExecUpdateResultInfo_v1;
-impl CUmemPool_attribute_enum {
- /** (value type = int)
- Allow cuMemAllocAsync to use memory asynchronously freed
- in other streams as long as a stream ordering dependency
- of the allocating stream on the free action exists.
- CUDA events and null stream interactions can create the required
- stream ordered dependencies. (default enabled)*/
- pub const CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES: CUmemPool_attribute_enum = CUmemPool_attribute_enum(
- 1,
- );
-}
-impl CUmemPool_attribute_enum {
- /** (value type = int)
- Allow reuse of already completed frees when there is no dependency
- between the free and allocation. (default enabled)*/
- pub const CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC: CUmemPool_attribute_enum = CUmemPool_attribute_enum(
- 2,
- );
-}
-impl CUmemPool_attribute_enum {
- /** (value type = int)
- Allow cuMemAllocAsync to insert new stream dependencies
- in order to establish the stream ordering required to reuse
- a piece of memory released by cuFreeAsync (default enabled).*/
- pub const CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES: CUmemPool_attribute_enum = CUmemPool_attribute_enum(
- 3,
- );
-}
-impl CUmemPool_attribute_enum {
- /** (value type = cuuint64_t)
- Amount of reserved memory in bytes to hold onto before trying
- to release memory back to the OS. When more than the release
- threshold bytes of memory are held by the memory pool, the
- allocator will try to release memory back to the OS on the
- next call to stream, event or context synchronize. (default 0)*/
- pub const CU_MEMPOOL_ATTR_RELEASE_THRESHOLD: CUmemPool_attribute_enum = CUmemPool_attribute_enum(
- 4,
- );
-}
-impl CUmemPool_attribute_enum {
- /** (value type = cuuint64_t)
- Amount of backing memory currently allocated for the mempool.*/
- pub const CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT: CUmemPool_attribute_enum = CUmemPool_attribute_enum(
- 5,
- );
-}
-impl CUmemPool_attribute_enum {
- /** (value type = cuuint64_t)
- High watermark of backing memory allocated for the mempool since the
- last time it was reset. High watermark can only be reset to zero.*/
- pub const CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH: CUmemPool_attribute_enum = CUmemPool_attribute_enum(
- 6,
- );
-}
-impl CUmemPool_attribute_enum {
- /** (value type = cuuint64_t)
- Amount of memory from the pool that is currently in use by the application.*/
- pub const CU_MEMPOOL_ATTR_USED_MEM_CURRENT: CUmemPool_attribute_enum = CUmemPool_attribute_enum(
- 7,
- );
-}
-impl CUmemPool_attribute_enum {
- /** (value type = cuuint64_t)
- High watermark of the amount of memory from the pool that was in use by the application since
- the last time it was reset. High watermark can only be reset to zero.*/
- pub const CU_MEMPOOL_ATTR_USED_MEM_HIGH: CUmemPool_attribute_enum = CUmemPool_attribute_enum(
- 8,
- );
-}
-#[repr(transparent)]
-/// CUDA memory pool attributes
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmemPool_attribute_enum(pub ::core::ffi::c_uint);
-/// CUDA memory pool attributes
-pub use self::CUmemPool_attribute_enum as CUmemPool_attribute;
-/// Specifies the properties of allocations made from the pool.
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmemPoolProps_st {
- ///< Allocation type. Currently must be specified as CU_MEM_ALLOCATION_TYPE_PINNED
- pub allocType: CUmemAllocationType,
- ///< Handle types that will be supported by allocations from the pool.
- pub handleTypes: CUmemAllocationHandleType,
- ///< Location where allocations should reside.
- pub location: CUmemLocation,
- /** Windows-specific LPSECURITYATTRIBUTES required when
- ::CU_MEM_HANDLE_TYPE_WIN32 is specified. This security attribute defines
- the scope of which exported allocations may be transferred to other
- processes. In all other cases, this field is required to be zero.*/
- pub win32SecurityAttributes: *mut ::core::ffi::c_void,
- ///< Maximum pool size. When set to 0, defaults to a system dependent value.
- pub maxSize: usize,
- ///< reserved for future use, must be 0
- pub reserved: [::core::ffi::c_uchar; 56usize],
-}
-/// Specifies the properties of allocations made from the pool.
-pub type CUmemPoolProps_v1 = CUmemPoolProps_st;
-/// Specifies the properties of allocations made from the pool.
-pub type CUmemPoolProps = CUmemPoolProps_v1;
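// Editor's note: hedged sketch (not part of the generated bindings) of CUmemPoolProps for
// a non-exportable, device-local pool of the kind consumed by cuMemPoolCreate;
// `device_ordinal` is an assumed parameter.
fn example_device_pool_props(device_ordinal: ::core::ffi::c_int) -> CUmemPoolProps {
    let mut props: CUmemPoolProps = unsafe { ::core::mem::zeroed() };
    props.allocType = CUmemAllocationType::CU_MEM_ALLOCATION_TYPE_PINNED;
    props.handleTypes = CUmemAllocationHandleType::CU_MEM_HANDLE_TYPE_NONE;
    props.location.type_ = CUmemLocationType::CU_MEM_LOCATION_TYPE_DEVICE;
    props.location.id = device_ordinal;
    // maxSize == 0 keeps the system-dependent default described above; the reserved
    // bytes must remain zero.
    props
}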
-/// Opaque data for exporting a pool allocation
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmemPoolPtrExportData_st {
- pub reserved: [::core::ffi::c_uchar; 64usize],
-}
-/// Opaque data for exporting a pool allocation
-pub type CUmemPoolPtrExportData_v1 = CUmemPoolPtrExportData_st;
-/// Opaque data for exporting a pool allocation
-pub type CUmemPoolPtrExportData = CUmemPoolPtrExportData_v1;
-/// Memory allocation node parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_MEM_ALLOC_NODE_PARAMS_v1_st {
- /** in: location where the allocation should reside (specified in ::location).
- ::handleTypes must be ::CU_MEM_HANDLE_TYPE_NONE. IPC is not supported.*/
- pub poolProps: CUmemPoolProps,
- ///< in: array of memory access descriptors. Used to describe peer GPU access
- pub accessDescs: *const CUmemAccessDesc,
- ///< in: number of memory access descriptors. Must not exceed the number of GPUs.
- pub accessDescCount: usize,
- ///< in: size in bytes of the requested allocation
- pub bytesize: usize,
- ///< out: address of the allocation returned by CUDA
- pub dptr: CUdeviceptr,
-}
-/// Memory allocation node parameters
-pub type CUDA_MEM_ALLOC_NODE_PARAMS_v1 = CUDA_MEM_ALLOC_NODE_PARAMS_v1_st;
-/// Memory allocation node parameters
-pub type CUDA_MEM_ALLOC_NODE_PARAMS = CUDA_MEM_ALLOC_NODE_PARAMS_v1;
-/// Memory allocation node parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_MEM_ALLOC_NODE_PARAMS_v2_st {
- /** in: location where the allocation should reside (specified in ::location).
- ::handleTypes must be ::CU_MEM_HANDLE_TYPE_NONE. IPC is not supported.*/
- pub poolProps: CUmemPoolProps,
- ///< in: array of memory access descriptors. Used to describe peer GPU access
- pub accessDescs: *const CUmemAccessDesc,
- ///< in: number of memory access descriptors. Must not exceed the number of GPUs.
- pub accessDescCount: usize,
- ///< in: size in bytes of the requested allocation
- pub bytesize: usize,
- ///< out: address of the allocation returned by CUDA
- pub dptr: CUdeviceptr,
-}
-/// Memory allocation node parameters
-pub type CUDA_MEM_ALLOC_NODE_PARAMS_v2 = CUDA_MEM_ALLOC_NODE_PARAMS_v2_st;
-/// Memory free node parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_MEM_FREE_NODE_PARAMS_st {
- ///< in: the pointer to free
- pub dptr: CUdeviceptr,
-}
-/// Memory free node parameters
-pub type CUDA_MEM_FREE_NODE_PARAMS = CUDA_MEM_FREE_NODE_PARAMS_st;
-impl CUgraphMem_attribute_enum {
- /** (value type = cuuint64_t)
- Amount of memory, in bytes, currently associated with graphs*/
- pub const CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT: CUgraphMem_attribute_enum = CUgraphMem_attribute_enum(
- 0,
- );
-}
-impl CUgraphMem_attribute_enum {
- /** (value type = cuuint64_t)
- High watermark of memory, in bytes, associated with graphs since the
- last time it was reset. High watermark can only be reset to zero.*/
- pub const CU_GRAPH_MEM_ATTR_USED_MEM_HIGH: CUgraphMem_attribute_enum = CUgraphMem_attribute_enum(
- 1,
- );
-}
-impl CUgraphMem_attribute_enum {
- /** (value type = cuuint64_t)
- Amount of memory, in bytes, currently allocated for use by
- the CUDA graphs asynchronous allocator.*/
- pub const CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT: CUgraphMem_attribute_enum = CUgraphMem_attribute_enum(
- 2,
- );
-}
-impl CUgraphMem_attribute_enum {
- /** (value type = cuuint64_t)
- High watermark of memory, in bytes, currently allocated for use by
- the CUDA graphs asynchronous allocator.*/
- pub const CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH: CUgraphMem_attribute_enum = CUgraphMem_attribute_enum(
- 3,
- );
-}
-#[repr(transparent)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUgraphMem_attribute_enum(pub ::core::ffi::c_uint);
-pub use self::CUgraphMem_attribute_enum as CUgraphMem_attribute;
-/// Child graph node parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_CHILD_GRAPH_NODE_PARAMS_st {
- /**< The child graph to clone into the node for node creation, or
-a handle to the graph owned by the node for node query*/
- pub graph: CUgraph,
-}
-/// Child graph node parameters
-pub type CUDA_CHILD_GRAPH_NODE_PARAMS = CUDA_CHILD_GRAPH_NODE_PARAMS_st;
-/// Event record node parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_EVENT_RECORD_NODE_PARAMS_st {
- ///< The event to record when the node executes
- pub event: CUevent,
-}
-/// Event record node parameters
-pub type CUDA_EVENT_RECORD_NODE_PARAMS = CUDA_EVENT_RECORD_NODE_PARAMS_st;
-/// Event wait node parameters
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_EVENT_WAIT_NODE_PARAMS_st {
- ///< The event to wait on from the node
- pub event: CUevent,
-}
-/// Event wait node parameters
-pub type CUDA_EVENT_WAIT_NODE_PARAMS = CUDA_EVENT_WAIT_NODE_PARAMS_st;
-/// Graph node parameters. See ::cuGraphAddNode.
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct CUgraphNodeParams_st {
- ///< Type of the node
- pub type_: CUgraphNodeType,
- ///< Reserved. Must be zero.
- pub reserved0: [::core::ffi::c_int; 3usize],
- pub __bindgen_anon_1: CUgraphNodeParams_st__bindgen_ty_1,
- ///< Reserved bytes. Must be zero.
- pub reserved2: ::core::ffi::c_longlong,
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub union CUgraphNodeParams_st__bindgen_ty_1 {
- ///< Padding. Unused bytes must be zero.
- pub reserved1: [::core::ffi::c_longlong; 29usize],
- ///< Kernel node parameters.
- pub kernel: CUDA_KERNEL_NODE_PARAMS_v3,
- ///< Memcpy node parameters.
- pub memcpy: CUDA_MEMCPY_NODE_PARAMS,
- ///< Memset node parameters.
- pub memset: CUDA_MEMSET_NODE_PARAMS_v2,
- ///< Host node parameters.
- pub host: CUDA_HOST_NODE_PARAMS_v2,
- ///< Child graph node parameters.
- pub graph: CUDA_CHILD_GRAPH_NODE_PARAMS,
- ///< Event wait node parameters.
- pub eventWait: CUDA_EVENT_WAIT_NODE_PARAMS,
- ///< Event record node parameters.
- pub eventRecord: CUDA_EVENT_RECORD_NODE_PARAMS,
- ///< External semaphore signal node parameters.
- pub extSemSignal: CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2,
- ///< External semaphore wait node parameters.
- pub extSemWait: CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2,
- ///< Memory allocation node parameters.
- pub alloc: CUDA_MEM_ALLOC_NODE_PARAMS_v2,
- ///< Memory free node parameters.
- pub free: CUDA_MEM_FREE_NODE_PARAMS,
- ///< MemOp node parameters.
- pub memOp: CUDA_BATCH_MEM_OP_NODE_PARAMS_v2,
- ///< Conditional node parameters.
- pub conditional: CUDA_CONDITIONAL_NODE_PARAMS,
-}
-/// Graph node parameters. See ::cuGraphAddNode.
-pub type CUgraphNodeParams = CUgraphNodeParams_st;
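// Editor's note: an illustrative, hypothetical sketch (not part of the generated bindings)
// of the tagged-union pattern above: `type_` selects which member of the anonymous union
// the driver reads. The CUgraphNodeType constant referenced here is defined elsewhere in
// this module; `event` is an assumed parameter and nothing below calls the driver.
fn example_event_record_node_params(event: CUevent) -> CUgraphNodeParams {
    let mut node: CUgraphNodeParams = unsafe { ::core::mem::zeroed() };
    node.type_ = CUgraphNodeType::CU_GRAPH_NODE_TYPE_EVENT_RECORD;
    // Writing a whole union member is a safe store; reading it back would require unsafe.
    node.__bindgen_anon_1.eventRecord = CUDA_EVENT_RECORD_NODE_PARAMS { event };
    node
}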
-impl CUflushGPUDirectRDMAWritesOptions_enum {
- ///< ::cuFlushGPUDirectRDMAWrites() and its CUDA Runtime API counterpart are supported on the device.
- pub const CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST: CUflushGPUDirectRDMAWritesOptions_enum = CUflushGPUDirectRDMAWritesOptions_enum(
- 1,
- );
-}
-impl CUflushGPUDirectRDMAWritesOptions_enum {
- ///< The ::CU_STREAM_WAIT_VALUE_FLUSH flag and the ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the device.
- pub const CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS: CUflushGPUDirectRDMAWritesOptions_enum = CUflushGPUDirectRDMAWritesOptions_enum(
- 2,
- );
-}
-#[repr(transparent)]
-/// Bitmasks for ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUflushGPUDirectRDMAWritesOptions_enum(pub ::core::ffi::c_uint);
-/// Bitmasks for ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS
-pub use self::CUflushGPUDirectRDMAWritesOptions_enum as CUflushGPUDirectRDMAWritesOptions;
-impl CUGPUDirectRDMAWritesOrdering_enum {
- ///< The device does not natively support ordering of remote writes. ::cuFlushGPUDirectRDMAWrites() can be leveraged if supported.
- pub const CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE: CUGPUDirectRDMAWritesOrdering_enum = CUGPUDirectRDMAWritesOrdering_enum(
- 0,
- );
-}
-impl CUGPUDirectRDMAWritesOrdering_enum {
- ///< Natively, the device can consistently consume remote writes, although other CUDA devices may not.
- pub const CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER: CUGPUDirectRDMAWritesOrdering_enum = CUGPUDirectRDMAWritesOrdering_enum(
- 100,
- );
-}
-impl CUGPUDirectRDMAWritesOrdering_enum {
- ///< Any CUDA device in the system can consistently consume remote writes to this device.
- pub const CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES: CUGPUDirectRDMAWritesOrdering_enum = CUGPUDirectRDMAWritesOrdering_enum(
- 200,
- );
-}
-#[repr(transparent)]
-/// Platform native ordering for GPUDirect RDMA writes
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUGPUDirectRDMAWritesOrdering_enum(pub ::core::ffi::c_uint);
-/// Platform native ordering for GPUDirect RDMA writes
-pub use self::CUGPUDirectRDMAWritesOrdering_enum as CUGPUDirectRDMAWritesOrdering;
-impl CUflushGPUDirectRDMAWritesScope_enum {
- ///< Blocks until remote writes are visible to the CUDA device context owning the data.
- pub const CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER: CUflushGPUDirectRDMAWritesScope_enum = CUflushGPUDirectRDMAWritesScope_enum(
- 100,
- );
-}
-impl CUflushGPUDirectRDMAWritesScope_enum {
- ///< Blocks until remote writes are visible to all CUDA device contexts.
- pub const CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES: CUflushGPUDirectRDMAWritesScope_enum = CUflushGPUDirectRDMAWritesScope_enum(
- 200,
- );
-}
-#[repr(transparent)]
-/// The scopes for ::cuFlushGPUDirectRDMAWrites
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUflushGPUDirectRDMAWritesScope_enum(pub ::core::ffi::c_uint);
-/// The scopes for ::cuFlushGPUDirectRDMAWrites
-pub use self::CUflushGPUDirectRDMAWritesScope_enum as CUflushGPUDirectRDMAWritesScope;
-impl CUflushGPUDirectRDMAWritesTarget_enum {
- ///< Sets the target for ::cuFlushGPUDirectRDMAWrites() to the currently active CUDA device context.
- pub const CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX: CUflushGPUDirectRDMAWritesTarget_enum = CUflushGPUDirectRDMAWritesTarget_enum(
- 0,
- );
-}
-#[repr(transparent)]
-/// The targets for ::cuFlushGPUDirectRDMAWrites
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUflushGPUDirectRDMAWritesTarget_enum(pub ::core::ffi::c_uint);
-/// The targets for ::cuFlushGPUDirectRDMAWrites
-pub use self::CUflushGPUDirectRDMAWritesTarget_enum as CUflushGPUDirectRDMAWritesTarget;
-impl CUgraphDebugDot_flags_enum {
- ///< Output all debug data as if every debug flag is enabled
- pub const CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
- 1,
- );
-}
-impl CUgraphDebugDot_flags_enum {
- ///< Use CUDA Runtime structures for output
- pub const CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
- 2,
- );
-}
-impl CUgraphDebugDot_flags_enum {
- ///< Adds CUDA_KERNEL_NODE_PARAMS values to output
- pub const CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
- 4,
- );
-}
-impl CUgraphDebugDot_flags_enum {
- ///< Adds CUDA_MEMCPY3D values to output
- pub const CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
- 8,
- );
-}
-impl CUgraphDebugDot_flags_enum {
- ///< Adds CUDA_MEMSET_NODE_PARAMS values to output
- pub const CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
- 16,
- );
-}
-impl CUgraphDebugDot_flags_enum {
- ///< Adds CUDA_HOST_NODE_PARAMS values to output
- pub const CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
- 32,
- );
-}
-impl CUgraphDebugDot_flags_enum {
- ///< Adds CUevent handle from record and wait nodes to output
- pub const CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
- 64,
- );
-}
-impl CUgraphDebugDot_flags_enum {
- ///< Adds CUDA_EXT_SEM_SIGNAL_NODE_PARAMS values to output
- pub const CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
- 128,
- );
-}
-impl CUgraphDebugDot_flags_enum {
- ///< Adds CUDA_EXT_SEM_WAIT_NODE_PARAMS values to output
- pub const CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
- 256,
- );
-}
-impl CUgraphDebugDot_flags_enum {
- ///< Adds CUkernelNodeAttrValue values to output
- pub const CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
- 512,
- );
-}
-impl CUgraphDebugDot_flags_enum {
- ///< Adds node handles and every kernel function handle to output
- pub const CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
- 1024,
- );
-}
-impl CUgraphDebugDot_flags_enum {
- ///< Adds memory alloc node parameters to output
- pub const CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
- 2048,
- );
-}
-impl CUgraphDebugDot_flags_enum {
- ///< Adds memory free node parameters to output
- pub const CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
- 4096,
- );
-}
-impl CUgraphDebugDot_flags_enum {
- ///< Adds batch mem op node parameters to output
- pub const CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
- 8192,
- );
-}
-impl CUgraphDebugDot_flags_enum {
- ///< Adds edge numbering information
- pub const CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
- 16384,
- );
-}
-impl CUgraphDebugDot_flags_enum {
- ///< Adds conditional node parameters to output
- pub const CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum(
- 32768,
- );
-}
-#[repr(transparent)]
-/// The additional write options for ::cuGraphDebugDotPrint
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUgraphDebugDot_flags_enum(pub ::core::ffi::c_uint);
-/// The additional write options for ::cuGraphDebugDotPrint
-pub use self::CUgraphDebugDot_flags_enum as CUgraphDebugDot_flags;
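// Editor's note: illustrative only — these flags are bit values wrapped in a newtype, so
// combining them for cuGraphDebugDotPrint means OR-ing the inner integers. The helper
// below is an assumption for the example, not part of the generated bindings.
fn example_debug_dot_flags() -> CUgraphDebugDot_flags {
    CUgraphDebugDot_flags(
        CUgraphDebugDot_flags::CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS.0
            | CUgraphDebugDot_flags::CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS.0,
    )
}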
-impl CUuserObject_flags_enum {
- ///< Indicates the destructor execution is not synchronized by any CUDA handle.
- pub const CU_USER_OBJECT_NO_DESTRUCTOR_SYNC: CUuserObject_flags_enum = CUuserObject_flags_enum(
- 1,
- );
-}
-#[repr(transparent)]
-/// Flags for user objects for graphs
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUuserObject_flags_enum(pub ::core::ffi::c_uint);
-/// Flags for user objects for graphs
-pub use self::CUuserObject_flags_enum as CUuserObject_flags;
-impl CUuserObjectRetain_flags_enum {
- ///< Transfer references from the caller rather than creating new references.
- pub const CU_GRAPH_USER_OBJECT_MOVE: CUuserObjectRetain_flags_enum = CUuserObjectRetain_flags_enum(
- 1,
- );
-}
-#[repr(transparent)]
-/// Flags for retaining user object references for graphs
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUuserObjectRetain_flags_enum(pub ::core::ffi::c_uint);
-/// Flags for retaining user object references for graphs
-pub use self::CUuserObjectRetain_flags_enum as CUuserObjectRetain_flags;
-impl CUgraphInstantiate_flags_enum {
- ///< Automatically free memory allocated in a graph before relaunching.
- pub const CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH: CUgraphInstantiate_flags_enum = CUgraphInstantiate_flags_enum(
- 1,
- );
-}
-impl CUgraphInstantiate_flags_enum {
- /**< Automatically upload the graph after instantiation. Only supported by
-::cuGraphInstantiateWithParams. The upload will be performed using the
-stream provided in \p instantiateParams.*/
- pub const CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD: CUgraphInstantiate_flags_enum = CUgraphInstantiate_flags_enum(
- 2,
- );
-}
-impl CUgraphInstantiate_flags_enum {
- /**< Instantiate the graph to be launchable from the device. This flag can only
-be used on platforms which support unified addressing. This flag cannot be
-used in conjunction with CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH.*/
- pub const CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH: CUgraphInstantiate_flags_enum = CUgraphInstantiate_flags_enum(
- 4,
- );
-}
-impl CUgraphInstantiate_flags_enum {
- /**< Run the graph using the per-node priority attributes rather than the
-priority of the stream it is launched into.*/
- pub const CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY: CUgraphInstantiate_flags_enum = CUgraphInstantiate_flags_enum(
- 8,
- );
-}
-#[repr(transparent)]
-/// Flags for instantiating a graph
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUgraphInstantiate_flags_enum(pub ::core::ffi::c_uint);
-/// Flags for instantiating a graph
-pub use self::CUgraphInstantiate_flags_enum as CUgraphInstantiate_flags;
-impl CUdeviceNumaConfig_enum {
- ///< The GPU is not a NUMA node
- pub const CU_DEVICE_NUMA_CONFIG_NONE: CUdeviceNumaConfig_enum = CUdeviceNumaConfig_enum(
- 0,
- );
-}
-impl CUdeviceNumaConfig_enum {
- ///< The GPU is a NUMA node, CU_DEVICE_ATTRIBUTE_NUMA_ID contains its NUMA ID
- pub const CU_DEVICE_NUMA_CONFIG_NUMA_NODE: CUdeviceNumaConfig_enum = CUdeviceNumaConfig_enum(
- 1,
- );
-}
-#[repr(transparent)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUdeviceNumaConfig_enum(pub ::core::ffi::c_uint);
-pub use self::CUdeviceNumaConfig_enum as CUdeviceNumaConfig;
-impl CUmoduleLoadingMode_enum {
- ///< Lazy Kernel Loading is not enabled
- pub const CU_MODULE_EAGER_LOADING: CUmoduleLoadingMode_enum = CUmoduleLoadingMode_enum(
- 1,
- );
-}
-impl CUmoduleLoadingMode_enum {
- ///< Lazy Kernel Loading is enabled
- pub const CU_MODULE_LAZY_LOADING: CUmoduleLoadingMode_enum = CUmoduleLoadingMode_enum(
- 2,
- );
-}
-#[repr(transparent)]
-/// CUDA Lazy Loading status
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUmoduleLoadingMode_enum(pub ::core::ffi::c_uint);
-/// CUDA Lazy Loading status
-pub use self::CUmoduleLoadingMode_enum as CUmoduleLoadingMode;
-impl CUfunctionLoadingState_enum {
- pub const CU_FUNCTION_LOADING_STATE_UNLOADED: CUfunctionLoadingState_enum = CUfunctionLoadingState_enum(
- 0,
- );
-}
-impl CUfunctionLoadingState_enum {
- pub const CU_FUNCTION_LOADING_STATE_LOADED: CUfunctionLoadingState_enum = CUfunctionLoadingState_enum(
- 1,
- );
-}
-impl CUfunctionLoadingState_enum {
- pub const CU_FUNCTION_LOADING_STATE_MAX: CUfunctionLoadingState_enum = CUfunctionLoadingState_enum(
- 2,
- );
-}
-#[repr(transparent)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUfunctionLoadingState_enum(pub ::core::ffi::c_uint);
-pub use self::CUfunctionLoadingState_enum as CUfunctionLoadingState;
-impl CUcoredumpSettings_enum {
- pub const CU_COREDUMP_ENABLE_ON_EXCEPTION: CUcoredumpSettings_enum = CUcoredumpSettings_enum(
- 1,
- );
-}
-impl CUcoredumpSettings_enum {
- pub const CU_COREDUMP_TRIGGER_HOST: CUcoredumpSettings_enum = CUcoredumpSettings_enum(
- 2,
- );
-}
-impl CUcoredumpSettings_enum {
- pub const CU_COREDUMP_LIGHTWEIGHT: CUcoredumpSettings_enum = CUcoredumpSettings_enum(
- 3,
- );
-}
-impl CUcoredumpSettings_enum {
- pub const CU_COREDUMP_ENABLE_USER_TRIGGER: CUcoredumpSettings_enum = CUcoredumpSettings_enum(
- 4,
- );
-}
-impl CUcoredumpSettings_enum {
- pub const CU_COREDUMP_FILE: CUcoredumpSettings_enum = CUcoredumpSettings_enum(5);
-}
-impl CUcoredumpSettings_enum {
- pub const CU_COREDUMP_PIPE: CUcoredumpSettings_enum = CUcoredumpSettings_enum(6);
-}
-impl CUcoredumpSettings_enum {
- pub const CU_COREDUMP_MAX: CUcoredumpSettings_enum = CUcoredumpSettings_enum(7);
-}
-#[repr(transparent)]
-/// Flags for choosing a coredump attribute to get/set
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUcoredumpSettings_enum(pub ::core::ffi::c_uint);
-/// Flags for choosing a coredump attribute to get/set
-pub use self::CUcoredumpSettings_enum as CUcoredumpSettings;
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUgreenCtx_st {
- _unused: [u8; 0],
-}
-/** \typedef typedef struct CUgreenCtx_st* CUgreenCtx
- A green context handle. This handle can be used safely from only one CPU thread at a time.
- Created via ::cuGreenCtxCreate*/
-pub type CUgreenCtx = *mut CUgreenCtx_st;
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUdevResourceDesc_st {
- _unused: [u8; 0],
-}
-/** \typedef struct CUdevResourceDesc_st* CUdevResourceDesc;
- An opaque descriptor handle. The descriptor encapsulates multiple created and configured resources.
- Created via ::cuDevResourceGenerateDesc*/
-pub type CUdevResourceDesc = *mut CUdevResourceDesc_st;
-impl CUgreenCtxCreate_flags {
- ///< Required. Creates a default stream to use inside the green context
- pub const CU_GREEN_CTX_DEFAULT_STREAM: CUgreenCtxCreate_flags = CUgreenCtxCreate_flags(
- 1,
- );
-}
-#[repr(transparent)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUgreenCtxCreate_flags(pub ::core::ffi::c_uint);
-impl CUdevResourceType {
- pub const CU_DEV_RESOURCE_TYPE_INVALID: CUdevResourceType = CUdevResourceType(0);
-}
-impl CUdevResourceType {
- ///< Streaming multiprocessors related information
- pub const CU_DEV_RESOURCE_TYPE_SM: CUdevResourceType = CUdevResourceType(1);
-}
-impl CUdevResourceType {
- pub const CU_DEV_RESOURCE_TYPE_MAX: CUdevResourceType = CUdevResourceType(2);
-}
-#[repr(transparent)]
-/** \typedef enum CUdevResourceType
- Type of resource*/
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUdevResourceType(pub ::core::ffi::c_uint);
-/** \struct CUdevSmResource
- Data for SM-related resources*/
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUdevSmResource_st {
-    ///< The number of streaming multiprocessors available in this resource. This is an output parameter only; do not write to this field.
- pub smCount: ::core::ffi::c_uint,
-}
-/** \struct CUdevSmResource
- Data for SM-related resources*/
-pub type CUdevSmResource = CUdevSmResource_st;
-/** \struct CUdevResource
- A tagged union describing different resources identified by the type field. This structure should not be directly modified outside of the API that created it.
- \code
- struct {
- CUdevResourceType type;
- union {
- CUdevSmResource sm;
- };
- };
- \endcode
- - If \p type is \p CU_DEV_RESOURCE_TYPE_INVALID, this resource is not valid and cannot be further accessed.
- - If \p type is \p CU_DEV_RESOURCE_TYPE_SM, the ::CUdevSmResource structure \p sm is filled in. For example,
- \p sm.smCount will reflect the number of streaming multiprocessors available in this resource.*/
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct CUdevResource_st {
- ///< Type of resource, dictates which union field was last set
- pub type_: CUdevResourceType,
- pub _internal_padding: [::core::ffi::c_uchar; 92usize],
- pub __bindgen_anon_1: CUdevResource_st__bindgen_ty_1,
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub union CUdevResource_st__bindgen_ty_1 {
- ///< Resource corresponding to CU_DEV_RESOURCE_TYPE_SM \p. type.
- pub sm: CUdevSmResource,
- pub _oversize: [::core::ffi::c_uchar; 48usize],
-}
-/** \struct CUdevResource
- A tagged union describing different resources identified by the type field. This structure should not be directly modified outside of the API that created it.
- \code
- struct {
- CUdevResourceType type;
- union {
- CUdevSmResource sm;
- };
- };
- \endcode
- - If \p type is \p CU_DEV_RESOURCE_TYPE_INVALID, this resource is not valid and cannot be further accessed.
- - If \p type is \p CU_DEV_RESOURCE_TYPE_SM, the ::CUdevSmResource structure \p sm is filled in. For example,
- \p sm.smCount will reflect the number of streaming multiprocessors available in this resource.*/
-pub type CUdevResource_v1 = CUdevResource_st;
-/** \struct CUdevResource
- A tagged union describing different resources identified by the type field. This structure should not be directly modified outside of the API that created it.
- \code
- struct {
- CUdevResourceType type;
- union {
- CUdevSmResource sm;
- };
- };
- \endcode
- - If \p type is \p CU_DEV_RESOURCE_TYPE_INVALID, this resource is not valid and cannot be further accessed.
- - If \p type is \p CU_DEV_RESOURCE_TYPE_SM, the ::CUdevSmResource structure \p sm is filled in. For example,
- \p sm.smCount will reflect the number of streaming multiprocessors available in this resource.*/
-pub type CUdevResource = CUdevResource_v1;
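// Illustrative sketch, not part of the generated bindings: reading the tagged
// union above. `resource` is assumed to have been filled in by the driver (for
// example via cuDeviceGetDevResource, not shown here); a union member may only
// be read when `type_` marks it as active, hence the `unsafe` block.
fn sm_count(resource: &CUdevResource) -> Option<::core::ffi::c_uint> {
    if resource.type_ == CUdevResourceType::CU_DEV_RESOURCE_TYPE_SM {
        // `sm` is the active member only for CU_DEV_RESOURCE_TYPE_SM.
        Some(unsafe { resource.__bindgen_anon_1.sm }.smCount)
    } else {
        None
    }
}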
-#[repr(transparent)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUdeviceptr_v1(pub ::core::ffi::c_uint);
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_MEMCPY2D_v1_st {
- ///< Source X in bytes
- pub srcXInBytes: ::core::ffi::c_uint,
- ///< Source Y
- pub srcY: ::core::ffi::c_uint,
- ///< Source memory type (host, device, array)
- pub srcMemoryType: CUmemorytype,
- ///< Source host pointer
- pub srcHost: *const ::core::ffi::c_void,
- ///< Source device pointer
- pub srcDevice: CUdeviceptr_v1,
- ///< Source array reference
- pub srcArray: CUarray,
- ///< Source pitch (ignored when src is array)
- pub srcPitch: ::core::ffi::c_uint,
- ///< Destination X in bytes
- pub dstXInBytes: ::core::ffi::c_uint,
- ///< Destination Y
- pub dstY: ::core::ffi::c_uint,
- ///< Destination memory type (host, device, array)
- pub dstMemoryType: CUmemorytype,
- ///< Destination host pointer
- pub dstHost: *mut ::core::ffi::c_void,
- ///< Destination device pointer
- pub dstDevice: CUdeviceptr_v1,
- ///< Destination array reference
- pub dstArray: CUarray,
- ///< Destination pitch (ignored when dst is array)
- pub dstPitch: ::core::ffi::c_uint,
- ///< Width of 2D memory copy in bytes
- pub WidthInBytes: ::core::ffi::c_uint,
- ///< Height of 2D memory copy
- pub Height: ::core::ffi::c_uint,
-}
-pub type CUDA_MEMCPY2D_v1 = CUDA_MEMCPY2D_v1_st;
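// Illustrative sketch, not part of the generated bindings: filling the legacy
// _v1 descriptor above for a host-to-device 2D copy. It assumes the
// CU_MEMORYTYPE_HOST/CU_MEMORYTYPE_DEVICE constants that these bindings define
// on CUmemorytype, and a `dst` coming from the 32-bit allocation API the _v1
// structures belong to; the result would be passed to the matching _v1 memcpy
// entry point (not shown).
fn host_to_device_2d_v1(
    src: *const ::core::ffi::c_void,
    src_pitch: ::core::ffi::c_uint,
    dst: CUdeviceptr_v1,
    dst_pitch: ::core::ffi::c_uint,
    width_in_bytes: ::core::ffi::c_uint,
    height: ::core::ffi::c_uint,
) -> CUDA_MEMCPY2D_v1 {
    // All-zero is a valid starting point: pointers become null and offsets zero.
    let mut copy: CUDA_MEMCPY2D_v1 = unsafe { ::core::mem::zeroed() };
    copy.srcMemoryType = CUmemorytype::CU_MEMORYTYPE_HOST;
    copy.srcHost = src;
    copy.srcPitch = src_pitch;
    copy.dstMemoryType = CUmemorytype::CU_MEMORYTYPE_DEVICE;
    copy.dstDevice = dst;
    copy.dstPitch = dst_pitch;
    copy.WidthInBytes = width_in_bytes;
    copy.Height = height;
    copy
}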
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_MEMCPY3D_v1_st {
- ///< Source X in bytes
- pub srcXInBytes: ::core::ffi::c_uint,
- ///< Source Y
- pub srcY: ::core::ffi::c_uint,
- ///< Source Z
- pub srcZ: ::core::ffi::c_uint,
- ///< Source LOD
- pub srcLOD: ::core::ffi::c_uint,
- ///< Source memory type (host, device, array)
- pub srcMemoryType: CUmemorytype,
- ///< Source host pointer
- pub srcHost: *const ::core::ffi::c_void,
- ///< Source device pointer
- pub srcDevice: CUdeviceptr_v1,
- ///< Source array reference
- pub srcArray: CUarray,
- ///< Must be NULL
- pub reserved0: *mut ::core::ffi::c_void,
- ///< Source pitch (ignored when src is array)
- pub srcPitch: ::core::ffi::c_uint,
- ///< Source height (ignored when src is array; may be 0 if Depth==1)
- pub srcHeight: ::core::ffi::c_uint,
- ///< Destination X in bytes
- pub dstXInBytes: ::core::ffi::c_uint,
- ///< Destination Y
- pub dstY: ::core::ffi::c_uint,
- ///< Destination Z
- pub dstZ: ::core::ffi::c_uint,
- ///< Destination LOD
- pub dstLOD: ::core::ffi::c_uint,
- ///< Destination memory type (host, device, array)
- pub dstMemoryType: CUmemorytype,
- ///< Destination host pointer
- pub dstHost: *mut ::core::ffi::c_void,
- ///< Destination device pointer
- pub dstDevice: CUdeviceptr_v1,
- ///< Destination array reference
- pub dstArray: CUarray,
- ///< Must be NULL
- pub reserved1: *mut ::core::ffi::c_void,
- ///< Destination pitch (ignored when dst is array)
- pub dstPitch: ::core::ffi::c_uint,
- ///< Destination height (ignored when dst is array; may be 0 if Depth==1)
- pub dstHeight: ::core::ffi::c_uint,
- ///< Width of 3D memory copy in bytes
- pub WidthInBytes: ::core::ffi::c_uint,
- ///< Height of 3D memory copy
- pub Height: ::core::ffi::c_uint,
- ///< Depth of 3D memory copy
- pub Depth: ::core::ffi::c_uint,
-}
-pub type CUDA_MEMCPY3D_v1 = CUDA_MEMCPY3D_v1_st;
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_ARRAY_DESCRIPTOR_v1_st {
- ///< Width of array
- pub Width: ::core::ffi::c_uint,
- ///< Height of array
- pub Height: ::core::ffi::c_uint,
- ///< Array format
- pub Format: CUarray_format,
- ///< Channels per array element
- pub NumChannels: ::core::ffi::c_uint,
-}
-pub type CUDA_ARRAY_DESCRIPTOR_v1 = CUDA_ARRAY_DESCRIPTOR_v1_st;
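// Illustrative sketch, not part of the generated bindings: describing a
// width x height, 4-channel, 8-bit array with the legacy _v1 descriptor above.
// CU_AD_FORMAT_UNSIGNED_INT8 is assumed to be among the CUarray_format
// constants defined elsewhere in these bindings.
fn rgba8_array_desc_v1(
    width: ::core::ffi::c_uint,
    height: ::core::ffi::c_uint,
) -> CUDA_ARRAY_DESCRIPTOR_v1 {
    CUDA_ARRAY_DESCRIPTOR_v1 {
        Width: width,
        Height: height,
        Format: CUarray_format::CU_AD_FORMAT_UNSIGNED_INT8,
        NumChannels: 4,
    }
}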
-#[repr(C)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUDA_ARRAY3D_DESCRIPTOR_v1_st {
- ///< Width of 3D array
- pub Width: ::core::ffi::c_uint,
- ///< Height of 3D array
- pub Height: ::core::ffi::c_uint,
- ///< Depth of 3D array
- pub Depth: ::core::ffi::c_uint,
- ///< Array format
- pub Format: CUarray_format,
- ///< Channels per array element
- pub NumChannels: ::core::ffi::c_uint,
- ///< Flags
- pub Flags: ::core::ffi::c_uint,
-}
-pub type CUDA_ARRAY3D_DESCRIPTOR_v1 = CUDA_ARRAY3D_DESCRIPTOR_v1_st;
-impl CUoutput_mode_enum {
- ///< Output mode Key-Value pair format.
- pub const CU_OUT_KEY_VALUE_PAIR: CUoutput_mode_enum = CUoutput_mode_enum(0);
-}
-impl CUoutput_mode_enum {
- ///< Output mode Comma separated values format.
- pub const CU_OUT_CSV: CUoutput_mode_enum = CUoutput_mode_enum(1);
-}
-#[repr(transparent)]
-/// Profiler Output Modes
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUoutput_mode_enum(pub ::core::ffi::c_uint);
-/// Profiler Output Modes
-pub use self::CUoutput_mode_enum as CUoutput_mode;
-pub type GLenum = ::core::ffi::c_uint;
-pub type GLuint = ::core::ffi::c_uint;
-pub type khronos_int32_t = i32;
-impl CUGLDeviceList_enum {
- ///< The CUDA devices for all GPUs used by the current OpenGL context
- pub const CU_GL_DEVICE_LIST_ALL: CUGLDeviceList_enum = CUGLDeviceList_enum(1);
-}
-impl CUGLDeviceList_enum {
- ///< The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame
- pub const CU_GL_DEVICE_LIST_CURRENT_FRAME: CUGLDeviceList_enum = CUGLDeviceList_enum(
- 2,
- );
-}
-impl CUGLDeviceList_enum {
- ///< The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame
- pub const CU_GL_DEVICE_LIST_NEXT_FRAME: CUGLDeviceList_enum = CUGLDeviceList_enum(3);
-}
-#[repr(transparent)]
-/// CUDA devices corresponding to an OpenGL device
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUGLDeviceList_enum(pub ::core::ffi::c_uint);
-/// CUDA devices corresponding to an OpenGL device
-pub use self::CUGLDeviceList_enum as CUGLDeviceList;
-impl CUGLmap_flags_enum {
- pub const CU_GL_MAP_RESOURCE_FLAGS_NONE: CUGLmap_flags_enum = CUGLmap_flags_enum(0);
-}
-impl CUGLmap_flags_enum {
- pub const CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY: CUGLmap_flags_enum = CUGLmap_flags_enum(
- 1,
- );
-}
-impl CUGLmap_flags_enum {
- pub const CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD: CUGLmap_flags_enum = CUGLmap_flags_enum(
- 2,
- );
-}
-#[repr(transparent)]
-/// Flags to map or unmap a resource
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUGLmap_flags_enum(pub ::core::ffi::c_uint);
-/// Flags to map or unmap a resource
-pub use self::CUGLmap_flags_enum as CUGLmap_flags;
-pub type EGLint = khronos_int32_t;
-pub type EGLSyncKHR = *mut ::core::ffi::c_void;
-pub type EGLImageKHR = *mut ::core::ffi::c_void;
-pub type EGLStreamKHR = *mut ::core::ffi::c_void;
-impl CUeglFrameType_enum {
- ///< Frame type CUDA array
- pub const CU_EGL_FRAME_TYPE_ARRAY: CUeglFrameType_enum = CUeglFrameType_enum(0);
-}
-impl CUeglFrameType_enum {
- ///< Frame type pointer
- pub const CU_EGL_FRAME_TYPE_PITCH: CUeglFrameType_enum = CUeglFrameType_enum(1);
-}
-#[repr(transparent)]
-/// CUDA EglFrame type - array or pointer
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUeglFrameType_enum(pub ::core::ffi::c_uint);
-/// CUDA EglFrame type - array or pointer
-pub use self::CUeglFrameType_enum as CUeglFrameType;
-impl CUeglResourceLocationFlags_enum {
- ///< Resource location sysmem
- pub const CU_EGL_RESOURCE_LOCATION_SYSMEM: CUeglResourceLocationFlags_enum = CUeglResourceLocationFlags_enum(
- 0,
- );
-}
-impl CUeglResourceLocationFlags_enum {
- ///< Resource location vidmem
- pub const CU_EGL_RESOURCE_LOCATION_VIDMEM: CUeglResourceLocationFlags_enum = CUeglResourceLocationFlags_enum(
- 1,
- );
-}
-#[repr(transparent)]
-/** Resource location flags - sysmem or vidmem
-
- For a CUDA context on an iGPU, video and system memory are equivalent, so
- these flags have no effect on execution.
-
- For CUDA context on dGPU, applications can use the flag ::CUeglResourceLocationFlags
- to give a hint about the desired location.
-
- ::CU_EGL_RESOURCE_LOCATION_SYSMEM - the frame data is made resident on the system memory
- to be accessed by CUDA.
-
- ::CU_EGL_RESOURCE_LOCATION_VIDMEM - the frame data is made resident on the dedicated
- video memory to be accessed by CUDA.
-
- There may be an additional latency due to new allocation and data migration,
- if the frame is produced on a different memory.
-*/
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUeglResourceLocationFlags_enum(pub ::core::ffi::c_uint);
-/** Resource location flags - sysmem or vidmem
-
- For a CUDA context on an iGPU, video and system memory are equivalent, so
- these flags have no effect on execution.
-
- For CUDA context on dGPU, applications can use the flag ::CUeglResourceLocationFlags
- to give a hint about the desired location.
-
- ::CU_EGL_RESOURCE_LOCATION_SYSMEM - the frame data is made resident on the system memory
- to be accessed by CUDA.
-
- ::CU_EGL_RESOURCE_LOCATION_VIDMEM - the frame data is made resident on the dedicated
- video memory to be accessed by CUDA.
-
- There may be an additional latency due to new allocation and data migration,
- if the frame is produced on a different memory.
-*/
-pub use self::CUeglResourceLocationFlags_enum as CUeglResourceLocationFlags;
-impl CUeglColorFormat_enum {
- ///< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_YUV420_PLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 0,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar.
- pub const CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 1,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_YUV422_PLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 2,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar.
- pub const CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 3,
- );
-}
-impl CUeglColorFormat_enum {
- ///< R/G/B three channels in one surface with BGR byte ordering. Only pitch linear format supported.
- pub const CU_EGL_COLOR_FORMAT_RGB: CUeglColorFormat_enum = CUeglColorFormat_enum(4);
-}
-impl CUeglColorFormat_enum {
- ///< R/G/B three channels in one surface with RGB byte ordering. Only pitch linear format supported.
- pub const CU_EGL_COLOR_FORMAT_BGR: CUeglColorFormat_enum = CUeglColorFormat_enum(5);
-}
-impl CUeglColorFormat_enum {
- ///< R/G/B/A four channels in one surface with BGRA byte ordering.
- pub const CU_EGL_COLOR_FORMAT_ARGB: CUeglColorFormat_enum = CUeglColorFormat_enum(6);
-}
-impl CUeglColorFormat_enum {
- ///< R/G/B/A four channels in one surface with ABGR byte ordering.
- pub const CU_EGL_COLOR_FORMAT_RGBA: CUeglColorFormat_enum = CUeglColorFormat_enum(7);
-}
-impl CUeglColorFormat_enum {
- ///< single luminance channel in one surface.
- pub const CU_EGL_COLOR_FORMAT_L: CUeglColorFormat_enum = CUeglColorFormat_enum(8);
-}
-impl CUeglColorFormat_enum {
- ///< single color channel in one surface.
- pub const CU_EGL_COLOR_FORMAT_R: CUeglColorFormat_enum = CUeglColorFormat_enum(9);
-}
-impl CUeglColorFormat_enum {
- ///< Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_YUV444_PLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 10,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar.
- pub const CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 11,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, U, V in one surface, interleaved as UYVY in one channel.
- pub const CU_EGL_COLOR_FORMAT_YUYV_422: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 12,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, U, V in one surface, interleaved as YUYV in one channel.
- pub const CU_EGL_COLOR_FORMAT_UYVY_422: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 13,
- );
-}
-impl CUeglColorFormat_enum {
- ///< R/G/B/A four channels in one surface with RGBA byte ordering.
- pub const CU_EGL_COLOR_FORMAT_ABGR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 14,
- );
-}
-impl CUeglColorFormat_enum {
- ///< R/G/B/A four channels in one surface with ARGB byte ordering.
- pub const CU_EGL_COLOR_FORMAT_BGRA: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 15,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Alpha color format - one channel in one surface.
- pub const CU_EGL_COLOR_FORMAT_A: CUeglColorFormat_enum = CUeglColorFormat_enum(16);
-}
-impl CUeglColorFormat_enum {
- ///< R/G color format - two channels in one surface with GR byte ordering
- pub const CU_EGL_COLOR_FORMAT_RG: CUeglColorFormat_enum = CUeglColorFormat_enum(17);
-}
-impl CUeglColorFormat_enum {
- ///< Y, U, V, A four channels in one surface, interleaved as VUYA.
- pub const CU_EGL_COLOR_FORMAT_AYUV: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 18,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 19,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 20,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 21,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 22,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 23,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 24,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 25,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y, U, V in one surface, interleaved as YVYU in one channel.
- pub const CU_EGL_COLOR_FORMAT_VYUY_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 26,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y, U, V in one surface, interleaved as YUYV in one channel.
- pub const CU_EGL_COLOR_FORMAT_UYVY_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 27,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y, U, V in one surface, interleaved as UYVY in one channel.
- pub const CU_EGL_COLOR_FORMAT_YUYV_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 28,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y, U, V in one surface, interleaved as VYUY in one channel.
- pub const CU_EGL_COLOR_FORMAT_YVYU_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 29,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported.
- pub const CU_EGL_COLOR_FORMAT_YUV_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 30,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY.
- pub const CU_EGL_COLOR_FORMAT_YUVA_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 31,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA.
- pub const CU_EGL_COLOR_FORMAT_AYUV_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 32,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 33,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 34,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 35,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 36,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 37,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 38,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 39,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 40,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 41,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 42,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 43,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 44,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer format - one channel in one surface with interleaved RGGB ordering.
- pub const CU_EGL_COLOR_FORMAT_BAYER_RGGB: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 45,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer format - one channel in one surface with interleaved BGGR ordering.
- pub const CU_EGL_COLOR_FORMAT_BAYER_BGGR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 46,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer format - one channel in one surface with interleaved GRBG ordering.
- pub const CU_EGL_COLOR_FORMAT_BAYER_GRBG: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 47,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer format - one channel in one surface with interleaved GBRG ordering.
- pub const CU_EGL_COLOR_FORMAT_BAYER_GBRG: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 48,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER10_RGGB: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 49,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER10_BGGR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 50,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER10_GRBG: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 51,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER10_GBRG: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 52,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER12_RGGB: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 53,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER12_BGGR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 54,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER12_GRBG: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 55,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER12_GBRG: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 56,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER14_RGGB: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 57,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER14_BGGR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 58,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER14_GRBG: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 59,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER14_GBRG: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 60,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER20_RGGB: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 61,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER20_BGGR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 62,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER20_GRBG: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 63,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER20_GBRG: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 64,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_YVU444_PLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 65,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_YVU422_PLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 66,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_YVU420_PLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 67,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype.
- pub const CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 68,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype.
- pub const CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 69,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype.
- pub const CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 70,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype.
- pub const CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 71,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer format - one channel in one surface with interleaved BCCR ordering.
- pub const CU_EGL_COLOR_FORMAT_BAYER_BCCR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 72,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer format - one channel in one surface with interleaved RCCB ordering.
- pub const CU_EGL_COLOR_FORMAT_BAYER_RCCB: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 73,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer format - one channel in one surface with interleaved CRBC ordering.
- pub const CU_EGL_COLOR_FORMAT_BAYER_CRBC: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 74,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer format - one channel in one surface with interleaved CBRC ordering.
- pub const CU_EGL_COLOR_FORMAT_BAYER_CBRC: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 75,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER10_CCCC: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 76,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER12_BCCR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 77,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER12_RCCB: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 78,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER12_CRBC: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 79,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER12_CBRC: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 80,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op.
- pub const CU_EGL_COLOR_FORMAT_BAYER12_CCCC: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 81,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Color format for single Y plane.
- pub const CU_EGL_COLOR_FORMAT_Y: CUeglColorFormat_enum = CUeglColorFormat_enum(82);
-}
-impl CUeglColorFormat_enum {
- ///< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 83,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 84,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height= 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 85,
- );
-}
-impl CUeglColorFormat_enum {
- /**< Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height
-= 1/2 Y height.*/
- pub const CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 86,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 87,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 88,
- );
-}
-impl CUeglColorFormat_enum {
- /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height
-= 1/2 Y height.*/
- pub const CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 89,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 90,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 91,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 92,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 93,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 94,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 95,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Color format for single Y plane.
- pub const CU_EGL_COLOR_FORMAT_Y_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 96,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Color format for single Y plane.
- pub const CU_EGL_COLOR_FORMAT_Y_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 97,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Color format for single Y10 plane.
- pub const CU_EGL_COLOR_FORMAT_Y10_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 98,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Color format for single Y10 plane.
- pub const CU_EGL_COLOR_FORMAT_Y10_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 99,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Color format for single Y12 plane.
- pub const CU_EGL_COLOR_FORMAT_Y12_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 100,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Color format for single Y12 plane.
- pub const CU_EGL_COLOR_FORMAT_Y12_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 101,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, U, V, A four channels in one surface, interleaved as AVUY.
- pub const CU_EGL_COLOR_FORMAT_YUVA: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 102,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported.
- pub const CU_EGL_COLOR_FORMAT_YUV: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 103,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, U, V in one surface, interleaved as YVYU in one channel.
- pub const CU_EGL_COLOR_FORMAT_YVYU: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 104,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Y, U, V in one surface, interleaved as VYUY in one channel.
- pub const CU_EGL_COLOR_FORMAT_VYUY: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 105,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 106,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 107,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 108,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 109,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 110,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
- pub const CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 111,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 112,
- );
-}
-impl CUeglColorFormat_enum {
- ///< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height.
- pub const CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 113,
- );
-}
-impl CUeglColorFormat_enum {
- pub const CU_EGL_COLOR_FORMAT_MAX: CUeglColorFormat_enum = CUeglColorFormat_enum(
- 114,
- );
-}
-#[repr(transparent)]
-/** CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA_EGL interops.
- Three channel formats are currently not supported for ::CU_EGL_FRAME_TYPE_ARRAY*/
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct CUeglColorFormat_enum(pub ::core::ffi::c_uint);
-/** CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA_EGL interops.
- Three channel formats are currently not supported for ::CU_EGL_FRAME_TYPE_ARRAY*/
-pub use self::CUeglColorFormat_enum as CUeglColorFormat;
-/** CUDA EGLFrame structure Descriptor - structure defining one frame of EGL.
-
- Each frame may contain one or more planes depending on whether the surface is multiplanar or not.*/
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct CUeglFrame_st {
- pub frame: CUeglFrame_st__bindgen_ty_1,
- ///< Width of first plane
- pub width: ::core::ffi::c_uint,
- ///< Height of first plane
- pub height: ::core::ffi::c_uint,
- ///< Depth of first plane
- pub depth: ::core::ffi::c_uint,
- ///< Pitch of first plane
- pub pitch: ::core::ffi::c_uint,
- ///< Number of planes
- pub planeCount: ::core::ffi::c_uint,
- ///< Number of channels for the plane
- pub numChannels: ::core::ffi::c_uint,
- ///< Array or Pitch
- pub frameType: CUeglFrameType,
- ///< CUDA EGL Color Format
- pub eglColorFormat: CUeglColorFormat,
- ///< CUDA Array Format
- pub cuFormat: CUarray_format,
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub union CUeglFrame_st__bindgen_ty_1 {
- ///< Array of CUarray corresponding to each plane
- pub pArray: [CUarray; 3usize],
- ///< Array of Pointers corresponding to each plane
- pub pPitch: [*mut ::core::ffi::c_void; 3usize],
-}
-/** CUDA EGLFrame structure Descriptor - structure defining one frame of EGL.
-
- Each frame may contain one or more planes depending on whether the surface is multiplanar or not.*/
-pub type CUeglFrame_v1 = CUeglFrame_st;
-/** CUDA EGLFrame structure Descriptor - structure defining one frame of EGL.
-
- Each frame may contain one or more planes depending on whether the surface is multiplanar or not.*/
-pub type CUeglFrame = CUeglFrame_v1;
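// Illustrative sketch, not part of the generated bindings: choosing the active
// union member of a CUeglFrame based on `frameType`. The frame is assumed to
// come from an EGL interop call such as cuEGLStreamConsumerAcquireFrame (not
// shown); only the first `planeCount` entries of the returned array are meaningful.
enum EglPlanes {
    Arrays([CUarray; 3usize]),
    Pitched([*mut ::core::ffi::c_void; 3usize]),
}

fn egl_frame_planes(frame: &CUeglFrame) -> EglPlanes {
    if frame.frameType == CUeglFrameType::CU_EGL_FRAME_TYPE_ARRAY {
        EglPlanes::Arrays(unsafe { frame.frame.pArray })
    } else {
        EglPlanes::Pitched(unsafe { frame.frame.pPitch })
    }
}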
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct CUeglStreamConnection_st {
- _unused: [u8; 0],
-}
-/// CUDA EGLStream Connection
-pub type CUeglStreamConnection = *mut CUeglStreamConnection_st;
-impl VdpStatus {
- pub const VDP_STATUS_OK: VdpStatus = VdpStatus(0);
-}
-impl VdpStatus {
- pub const VDP_STATUS_NO_IMPLEMENTATION: VdpStatus = VdpStatus(1);
-}
-impl VdpStatus {
- pub const VDP_STATUS_DISPLAY_PREEMPTED: VdpStatus = VdpStatus(2);
-}
-impl VdpStatus {
- pub const VDP_STATUS_INVALID_HANDLE: VdpStatus = VdpStatus(3);
-}
-impl VdpStatus {
- pub const VDP_STATUS_INVALID_POINTER: VdpStatus = VdpStatus(4);
-}
-impl VdpStatus {
- pub const VDP_STATUS_INVALID_CHROMA_TYPE: VdpStatus = VdpStatus(5);
-}
-impl VdpStatus {
- pub const VDP_STATUS_INVALID_Y_CB_CR_FORMAT: VdpStatus = VdpStatus(6);
-}
-impl VdpStatus {
- pub const VDP_STATUS_INVALID_RGBA_FORMAT: VdpStatus = VdpStatus(7);
-}
-impl VdpStatus {
- pub const VDP_STATUS_INVALID_INDEXED_FORMAT: VdpStatus = VdpStatus(8);
-}
-impl VdpStatus {
- pub const VDP_STATUS_INVALID_COLOR_STANDARD: VdpStatus = VdpStatus(9);
-}
-impl VdpStatus {
- pub const VDP_STATUS_INVALID_COLOR_TABLE_FORMAT: VdpStatus = VdpStatus(10);
-}
-impl VdpStatus {
- pub const VDP_STATUS_INVALID_BLEND_FACTOR: VdpStatus = VdpStatus(11);
-}
-impl VdpStatus {
- pub const VDP_STATUS_INVALID_BLEND_EQUATION: VdpStatus = VdpStatus(12);
-}
-impl VdpStatus {
- pub const VDP_STATUS_INVALID_FLAG: VdpStatus = VdpStatus(13);
-}
-impl VdpStatus {
- pub const VDP_STATUS_INVALID_DECODER_PROFILE: VdpStatus = VdpStatus(14);
-}
-impl VdpStatus {
- pub const VDP_STATUS_INVALID_VIDEO_MIXER_FEATURE: VdpStatus = VdpStatus(15);
-}
-impl VdpStatus {
- pub const VDP_STATUS_INVALID_VIDEO_MIXER_PARAMETER: VdpStatus = VdpStatus(16);
-}
-impl VdpStatus {
- pub const VDP_STATUS_INVALID_VIDEO_MIXER_ATTRIBUTE: VdpStatus = VdpStatus(17);
-}
-impl VdpStatus {
- pub const VDP_STATUS_INVALID_VIDEO_MIXER_PICTURE_STRUCTURE: VdpStatus = VdpStatus(
- 18,
- );
-}
-impl VdpStatus {
- pub const VDP_STATUS_INVALID_FUNC_ID: VdpStatus = VdpStatus(19);
-}
-impl VdpStatus {
- pub const VDP_STATUS_INVALID_SIZE: VdpStatus = VdpStatus(20);
-}
-impl VdpStatus {
- pub const VDP_STATUS_INVALID_VALUE: VdpStatus = VdpStatus(21);
-}
-impl VdpStatus {
- pub const VDP_STATUS_INVALID_STRUCT_VERSION: VdpStatus = VdpStatus(22);
-}
-impl VdpStatus {
- pub const VDP_STATUS_RESOURCES: VdpStatus = VdpStatus(23);
-}
-impl VdpStatus {
- pub const VDP_STATUS_HANDLE_DEVICE_MISMATCH: VdpStatus = VdpStatus(24);
-}
-impl VdpStatus {
- pub const VDP_STATUS_ERROR: VdpStatus = VdpStatus(25);
-}
-#[repr(transparent)]
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
-pub struct VdpStatus(pub ::core::ffi::c_uint);
-pub type VdpDevice = u32;
-pub type VdpVideoSurface = u32;
-pub type VdpOutputSurface = u32;
-pub type VdpFuncId = u32;
-pub type VdpGetProcAddress = ::core::option::Option<
- unsafe extern "system" fn(
- device: VdpDevice,
- function_id: VdpFuncId,
- function_pointer: *mut *mut ::core::ffi::c_void,
- ) -> VdpStatus,
->;
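// Illustrative sketch, not part of the generated bindings: driving the
// VdpGetProcAddress callback above. `get_proc_address` and `device` are assumed
// to come from the application's VDPAU setup (for example, what it passes to
// cuVDPAUCtxCreate) and are hypothetical here.
unsafe fn vdp_query_proc(
    get_proc_address: VdpGetProcAddress,
    device: VdpDevice,
    function_id: VdpFuncId,
) -> Option<*mut ::core::ffi::c_void> {
    // The callback is optional; bail out if the application supplied none.
    let get_proc_address = get_proc_address?;
    let mut function_pointer = ::core::ptr::null_mut();
    if get_proc_address(device, function_id, &mut function_pointer)
        == VdpStatus::VDP_STATUS_OK
    {
        Some(function_pointer)
    } else {
        None
    }
}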
-impl CUerror {
- pub const INVALID_VALUE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(1)
- });
- pub const OUT_OF_MEMORY: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(2)
- });
- pub const NOT_INITIALIZED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(3)
- });
- pub const DEINITIALIZED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(4)
- });
- pub const PROFILER_DISABLED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(5)
- });
- pub const PROFILER_NOT_INITIALIZED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(6)
- });
- pub const PROFILER_ALREADY_STARTED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(7)
- });
- pub const PROFILER_ALREADY_STOPPED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(8)
- });
- pub const STUB_LIBRARY: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(34)
- });
- pub const DEVICE_UNAVAILABLE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(46)
- });
- pub const NO_DEVICE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(100)
- });
- pub const INVALID_DEVICE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(101)
- });
- pub const DEVICE_NOT_LICENSED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(102)
- });
- pub const INVALID_IMAGE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(200)
- });
- pub const INVALID_CONTEXT: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(201)
- });
- pub const CONTEXT_ALREADY_CURRENT: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(202)
- });
- pub const MAP_FAILED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(205)
- });
- pub const UNMAP_FAILED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(206)
- });
- pub const ARRAY_IS_MAPPED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(207)
- });
- pub const ALREADY_MAPPED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(208)
- });
- pub const NO_BINARY_FOR_GPU: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(209)
- });
- pub const ALREADY_ACQUIRED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(210)
- });
- pub const NOT_MAPPED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(211)
- });
- pub const NOT_MAPPED_AS_ARRAY: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(212)
- });
- pub const NOT_MAPPED_AS_POINTER: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(213)
- });
- pub const ECC_UNCORRECTABLE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(214)
- });
- pub const UNSUPPORTED_LIMIT: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(215)
- });
- pub const CONTEXT_ALREADY_IN_USE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(216)
- });
- pub const PEER_ACCESS_UNSUPPORTED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(217)
- });
- pub const INVALID_PTX: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(218)
- });
- pub const INVALID_GRAPHICS_CONTEXT: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(219)
- });
- pub const NVLINK_UNCORRECTABLE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(220)
- });
- pub const JIT_COMPILER_NOT_FOUND: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(221)
- });
- pub const UNSUPPORTED_PTX_VERSION: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(222)
- });
- pub const JIT_COMPILATION_DISABLED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(223)
- });
- pub const UNSUPPORTED_EXEC_AFFINITY: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(224)
- });
- pub const UNSUPPORTED_DEVSIDE_SYNC: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(225)
- });
- pub const INVALID_SOURCE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(300)
- });
- pub const FILE_NOT_FOUND: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(301)
- });
- pub const SHARED_OBJECT_SYMBOL_NOT_FOUND: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(302)
- });
- pub const SHARED_OBJECT_INIT_FAILED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(303)
- });
- pub const OPERATING_SYSTEM: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(304)
- });
- pub const INVALID_HANDLE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(400)
- });
- pub const ILLEGAL_STATE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(401)
- });
- pub const LOSSY_QUERY: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(402)
- });
- pub const NOT_FOUND: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(500)
- });
- pub const NOT_READY: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(600)
- });
- pub const ILLEGAL_ADDRESS: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(700)
- });
- pub const LAUNCH_OUT_OF_RESOURCES: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(701)
- });
- pub const LAUNCH_TIMEOUT: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(702)
- });
- pub const LAUNCH_INCOMPATIBLE_TEXTURING: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(703)
- });
- pub const PEER_ACCESS_ALREADY_ENABLED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(704)
- });
- pub const PEER_ACCESS_NOT_ENABLED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(705)
- });
- pub const PRIMARY_CONTEXT_ACTIVE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(708)
- });
- pub const CONTEXT_IS_DESTROYED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(709)
- });
- pub const ASSERT: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(710)
- });
- pub const TOO_MANY_PEERS: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(711)
- });
- pub const HOST_MEMORY_ALREADY_REGISTERED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(712)
- });
- pub const HOST_MEMORY_NOT_REGISTERED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(713)
- });
- pub const HARDWARE_STACK_ERROR: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(714)
- });
- pub const ILLEGAL_INSTRUCTION: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(715)
- });
- pub const MISALIGNED_ADDRESS: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(716)
- });
- pub const INVALID_ADDRESS_SPACE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(717)
- });
- pub const INVALID_PC: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(718)
- });
- pub const LAUNCH_FAILED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(719)
- });
- pub const COOPERATIVE_LAUNCH_TOO_LARGE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(720)
- });
- pub const NOT_PERMITTED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(800)
- });
- pub const NOT_SUPPORTED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(801)
- });
- pub const SYSTEM_NOT_READY: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(802)
- });
- pub const SYSTEM_DRIVER_MISMATCH: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(803)
- });
- pub const COMPAT_NOT_SUPPORTED_ON_DEVICE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(804)
- });
- pub const MPS_CONNECTION_FAILED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(805)
- });
- pub const MPS_RPC_FAILURE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(806)
- });
- pub const MPS_SERVER_NOT_READY: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(807)
- });
- pub const MPS_MAX_CLIENTS_REACHED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(808)
- });
- pub const MPS_MAX_CONNECTIONS_REACHED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(809)
- });
- pub const MPS_CLIENT_TERMINATED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(810)
- });
- pub const CDP_NOT_SUPPORTED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(811)
- });
- pub const CDP_VERSION_MISMATCH: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(812)
- });
- pub const STREAM_CAPTURE_UNSUPPORTED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(900)
- });
- pub const STREAM_CAPTURE_INVALIDATED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(901)
- });
- pub const STREAM_CAPTURE_MERGE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(902)
- });
- pub const STREAM_CAPTURE_UNMATCHED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(903)
- });
- pub const STREAM_CAPTURE_UNJOINED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(904)
- });
- pub const STREAM_CAPTURE_ISOLATION: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(905)
- });
- pub const STREAM_CAPTURE_IMPLICIT: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(906)
- });
- pub const CAPTURED_EVENT: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(907)
- });
- pub const STREAM_CAPTURE_WRONG_THREAD: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(908)
- });
- pub const TIMEOUT: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(909)
- });
- pub const GRAPH_EXEC_UPDATE_FAILURE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(910)
- });
- pub const EXTERNAL_DEVICE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(911)
- });
- pub const INVALID_CLUSTER_SIZE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(912)
- });
- pub const FUNCTION_NOT_LOADED: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(913)
- });
- pub const INVALID_RESOURCE_TYPE: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(914)
- });
- pub const INVALID_RESOURCE_CONFIGURATION: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(915)
- });
- pub const UNKNOWN: CUerror = CUerror(unsafe {
- ::core::num::NonZeroU32::new_unchecked(999)
- });
-}
-#[repr(transparent)]
-#[derive(Debug, Hash, Copy, Clone, PartialEq, Eq)]
-pub struct CUerror(pub ::core::num::NonZeroU32);
-pub trait CUresultConsts {
- const SUCCESS: CUresult = CUresult::Ok(());
- const ERROR_INVALID_VALUE: CUresult = CUresult::Err(CUerror::INVALID_VALUE);
- const ERROR_OUT_OF_MEMORY: CUresult = CUresult::Err(CUerror::OUT_OF_MEMORY);
- const ERROR_NOT_INITIALIZED: CUresult = CUresult::Err(CUerror::NOT_INITIALIZED);
- const ERROR_DEINITIALIZED: CUresult = CUresult::Err(CUerror::DEINITIALIZED);
- const ERROR_PROFILER_DISABLED: CUresult = CUresult::Err(CUerror::PROFILER_DISABLED);
- const ERROR_PROFILER_NOT_INITIALIZED: CUresult = CUresult::Err(
- CUerror::PROFILER_NOT_INITIALIZED,
- );
- const ERROR_PROFILER_ALREADY_STARTED: CUresult = CUresult::Err(
- CUerror::PROFILER_ALREADY_STARTED,
- );
- const ERROR_PROFILER_ALREADY_STOPPED: CUresult = CUresult::Err(
- CUerror::PROFILER_ALREADY_STOPPED,
- );
- const ERROR_STUB_LIBRARY: CUresult = CUresult::Err(CUerror::STUB_LIBRARY);
- const ERROR_DEVICE_UNAVAILABLE: CUresult = CUresult::Err(
- CUerror::DEVICE_UNAVAILABLE,
- );
- const ERROR_NO_DEVICE: CUresult = CUresult::Err(CUerror::NO_DEVICE);
- const ERROR_INVALID_DEVICE: CUresult = CUresult::Err(CUerror::INVALID_DEVICE);
- const ERROR_DEVICE_NOT_LICENSED: CUresult = CUresult::Err(
- CUerror::DEVICE_NOT_LICENSED,
- );
- const ERROR_INVALID_IMAGE: CUresult = CUresult::Err(CUerror::INVALID_IMAGE);
- const ERROR_INVALID_CONTEXT: CUresult = CUresult::Err(CUerror::INVALID_CONTEXT);
- const ERROR_CONTEXT_ALREADY_CURRENT: CUresult = CUresult::Err(
- CUerror::CONTEXT_ALREADY_CURRENT,
- );
- const ERROR_MAP_FAILED: CUresult = CUresult::Err(CUerror::MAP_FAILED);
- const ERROR_UNMAP_FAILED: CUresult = CUresult::Err(CUerror::UNMAP_FAILED);
- const ERROR_ARRAY_IS_MAPPED: CUresult = CUresult::Err(CUerror::ARRAY_IS_MAPPED);
- const ERROR_ALREADY_MAPPED: CUresult = CUresult::Err(CUerror::ALREADY_MAPPED);
- const ERROR_NO_BINARY_FOR_GPU: CUresult = CUresult::Err(CUerror::NO_BINARY_FOR_GPU);
- const ERROR_ALREADY_ACQUIRED: CUresult = CUresult::Err(CUerror::ALREADY_ACQUIRED);
- const ERROR_NOT_MAPPED: CUresult = CUresult::Err(CUerror::NOT_MAPPED);
- const ERROR_NOT_MAPPED_AS_ARRAY: CUresult = CUresult::Err(
- CUerror::NOT_MAPPED_AS_ARRAY,
- );
- const ERROR_NOT_MAPPED_AS_POINTER: CUresult = CUresult::Err(
- CUerror::NOT_MAPPED_AS_POINTER,
- );
- const ERROR_ECC_UNCORRECTABLE: CUresult = CUresult::Err(CUerror::ECC_UNCORRECTABLE);
- const ERROR_UNSUPPORTED_LIMIT: CUresult = CUresult::Err(CUerror::UNSUPPORTED_LIMIT);
- const ERROR_CONTEXT_ALREADY_IN_USE: CUresult = CUresult::Err(
- CUerror::CONTEXT_ALREADY_IN_USE,
- );
- const ERROR_PEER_ACCESS_UNSUPPORTED: CUresult = CUresult::Err(
- CUerror::PEER_ACCESS_UNSUPPORTED,
- );
- const ERROR_INVALID_PTX: CUresult = CUresult::Err(CUerror::INVALID_PTX);
- const ERROR_INVALID_GRAPHICS_CONTEXT: CUresult = CUresult::Err(
- CUerror::INVALID_GRAPHICS_CONTEXT,
- );
- const ERROR_NVLINK_UNCORRECTABLE: CUresult = CUresult::Err(
- CUerror::NVLINK_UNCORRECTABLE,
- );
- const ERROR_JIT_COMPILER_NOT_FOUND: CUresult = CUresult::Err(
- CUerror::JIT_COMPILER_NOT_FOUND,
- );
- const ERROR_UNSUPPORTED_PTX_VERSION: CUresult = CUresult::Err(
- CUerror::UNSUPPORTED_PTX_VERSION,
- );
- const ERROR_JIT_COMPILATION_DISABLED: CUresult = CUresult::Err(
- CUerror::JIT_COMPILATION_DISABLED,
- );
- const ERROR_UNSUPPORTED_EXEC_AFFINITY: CUresult = CUresult::Err(
- CUerror::UNSUPPORTED_EXEC_AFFINITY,
- );
- const ERROR_UNSUPPORTED_DEVSIDE_SYNC: CUresult = CUresult::Err(
- CUerror::UNSUPPORTED_DEVSIDE_SYNC,
- );
- const ERROR_INVALID_SOURCE: CUresult = CUresult::Err(CUerror::INVALID_SOURCE);
- const ERROR_FILE_NOT_FOUND: CUresult = CUresult::Err(CUerror::FILE_NOT_FOUND);
- const ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND: CUresult = CUresult::Err(
- CUerror::SHARED_OBJECT_SYMBOL_NOT_FOUND,
- );
- const ERROR_SHARED_OBJECT_INIT_FAILED: CUresult = CUresult::Err(
- CUerror::SHARED_OBJECT_INIT_FAILED,
- );
- const ERROR_OPERATING_SYSTEM: CUresult = CUresult::Err(CUerror::OPERATING_SYSTEM);
- const ERROR_INVALID_HANDLE: CUresult = CUresult::Err(CUerror::INVALID_HANDLE);
- const ERROR_ILLEGAL_STATE: CUresult = CUresult::Err(CUerror::ILLEGAL_STATE);
- const ERROR_LOSSY_QUERY: CUresult = CUresult::Err(CUerror::LOSSY_QUERY);
- const ERROR_NOT_FOUND: CUresult = CUresult::Err(CUerror::NOT_FOUND);
- const ERROR_NOT_READY: CUresult = CUresult::Err(CUerror::NOT_READY);
- const ERROR_ILLEGAL_ADDRESS: CUresult = CUresult::Err(CUerror::ILLEGAL_ADDRESS);
- const ERROR_LAUNCH_OUT_OF_RESOURCES: CUresult = CUresult::Err(
- CUerror::LAUNCH_OUT_OF_RESOURCES,
- );
- const ERROR_LAUNCH_TIMEOUT: CUresult = CUresult::Err(CUerror::LAUNCH_TIMEOUT);
- const ERROR_LAUNCH_INCOMPATIBLE_TEXTURING: CUresult = CUresult::Err(
- CUerror::LAUNCH_INCOMPATIBLE_TEXTURING,
- );
- const ERROR_PEER_ACCESS_ALREADY_ENABLED: CUresult = CUresult::Err(
- CUerror::PEER_ACCESS_ALREADY_ENABLED,
- );
- const ERROR_PEER_ACCESS_NOT_ENABLED: CUresult = CUresult::Err(
- CUerror::PEER_ACCESS_NOT_ENABLED,
- );
- const ERROR_PRIMARY_CONTEXT_ACTIVE: CUresult = CUresult::Err(
- CUerror::PRIMARY_CONTEXT_ACTIVE,
- );
- const ERROR_CONTEXT_IS_DESTROYED: CUresult = CUresult::Err(
- CUerror::CONTEXT_IS_DESTROYED,
- );
- const ERROR_ASSERT: CUresult = CUresult::Err(CUerror::ASSERT);
- const ERROR_TOO_MANY_PEERS: CUresult = CUresult::Err(CUerror::TOO_MANY_PEERS);
- const ERROR_HOST_MEMORY_ALREADY_REGISTERED: CUresult = CUresult::Err(
- CUerror::HOST_MEMORY_ALREADY_REGISTERED,
- );
- const ERROR_HOST_MEMORY_NOT_REGISTERED: CUresult = CUresult::Err(
- CUerror::HOST_MEMORY_NOT_REGISTERED,
- );
- const ERROR_HARDWARE_STACK_ERROR: CUresult = CUresult::Err(
- CUerror::HARDWARE_STACK_ERROR,
- );
- const ERROR_ILLEGAL_INSTRUCTION: CUresult = CUresult::Err(
- CUerror::ILLEGAL_INSTRUCTION,
- );
- const ERROR_MISALIGNED_ADDRESS: CUresult = CUresult::Err(
- CUerror::MISALIGNED_ADDRESS,
- );
- const ERROR_INVALID_ADDRESS_SPACE: CUresult = CUresult::Err(
- CUerror::INVALID_ADDRESS_SPACE,
- );
- const ERROR_INVALID_PC: CUresult = CUresult::Err(CUerror::INVALID_PC);
- const ERROR_LAUNCH_FAILED: CUresult = CUresult::Err(CUerror::LAUNCH_FAILED);
- const ERROR_COOPERATIVE_LAUNCH_TOO_LARGE: CUresult = CUresult::Err(
- CUerror::COOPERATIVE_LAUNCH_TOO_LARGE,
- );
- const ERROR_NOT_PERMITTED: CUresult = CUresult::Err(CUerror::NOT_PERMITTED);
- const ERROR_NOT_SUPPORTED: CUresult = CUresult::Err(CUerror::NOT_SUPPORTED);
- const ERROR_SYSTEM_NOT_READY: CUresult = CUresult::Err(CUerror::SYSTEM_NOT_READY);
- const ERROR_SYSTEM_DRIVER_MISMATCH: CUresult = CUresult::Err(
- CUerror::SYSTEM_DRIVER_MISMATCH,
- );
- const ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE: CUresult = CUresult::Err(
- CUerror::COMPAT_NOT_SUPPORTED_ON_DEVICE,
- );
- const ERROR_MPS_CONNECTION_FAILED: CUresult = CUresult::Err(
- CUerror::MPS_CONNECTION_FAILED,
- );
- const ERROR_MPS_RPC_FAILURE: CUresult = CUresult::Err(CUerror::MPS_RPC_FAILURE);
- const ERROR_MPS_SERVER_NOT_READY: CUresult = CUresult::Err(
- CUerror::MPS_SERVER_NOT_READY,
- );
- const ERROR_MPS_MAX_CLIENTS_REACHED: CUresult = CUresult::Err(
- CUerror::MPS_MAX_CLIENTS_REACHED,
- );
- const ERROR_MPS_MAX_CONNECTIONS_REACHED: CUresult = CUresult::Err(
- CUerror::MPS_MAX_CONNECTIONS_REACHED,
- );
- const ERROR_MPS_CLIENT_TERMINATED: CUresult = CUresult::Err(
- CUerror::MPS_CLIENT_TERMINATED,
- );
- const ERROR_CDP_NOT_SUPPORTED: CUresult = CUresult::Err(CUerror::CDP_NOT_SUPPORTED);
- const ERROR_CDP_VERSION_MISMATCH: CUresult = CUresult::Err(
- CUerror::CDP_VERSION_MISMATCH,
- );
- const ERROR_STREAM_CAPTURE_UNSUPPORTED: CUresult = CUresult::Err(
- CUerror::STREAM_CAPTURE_UNSUPPORTED,
- );
- const ERROR_STREAM_CAPTURE_INVALIDATED: CUresult = CUresult::Err(
- CUerror::STREAM_CAPTURE_INVALIDATED,
- );
- const ERROR_STREAM_CAPTURE_MERGE: CUresult = CUresult::Err(
- CUerror::STREAM_CAPTURE_MERGE,
- );
- const ERROR_STREAM_CAPTURE_UNMATCHED: CUresult = CUresult::Err(
- CUerror::STREAM_CAPTURE_UNMATCHED,
- );
- const ERROR_STREAM_CAPTURE_UNJOINED: CUresult = CUresult::Err(
- CUerror::STREAM_CAPTURE_UNJOINED,
- );
- const ERROR_STREAM_CAPTURE_ISOLATION: CUresult = CUresult::Err(
- CUerror::STREAM_CAPTURE_ISOLATION,
- );
- const ERROR_STREAM_CAPTURE_IMPLICIT: CUresult = CUresult::Err(
- CUerror::STREAM_CAPTURE_IMPLICIT,
- );
- const ERROR_CAPTURED_EVENT: CUresult = CUresult::Err(CUerror::CAPTURED_EVENT);
- const ERROR_STREAM_CAPTURE_WRONG_THREAD: CUresult = CUresult::Err(
- CUerror::STREAM_CAPTURE_WRONG_THREAD,
- );
- const ERROR_TIMEOUT: CUresult = CUresult::Err(CUerror::TIMEOUT);
- const ERROR_GRAPH_EXEC_UPDATE_FAILURE: CUresult = CUresult::Err(
- CUerror::GRAPH_EXEC_UPDATE_FAILURE,
- );
- const ERROR_EXTERNAL_DEVICE: CUresult = CUresult::Err(CUerror::EXTERNAL_DEVICE);
- const ERROR_INVALID_CLUSTER_SIZE: CUresult = CUresult::Err(
- CUerror::INVALID_CLUSTER_SIZE,
- );
- const ERROR_FUNCTION_NOT_LOADED: CUresult = CUresult::Err(
- CUerror::FUNCTION_NOT_LOADED,
- );
- const ERROR_INVALID_RESOURCE_TYPE: CUresult = CUresult::Err(
- CUerror::INVALID_RESOURCE_TYPE,
- );
- const ERROR_INVALID_RESOURCE_CONFIGURATION: CUresult = CUresult::Err(
- CUerror::INVALID_RESOURCE_CONFIGURATION,
- );
- const ERROR_UNKNOWN: CUresult = CUresult::Err(CUerror::UNKNOWN);
-}
-impl CUresultConsts for CUresult {}
-#[must_use]
-pub type CUresult = ::core::result::Result<(), CUerror>;
-const _: fn() = || {
- let _ = std::mem::transmute::<CUresult, u32>;
-};
-impl From<hip_runtime_sys::hipErrorCode_t> for CUerror {
- fn from(error: hip_runtime_sys::hipErrorCode_t) -> Self {
- Self(error.0)
- }
-}
-unsafe impl Send for CUdeviceptr {}
-unsafe impl Sync for CUdeviceptr {}
-unsafe impl Send for CUcontext {}
-unsafe impl Sync for CUcontext {}
-unsafe impl Send for CUstream {}
-unsafe impl Sync for CUstream {}
-unsafe impl Send for CUmodule {}
-unsafe impl Sync for CUmodule {}
-unsafe impl Send for CUfunction {}
-unsafe impl Sync for CUfunction {}
-unsafe impl Send for CUlibrary {}
-unsafe impl Sync for CUlibrary {}
+pub mod cuda;
+pub mod nvml;
\ No newline at end of file
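The removed lib.rs code above defines CUresult as Result<(), CUerror> with CUerror wrapping NonZeroU32; the const item that names std::mem::transmute::<CUresult, u32> only compiles when both types have the same size, which holds in practice because the zero value left unused by NonZeroU32 lets the compiler represent Ok(()) without a separate discriminant. A minimal standalone sketch of that pattern, reusing names from the removed code (not part of this commit):

use std::num::NonZeroU32;

#[repr(transparent)]
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct CUerror(pub NonZeroU32);

impl CUerror {
    // 600 matches the NOT_READY constant in the removed block above.
    pub const NOT_READY: CUerror = CUerror(unsafe { NonZeroU32::new_unchecked(600) });
}

pub type CUresult = Result<(), CUerror>;

// Same compile-time size check as the removed code: transmute can only be
// instantiated for types of identical size.
const _: fn() = || {
    let _ = std::mem::transmute::<CUresult, u32>;
};

fn main() {
    assert_eq!(std::mem::size_of::<CUresult>(), std::mem::size_of::<u32>());
    let err: CUresult = Err(CUerror::NOT_READY);
    assert_eq!(err.unwrap_err().0.get(), 600);
}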
diff --git a/cuda_types/src/nvml.rs b/cuda_types/src/nvml.rs
new file mode 100644
index 0000000..525395d
--- /dev/null
+++ b/cuda_types/src/nvml.rs
@@ -0,0 +1,4185 @@
+// Generated automatically by zluda_bindgen
+// DO NOT EDIT MANUALLY
+#![allow(warnings)]
+pub const NVML_API_VERSION: u32 = 12;
+pub const NVML_API_VERSION_STR: &[u8; 3] = b"12\0";
+pub const NVML_VALUE_NOT_AVAILABLE: i32 = -1;
+pub const NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE: u32 = 32;
+pub const NVML_DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE: u32 = 16;
+pub const NVML_DEVICE_PCI_BUS_ID_LEGACY_FMT: &[u8; 17] = b"%04X:%02X:%02X.0\0";
+pub const NVML_DEVICE_PCI_BUS_ID_FMT: &[u8; 17] = b"%08X:%02X:%02X.0\0";
+pub const NVML_NVLINK_MAX_LINKS: u32 = 18;
+pub const NVML_MAX_PHYSICAL_BRIDGE: u32 = 128;
+pub const NVML_MAX_THERMAL_SENSORS_PER_GPU: u32 = 3;
+pub const NVML_MAX_GPU_PERF_PSTATES: u32 = 16;
+pub const NVML_GRID_LICENSE_EXPIRY_NOT_AVAILABLE: u32 = 0;
+pub const NVML_GRID_LICENSE_EXPIRY_INVALID: u32 = 1;
+pub const NVML_GRID_LICENSE_EXPIRY_VALID: u32 = 2;
+pub const NVML_GRID_LICENSE_EXPIRY_NOT_APPLICABLE: u32 = 3;
+pub const NVML_GRID_LICENSE_EXPIRY_PERMANENT: u32 = 4;
+pub const NVML_GRID_LICENSE_BUFFER_SIZE: u32 = 128;
+pub const NVML_VGPU_NAME_BUFFER_SIZE: u32 = 64;
+pub const NVML_GRID_LICENSE_FEATURE_MAX_COUNT: u32 = 3;
+pub const NVML_INVALID_VGPU_PLACEMENT_ID: u32 = 65535;
+pub const NVML_VGPU_VIRTUALIZATION_CAP_MIGRATION_NO: u32 = 0;
+pub const NVML_VGPU_VIRTUALIZATION_CAP_MIGRATION_YES: u32 = 1;
+pub const NVML_VGPU_PGPU_VIRTUALIZATION_CAP_MIGRATION_NO: u32 = 0;
+pub const NVML_VGPU_PGPU_VIRTUALIZATION_CAP_MIGRATION_YES: u32 = 1;
+pub const NVML_VGPU_SCHEDULER_POLICY_UNKNOWN: u32 = 0;
+pub const NVML_VGPU_SCHEDULER_POLICY_BEST_EFFORT: u32 = 1;
+pub const NVML_VGPU_SCHEDULER_POLICY_EQUAL_SHARE: u32 = 2;
+pub const NVML_VGPU_SCHEDULER_POLICY_FIXED_SHARE: u32 = 3;
+pub const NVML_SUPPORTED_VGPU_SCHEDULER_POLICY_COUNT: u32 = 3;
+pub const NVML_SCHEDULER_SW_MAX_LOG_ENTRIES: u32 = 200;
+pub const NVML_VGPU_SCHEDULER_ARR_DEFAULT: u32 = 0;
+pub const NVML_VGPU_SCHEDULER_ARR_DISABLE: u32 = 1;
+pub const NVML_VGPU_SCHEDULER_ARR_ENABLE: u32 = 2;
+pub const NVML_GRID_LICENSE_STATE_UNKNOWN: u32 = 0;
+pub const NVML_GRID_LICENSE_STATE_UNINITIALIZED: u32 = 1;
+pub const NVML_GRID_LICENSE_STATE_UNLICENSED_UNRESTRICTED: u32 = 2;
+pub const NVML_GRID_LICENSE_STATE_UNLICENSED_RESTRICTED: u32 = 3;
+pub const NVML_GRID_LICENSE_STATE_UNLICENSED: u32 = 4;
+pub const NVML_GRID_LICENSE_STATE_LICENSED: u32 = 5;
+pub const NVML_GSP_FIRMWARE_VERSION_BUF_SIZE: u32 = 64;
+pub const NVML_DEVICE_ARCH_KEPLER: u32 = 2;
+pub const NVML_DEVICE_ARCH_MAXWELL: u32 = 3;
+pub const NVML_DEVICE_ARCH_PASCAL: u32 = 4;
+pub const NVML_DEVICE_ARCH_VOLTA: u32 = 5;
+pub const NVML_DEVICE_ARCH_TURING: u32 = 6;
+pub const NVML_DEVICE_ARCH_AMPERE: u32 = 7;
+pub const NVML_DEVICE_ARCH_ADA: u32 = 8;
+pub const NVML_DEVICE_ARCH_HOPPER: u32 = 9;
+pub const NVML_DEVICE_ARCH_UNKNOWN: u32 = 4294967295;
+pub const NVML_BUS_TYPE_UNKNOWN: u32 = 0;
+pub const NVML_BUS_TYPE_PCI: u32 = 1;
+pub const NVML_BUS_TYPE_PCIE: u32 = 2;
+pub const NVML_BUS_TYPE_FPCI: u32 = 3;
+pub const NVML_BUS_TYPE_AGP: u32 = 4;
+pub const NVML_FAN_POLICY_TEMPERATURE_CONTINOUS_SW: u32 = 0;
+pub const NVML_FAN_POLICY_MANUAL: u32 = 1;
+pub const NVML_POWER_SOURCE_AC: u32 = 0;
+pub const NVML_POWER_SOURCE_BATTERY: u32 = 1;
+pub const NVML_POWER_SOURCE_UNDERSIZED: u32 = 2;
+pub const NVML_PCIE_LINK_MAX_SPEED_INVALID: u32 = 0;
+pub const NVML_PCIE_LINK_MAX_SPEED_2500MBPS: u32 = 1;
+pub const NVML_PCIE_LINK_MAX_SPEED_5000MBPS: u32 = 2;
+pub const NVML_PCIE_LINK_MAX_SPEED_8000MBPS: u32 = 3;
+pub const NVML_PCIE_LINK_MAX_SPEED_16000MBPS: u32 = 4;
+pub const NVML_PCIE_LINK_MAX_SPEED_32000MBPS: u32 = 5;
+pub const NVML_PCIE_LINK_MAX_SPEED_64000MBPS: u32 = 6;
+pub const NVML_ADAPTIVE_CLOCKING_INFO_STATUS_DISABLED: u32 = 0;
+pub const NVML_ADAPTIVE_CLOCKING_INFO_STATUS_ENABLED: u32 = 1;
+pub const NVML_MAX_GPU_UTILIZATIONS: u32 = 8;
+pub const NVML_FI_DEV_ECC_CURRENT: u32 = 1;
+pub const NVML_FI_DEV_ECC_PENDING: u32 = 2;
+pub const NVML_FI_DEV_ECC_SBE_VOL_TOTAL: u32 = 3;
+pub const NVML_FI_DEV_ECC_DBE_VOL_TOTAL: u32 = 4;
+pub const NVML_FI_DEV_ECC_SBE_AGG_TOTAL: u32 = 5;
+pub const NVML_FI_DEV_ECC_DBE_AGG_TOTAL: u32 = 6;
+pub const NVML_FI_DEV_ECC_SBE_VOL_L1: u32 = 7;
+pub const NVML_FI_DEV_ECC_DBE_VOL_L1: u32 = 8;
+pub const NVML_FI_DEV_ECC_SBE_VOL_L2: u32 = 9;
+pub const NVML_FI_DEV_ECC_DBE_VOL_L2: u32 = 10;
+pub const NVML_FI_DEV_ECC_SBE_VOL_DEV: u32 = 11;
+pub const NVML_FI_DEV_ECC_DBE_VOL_DEV: u32 = 12;
+pub const NVML_FI_DEV_ECC_SBE_VOL_REG: u32 = 13;
+pub const NVML_FI_DEV_ECC_DBE_VOL_REG: u32 = 14;
+pub const NVML_FI_DEV_ECC_SBE_VOL_TEX: u32 = 15;
+pub const NVML_FI_DEV_ECC_DBE_VOL_TEX: u32 = 16;
+pub const NVML_FI_DEV_ECC_DBE_VOL_CBU: u32 = 17;
+pub const NVML_FI_DEV_ECC_SBE_AGG_L1: u32 = 18;
+pub const NVML_FI_DEV_ECC_DBE_AGG_L1: u32 = 19;
+pub const NVML_FI_DEV_ECC_SBE_AGG_L2: u32 = 20;
+pub const NVML_FI_DEV_ECC_DBE_AGG_L2: u32 = 21;
+pub const NVML_FI_DEV_ECC_SBE_AGG_DEV: u32 = 22;
+pub const NVML_FI_DEV_ECC_DBE_AGG_DEV: u32 = 23;
+pub const NVML_FI_DEV_ECC_SBE_AGG_REG: u32 = 24;
+pub const NVML_FI_DEV_ECC_DBE_AGG_REG: u32 = 25;
+pub const NVML_FI_DEV_ECC_SBE_AGG_TEX: u32 = 26;
+pub const NVML_FI_DEV_ECC_DBE_AGG_TEX: u32 = 27;
+pub const NVML_FI_DEV_ECC_DBE_AGG_CBU: u32 = 28;
+pub const NVML_FI_DEV_RETIRED_SBE: u32 = 29;
+pub const NVML_FI_DEV_RETIRED_DBE: u32 = 30;
+pub const NVML_FI_DEV_RETIRED_PENDING: u32 = 31;
+pub const NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L0: u32 = 32;
+pub const NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L1: u32 = 33;
+pub const NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L2: u32 = 34;
+pub const NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L3: u32 = 35;
+pub const NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L4: u32 = 36;
+pub const NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L5: u32 = 37;
+pub const NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL: u32 = 38;
+pub const NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L0: u32 = 39;
+pub const NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L1: u32 = 40;
+pub const NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L2: u32 = 41;
+pub const NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L3: u32 = 42;
+pub const NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L4: u32 = 43;
+pub const NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L5: u32 = 44;
+pub const NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL: u32 = 45;
+pub const NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L0: u32 = 46;
+pub const NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L1: u32 = 47;
+pub const NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L2: u32 = 48;
+pub const NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L3: u32 = 49;
+pub const NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L4: u32 = 50;
+pub const NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L5: u32 = 51;
+pub const NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL: u32 = 52;
+pub const NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L0: u32 = 53;
+pub const NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L1: u32 = 54;
+pub const NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L2: u32 = 55;
+pub const NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L3: u32 = 56;
+pub const NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L4: u32 = 57;
+pub const NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L5: u32 = 58;
+pub const NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL: u32 = 59;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L0: u32 = 60;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L1: u32 = 61;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L2: u32 = 62;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L3: u32 = 63;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L4: u32 = 64;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L5: u32 = 65;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C0_TOTAL: u32 = 66;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L0: u32 = 67;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L1: u32 = 68;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L2: u32 = 69;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L3: u32 = 70;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L4: u32 = 71;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L5: u32 = 72;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C1_TOTAL: u32 = 73;
+pub const NVML_FI_DEV_PERF_POLICY_POWER: u32 = 74;
+pub const NVML_FI_DEV_PERF_POLICY_THERMAL: u32 = 75;
+pub const NVML_FI_DEV_PERF_POLICY_SYNC_BOOST: u32 = 76;
+pub const NVML_FI_DEV_PERF_POLICY_BOARD_LIMIT: u32 = 77;
+pub const NVML_FI_DEV_PERF_POLICY_LOW_UTILIZATION: u32 = 78;
+pub const NVML_FI_DEV_PERF_POLICY_RELIABILITY: u32 = 79;
+pub const NVML_FI_DEV_PERF_POLICY_TOTAL_APP_CLOCKS: u32 = 80;
+pub const NVML_FI_DEV_PERF_POLICY_TOTAL_BASE_CLOCKS: u32 = 81;
+pub const NVML_FI_DEV_MEMORY_TEMP: u32 = 82;
+pub const NVML_FI_DEV_TOTAL_ENERGY_CONSUMPTION: u32 = 83;
+pub const NVML_FI_DEV_NVLINK_SPEED_MBPS_L0: u32 = 84;
+pub const NVML_FI_DEV_NVLINK_SPEED_MBPS_L1: u32 = 85;
+pub const NVML_FI_DEV_NVLINK_SPEED_MBPS_L2: u32 = 86;
+pub const NVML_FI_DEV_NVLINK_SPEED_MBPS_L3: u32 = 87;
+pub const NVML_FI_DEV_NVLINK_SPEED_MBPS_L4: u32 = 88;
+pub const NVML_FI_DEV_NVLINK_SPEED_MBPS_L5: u32 = 89;
+pub const NVML_FI_DEV_NVLINK_SPEED_MBPS_COMMON: u32 = 90;
+pub const NVML_FI_DEV_NVLINK_LINK_COUNT: u32 = 91;
+pub const NVML_FI_DEV_RETIRED_PENDING_SBE: u32 = 92;
+pub const NVML_FI_DEV_RETIRED_PENDING_DBE: u32 = 93;
+pub const NVML_FI_DEV_PCIE_REPLAY_COUNTER: u32 = 94;
+pub const NVML_FI_DEV_PCIE_REPLAY_ROLLOVER_COUNTER: u32 = 95;
+pub const NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L6: u32 = 96;
+pub const NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L7: u32 = 97;
+pub const NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L8: u32 = 98;
+pub const NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L9: u32 = 99;
+pub const NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L10: u32 = 100;
+pub const NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L11: u32 = 101;
+pub const NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L6: u32 = 102;
+pub const NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L7: u32 = 103;
+pub const NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L8: u32 = 104;
+pub const NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L9: u32 = 105;
+pub const NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L10: u32 = 106;
+pub const NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L11: u32 = 107;
+pub const NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L6: u32 = 108;
+pub const NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L7: u32 = 109;
+pub const NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L8: u32 = 110;
+pub const NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L9: u32 = 111;
+pub const NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L10: u32 = 112;
+pub const NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L11: u32 = 113;
+pub const NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L6: u32 = 114;
+pub const NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L7: u32 = 115;
+pub const NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L8: u32 = 116;
+pub const NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L9: u32 = 117;
+pub const NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L10: u32 = 118;
+pub const NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L11: u32 = 119;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L6: u32 = 120;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L7: u32 = 121;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L8: u32 = 122;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L9: u32 = 123;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L10: u32 = 124;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L11: u32 = 125;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L6: u32 = 126;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L7: u32 = 127;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L8: u32 = 128;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L9: u32 = 129;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L10: u32 = 130;
+pub const NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L11: u32 = 131;
+pub const NVML_FI_DEV_NVLINK_SPEED_MBPS_L6: u32 = 132;
+pub const NVML_FI_DEV_NVLINK_SPEED_MBPS_L7: u32 = 133;
+pub const NVML_FI_DEV_NVLINK_SPEED_MBPS_L8: u32 = 134;
+pub const NVML_FI_DEV_NVLINK_SPEED_MBPS_L9: u32 = 135;
+pub const NVML_FI_DEV_NVLINK_SPEED_MBPS_L10: u32 = 136;
+pub const NVML_FI_DEV_NVLINK_SPEED_MBPS_L11: u32 = 137;
+pub const NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_TX: u32 = 138;
+pub const NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_RX: u32 = 139;
+pub const NVML_FI_DEV_NVLINK_THROUGHPUT_RAW_TX: u32 = 140;
+pub const NVML_FI_DEV_NVLINK_THROUGHPUT_RAW_RX: u32 = 141;
+pub const NVML_FI_DEV_REMAPPED_COR: u32 = 142;
+pub const NVML_FI_DEV_REMAPPED_UNC: u32 = 143;
+pub const NVML_FI_DEV_REMAPPED_PENDING: u32 = 144;
+pub const NVML_FI_DEV_REMAPPED_FAILURE: u32 = 145;
+pub const NVML_FI_DEV_NVLINK_REMOTE_NVLINK_ID: u32 = 146;
+pub const NVML_FI_DEV_NVSWITCH_CONNECTED_LINK_COUNT: u32 = 147;
+pub const NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L0: u32 = 148;
+pub const NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L1: u32 = 149;
+pub const NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L2: u32 = 150;
+pub const NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L3: u32 = 151;
+pub const NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L4: u32 = 152;
+pub const NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L5: u32 = 153;
+pub const NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L6: u32 = 154;
+pub const NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L7: u32 = 155;
+pub const NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L8: u32 = 156;
+pub const NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L9: u32 = 157;
+pub const NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L10: u32 = 158;
+pub const NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L11: u32 = 159;
+pub const NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_TOTAL: u32 = 160;
+pub const NVML_FI_DEV_NVLINK_ERROR_DL_REPLAY: u32 = 161;
+pub const NVML_FI_DEV_NVLINK_ERROR_DL_RECOVERY: u32 = 162;
+pub const NVML_FI_DEV_NVLINK_ERROR_DL_CRC: u32 = 163;
+pub const NVML_FI_DEV_NVLINK_GET_SPEED: u32 = 164;
+pub const NVML_FI_DEV_NVLINK_GET_STATE: u32 = 165;
+pub const NVML_FI_DEV_NVLINK_GET_VERSION: u32 = 166;
+pub const NVML_FI_DEV_NVLINK_GET_POWER_STATE: u32 = 167;
+pub const NVML_FI_DEV_NVLINK_GET_POWER_THRESHOLD: u32 = 168;
+pub const NVML_FI_DEV_PCIE_L0_TO_RECOVERY_COUNTER: u32 = 169;
+pub const NVML_FI_DEV_C2C_LINK_COUNT: u32 = 170;
+pub const NVML_FI_DEV_C2C_LINK_GET_STATUS: u32 = 171;
+pub const NVML_FI_DEV_C2C_LINK_GET_MAX_BW: u32 = 172;
+pub const NVML_FI_DEV_PCIE_COUNT_CORRECTABLE_ERRORS: u32 = 173;
+pub const NVML_FI_DEV_PCIE_COUNT_NAKS_RECEIVED: u32 = 174;
+pub const NVML_FI_DEV_PCIE_COUNT_RECEIVER_ERROR: u32 = 175;
+pub const NVML_FI_DEV_PCIE_COUNT_BAD_TLP: u32 = 176;
+pub const NVML_FI_DEV_PCIE_COUNT_NAKS_SENT: u32 = 177;
+pub const NVML_FI_DEV_PCIE_COUNT_BAD_DLLP: u32 = 178;
+pub const NVML_FI_DEV_PCIE_COUNT_NON_FATAL_ERROR: u32 = 179;
+pub const NVML_FI_DEV_PCIE_COUNT_FATAL_ERROR: u32 = 180;
+pub const NVML_FI_DEV_PCIE_COUNT_UNSUPPORTED_REQ: u32 = 181;
+pub const NVML_FI_DEV_PCIE_COUNT_LCRC_ERROR: u32 = 182;
+pub const NVML_FI_DEV_PCIE_COUNT_LANE_ERROR: u32 = 183;
+pub const NVML_FI_DEV_IS_RESETLESS_MIG_SUPPORTED: u32 = 184;
+pub const NVML_FI_DEV_POWER_AVERAGE: u32 = 185;
+pub const NVML_FI_DEV_POWER_INSTANT: u32 = 186;
+pub const NVML_FI_DEV_POWER_MIN_LIMIT: u32 = 187;
+pub const NVML_FI_DEV_POWER_MAX_LIMIT: u32 = 188;
+pub const NVML_FI_DEV_POWER_DEFAULT_LIMIT: u32 = 189;
+pub const NVML_FI_DEV_POWER_CURRENT_LIMIT: u32 = 190;
+pub const NVML_FI_DEV_ENERGY: u32 = 191;
+pub const NVML_FI_DEV_POWER_REQUESTED_LIMIT: u32 = 192;
+pub const NVML_FI_DEV_TEMPERATURE_SHUTDOWN_TLIMIT: u32 = 193;
+pub const NVML_FI_DEV_TEMPERATURE_SLOWDOWN_TLIMIT: u32 = 194;
+pub const NVML_FI_DEV_TEMPERATURE_MEM_MAX_TLIMIT: u32 = 195;
+pub const NVML_FI_DEV_TEMPERATURE_GPU_MAX_TLIMIT: u32 = 196;
+pub const NVML_FI_DEV_IS_MIG_MODE_INDEPENDENT_MIG_QUERY_CAPABLE: u32 = 199;
+pub const NVML_FI_MAX: u32 = 200;
+pub const NVML_NVFBC_SESSION_FLAG_DIFFMAP_ENABLED: u32 = 1;
+pub const NVML_NVFBC_SESSION_FLAG_CLASSIFICATIONMAP_ENABLED: u32 = 2;
+pub const NVML_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_NO_WAIT: u32 = 4;
+pub const NVML_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_INFINITE: u32 = 8;
+pub const NVML_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_TIMEOUT: u32 = 16;
+pub const NVML_CC_SYSTEM_CPU_CAPS_NONE: u32 = 0;
+pub const NVML_CC_SYSTEM_CPU_CAPS_AMD_SEV: u32 = 1;
+pub const NVML_CC_SYSTEM_CPU_CAPS_INTEL_TDX: u32 = 2;
+pub const NVML_CC_SYSTEM_GPUS_CC_NOT_CAPABLE: u32 = 0;
+pub const NVML_CC_SYSTEM_GPUS_CC_CAPABLE: u32 = 1;
+pub const NVML_CC_SYSTEM_DEVTOOLS_MODE_OFF: u32 = 0;
+pub const NVML_CC_SYSTEM_DEVTOOLS_MODE_ON: u32 = 1;
+pub const NVML_CC_SYSTEM_ENVIRONMENT_UNAVAILABLE: u32 = 0;
+pub const NVML_CC_SYSTEM_ENVIRONMENT_SIM: u32 = 1;
+pub const NVML_CC_SYSTEM_ENVIRONMENT_PROD: u32 = 2;
+pub const NVML_CC_SYSTEM_FEATURE_DISABLED: u32 = 0;
+pub const NVML_CC_SYSTEM_FEATURE_ENABLED: u32 = 1;
+pub const NVML_CC_SYSTEM_MULTIGPU_NONE: u32 = 0;
+pub const NVML_CC_SYSTEM_MULTIGPU_PROTECTED_PCIE: u32 = 1;
+pub const NVML_CC_ACCEPTING_CLIENT_REQUESTS_FALSE: u32 = 0;
+pub const NVML_CC_ACCEPTING_CLIENT_REQUESTS_TRUE: u32 = 1;
+pub const NVML_GPU_CERT_CHAIN_SIZE: u32 = 4096;
+pub const NVML_GPU_ATTESTATION_CERT_CHAIN_SIZE: u32 = 5120;
+pub const NVML_CC_GPU_CEC_NONCE_SIZE: u32 = 32;
+pub const NVML_CC_GPU_ATTESTATION_REPORT_SIZE: u32 = 8192;
+pub const NVML_CC_GPU_CEC_ATTESTATION_REPORT_SIZE: u32 = 4096;
+pub const NVML_CC_CEC_ATTESTATION_REPORT_NOT_PRESENT: u32 = 0;
+pub const NVML_CC_CEC_ATTESTATION_REPORT_PRESENT: u32 = 1;
+pub const NVML_CC_KEY_ROTATION_THRESHOLD_ATTACKER_ADVANTAGE_MIN: u32 = 50;
+pub const NVML_CC_KEY_ROTATION_THRESHOLD_ATTACKER_ADVANTAGE_MAX: u32 = 75;
+pub const NVML_GPU_FABRIC_UUID_LEN: u32 = 16;
+pub const NVML_GPU_FABRIC_STATE_NOT_SUPPORTED: u32 = 0;
+pub const NVML_GPU_FABRIC_STATE_NOT_STARTED: u32 = 1;
+pub const NVML_GPU_FABRIC_STATE_IN_PROGRESS: u32 = 2;
+pub const NVML_GPU_FABRIC_STATE_COMPLETED: u32 = 3;
+pub const NVML_GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_NOT_SUPPORTED: u32 = 0;
+pub const NVML_GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_TRUE: u32 = 1;
+pub const NVML_GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_FALSE: u32 = 2;
+pub const NVML_GPU_FABRIC_HEALTH_MASK_SHIFT_DEGRADED_BW: u32 = 0;
+pub const NVML_GPU_FABRIC_HEALTH_MASK_WIDTH_DEGRADED_BW: u32 = 17;
+pub const NVML_POWER_SCOPE_GPU: u32 = 0;
+pub const NVML_POWER_SCOPE_MODULE: u32 = 1;
+pub const NVML_POWER_SCOPE_MEMORY: u32 = 2;
+pub const NVML_INIT_FLAG_NO_GPUS: u32 = 1;
+pub const NVML_INIT_FLAG_NO_ATTACH: u32 = 2;
+pub const NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE: u32 = 16;
+pub const NVML_DEVICE_UUID_BUFFER_SIZE: u32 = 80;
+pub const NVML_DEVICE_UUID_V2_BUFFER_SIZE: u32 = 96;
+pub const NVML_DEVICE_PART_NUMBER_BUFFER_SIZE: u32 = 80;
+pub const NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE: u32 = 80;
+pub const NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE: u32 = 80;
+pub const NVML_DEVICE_NAME_BUFFER_SIZE: u32 = 64;
+pub const NVML_DEVICE_NAME_V2_BUFFER_SIZE: u32 = 96;
+pub const NVML_DEVICE_SERIAL_BUFFER_SIZE: u32 = 30;
+pub const NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE: u32 = 32;
+pub const NVML_AFFINITY_SCOPE_NODE: u32 = 0;
+pub const NVML_AFFINITY_SCOPE_SOCKET: u32 = 1;
+pub const NVML_DEVICE_MIG_DISABLE: u32 = 0;
+pub const NVML_DEVICE_MIG_ENABLE: u32 = 1;
+pub const NVML_GPU_INSTANCE_PROFILE_1_SLICE: u32 = 0;
+pub const NVML_GPU_INSTANCE_PROFILE_2_SLICE: u32 = 1;
+pub const NVML_GPU_INSTANCE_PROFILE_3_SLICE: u32 = 2;
+pub const NVML_GPU_INSTANCE_PROFILE_4_SLICE: u32 = 3;
+pub const NVML_GPU_INSTANCE_PROFILE_7_SLICE: u32 = 4;
+pub const NVML_GPU_INSTANCE_PROFILE_8_SLICE: u32 = 5;
+pub const NVML_GPU_INSTANCE_PROFILE_6_SLICE: u32 = 6;
+pub const NVML_GPU_INSTANCE_PROFILE_1_SLICE_REV1: u32 = 7;
+pub const NVML_GPU_INSTANCE_PROFILE_2_SLICE_REV1: u32 = 8;
+pub const NVML_GPU_INSTANCE_PROFILE_1_SLICE_REV2: u32 = 9;
+pub const NVML_GPU_INSTANCE_PROFILE_COUNT: u32 = 10;
+pub const NVML_GPU_INTSTANCE_PROFILE_CAPS_P2P: u32 = 1;
+pub const NVML_COMPUTE_INSTANCE_PROFILE_1_SLICE: u32 = 0;
+pub const NVML_COMPUTE_INSTANCE_PROFILE_2_SLICE: u32 = 1;
+pub const NVML_COMPUTE_INSTANCE_PROFILE_3_SLICE: u32 = 2;
+pub const NVML_COMPUTE_INSTANCE_PROFILE_4_SLICE: u32 = 3;
+pub const NVML_COMPUTE_INSTANCE_PROFILE_7_SLICE: u32 = 4;
+pub const NVML_COMPUTE_INSTANCE_PROFILE_8_SLICE: u32 = 5;
+pub const NVML_COMPUTE_INSTANCE_PROFILE_6_SLICE: u32 = 6;
+pub const NVML_COMPUTE_INSTANCE_PROFILE_1_SLICE_REV1: u32 = 7;
+pub const NVML_COMPUTE_INSTANCE_PROFILE_COUNT: u32 = 8;
+pub const NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED: u32 = 0;
+pub const NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_COUNT: u32 = 1;
+pub const NVML_GPM_METRICS_GET_VERSION: u32 = 1;
+pub const NVML_GPM_SUPPORT_VERSION: u32 = 1;
+pub const NVML_NVLINK_POWER_STATE_HIGH_SPEED: u32 = 0;
+pub const NVML_NVLINK_POWER_STATE_LOW: u32 = 1;
+pub const NVML_NVLINK_LOW_POWER_THRESHOLD_MIN: u32 = 1;
+pub const NVML_NVLINK_LOW_POWER_THRESHOLD_MAX: u32 = 8191;
+pub const NVML_NVLINK_LOW_POWER_THRESHOLD_RESET: u32 = 4294967295;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct nvmlDevice_st {
+ _unused: [u8; 0],
+}
+pub type nvmlDevice_t = *mut nvmlDevice_st;
+/// PCI information about a GPU device.
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlPciInfoExt_v1_t {
+ ///!< The version number of this struct
+ pub version: ::core::ffi::c_uint,
+ ///!< The PCI domain on which the device's bus resides, 0 to 0xffffffff
+ pub domain: ::core::ffi::c_uint,
+ ///!< The bus on which the device resides, 0 to 0xff
+ pub bus: ::core::ffi::c_uint,
+ ///!< The device's id on the bus, 0 to 31
+ pub device: ::core::ffi::c_uint,
+ ///!< The combined 16-bit device id and 16-bit vendor id
+ pub pciDeviceId: ::core::ffi::c_uint,
+ ///!< The 32-bit Sub System Device ID
+ pub pciSubSystemId: ::core::ffi::c_uint,
+ ///!< The 8-bit PCI base class code
+ pub baseClass: ::core::ffi::c_uint,
+ ///!< The 8-bit PCI sub class code
+ pub subClass: ::core::ffi::c_uint,
+    ///!< The tuple domain:bus:device.function PCI identifier (& NULL terminator)
+ pub busId: [::core::ffi::c_char; 32usize],
+}
+/// PCI information about a GPU device.
+pub type nvmlPciInfoExt_t = nvmlPciInfoExt_v1_t;
+/// PCI information about a GPU device.
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlPciInfo_st {
+    ///!< The legacy tuple domain:bus:device.function PCI identifier (& NULL terminator)
+ pub busIdLegacy: [::core::ffi::c_char; 16usize],
+ ///!< The PCI domain on which the device's bus resides, 0 to 0xffffffff
+ pub domain: ::core::ffi::c_uint,
+ ///!< The bus on which the device resides, 0 to 0xff
+ pub bus: ::core::ffi::c_uint,
+ ///!< The device's id on the bus, 0 to 31
+ pub device: ::core::ffi::c_uint,
+ ///!< The combined 16-bit device id and 16-bit vendor id
+ pub pciDeviceId: ::core::ffi::c_uint,
+ ///!< The 32-bit Sub System Device ID
+ pub pciSubSystemId: ::core::ffi::c_uint,
+    ///!< The tuple domain:bus:device.function PCI identifier (& NULL terminator)
+ pub busId: [::core::ffi::c_char; 32usize],
+}
+/// PCI information about a GPU device.
+pub type nvmlPciInfo_t = nvmlPciInfo_st;
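// Editorial sketch (not part of the generated file or this commit): the
// NVML_DEVICE_PCI_BUS_ID_FMT constant above ("%08X:%02X:%02X.0") documents the
// layout of the busId strings carried by the two PCI-info structs. Building
// such a string by hand would look like the helper below; the function name
// and example values are illustrative only.
fn format_pci_bus_id(domain: u32, bus: u32, device: u32) -> String {
    // Mirrors "%08X:%02X:%02X.0", e.g. (0, 0x65, 0) -> "00000000:65:00.0".
    format!("{:08X}:{:02X}:{:02X}.0", domain, bus, device)
}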
+/** Detailed ECC error counts for a device.
+
+ @deprecated Different GPU families can have different memory error counters
+ See \ref nvmlDeviceGetMemoryErrorCounter*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlEccErrorCounts_st {
+ ///!< L1 cache errors
+ pub l1Cache: ::core::ffi::c_ulonglong,
+ ///!< L2 cache errors
+ pub l2Cache: ::core::ffi::c_ulonglong,
+ ///!< Device memory errors
+ pub deviceMemory: ::core::ffi::c_ulonglong,
+ ///!< Register file errors
+ pub registerFile: ::core::ffi::c_ulonglong,
+}
+/** Detailed ECC error counts for a device.
+
+ @deprecated Different GPU families can have different memory error counters
+ See \ref nvmlDeviceGetMemoryErrorCounter*/
+pub type nvmlEccErrorCounts_t = nvmlEccErrorCounts_st;
+/** Utilization information for a device.
+ Each sample period may be between 1 second and 1/6 second, depending on the product being queried.*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlUtilization_st {
+ ///!< Percent of time over the past sample period during which one or more kernels was executing on the GPU
+ pub gpu: ::core::ffi::c_uint,
+ ///!< Percent of time over the past sample period during which global (device) memory was being read or written
+ pub memory: ::core::ffi::c_uint,
+}
+/** Utilization information for a device.
+ Each sample period may be between 1 second and 1/6 second, depending on the product being queried.*/
+pub type nvmlUtilization_t = nvmlUtilization_st;
+/** Memory allocation information for a device (v1).
+ The total amount is equal to the sum of the amounts of free and used memory.*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlMemory_st {
+ ///!< Total physical device memory (in bytes)
+ pub total: ::core::ffi::c_ulonglong,
+ ///!< Unallocated device memory (in bytes)
+ pub free: ::core::ffi::c_ulonglong,
+ /**!< Sum of Reserved and Allocated device memory (in bytes).
+!< Note that the driver/GPU always sets aside a small amount of memory for bookkeeping*/
+ pub used: ::core::ffi::c_ulonglong,
+}
+/** Memory allocation information for a device (v1).
+ The total amount is equal to the sum of the amounts of free and used memory.*/
+pub type nvmlMemory_t = nvmlMemory_st;
+/** Memory allocation information for a device (v2).
+
+ Version 2 adds versioning for the struct and the amount of system-reserved memory as an output.*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlMemory_v2_st {
+ ///!< Structure format version (must be 2)
+ pub version: ::core::ffi::c_uint,
+ ///!< Total physical device memory (in bytes)
+ pub total: ::core::ffi::c_ulonglong,
+ ///!< Device memory (in bytes) reserved for system use (driver or firmware)
+ pub reserved: ::core::ffi::c_ulonglong,
+ ///!< Unallocated device memory (in bytes)
+ pub free: ::core::ffi::c_ulonglong,
+ ///!< Allocated device memory (in bytes).
+ pub used: ::core::ffi::c_ulonglong,
+}
+/** Memory allocation information for a device (v2).
+
+ Version 2 adds versioning for the struct and the amount of system-reserved memory as an output.*/
+pub type nvmlMemory_v2_t = nvmlMemory_v2_st;
+/// BAR1 Memory allocation Information for a device
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlBAR1Memory_st {
+ ///!< Total BAR1 Memory (in bytes)
+ pub bar1Total: ::core::ffi::c_ulonglong,
+ ///!< Unallocated BAR1 Memory (in bytes)
+ pub bar1Free: ::core::ffi::c_ulonglong,
+ ///!< Allocated Used Memory (in bytes)
+ pub bar1Used: ::core::ffi::c_ulonglong,
+}
+/// BAR1 Memory allocation Information for a device
+pub type nvmlBAR1Memory_t = nvmlBAR1Memory_st;
+/** Information about running compute processes on the GPU, legacy version
+ for older versions of the API.*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlProcessInfo_v1_st {
+ ///!< Process ID
+ pub pid: ::core::ffi::c_uint,
+ ///!< Amount of used GPU memory in bytes.
+ pub usedGpuMemory: ::core::ffi::c_ulonglong,
+}
+/** Information about running compute processes on the GPU, legacy version
+ for older versions of the API.*/
+pub type nvmlProcessInfo_v1_t = nvmlProcessInfo_v1_st;
+/// Information about running compute processes on the GPU
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlProcessInfo_v2_st {
+ ///!< Process ID
+ pub pid: ::core::ffi::c_uint,
+ ///!< Amount of used GPU memory in bytes.
+ pub usedGpuMemory: ::core::ffi::c_ulonglong,
+ ///!< If MIG is enabled, stores a valid GPU instance ID. gpuInstanceId is set to
+ pub gpuInstanceId: ::core::ffi::c_uint,
+ ///!< If MIG is enabled, stores a valid compute instance ID. computeInstanceId is set to
+ pub computeInstanceId: ::core::ffi::c_uint,
+}
+/// Information about running compute processes on the GPU
+pub type nvmlProcessInfo_v2_t = nvmlProcessInfo_v2_st;
+/// Information about running compute processes on the GPU
+pub type nvmlProcessInfo_t = nvmlProcessInfo_v2_st;
+/// Information about running process on the GPU with protected memory
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlProcessDetail_v1_t {
+ ///!< Process ID
+ pub pid: ::core::ffi::c_uint,
+ ///!< Amount of used GPU memory in bytes.
+ pub usedGpuMemory: ::core::ffi::c_ulonglong,
+ ///!< If MIG is enabled, stores a valid GPU instance ID. gpuInstanceId is
+ pub gpuInstanceId: ::core::ffi::c_uint,
+ ///!< If MIG is enabled, stores a valid compute instance ID. computeInstanceId
+ pub computeInstanceId: ::core::ffi::c_uint,
+ ///!< Amount of used GPU conf compute protected memory in bytes.
+ pub usedGpuCcProtectedMemory: ::core::ffi::c_ulonglong,
+}
+/// Information about all running processes on the GPU for the given mode
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlProcessDetailList_v1_t {
+ ///!< Struct version, MUST be nvmlProcessDetailList_v1
+ pub version: ::core::ffi::c_uint,
+ ///!< Process mode(Compute/Graphics/MPSCompute)
+ pub mode: ::core::ffi::c_uint,
+ ///!< Number of process entries in procArray
+ pub numProcArrayEntries: ::core::ffi::c_uint,
+ ///!< Process array
+ pub procArray: *mut nvmlProcessDetail_v1_t,
+}
+/// Information about all running processes on the GPU for the given mode
+pub type nvmlProcessDetailList_t = nvmlProcessDetailList_v1_t;
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlDeviceAttributes_st {
+ ///!< Streaming Multiprocessor count
+ pub multiprocessorCount: ::core::ffi::c_uint,
+ ///!< Shared Copy Engine count
+ pub sharedCopyEngineCount: ::core::ffi::c_uint,
+ ///!< Shared Decoder Engine count
+ pub sharedDecoderCount: ::core::ffi::c_uint,
+ ///!< Shared Encoder Engine count
+ pub sharedEncoderCount: ::core::ffi::c_uint,
+ ///!< Shared JPEG Engine count
+ pub sharedJpegCount: ::core::ffi::c_uint,
+ ///!< Shared OFA Engine count
+ pub sharedOfaCount: ::core::ffi::c_uint,
+ ///!< GPU instance slice count
+ pub gpuInstanceSliceCount: ::core::ffi::c_uint,
+ ///!< Compute instance slice count
+ pub computeInstanceSliceCount: ::core::ffi::c_uint,
+ ///!< Device memory size (in MiB)
+ pub memorySizeMB: ::core::ffi::c_ulonglong,
+}
+pub type nvmlDeviceAttributes_t = nvmlDeviceAttributes_st;
+/// C2C Mode information for a device
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlC2cModeInfo_v1_t {
+ pub isC2cEnabled: ::core::ffi::c_uint,
+}
+/** Possible values that classify the remap availability for each bank. The max
+ field will contain the number of banks that have maximum remap availability
+ (all reserved rows are available). None means that there are no reserved
+ rows available.*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlRowRemapperHistogramValues_st {
+ pub max: ::core::ffi::c_uint,
+ pub high: ::core::ffi::c_uint,
+ pub partial: ::core::ffi::c_uint,
+ pub low: ::core::ffi::c_uint,
+ pub none: ::core::ffi::c_uint,
+}
+/** Possible values that classify the remap availability for each bank. The max
+ field will contain the number of banks that have maximum remap availability
+ (all reserved rows are available). None means that there are no reserved
+ rows available.*/
+pub type nvmlRowRemapperHistogramValues_t = nvmlRowRemapperHistogramValues_st;
+impl nvmlBridgeChipType_enum {
+ pub const NVML_BRIDGE_CHIP_PLX: nvmlBridgeChipType_enum = nvmlBridgeChipType_enum(0);
+}
+impl nvmlBridgeChipType_enum {
+ pub const NVML_BRIDGE_CHIP_BRO4: nvmlBridgeChipType_enum = nvmlBridgeChipType_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Enum to represent type of bridge chip
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlBridgeChipType_enum(pub ::core::ffi::c_uint);
+/// Enum to represent type of bridge chip
+pub use self::nvmlBridgeChipType_enum as nvmlBridgeChipType_t;
+impl nvmlNvLinkUtilizationCountUnits_enum {
+ pub const NVML_NVLINK_COUNTER_UNIT_CYCLES: nvmlNvLinkUtilizationCountUnits_enum = nvmlNvLinkUtilizationCountUnits_enum(
+ 0,
+ );
+}
+impl nvmlNvLinkUtilizationCountUnits_enum {
+ pub const NVML_NVLINK_COUNTER_UNIT_PACKETS: nvmlNvLinkUtilizationCountUnits_enum = nvmlNvLinkUtilizationCountUnits_enum(
+ 1,
+ );
+}
+impl nvmlNvLinkUtilizationCountUnits_enum {
+ pub const NVML_NVLINK_COUNTER_UNIT_BYTES: nvmlNvLinkUtilizationCountUnits_enum = nvmlNvLinkUtilizationCountUnits_enum(
+ 2,
+ );
+}
+impl nvmlNvLinkUtilizationCountUnits_enum {
+ pub const NVML_NVLINK_COUNTER_UNIT_RESERVED: nvmlNvLinkUtilizationCountUnits_enum = nvmlNvLinkUtilizationCountUnits_enum(
+ 3,
+ );
+}
+impl nvmlNvLinkUtilizationCountUnits_enum {
+ pub const NVML_NVLINK_COUNTER_UNIT_COUNT: nvmlNvLinkUtilizationCountUnits_enum = nvmlNvLinkUtilizationCountUnits_enum(
+ 4,
+ );
+}
+#[repr(transparent)]
+/// Enum to represent the NvLink utilization counter packet units
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlNvLinkUtilizationCountUnits_enum(pub ::core::ffi::c_uint);
+/// Enum to represent the NvLink utilization counter packet units
+pub use self::nvmlNvLinkUtilizationCountUnits_enum as nvmlNvLinkUtilizationCountUnits_t;
+impl nvmlNvLinkUtilizationCountPktTypes_enum {
+ pub const NVML_NVLINK_COUNTER_PKTFILTER_NOP: nvmlNvLinkUtilizationCountPktTypes_enum = nvmlNvLinkUtilizationCountPktTypes_enum(
+ 1,
+ );
+}
+impl nvmlNvLinkUtilizationCountPktTypes_enum {
+ pub const NVML_NVLINK_COUNTER_PKTFILTER_READ: nvmlNvLinkUtilizationCountPktTypes_enum = nvmlNvLinkUtilizationCountPktTypes_enum(
+ 2,
+ );
+}
+impl nvmlNvLinkUtilizationCountPktTypes_enum {
+ pub const NVML_NVLINK_COUNTER_PKTFILTER_WRITE: nvmlNvLinkUtilizationCountPktTypes_enum = nvmlNvLinkUtilizationCountPktTypes_enum(
+ 4,
+ );
+}
+impl nvmlNvLinkUtilizationCountPktTypes_enum {
+ pub const NVML_NVLINK_COUNTER_PKTFILTER_RATOM: nvmlNvLinkUtilizationCountPktTypes_enum = nvmlNvLinkUtilizationCountPktTypes_enum(
+ 8,
+ );
+}
+impl nvmlNvLinkUtilizationCountPktTypes_enum {
+ pub const NVML_NVLINK_COUNTER_PKTFILTER_NRATOM: nvmlNvLinkUtilizationCountPktTypes_enum = nvmlNvLinkUtilizationCountPktTypes_enum(
+ 16,
+ );
+}
+impl nvmlNvLinkUtilizationCountPktTypes_enum {
+ pub const NVML_NVLINK_COUNTER_PKTFILTER_FLUSH: nvmlNvLinkUtilizationCountPktTypes_enum = nvmlNvLinkUtilizationCountPktTypes_enum(
+ 32,
+ );
+}
+impl nvmlNvLinkUtilizationCountPktTypes_enum {
+ pub const NVML_NVLINK_COUNTER_PKTFILTER_RESPDATA: nvmlNvLinkUtilizationCountPktTypes_enum = nvmlNvLinkUtilizationCountPktTypes_enum(
+ 64,
+ );
+}
+impl nvmlNvLinkUtilizationCountPktTypes_enum {
+ pub const NVML_NVLINK_COUNTER_PKTFILTER_RESPNODATA: nvmlNvLinkUtilizationCountPktTypes_enum = nvmlNvLinkUtilizationCountPktTypes_enum(
+ 128,
+ );
+}
+impl nvmlNvLinkUtilizationCountPktTypes_enum {
+ pub const NVML_NVLINK_COUNTER_PKTFILTER_ALL: nvmlNvLinkUtilizationCountPktTypes_enum = nvmlNvLinkUtilizationCountPktTypes_enum(
+ 255,
+ );
+}
+#[repr(transparent)]
+/** Enum to represent the NvLink utilization counter packet types to count
+ ** this is ONLY applicable with the units as packets or bytes
+ ** as specified in \a nvmlNvLinkUtilizationCountUnits_t
+ ** all packet filter descriptions are target GPU centric
+ ** these can be "OR'd" together*/
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlNvLinkUtilizationCountPktTypes_enum(pub ::core::ffi::c_uint);
+/** Enum to represent the NvLink utilization counter packet types to count
+ ** this is ONLY applicable with the units as packets or bytes
+ ** as specified in \a nvmlNvLinkUtilizationCountUnits_t
+ ** all packet filter descriptions are target GPU centric
+ ** these can be "OR'd" together*/
+pub use self::nvmlNvLinkUtilizationCountPktTypes_enum as nvmlNvLinkUtilizationCountPktTypes_t;
+/// Struct to define the NVLINK counter controls
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlNvLinkUtilizationControl_st {
+ pub units: nvmlNvLinkUtilizationCountUnits_t,
+ pub pktfilter: nvmlNvLinkUtilizationCountPktTypes_t,
+}
+/// Struct to define the NVLINK counter controls
+pub type nvmlNvLinkUtilizationControl_t = nvmlNvLinkUtilizationControl_st;
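// Editorial sketch (not part of the generated file or this commit): the
// packet-filter enum above is documented as OR-able when the counter unit is
// packets or bytes. Because the generated enums are transparent newtypes over
// c_uint, combining filters is a bitwise OR on the inner value; the function
// name below is illustrative only.
fn read_write_filter_control() -> nvmlNvLinkUtilizationControl_t {
    nvmlNvLinkUtilizationControl_st {
        units: nvmlNvLinkUtilizationCountUnits_t::NVML_NVLINK_COUNTER_UNIT_BYTES,
        pktfilter: nvmlNvLinkUtilizationCountPktTypes_t(
            nvmlNvLinkUtilizationCountPktTypes_t::NVML_NVLINK_COUNTER_PKTFILTER_READ.0
                | nvmlNvLinkUtilizationCountPktTypes_t::NVML_NVLINK_COUNTER_PKTFILTER_WRITE.0,
        ),
    }
}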
+impl nvmlNvLinkCapability_enum {
+ pub const NVML_NVLINK_CAP_P2P_SUPPORTED: nvmlNvLinkCapability_enum = nvmlNvLinkCapability_enum(
+ 0,
+ );
+}
+impl nvmlNvLinkCapability_enum {
+ pub const NVML_NVLINK_CAP_SYSMEM_ACCESS: nvmlNvLinkCapability_enum = nvmlNvLinkCapability_enum(
+ 1,
+ );
+}
+impl nvmlNvLinkCapability_enum {
+ pub const NVML_NVLINK_CAP_P2P_ATOMICS: nvmlNvLinkCapability_enum = nvmlNvLinkCapability_enum(
+ 2,
+ );
+}
+impl nvmlNvLinkCapability_enum {
+ pub const NVML_NVLINK_CAP_SYSMEM_ATOMICS: nvmlNvLinkCapability_enum = nvmlNvLinkCapability_enum(
+ 3,
+ );
+}
+impl nvmlNvLinkCapability_enum {
+ pub const NVML_NVLINK_CAP_SLI_BRIDGE: nvmlNvLinkCapability_enum = nvmlNvLinkCapability_enum(
+ 4,
+ );
+}
+impl nvmlNvLinkCapability_enum {
+ pub const NVML_NVLINK_CAP_VALID: nvmlNvLinkCapability_enum = nvmlNvLinkCapability_enum(
+ 5,
+ );
+}
+impl nvmlNvLinkCapability_enum {
+ pub const NVML_NVLINK_CAP_COUNT: nvmlNvLinkCapability_enum = nvmlNvLinkCapability_enum(
+ 6,
+ );
+}
+#[repr(transparent)]
+/// Enum to represent NvLink queryable capabilities
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlNvLinkCapability_enum(pub ::core::ffi::c_uint);
+/// Enum to represent NvLink queryable capabilities
+pub use self::nvmlNvLinkCapability_enum as nvmlNvLinkCapability_t;
+impl nvmlNvLinkErrorCounter_enum {
+ pub const NVML_NVLINK_ERROR_DL_REPLAY: nvmlNvLinkErrorCounter_enum = nvmlNvLinkErrorCounter_enum(
+ 0,
+ );
+}
+impl nvmlNvLinkErrorCounter_enum {
+ pub const NVML_NVLINK_ERROR_DL_RECOVERY: nvmlNvLinkErrorCounter_enum = nvmlNvLinkErrorCounter_enum(
+ 1,
+ );
+}
+impl nvmlNvLinkErrorCounter_enum {
+ pub const NVML_NVLINK_ERROR_DL_CRC_FLIT: nvmlNvLinkErrorCounter_enum = nvmlNvLinkErrorCounter_enum(
+ 2,
+ );
+}
+impl nvmlNvLinkErrorCounter_enum {
+ pub const NVML_NVLINK_ERROR_DL_CRC_DATA: nvmlNvLinkErrorCounter_enum = nvmlNvLinkErrorCounter_enum(
+ 3,
+ );
+}
+impl nvmlNvLinkErrorCounter_enum {
+ pub const NVML_NVLINK_ERROR_DL_ECC_DATA: nvmlNvLinkErrorCounter_enum = nvmlNvLinkErrorCounter_enum(
+ 4,
+ );
+}
+impl nvmlNvLinkErrorCounter_enum {
+ pub const NVML_NVLINK_ERROR_COUNT: nvmlNvLinkErrorCounter_enum = nvmlNvLinkErrorCounter_enum(
+ 5,
+ );
+}
+#[repr(transparent)]
+/// Enum to represent NvLink queryable error counters
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlNvLinkErrorCounter_enum(pub ::core::ffi::c_uint);
+/// Enum to represent NvLink queryable error counters
+pub use self::nvmlNvLinkErrorCounter_enum as nvmlNvLinkErrorCounter_t;
+impl nvmlIntNvLinkDeviceType_enum {
+ pub const NVML_NVLINK_DEVICE_TYPE_GPU: nvmlIntNvLinkDeviceType_enum = nvmlIntNvLinkDeviceType_enum(
+ 0,
+ );
+}
+impl nvmlIntNvLinkDeviceType_enum {
+ pub const NVML_NVLINK_DEVICE_TYPE_IBMNPU: nvmlIntNvLinkDeviceType_enum = nvmlIntNvLinkDeviceType_enum(
+ 1,
+ );
+}
+impl nvmlIntNvLinkDeviceType_enum {
+ pub const NVML_NVLINK_DEVICE_TYPE_SWITCH: nvmlIntNvLinkDeviceType_enum = nvmlIntNvLinkDeviceType_enum(
+ 2,
+ );
+}
+impl nvmlIntNvLinkDeviceType_enum {
+ pub const NVML_NVLINK_DEVICE_TYPE_UNKNOWN: nvmlIntNvLinkDeviceType_enum = nvmlIntNvLinkDeviceType_enum(
+ 255,
+ );
+}
+#[repr(transparent)]
+/// Enum to represent NvLink's remote device type
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlIntNvLinkDeviceType_enum(pub ::core::ffi::c_uint);
+/// Enum to represent NvLink's remote device type
+pub use self::nvmlIntNvLinkDeviceType_enum as nvmlIntNvLinkDeviceType_t;
+impl nvmlGpuLevel_enum {
+ pub const NVML_TOPOLOGY_INTERNAL: nvmlGpuLevel_enum = nvmlGpuLevel_enum(0);
+}
+impl nvmlGpuLevel_enum {
+ pub const NVML_TOPOLOGY_SINGLE: nvmlGpuLevel_enum = nvmlGpuLevel_enum(10);
+}
+impl nvmlGpuLevel_enum {
+ pub const NVML_TOPOLOGY_MULTIPLE: nvmlGpuLevel_enum = nvmlGpuLevel_enum(20);
+}
+impl nvmlGpuLevel_enum {
+ pub const NVML_TOPOLOGY_HOSTBRIDGE: nvmlGpuLevel_enum = nvmlGpuLevel_enum(30);
+}
+impl nvmlGpuLevel_enum {
+ pub const NVML_TOPOLOGY_NODE: nvmlGpuLevel_enum = nvmlGpuLevel_enum(40);
+}
+impl nvmlGpuLevel_enum {
+ pub const NVML_TOPOLOGY_SYSTEM: nvmlGpuLevel_enum = nvmlGpuLevel_enum(50);
+}
+#[repr(transparent)]
+/** Represents level relationships within a system between two GPUs
+ The enums are spaced to allow for future relationships*/
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGpuLevel_enum(pub ::core::ffi::c_uint);
+/** Represents level relationships within a system between two GPUs
+ The enums are spaced to allow for future relationships*/
+pub use self::nvmlGpuLevel_enum as nvmlGpuTopologyLevel_t;
+impl nvmlGpuP2PStatus_enum {
+ pub const NVML_P2P_STATUS_OK: nvmlGpuP2PStatus_enum = nvmlGpuP2PStatus_enum(0);
+}
+impl nvmlGpuP2PStatus_enum {
+ pub const NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED: nvmlGpuP2PStatus_enum = nvmlGpuP2PStatus_enum(
+ 1,
+ );
+}
+impl nvmlGpuP2PStatus_enum {
+ pub const NVML_P2P_STATUS_CHIPSET_NOT_SUPPORTED: nvmlGpuP2PStatus_enum = nvmlGpuP2PStatus_enum(
+ 1,
+ );
+}
+impl nvmlGpuP2PStatus_enum {
+ pub const NVML_P2P_STATUS_GPU_NOT_SUPPORTED: nvmlGpuP2PStatus_enum = nvmlGpuP2PStatus_enum(
+ 2,
+ );
+}
+impl nvmlGpuP2PStatus_enum {
+ pub const NVML_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED: nvmlGpuP2PStatus_enum = nvmlGpuP2PStatus_enum(
+ 3,
+ );
+}
+impl nvmlGpuP2PStatus_enum {
+ pub const NVML_P2P_STATUS_DISABLED_BY_REGKEY: nvmlGpuP2PStatus_enum = nvmlGpuP2PStatus_enum(
+ 4,
+ );
+}
+impl nvmlGpuP2PStatus_enum {
+ pub const NVML_P2P_STATUS_NOT_SUPPORTED: nvmlGpuP2PStatus_enum = nvmlGpuP2PStatus_enum(
+ 5,
+ );
+}
+impl nvmlGpuP2PStatus_enum {
+ pub const NVML_P2P_STATUS_UNKNOWN: nvmlGpuP2PStatus_enum = nvmlGpuP2PStatus_enum(6);
+}
+#[repr(transparent)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGpuP2PStatus_enum(pub ::core::ffi::c_uint);
+pub use self::nvmlGpuP2PStatus_enum as nvmlGpuP2PStatus_t;
+impl nvmlGpuP2PCapsIndex_enum {
+ pub const NVML_P2P_CAPS_INDEX_READ: nvmlGpuP2PCapsIndex_enum = nvmlGpuP2PCapsIndex_enum(
+ 0,
+ );
+}
+impl nvmlGpuP2PCapsIndex_enum {
+ pub const NVML_P2P_CAPS_INDEX_WRITE: nvmlGpuP2PCapsIndex_enum = nvmlGpuP2PCapsIndex_enum(
+ 1,
+ );
+}
+impl nvmlGpuP2PCapsIndex_enum {
+ pub const NVML_P2P_CAPS_INDEX_NVLINK: nvmlGpuP2PCapsIndex_enum = nvmlGpuP2PCapsIndex_enum(
+ 2,
+ );
+}
+impl nvmlGpuP2PCapsIndex_enum {
+ pub const NVML_P2P_CAPS_INDEX_ATOMICS: nvmlGpuP2PCapsIndex_enum = nvmlGpuP2PCapsIndex_enum(
+ 3,
+ );
+}
+impl nvmlGpuP2PCapsIndex_enum {
+ pub const NVML_P2P_CAPS_INDEX_PCI: nvmlGpuP2PCapsIndex_enum = nvmlGpuP2PCapsIndex_enum(
+ 4,
+ );
+}
+impl nvmlGpuP2PCapsIndex_enum {
+ pub const NVML_P2P_CAPS_INDEX_PROP: nvmlGpuP2PCapsIndex_enum = nvmlGpuP2PCapsIndex_enum(
+ 4,
+ );
+}
+impl nvmlGpuP2PCapsIndex_enum {
+ pub const NVML_P2P_CAPS_INDEX_UNKNOWN: nvmlGpuP2PCapsIndex_enum = nvmlGpuP2PCapsIndex_enum(
+ 5,
+ );
+}
+#[repr(transparent)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGpuP2PCapsIndex_enum(pub ::core::ffi::c_uint);
+pub use self::nvmlGpuP2PCapsIndex_enum as nvmlGpuP2PCapsIndex_t;
+/// Information about the Bridge Chip Firmware
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlBridgeChipInfo_st {
+ ///!< Type of Bridge Chip
+ pub type_: nvmlBridgeChipType_t,
+ ///!< Firmware Version. 0=Version is unavailable
+ pub fwVersion: ::core::ffi::c_uint,
+}
+/// Information about the Bridge Chip Firmware
+pub type nvmlBridgeChipInfo_t = nvmlBridgeChipInfo_st;
+/** This structure stores the complete Hierarchy of the Bridge Chip within the board. The immediate
+ bridge is stored at index 0 of bridgeInfoList, parent to immediate bridge is at index 1 and so forth.*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlBridgeChipHierarchy_st {
+ ///!< Number of Bridge Chips on the Board
+ pub bridgeCount: ::core::ffi::c_uchar,
+ ///!< Hierarchy of Bridge Chips on the board
+ pub bridgeChipInfo: [nvmlBridgeChipInfo_t; 128usize],
+}
+/** This structure stores the complete Hierarchy of the Bridge Chip within the board. The immediate
+ bridge is stored at index 0 of bridgeInfoList, parent to immediate bridge is at index 1 and so forth.*/
+pub type nvmlBridgeChipHierarchy_t = nvmlBridgeChipHierarchy_st;
+impl nvmlSamplingType_enum {
+ ///!< To represent total power drawn by GPU
+ pub const NVML_TOTAL_POWER_SAMPLES: nvmlSamplingType_enum = nvmlSamplingType_enum(0);
+}
+impl nvmlSamplingType_enum {
+ ///!< To represent percent of time during which one or more kernels was executing on the GPU
+ pub const NVML_GPU_UTILIZATION_SAMPLES: nvmlSamplingType_enum = nvmlSamplingType_enum(
+ 1,
+ );
+}
+impl nvmlSamplingType_enum {
+ ///!< To represent percent of time during which global (device) memory was being read or written
+ pub const NVML_MEMORY_UTILIZATION_SAMPLES: nvmlSamplingType_enum = nvmlSamplingType_enum(
+ 2,
+ );
+}
+impl nvmlSamplingType_enum {
+ ///!< To represent percent of time during which NVENC remains busy
+ pub const NVML_ENC_UTILIZATION_SAMPLES: nvmlSamplingType_enum = nvmlSamplingType_enum(
+ 3,
+ );
+}
+impl nvmlSamplingType_enum {
+ ///!< To represent percent of time during which NVDEC remains busy
+ pub const NVML_DEC_UTILIZATION_SAMPLES: nvmlSamplingType_enum = nvmlSamplingType_enum(
+ 4,
+ );
+}
+impl nvmlSamplingType_enum {
+ ///!< To represent processor clock samples
+ pub const NVML_PROCESSOR_CLK_SAMPLES: nvmlSamplingType_enum = nvmlSamplingType_enum(
+ 5,
+ );
+}
+impl nvmlSamplingType_enum {
+ ///!< To represent memory clock samples
+ pub const NVML_MEMORY_CLK_SAMPLES: nvmlSamplingType_enum = nvmlSamplingType_enum(6);
+}
+impl nvmlSamplingType_enum {
+ ///!< To represent module power samples for total module starting Grace Hopper
+ pub const NVML_MODULE_POWER_SAMPLES: nvmlSamplingType_enum = nvmlSamplingType_enum(
+ 7,
+ );
+}
+impl nvmlSamplingType_enum {
+ ///!< To represent percent of time during which NVJPG remains busy
+ pub const NVML_JPG_UTILIZATION_SAMPLES: nvmlSamplingType_enum = nvmlSamplingType_enum(
+ 8,
+ );
+}
+impl nvmlSamplingType_enum {
+ ///!< To represent percent of time during which NVOFA remains busy
+ pub const NVML_OFA_UTILIZATION_SAMPLES: nvmlSamplingType_enum = nvmlSamplingType_enum(
+ 9,
+ );
+}
+impl nvmlSamplingType_enum {
+ pub const NVML_SAMPLINGTYPE_COUNT: nvmlSamplingType_enum = nvmlSamplingType_enum(10);
+}
+#[repr(transparent)]
+/// Represents Type of Sampling Event
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlSamplingType_enum(pub ::core::ffi::c_uint);
+/// Represents Type of Sampling Event
+pub use self::nvmlSamplingType_enum as nvmlSamplingType_t;
+impl nvmlPcieUtilCounter_enum {
+ pub const NVML_PCIE_UTIL_TX_BYTES: nvmlPcieUtilCounter_enum = nvmlPcieUtilCounter_enum(
+ 0,
+ );
+}
+impl nvmlPcieUtilCounter_enum {
+ pub const NVML_PCIE_UTIL_RX_BYTES: nvmlPcieUtilCounter_enum = nvmlPcieUtilCounter_enum(
+ 1,
+ );
+}
+impl nvmlPcieUtilCounter_enum {
+ pub const NVML_PCIE_UTIL_COUNT: nvmlPcieUtilCounter_enum = nvmlPcieUtilCounter_enum(
+ 2,
+ );
+}
+#[repr(transparent)]
+/// Represents the queryable PCIe utilization counters
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlPcieUtilCounter_enum(pub ::core::ffi::c_uint);
+/// Represents the queryable PCIe utilization counters
+pub use self::nvmlPcieUtilCounter_enum as nvmlPcieUtilCounter_t;
+impl nvmlValueType_enum {
+ pub const NVML_VALUE_TYPE_DOUBLE: nvmlValueType_enum = nvmlValueType_enum(0);
+}
+impl nvmlValueType_enum {
+ pub const NVML_VALUE_TYPE_UNSIGNED_INT: nvmlValueType_enum = nvmlValueType_enum(1);
+}
+impl nvmlValueType_enum {
+ pub const NVML_VALUE_TYPE_UNSIGNED_LONG: nvmlValueType_enum = nvmlValueType_enum(2);
+}
+impl nvmlValueType_enum {
+ pub const NVML_VALUE_TYPE_UNSIGNED_LONG_LONG: nvmlValueType_enum = nvmlValueType_enum(
+ 3,
+ );
+}
+impl nvmlValueType_enum {
+ pub const NVML_VALUE_TYPE_SIGNED_LONG_LONG: nvmlValueType_enum = nvmlValueType_enum(
+ 4,
+ );
+}
+impl nvmlValueType_enum {
+ pub const NVML_VALUE_TYPE_SIGNED_INT: nvmlValueType_enum = nvmlValueType_enum(5);
+}
+impl nvmlValueType_enum {
+ pub const NVML_VALUE_TYPE_COUNT: nvmlValueType_enum = nvmlValueType_enum(6);
+}
+#[repr(transparent)]
+/// Represents the type for sample value returned
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlValueType_enum(pub ::core::ffi::c_uint);
+/// Represents the type for sample value returned
+pub use self::nvmlValueType_enum as nvmlValueType_t;
+/// Union to represent different types of Value
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union nvmlValue_st {
+ ///!< If the value is double
+ pub dVal: f64,
+ ///!< If the value is signed int
+ pub siVal: ::core::ffi::c_int,
+ ///!< If the value is unsigned int
+ pub uiVal: ::core::ffi::c_uint,
+ ///!< If the value is unsigned long
+ pub ulVal: ::core::ffi::c_ulong,
+ ///!< If the value is unsigned long long
+ pub ullVal: ::core::ffi::c_ulonglong,
+ ///!< If the value is signed long long
+ pub sllVal: ::core::ffi::c_longlong,
+}
+/// Union to represent different types of Value
+pub type nvmlValue_t = nvmlValue_st;
+/// Information for Sample
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct nvmlSample_st {
+ ///!< CPU Timestamp in microseconds
+ pub timeStamp: ::core::ffi::c_ulonglong,
+ ///!< Sample Value
+ pub sampleValue: nvmlValue_t,
+}
+/// Information for Sample
+pub type nvmlSample_t = nvmlSample_st;
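An illustrative aside, not part of the generated bindings: nvmlValue_t is only meaningful together with the nvmlValueType_t tag that NVML reports alongside it (for instance the sampleValType field of the sampling queries). A minimal sketch of dispatching on that tag, assuming the bindings above are in scope and using a hypothetical helper name, might look like:

// Illustrative only; assumes the generated nvml bindings are in scope.
fn sample_as_f64(ty: nvmlValueType_t, v: nvmlValue_t) -> Option<f64> {
    // Safety: `ty` must be the value type NVML reported for `v`.
    unsafe {
        Some(match ty {
            t if t == nvmlValueType_t::NVML_VALUE_TYPE_DOUBLE => v.dVal,
            t if t == nvmlValueType_t::NVML_VALUE_TYPE_UNSIGNED_INT => v.uiVal as f64,
            t if t == nvmlValueType_t::NVML_VALUE_TYPE_UNSIGNED_LONG => v.ulVal as f64,
            t if t == nvmlValueType_t::NVML_VALUE_TYPE_UNSIGNED_LONG_LONG => v.ullVal as f64,
            t if t == nvmlValueType_t::NVML_VALUE_TYPE_SIGNED_LONG_LONG => v.sllVal as f64,
            t if t == nvmlValueType_t::NVML_VALUE_TYPE_SIGNED_INT => v.siVal as f64,
            _ => return None,
        })
    }
}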
+impl nvmlPerfPolicyType_enum {
+ ///!< How long did power violations cause the GPU to be below application clocks
+ pub const NVML_PERF_POLICY_POWER: nvmlPerfPolicyType_enum = nvmlPerfPolicyType_enum(
+ 0,
+ );
+}
+impl nvmlPerfPolicyType_enum {
+ ///!< How long did thermal violations cause the GPU to be below application clocks
+ pub const NVML_PERF_POLICY_THERMAL: nvmlPerfPolicyType_enum = nvmlPerfPolicyType_enum(
+ 1,
+ );
+}
+impl nvmlPerfPolicyType_enum {
+ ///!< How long did sync boost cause the GPU to be below application clocks
+ pub const NVML_PERF_POLICY_SYNC_BOOST: nvmlPerfPolicyType_enum = nvmlPerfPolicyType_enum(
+ 2,
+ );
+}
+impl nvmlPerfPolicyType_enum {
+ ///!< How long did the board limit cause the GPU to be below application clocks
+ pub const NVML_PERF_POLICY_BOARD_LIMIT: nvmlPerfPolicyType_enum = nvmlPerfPolicyType_enum(
+ 3,
+ );
+}
+impl nvmlPerfPolicyType_enum {
+ ///!< How long did low utilization cause the GPU to be below application clocks
+ pub const NVML_PERF_POLICY_LOW_UTILIZATION: nvmlPerfPolicyType_enum = nvmlPerfPolicyType_enum(
+ 4,
+ );
+}
+impl nvmlPerfPolicyType_enum {
+ ///!< How long did the board reliability limit cause the GPU to be below application clocks
+ pub const NVML_PERF_POLICY_RELIABILITY: nvmlPerfPolicyType_enum = nvmlPerfPolicyType_enum(
+ 5,
+ );
+}
+impl nvmlPerfPolicyType_enum {
+ ///!< Total time the GPU was held below application clocks by any limiter (0 - 5 above)
+ pub const NVML_PERF_POLICY_TOTAL_APP_CLOCKS: nvmlPerfPolicyType_enum = nvmlPerfPolicyType_enum(
+ 10,
+ );
+}
+impl nvmlPerfPolicyType_enum {
+ ///!< Total time the GPU was held below base clocks
+ pub const NVML_PERF_POLICY_TOTAL_BASE_CLOCKS: nvmlPerfPolicyType_enum = nvmlPerfPolicyType_enum(
+ 11,
+ );
+}
+impl nvmlPerfPolicyType_enum {
+ pub const NVML_PERF_POLICY_COUNT: nvmlPerfPolicyType_enum = nvmlPerfPolicyType_enum(
+ 12,
+ );
+}
+#[repr(transparent)]
+/// Represents type of perf policy for which violation times can be queried
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlPerfPolicyType_enum(pub ::core::ffi::c_uint);
+/// Represents type of perf policy for which violation times can be queried
+pub use self::nvmlPerfPolicyType_enum as nvmlPerfPolicyType_t;
+/// Struct to hold perf policy violation status data
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlViolationTime_st {
+ ///!< referenceTime represents CPU timestamp in microseconds
+ pub referenceTime: ::core::ffi::c_ulonglong,
+ ///!< violationTime in Nanoseconds
+ pub violationTime: ::core::ffi::c_ulonglong,
+}
+/// Struct to hold perf policy violation status data
+pub type nvmlViolationTime_t = nvmlViolationTime_st;
+impl nvmlThermalTarget_t {
+ pub const NVML_THERMAL_TARGET_NONE: nvmlThermalTarget_t = nvmlThermalTarget_t(0);
+}
+impl nvmlThermalTarget_t {
+ ///!< GPU core temperature requires NvPhysicalGpuHandle
+ pub const NVML_THERMAL_TARGET_GPU: nvmlThermalTarget_t = nvmlThermalTarget_t(1);
+}
+impl nvmlThermalTarget_t {
+ ///!< GPU memory temperature requires NvPhysicalGpuHandle
+ pub const NVML_THERMAL_TARGET_MEMORY: nvmlThermalTarget_t = nvmlThermalTarget_t(2);
+}
+impl nvmlThermalTarget_t {
+ ///!< GPU power supply temperature requires NvPhysicalGpuHandle
+ pub const NVML_THERMAL_TARGET_POWER_SUPPLY: nvmlThermalTarget_t = nvmlThermalTarget_t(
+ 4,
+ );
+}
+impl nvmlThermalTarget_t {
+ ///!< GPU board ambient temperature requires NvPhysicalGpuHandle
+ pub const NVML_THERMAL_TARGET_BOARD: nvmlThermalTarget_t = nvmlThermalTarget_t(8);
+}
+impl nvmlThermalTarget_t {
+ ///!< Visual Computing Device Board temperature requires NvVisualComputingDeviceHandle
+ pub const NVML_THERMAL_TARGET_VCD_BOARD: nvmlThermalTarget_t = nvmlThermalTarget_t(
+ 9,
+ );
+}
+impl nvmlThermalTarget_t {
+ ///!< Visual Computing Device Inlet temperature requires NvVisualComputingDeviceHandle
+ pub const NVML_THERMAL_TARGET_VCD_INLET: nvmlThermalTarget_t = nvmlThermalTarget_t(
+ 10,
+ );
+}
+impl nvmlThermalTarget_t {
+ ///!< Visual Computing Device Outlet temperature requires NvVisualComputingDeviceHandle
+ pub const NVML_THERMAL_TARGET_VCD_OUTLET: nvmlThermalTarget_t = nvmlThermalTarget_t(
+ 11,
+ );
+}
+impl nvmlThermalTarget_t {
+ pub const NVML_THERMAL_TARGET_ALL: nvmlThermalTarget_t = nvmlThermalTarget_t(15);
+}
+impl nvmlThermalTarget_t {
+ pub const NVML_THERMAL_TARGET_UNKNOWN: nvmlThermalTarget_t = nvmlThermalTarget_t(-1);
+}
+#[repr(transparent)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlThermalTarget_t(pub ::core::ffi::c_int);
+impl nvmlThermalController_t {
+ pub const NVML_THERMAL_CONTROLLER_NONE: nvmlThermalController_t = nvmlThermalController_t(
+ 0,
+ );
+}
+impl nvmlThermalController_t {
+ pub const NVML_THERMAL_CONTROLLER_GPU_INTERNAL: nvmlThermalController_t = nvmlThermalController_t(
+ 1,
+ );
+}
+impl nvmlThermalController_t {
+ pub const NVML_THERMAL_CONTROLLER_ADM1032: nvmlThermalController_t = nvmlThermalController_t(
+ 2,
+ );
+}
+impl nvmlThermalController_t {
+ pub const NVML_THERMAL_CONTROLLER_ADT7461: nvmlThermalController_t = nvmlThermalController_t(
+ 3,
+ );
+}
+impl nvmlThermalController_t {
+ pub const NVML_THERMAL_CONTROLLER_MAX6649: nvmlThermalController_t = nvmlThermalController_t(
+ 4,
+ );
+}
+impl nvmlThermalController_t {
+ pub const NVML_THERMAL_CONTROLLER_MAX1617: nvmlThermalController_t = nvmlThermalController_t(
+ 5,
+ );
+}
+impl nvmlThermalController_t {
+ pub const NVML_THERMAL_CONTROLLER_LM99: nvmlThermalController_t = nvmlThermalController_t(
+ 6,
+ );
+}
+impl nvmlThermalController_t {
+ pub const NVML_THERMAL_CONTROLLER_LM89: nvmlThermalController_t = nvmlThermalController_t(
+ 7,
+ );
+}
+impl nvmlThermalController_t {
+ pub const NVML_THERMAL_CONTROLLER_LM64: nvmlThermalController_t = nvmlThermalController_t(
+ 8,
+ );
+}
+impl nvmlThermalController_t {
+ pub const NVML_THERMAL_CONTROLLER_G781: nvmlThermalController_t = nvmlThermalController_t(
+ 9,
+ );
+}
+impl nvmlThermalController_t {
+ pub const NVML_THERMAL_CONTROLLER_ADT7473: nvmlThermalController_t = nvmlThermalController_t(
+ 10,
+ );
+}
+impl nvmlThermalController_t {
+ pub const NVML_THERMAL_CONTROLLER_SBMAX6649: nvmlThermalController_t = nvmlThermalController_t(
+ 11,
+ );
+}
+impl nvmlThermalController_t {
+ pub const NVML_THERMAL_CONTROLLER_VBIOSEVT: nvmlThermalController_t = nvmlThermalController_t(
+ 12,
+ );
+}
+impl nvmlThermalController_t {
+ pub const NVML_THERMAL_CONTROLLER_OS: nvmlThermalController_t = nvmlThermalController_t(
+ 13,
+ );
+}
+impl nvmlThermalController_t {
+ pub const NVML_THERMAL_CONTROLLER_NVSYSCON_CANOAS: nvmlThermalController_t = nvmlThermalController_t(
+ 14,
+ );
+}
+impl nvmlThermalController_t {
+ pub const NVML_THERMAL_CONTROLLER_NVSYSCON_E551: nvmlThermalController_t = nvmlThermalController_t(
+ 15,
+ );
+}
+impl nvmlThermalController_t {
+ pub const NVML_THERMAL_CONTROLLER_MAX6649R: nvmlThermalController_t = nvmlThermalController_t(
+ 16,
+ );
+}
+impl nvmlThermalController_t {
+ pub const NVML_THERMAL_CONTROLLER_ADT7473S: nvmlThermalController_t = nvmlThermalController_t(
+ 17,
+ );
+}
+impl nvmlThermalController_t {
+ pub const NVML_THERMAL_CONTROLLER_UNKNOWN: nvmlThermalController_t = nvmlThermalController_t(
+ -1,
+ );
+}
+#[repr(transparent)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlThermalController_t(pub ::core::ffi::c_int);
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGpuThermalSettings_t {
+ pub count: ::core::ffi::c_uint,
+ pub sensor: [nvmlGpuThermalSettings_t__bindgen_ty_1; 3usize],
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGpuThermalSettings_t__bindgen_ty_1 {
+ pub controller: nvmlThermalController_t,
+ pub defaultMinTemp: ::core::ffi::c_int,
+ pub defaultMaxTemp: ::core::ffi::c_int,
+ pub currentTemp: ::core::ffi::c_int,
+ pub target: nvmlThermalTarget_t,
+}
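Editorial sketch, not part of the bindings: only the first count entries of the fixed-size sensor array carry data, so a caller would normally slice the array down before inspecting it. The helper name below is hypothetical.

// Illustrative only; assumes the generated nvml bindings are in scope.
fn active_sensors(
    settings: &nvmlGpuThermalSettings_t,
) -> &[nvmlGpuThermalSettings_t__bindgen_ty_1] {
    // Only the first `count` entries of the fixed-size array are populated.
    let n = (settings.count as usize).min(settings.sensor.len());
    &settings.sensor[..n]
}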
+impl nvmlEnableState_enum {
+ ///!< Feature disabled
+ pub const NVML_FEATURE_DISABLED: nvmlEnableState_enum = nvmlEnableState_enum(0);
+}
+impl nvmlEnableState_enum {
+ ///!< Feature enabled
+ pub const NVML_FEATURE_ENABLED: nvmlEnableState_enum = nvmlEnableState_enum(1);
+}
+#[repr(transparent)]
+/// Generic enable/disable enum.
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlEnableState_enum(pub ::core::ffi::c_uint);
+/// Generic enable/disable enum.
+pub use self::nvmlEnableState_enum as nvmlEnableState_t;
+impl nvmlBrandType_enum {
+ pub const NVML_BRAND_UNKNOWN: nvmlBrandType_enum = nvmlBrandType_enum(0);
+}
+impl nvmlBrandType_enum {
+ pub const NVML_BRAND_QUADRO: nvmlBrandType_enum = nvmlBrandType_enum(1);
+}
+impl nvmlBrandType_enum {
+ pub const NVML_BRAND_TESLA: nvmlBrandType_enum = nvmlBrandType_enum(2);
+}
+impl nvmlBrandType_enum {
+ pub const NVML_BRAND_NVS: nvmlBrandType_enum = nvmlBrandType_enum(3);
+}
+impl nvmlBrandType_enum {
+ pub const NVML_BRAND_GRID: nvmlBrandType_enum = nvmlBrandType_enum(4);
+}
+impl nvmlBrandType_enum {
+ pub const NVML_BRAND_GEFORCE: nvmlBrandType_enum = nvmlBrandType_enum(5);
+}
+impl nvmlBrandType_enum {
+ pub const NVML_BRAND_TITAN: nvmlBrandType_enum = nvmlBrandType_enum(6);
+}
+impl nvmlBrandType_enum {
+ pub const NVML_BRAND_NVIDIA_VAPPS: nvmlBrandType_enum = nvmlBrandType_enum(7);
+}
+impl nvmlBrandType_enum {
+ pub const NVML_BRAND_NVIDIA_VPC: nvmlBrandType_enum = nvmlBrandType_enum(8);
+}
+impl nvmlBrandType_enum {
+ pub const NVML_BRAND_NVIDIA_VCS: nvmlBrandType_enum = nvmlBrandType_enum(9);
+}
+impl nvmlBrandType_enum {
+ pub const NVML_BRAND_NVIDIA_VWS: nvmlBrandType_enum = nvmlBrandType_enum(10);
+}
+impl nvmlBrandType_enum {
+ pub const NVML_BRAND_NVIDIA_CLOUD_GAMING: nvmlBrandType_enum = nvmlBrandType_enum(
+ 11,
+ );
+}
+impl nvmlBrandType_enum {
+ pub const NVML_BRAND_NVIDIA_VGAMING: nvmlBrandType_enum = nvmlBrandType_enum(11);
+}
+impl nvmlBrandType_enum {
+ pub const NVML_BRAND_QUADRO_RTX: nvmlBrandType_enum = nvmlBrandType_enum(12);
+}
+impl nvmlBrandType_enum {
+ pub const NVML_BRAND_NVIDIA_RTX: nvmlBrandType_enum = nvmlBrandType_enum(13);
+}
+impl nvmlBrandType_enum {
+ pub const NVML_BRAND_NVIDIA: nvmlBrandType_enum = nvmlBrandType_enum(14);
+}
+impl nvmlBrandType_enum {
+ pub const NVML_BRAND_GEFORCE_RTX: nvmlBrandType_enum = nvmlBrandType_enum(15);
+}
+impl nvmlBrandType_enum {
+ pub const NVML_BRAND_TITAN_RTX: nvmlBrandType_enum = nvmlBrandType_enum(16);
+}
+impl nvmlBrandType_enum {
+ pub const NVML_BRAND_COUNT: nvmlBrandType_enum = nvmlBrandType_enum(17);
+}
+#[repr(transparent)]
+/// The Brand of the GPU
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlBrandType_enum(pub ::core::ffi::c_uint);
+/// The Brand of the GPU
+pub use self::nvmlBrandType_enum as nvmlBrandType_t;
+impl nvmlTemperatureThresholds_enum {
+ pub const NVML_TEMPERATURE_THRESHOLD_SHUTDOWN: nvmlTemperatureThresholds_enum = nvmlTemperatureThresholds_enum(
+ 0,
+ );
+}
+impl nvmlTemperatureThresholds_enum {
+ pub const NVML_TEMPERATURE_THRESHOLD_SLOWDOWN: nvmlTemperatureThresholds_enum = nvmlTemperatureThresholds_enum(
+ 1,
+ );
+}
+impl nvmlTemperatureThresholds_enum {
+ pub const NVML_TEMPERATURE_THRESHOLD_MEM_MAX: nvmlTemperatureThresholds_enum = nvmlTemperatureThresholds_enum(
+ 2,
+ );
+}
+impl nvmlTemperatureThresholds_enum {
+ pub const NVML_TEMPERATURE_THRESHOLD_GPU_MAX: nvmlTemperatureThresholds_enum = nvmlTemperatureThresholds_enum(
+ 3,
+ );
+}
+impl nvmlTemperatureThresholds_enum {
+ pub const NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MIN: nvmlTemperatureThresholds_enum = nvmlTemperatureThresholds_enum(
+ 4,
+ );
+}
+impl nvmlTemperatureThresholds_enum {
+ pub const NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_CURR: nvmlTemperatureThresholds_enum = nvmlTemperatureThresholds_enum(
+ 5,
+ );
+}
+impl nvmlTemperatureThresholds_enum {
+ pub const NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MAX: nvmlTemperatureThresholds_enum = nvmlTemperatureThresholds_enum(
+ 6,
+ );
+}
+impl nvmlTemperatureThresholds_enum {
+ pub const NVML_TEMPERATURE_THRESHOLD_COUNT: nvmlTemperatureThresholds_enum = nvmlTemperatureThresholds_enum(
+ 7,
+ );
+}
+#[repr(transparent)]
+/// Temperature thresholds.
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlTemperatureThresholds_enum(pub ::core::ffi::c_uint);
+/// Temperature thresholds.
+pub use self::nvmlTemperatureThresholds_enum as nvmlTemperatureThresholds_t;
+impl nvmlTemperatureSensors_enum {
+ ///!< Temperature sensor for the GPU die
+ pub const NVML_TEMPERATURE_GPU: nvmlTemperatureSensors_enum = nvmlTemperatureSensors_enum(
+ 0,
+ );
+}
+impl nvmlTemperatureSensors_enum {
+ pub const NVML_TEMPERATURE_COUNT: nvmlTemperatureSensors_enum = nvmlTemperatureSensors_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Temperature sensors.
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlTemperatureSensors_enum(pub ::core::ffi::c_uint);
+/// Temperature sensors.
+pub use self::nvmlTemperatureSensors_enum as nvmlTemperatureSensors_t;
+impl nvmlComputeMode_enum {
+ ///!< Default compute mode -- multiple contexts per device
+ pub const NVML_COMPUTEMODE_DEFAULT: nvmlComputeMode_enum = nvmlComputeMode_enum(0);
+}
+impl nvmlComputeMode_enum {
+ ///!< Support Removed
+ pub const NVML_COMPUTEMODE_EXCLUSIVE_THREAD: nvmlComputeMode_enum = nvmlComputeMode_enum(
+ 1,
+ );
+}
+impl nvmlComputeMode_enum {
+ ///!< Compute-prohibited mode -- no contexts per device
+ pub const NVML_COMPUTEMODE_PROHIBITED: nvmlComputeMode_enum = nvmlComputeMode_enum(
+ 2,
+ );
+}
+impl nvmlComputeMode_enum {
+ ///!< Compute-exclusive-process mode -- only one context per device, usable from multiple threads at a time
+ pub const NVML_COMPUTEMODE_EXCLUSIVE_PROCESS: nvmlComputeMode_enum = nvmlComputeMode_enum(
+ 3,
+ );
+}
+impl nvmlComputeMode_enum {
+ pub const NVML_COMPUTEMODE_COUNT: nvmlComputeMode_enum = nvmlComputeMode_enum(4);
+}
+#[repr(transparent)]
+/** Compute mode.
+
+ NVML_COMPUTEMODE_EXCLUSIVE_PROCESS was added in CUDA 4.0.
+ Earlier CUDA versions supported a single exclusive mode,
+ which is equivalent to NVML_COMPUTEMODE_EXCLUSIVE_THREAD in CUDA 4.0 and beyond.*/
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlComputeMode_enum(pub ::core::ffi::c_uint);
+/** Compute mode.
+
+ NVML_COMPUTEMODE_EXCLUSIVE_PROCESS was added in CUDA 4.0.
+ Earlier CUDA versions supported a single exclusive mode,
+ which is equivalent to NVML_COMPUTEMODE_EXCLUSIVE_THREAD in CUDA 4.0 and beyond.*/
+pub use self::nvmlComputeMode_enum as nvmlComputeMode_t;
+/// Clock Monitor error types
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlClkMonFaultInfo_struct {
+ /// The Domain which faulted
+ pub clkApiDomain: ::core::ffi::c_uint,
+ /// Faults Information
+ pub clkDomainFaultMask: ::core::ffi::c_uint,
+}
+/// Clock Monitor error types
+pub type nvmlClkMonFaultInfo_t = nvmlClkMonFaultInfo_struct;
+/// Clock Monitor Status
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlClkMonStatus_status {
+ /// Fault status Indicator
+ pub bGlobalStatus: ::core::ffi::c_uint,
+ /// Total faulted domain numbers
+ pub clkMonListSize: ::core::ffi::c_uint,
+ /// The fault Information structure
+ pub clkMonList: [nvmlClkMonFaultInfo_t; 32usize],
+}
+/// Clock Monitor Status
+pub type nvmlClkMonStatus_t = nvmlClkMonStatus_status;
+impl nvmlMemoryErrorType_enum {
+ /** A memory error that was corrected
+
+ For ECC errors, these are single bit errors
+ For Texture memory, these are errors fixed by resend*/
+ pub const NVML_MEMORY_ERROR_TYPE_CORRECTED: nvmlMemoryErrorType_enum = nvmlMemoryErrorType_enum(
+ 0,
+ );
+}
+impl nvmlMemoryErrorType_enum {
+ /** A memory error that was not corrected
+
+ For ECC errors, these are double bit errors
+ For Texture memory, these are errors where the resend fails*/
+ pub const NVML_MEMORY_ERROR_TYPE_UNCORRECTED: nvmlMemoryErrorType_enum = nvmlMemoryErrorType_enum(
+ 1,
+ );
+}
+impl nvmlMemoryErrorType_enum {
+ ///!< Count of memory error types
+ pub const NVML_MEMORY_ERROR_TYPE_COUNT: nvmlMemoryErrorType_enum = nvmlMemoryErrorType_enum(
+ 2,
+ );
+}
+#[repr(transparent)]
+/// Memory error types
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlMemoryErrorType_enum(pub ::core::ffi::c_uint);
+/// Memory error types
+pub use self::nvmlMemoryErrorType_enum as nvmlMemoryErrorType_t;
+impl nvmlEccCounterType_enum {
+ ///!< Volatile counts are reset each time the driver loads.
+ pub const NVML_VOLATILE_ECC: nvmlEccCounterType_enum = nvmlEccCounterType_enum(0);
+}
+impl nvmlEccCounterType_enum {
+ ///!< Aggregate counts persist across reboots (i.e. for the lifetime of the device)
+ pub const NVML_AGGREGATE_ECC: nvmlEccCounterType_enum = nvmlEccCounterType_enum(1);
+}
+impl nvmlEccCounterType_enum {
+ ///!< Count of memory counter types
+ pub const NVML_ECC_COUNTER_TYPE_COUNT: nvmlEccCounterType_enum = nvmlEccCounterType_enum(
+ 2,
+ );
+}
+#[repr(transparent)]
+/** ECC counter types.
+
+ Note: Volatile counts are reset each time the driver loads. On Windows this is once per boot. On Linux this can be more frequent.
+ On Linux the driver unloads when no active clients exist. If persistence mode is enabled or there is always a driver
+ client active (e.g. X11), then Linux also sees per-boot behavior. If not, volatile counts are reset each time a compute app
+ is run.*/
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlEccCounterType_enum(pub ::core::ffi::c_uint);
+/** ECC counter types.
+
+ Note: Volatile counts are reset each time the driver loads. On Windows this is once per boot. On Linux this can be more frequent.
+ On Linux the driver unloads when no active clients exist. If persistence mode is enabled or there is always a driver
+ client active (e.g. X11), then Linux also sees per-boot behavior. If not, volatile counts are reset each time a compute app
+ is run.*/
+pub use self::nvmlEccCounterType_enum as nvmlEccCounterType_t;
+impl nvmlClockType_enum {
+ ///!< Graphics clock domain
+ pub const NVML_CLOCK_GRAPHICS: nvmlClockType_enum = nvmlClockType_enum(0);
+}
+impl nvmlClockType_enum {
+ ///!< SM clock domain
+ pub const NVML_CLOCK_SM: nvmlClockType_enum = nvmlClockType_enum(1);
+}
+impl nvmlClockType_enum {
+ ///!< Memory clock domain
+ pub const NVML_CLOCK_MEM: nvmlClockType_enum = nvmlClockType_enum(2);
+}
+impl nvmlClockType_enum {
+ ///!< Video encoder/decoder clock domain
+ pub const NVML_CLOCK_VIDEO: nvmlClockType_enum = nvmlClockType_enum(3);
+}
+impl nvmlClockType_enum {
+ ///!< Count of clock types
+ pub const NVML_CLOCK_COUNT: nvmlClockType_enum = nvmlClockType_enum(4);
+}
+#[repr(transparent)]
+/** Clock types.
+
+ All speeds are in Mhz.*/
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlClockType_enum(pub ::core::ffi::c_uint);
+/** Clock types.
+
+ All speeds are in Mhz.*/
+pub use self::nvmlClockType_enum as nvmlClockType_t;
+impl nvmlClockId_enum {
+ ///!< Current actual clock value
+ pub const NVML_CLOCK_ID_CURRENT: nvmlClockId_enum = nvmlClockId_enum(0);
+}
+impl nvmlClockId_enum {
+ ///!< Target application clock
+ pub const NVML_CLOCK_ID_APP_CLOCK_TARGET: nvmlClockId_enum = nvmlClockId_enum(1);
+}
+impl nvmlClockId_enum {
+ ///!< Default application clock target
+ pub const NVML_CLOCK_ID_APP_CLOCK_DEFAULT: nvmlClockId_enum = nvmlClockId_enum(2);
+}
+impl nvmlClockId_enum {
+ ///!< OEM-defined maximum clock rate
+ pub const NVML_CLOCK_ID_CUSTOMER_BOOST_MAX: nvmlClockId_enum = nvmlClockId_enum(3);
+}
+impl nvmlClockId_enum {
+ ///!< Count of Clock Ids.
+ pub const NVML_CLOCK_ID_COUNT: nvmlClockId_enum = nvmlClockId_enum(4);
+}
+#[repr(transparent)]
+/** Clock Ids. These are used in combination with nvmlClockType_t
+ to specify a single clock value.*/
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlClockId_enum(pub ::core::ffi::c_uint);
+/** Clock Ids. These are used in combination with nvmlClockType_t
+ to specify a single clock value.*/
+pub use self::nvmlClockId_enum as nvmlClockId_t;
+impl nvmlDriverModel_enum {
+ ///!< WDDM driver model -- GPU treated as a display device
+ pub const NVML_DRIVER_WDDM: nvmlDriverModel_enum = nvmlDriverModel_enum(0);
+}
+impl nvmlDriverModel_enum {
+ ///!< WDM (TCC) model (recommended) -- GPU treated as a generic device
+ pub const NVML_DRIVER_WDM: nvmlDriverModel_enum = nvmlDriverModel_enum(1);
+}
+#[repr(transparent)]
+/** Driver models.
+
+ Windows only.*/
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlDriverModel_enum(pub ::core::ffi::c_uint);
+/** Driver models.
+
+ Windows only.*/
+pub use self::nvmlDriverModel_enum as nvmlDriverModel_t;
+impl nvmlPStates_enum {
+ ///!< Performance state 0 -- Maximum Performance
+ pub const NVML_PSTATE_0: nvmlPStates_enum = nvmlPStates_enum(0);
+}
+impl nvmlPStates_enum {
+ ///!< Performance state 1
+ pub const NVML_PSTATE_1: nvmlPStates_enum = nvmlPStates_enum(1);
+}
+impl nvmlPStates_enum {
+ ///!< Performance state 2
+ pub const NVML_PSTATE_2: nvmlPStates_enum = nvmlPStates_enum(2);
+}
+impl nvmlPStates_enum {
+ ///!< Performance state 3
+ pub const NVML_PSTATE_3: nvmlPStates_enum = nvmlPStates_enum(3);
+}
+impl nvmlPStates_enum {
+ ///!< Performance state 4
+ pub const NVML_PSTATE_4: nvmlPStates_enum = nvmlPStates_enum(4);
+}
+impl nvmlPStates_enum {
+ ///!< Performance state 5
+ pub const NVML_PSTATE_5: nvmlPStates_enum = nvmlPStates_enum(5);
+}
+impl nvmlPStates_enum {
+ ///!< Performance state 6
+ pub const NVML_PSTATE_6: nvmlPStates_enum = nvmlPStates_enum(6);
+}
+impl nvmlPStates_enum {
+ ///!< Performance state 7
+ pub const NVML_PSTATE_7: nvmlPStates_enum = nvmlPStates_enum(7);
+}
+impl nvmlPStates_enum {
+ ///!< Performance state 8
+ pub const NVML_PSTATE_8: nvmlPStates_enum = nvmlPStates_enum(8);
+}
+impl nvmlPStates_enum {
+ ///!< Performance state 9
+ pub const NVML_PSTATE_9: nvmlPStates_enum = nvmlPStates_enum(9);
+}
+impl nvmlPStates_enum {
+ ///!< Performance state 10
+ pub const NVML_PSTATE_10: nvmlPStates_enum = nvmlPStates_enum(10);
+}
+impl nvmlPStates_enum {
+ ///!< Performance state 11
+ pub const NVML_PSTATE_11: nvmlPStates_enum = nvmlPStates_enum(11);
+}
+impl nvmlPStates_enum {
+ ///!< Performance state 12
+ pub const NVML_PSTATE_12: nvmlPStates_enum = nvmlPStates_enum(12);
+}
+impl nvmlPStates_enum {
+ ///!< Performance state 13
+ pub const NVML_PSTATE_13: nvmlPStates_enum = nvmlPStates_enum(13);
+}
+impl nvmlPStates_enum {
+ ///!< Performance state 14
+ pub const NVML_PSTATE_14: nvmlPStates_enum = nvmlPStates_enum(14);
+}
+impl nvmlPStates_enum {
+ ///!< Performance state 15 -- Minimum Performance
+ pub const NVML_PSTATE_15: nvmlPStates_enum = nvmlPStates_enum(15);
+}
+impl nvmlPStates_enum {
+ ///!< Unknown performance state
+ pub const NVML_PSTATE_UNKNOWN: nvmlPStates_enum = nvmlPStates_enum(32);
+}
+#[repr(transparent)]
+/// Allowed PStates.
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlPStates_enum(pub ::core::ffi::c_uint);
+/// Allowed PStates.
+pub use self::nvmlPStates_enum as nvmlPstates_t;
+impl nvmlGom_enum {
+ ///!< Everything is enabled and running at full speed
+ pub const NVML_GOM_ALL_ON: nvmlGom_enum = nvmlGom_enum(0);
+}
+impl nvmlGom_enum {
+ /**!< Designed for running only compute tasks. Graphics operations
+!< are not allowed*/
+ pub const NVML_GOM_COMPUTE: nvmlGom_enum = nvmlGom_enum(1);
+}
+impl nvmlGom_enum {
+ /**!< Designed for running graphics applications that don't require
+!< high bandwidth double precision*/
+ pub const NVML_GOM_LOW_DP: nvmlGom_enum = nvmlGom_enum(2);
+}
+#[repr(transparent)]
+/** GPU Operation Mode
+
+ GOM makes it possible to reduce power usage and optimize GPU throughput by disabling GPU features.
+
+ Each GOM is designed to meet specific user needs.*/
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGom_enum(pub ::core::ffi::c_uint);
+/** GPU Operation Mode
+
+ GOM makes it possible to reduce power usage and optimize GPU throughput by disabling GPU features.
+
+ Each GOM is designed to meet specific user needs.*/
+pub use self::nvmlGom_enum as nvmlGpuOperationMode_t;
+impl nvmlInforomObject_enum {
+ ///!< An object defined by OEM
+ pub const NVML_INFOROM_OEM: nvmlInforomObject_enum = nvmlInforomObject_enum(0);
+}
+impl nvmlInforomObject_enum {
+ ///!< The ECC object determining the level of ECC support
+ pub const NVML_INFOROM_ECC: nvmlInforomObject_enum = nvmlInforomObject_enum(1);
+}
+impl nvmlInforomObject_enum {
+ ///!< The power management object
+ pub const NVML_INFOROM_POWER: nvmlInforomObject_enum = nvmlInforomObject_enum(2);
+}
+impl nvmlInforomObject_enum {
+ ///!< This counts the number of infoROM objects the driver knows about
+ pub const NVML_INFOROM_COUNT: nvmlInforomObject_enum = nvmlInforomObject_enum(3);
+}
+#[repr(transparent)]
+/// Available infoROM objects.
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlInforomObject_enum(pub ::core::ffi::c_uint);
+/// Available infoROM objects.
+pub use self::nvmlInforomObject_enum as nvmlInforomObject_t;
+/// Return values for NVML API calls.
+pub type nvmlReturn_enum = ::core::ffi::c_uint;
+impl nvmlMemoryLocation_enum {
+ ///!< GPU L1 Cache
+ pub const NVML_MEMORY_LOCATION_L1_CACHE: nvmlMemoryLocation_enum = nvmlMemoryLocation_enum(
+ 0,
+ );
+}
+impl nvmlMemoryLocation_enum {
+ ///!< GPU L2 Cache
+ pub const NVML_MEMORY_LOCATION_L2_CACHE: nvmlMemoryLocation_enum = nvmlMemoryLocation_enum(
+ 1,
+ );
+}
+impl nvmlMemoryLocation_enum {
+ ///!< Turing+ DRAM
+ pub const NVML_MEMORY_LOCATION_DRAM: nvmlMemoryLocation_enum = nvmlMemoryLocation_enum(
+ 2,
+ );
+}
+impl nvmlMemoryLocation_enum {
+ ///!< GPU Device Memory
+ pub const NVML_MEMORY_LOCATION_DEVICE_MEMORY: nvmlMemoryLocation_enum = nvmlMemoryLocation_enum(
+ 2,
+ );
+}
+impl nvmlMemoryLocation_enum {
+ ///!< GPU Register File
+ pub const NVML_MEMORY_LOCATION_REGISTER_FILE: nvmlMemoryLocation_enum = nvmlMemoryLocation_enum(
+ 3,
+ );
+}
+impl nvmlMemoryLocation_enum {
+ ///!< GPU Texture Memory
+ pub const NVML_MEMORY_LOCATION_TEXTURE_MEMORY: nvmlMemoryLocation_enum = nvmlMemoryLocation_enum(
+ 4,
+ );
+}
+impl nvmlMemoryLocation_enum {
+ ///!< Shared memory
+ pub const NVML_MEMORY_LOCATION_TEXTURE_SHM: nvmlMemoryLocation_enum = nvmlMemoryLocation_enum(
+ 5,
+ );
+}
+impl nvmlMemoryLocation_enum {
+ ///!< CBU
+ pub const NVML_MEMORY_LOCATION_CBU: nvmlMemoryLocation_enum = nvmlMemoryLocation_enum(
+ 6,
+ );
+}
+impl nvmlMemoryLocation_enum {
+ ///!< Turing+ SRAM
+ pub const NVML_MEMORY_LOCATION_SRAM: nvmlMemoryLocation_enum = nvmlMemoryLocation_enum(
+ 7,
+ );
+}
+impl nvmlMemoryLocation_enum {
+ ///!< This counts the number of memory locations the driver knows about
+ pub const NVML_MEMORY_LOCATION_COUNT: nvmlMemoryLocation_enum = nvmlMemoryLocation_enum(
+ 8,
+ );
+}
+#[repr(transparent)]
+/// See \ref nvmlDeviceGetMemoryErrorCounter
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlMemoryLocation_enum(pub ::core::ffi::c_uint);
+/// See \ref nvmlDeviceGetMemoryErrorCounter
+pub use self::nvmlMemoryLocation_enum as nvmlMemoryLocation_t;
+impl nvmlPageRetirementCause_enum {
+ ///!< Page was retired due to multiple single bit ECC error
+ pub const NVML_PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS: nvmlPageRetirementCause_enum = nvmlPageRetirementCause_enum(
+ 0,
+ );
+}
+impl nvmlPageRetirementCause_enum {
+ ///!< Page was retired due to double bit ECC error
+ pub const NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR: nvmlPageRetirementCause_enum = nvmlPageRetirementCause_enum(
+ 1,
+ );
+}
+impl nvmlPageRetirementCause_enum {
+ pub const NVML_PAGE_RETIREMENT_CAUSE_COUNT: nvmlPageRetirementCause_enum = nvmlPageRetirementCause_enum(
+ 2,
+ );
+}
+#[repr(transparent)]
+/// Causes for page retirement
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlPageRetirementCause_enum(pub ::core::ffi::c_uint);
+/// Causes for page retirement
+pub use self::nvmlPageRetirementCause_enum as nvmlPageRetirementCause_t;
+impl nvmlRestrictedAPI_enum {
+ /**!< APIs that change application clocks, see nvmlDeviceSetApplicationsClocks
+!< and see nvmlDeviceResetApplicationsClocks*/
+ pub const NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS: nvmlRestrictedAPI_enum = nvmlRestrictedAPI_enum(
+ 0,
+ );
+}
+impl nvmlRestrictedAPI_enum {
+ /**!< APIs that enable/disable Auto Boosted clocks
+!< see nvmlDeviceSetAutoBoostedClocksEnabled*/
+ pub const NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS: nvmlRestrictedAPI_enum = nvmlRestrictedAPI_enum(
+ 1,
+ );
+}
+impl nvmlRestrictedAPI_enum {
+ pub const NVML_RESTRICTED_API_COUNT: nvmlRestrictedAPI_enum = nvmlRestrictedAPI_enum(
+ 2,
+ );
+}
+#[repr(transparent)]
+/// API types that allow changes to default permission restrictions
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlRestrictedAPI_enum(pub ::core::ffi::c_uint);
+/// API types that allow changes to default permission restrictions
+pub use self::nvmlRestrictedAPI_enum as nvmlRestrictedAPI_t;
+impl nvmlGpuVirtualizationMode {
+ ///!< Represents Bare Metal GPU
+ pub const NVML_GPU_VIRTUALIZATION_MODE_NONE: nvmlGpuVirtualizationMode = nvmlGpuVirtualizationMode(
+ 0,
+ );
+}
+impl nvmlGpuVirtualizationMode {
+ ///!< Device is associated with GPU-Passthrough
+ pub const NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH: nvmlGpuVirtualizationMode = nvmlGpuVirtualizationMode(
+ 1,
+ );
+}
+impl nvmlGpuVirtualizationMode {
+ ///!< Device is associated with vGPU inside virtual machine.
+ pub const NVML_GPU_VIRTUALIZATION_MODE_VGPU: nvmlGpuVirtualizationMode = nvmlGpuVirtualizationMode(
+ 2,
+ );
+}
+impl nvmlGpuVirtualizationMode {
+ ///!< Device is associated with VGX hypervisor in vGPU mode
+ pub const NVML_GPU_VIRTUALIZATION_MODE_HOST_VGPU: nvmlGpuVirtualizationMode = nvmlGpuVirtualizationMode(
+ 3,
+ );
+}
+impl nvmlGpuVirtualizationMode {
+ ///!< Device is associated with VGX hypervisor in vSGA mode
+ pub const NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA: nvmlGpuVirtualizationMode = nvmlGpuVirtualizationMode(
+ 4,
+ );
+}
+#[repr(transparent)]
+/// GPU virtualization mode types.
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGpuVirtualizationMode(pub ::core::ffi::c_uint);
+/// GPU virtualization mode types.
+pub use self::nvmlGpuVirtualizationMode as nvmlGpuVirtualizationMode_t;
+impl nvmlHostVgpuMode_enum {
+ ///!< Non SR-IOV mode
+ pub const NVML_HOST_VGPU_MODE_NON_SRIOV: nvmlHostVgpuMode_enum = nvmlHostVgpuMode_enum(
+ 0,
+ );
+}
+impl nvmlHostVgpuMode_enum {
+ ///!< SR-IOV mode
+ pub const NVML_HOST_VGPU_MODE_SRIOV: nvmlHostVgpuMode_enum = nvmlHostVgpuMode_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Host vGPU modes
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlHostVgpuMode_enum(pub ::core::ffi::c_uint);
+/// Host vGPU modes
+pub use self::nvmlHostVgpuMode_enum as nvmlHostVgpuMode_t;
+impl nvmlVgpuVmIdType {
+ ///!< VM ID represents DOMAIN ID
+ pub const NVML_VGPU_VM_ID_DOMAIN_ID: nvmlVgpuVmIdType = nvmlVgpuVmIdType(0);
+}
+impl nvmlVgpuVmIdType {
+ ///!< VM ID represents UUID
+ pub const NVML_VGPU_VM_ID_UUID: nvmlVgpuVmIdType = nvmlVgpuVmIdType(1);
+}
+#[repr(transparent)]
+/// Types of VM identifiers
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuVmIdType(pub ::core::ffi::c_uint);
+/// Types of VM identifiers
+pub use self::nvmlVgpuVmIdType as nvmlVgpuVmIdType_t;
+impl nvmlVgpuGuestInfoState_enum {
+ ///!< Guest-dependent fields uninitialized
+ pub const NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED: nvmlVgpuGuestInfoState_enum = nvmlVgpuGuestInfoState_enum(
+ 0,
+ );
+}
+impl nvmlVgpuGuestInfoState_enum {
+ ///!< Guest-dependent fields initialized
+ pub const NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED: nvmlVgpuGuestInfoState_enum = nvmlVgpuGuestInfoState_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// vGPU GUEST info state
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuGuestInfoState_enum(pub ::core::ffi::c_uint);
+/// vGPU GUEST info state
+pub use self::nvmlVgpuGuestInfoState_enum as nvmlVgpuGuestInfoState_t;
+impl nvmlGridLicenseFeatureCode_t {
+ ///!< Unknown
+ pub const NVML_GRID_LICENSE_FEATURE_CODE_UNKNOWN: nvmlGridLicenseFeatureCode_t = nvmlGridLicenseFeatureCode_t(
+ 0,
+ );
+}
+impl nvmlGridLicenseFeatureCode_t {
+ ///!< Virtual GPU
+ pub const NVML_GRID_LICENSE_FEATURE_CODE_VGPU: nvmlGridLicenseFeatureCode_t = nvmlGridLicenseFeatureCode_t(
+ 1,
+ );
+}
+impl nvmlGridLicenseFeatureCode_t {
+ ///!< Nvidia RTX
+ pub const NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX: nvmlGridLicenseFeatureCode_t = nvmlGridLicenseFeatureCode_t(
+ 2,
+ );
+}
+impl nvmlGridLicenseFeatureCode_t {
+ ///!< Deprecated, do not use.
+ pub const NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION: nvmlGridLicenseFeatureCode_t = nvmlGridLicenseFeatureCode_t(
+ 2,
+ );
+}
+impl nvmlGridLicenseFeatureCode_t {
+ ///!< Gaming
+ pub const NVML_GRID_LICENSE_FEATURE_CODE_GAMING: nvmlGridLicenseFeatureCode_t = nvmlGridLicenseFeatureCode_t(
+ 3,
+ );
+}
+impl nvmlGridLicenseFeatureCode_t {
+ ///!< Compute
+ pub const NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE: nvmlGridLicenseFeatureCode_t = nvmlGridLicenseFeatureCode_t(
+ 4,
+ );
+}
+#[repr(transparent)]
+/// vGPU software licensable features
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGridLicenseFeatureCode_t(pub ::core::ffi::c_uint);
+impl nvmlVgpuCapability_enum {
+ ///!< P2P over NVLink is supported
+ pub const NVML_VGPU_CAP_NVLINK_P2P: nvmlVgpuCapability_enum = nvmlVgpuCapability_enum(
+ 0,
+ );
+}
+impl nvmlVgpuCapability_enum {
+ ///!< GPUDirect capability is supported
+ pub const NVML_VGPU_CAP_GPUDIRECT: nvmlVgpuCapability_enum = nvmlVgpuCapability_enum(
+ 1,
+ );
+}
+impl nvmlVgpuCapability_enum {
+ ///!< vGPU profile cannot be mixed with other vGPU profiles in same VM
+ pub const NVML_VGPU_CAP_MULTI_VGPU_EXCLUSIVE: nvmlVgpuCapability_enum = nvmlVgpuCapability_enum(
+ 2,
+ );
+}
+impl nvmlVgpuCapability_enum {
+ ///!< vGPU profile cannot run on a GPU alongside other profiles of different type
+ pub const NVML_VGPU_CAP_EXCLUSIVE_TYPE: nvmlVgpuCapability_enum = nvmlVgpuCapability_enum(
+ 3,
+ );
+}
+impl nvmlVgpuCapability_enum {
+ ///!< vGPU profile cannot run on a GPU alongside other profiles of different size
+ pub const NVML_VGPU_CAP_EXCLUSIVE_SIZE: nvmlVgpuCapability_enum = nvmlVgpuCapability_enum(
+ 4,
+ );
+}
+impl nvmlVgpuCapability_enum {
+ pub const NVML_VGPU_CAP_COUNT: nvmlVgpuCapability_enum = nvmlVgpuCapability_enum(5);
+}
+#[repr(transparent)]
+/// vGPU queryable capabilities
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuCapability_enum(pub ::core::ffi::c_uint);
+/// vGPU queryable capabilities
+pub use self::nvmlVgpuCapability_enum as nvmlVgpuCapability_t;
+impl nvmlVgpuDriverCapability_enum {
+ ///!< Supports mixing of different vGPU profiles within one guest VM
+ pub const NVML_VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU: nvmlVgpuDriverCapability_enum = nvmlVgpuDriverCapability_enum(
+ 0,
+ );
+}
+impl nvmlVgpuDriverCapability_enum {
+ pub const NVML_VGPU_DRIVER_CAP_COUNT: nvmlVgpuDriverCapability_enum = nvmlVgpuDriverCapability_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// vGPU driver queryable capabilities
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuDriverCapability_enum(pub ::core::ffi::c_uint);
+/// vGPU driver queryable capabilities
+pub use self::nvmlVgpuDriverCapability_enum as nvmlVgpuDriverCapability_t;
+impl nvmlDeviceVgpuCapability_enum {
+ ///!< Query if the fractional vGPU profiles on this GPU can be used in multi-vGPU configurations
+ pub const NVML_DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU: nvmlDeviceVgpuCapability_enum = nvmlDeviceVgpuCapability_enum(
+ 0,
+ );
+}
+impl nvmlDeviceVgpuCapability_enum {
+ ///!< Query if the GPU supports concurrent execution of timesliced vGPU profiles of differing types
+ pub const NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES: nvmlDeviceVgpuCapability_enum = nvmlDeviceVgpuCapability_enum(
+ 1,
+ );
+}
+impl nvmlDeviceVgpuCapability_enum {
+ ///!< Query if the GPU supports concurrent execution of timesliced vGPU profiles of differing framebuffer sizes
+ pub const NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES: nvmlDeviceVgpuCapability_enum = nvmlDeviceVgpuCapability_enum(
+ 2,
+ );
+}
+impl nvmlDeviceVgpuCapability_enum {
+ ///!< Query the GPU's read_device_buffer expected bandwidth capacity in megabytes per second
+ pub const NVML_DEVICE_VGPU_CAP_READ_DEVICE_BUFFER_BW: nvmlDeviceVgpuCapability_enum = nvmlDeviceVgpuCapability_enum(
+ 3,
+ );
+}
+impl nvmlDeviceVgpuCapability_enum {
+ ///!< Query the GPU's write_device_buffer expected bandwidth capacity in megabytes per second
+ pub const NVML_DEVICE_VGPU_CAP_WRITE_DEVICE_BUFFER_BW: nvmlDeviceVgpuCapability_enum = nvmlDeviceVgpuCapability_enum(
+ 4,
+ );
+}
+impl nvmlDeviceVgpuCapability_enum {
+ ///!< Query if vGPU profiles on the GPU support migration data streaming
+ pub const NVML_DEVICE_VGPU_CAP_DEVICE_STREAMING: nvmlDeviceVgpuCapability_enum = nvmlDeviceVgpuCapability_enum(
+ 5,
+ );
+}
+impl nvmlDeviceVgpuCapability_enum {
+ ///!< Set/Get support for mini-quarter vGPU profiles
+ pub const NVML_DEVICE_VGPU_CAP_MINI_QUARTER_GPU: nvmlDeviceVgpuCapability_enum = nvmlDeviceVgpuCapability_enum(
+ 6,
+ );
+}
+impl nvmlDeviceVgpuCapability_enum {
+ ///!< Set/Get support for compute media engine vGPU profiles
+ pub const NVML_DEVICE_VGPU_CAP_COMPUTE_MEDIA_ENGINE_GPU: nvmlDeviceVgpuCapability_enum = nvmlDeviceVgpuCapability_enum(
+ 7,
+ );
+}
+impl nvmlDeviceVgpuCapability_enum {
+ pub const NVML_DEVICE_VGPU_CAP_COUNT: nvmlDeviceVgpuCapability_enum = nvmlDeviceVgpuCapability_enum(
+ 8,
+ );
+}
+#[repr(transparent)]
+/// Device vGPU queryable capabilities
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlDeviceVgpuCapability_enum(pub ::core::ffi::c_uint);
+/// Device vGPU queryable capabilities
+pub use self::nvmlDeviceVgpuCapability_enum as nvmlDeviceVgpuCapability_t;
+#[doc = "/\n/** @defgroup nvmlVgpuStructs vGPU Structs\n @{\n/\n/"]
+pub type nvmlVgpuTypeId_t = ::core::ffi::c_uint;
+pub type nvmlVgpuInstance_t = ::core::ffi::c_uint;
+/// Structure to store the vGPU heterogeneous mode of device -- version 1
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuHeterogeneousMode_v1_t {
+ ///!< The version number of this struct
+ pub version: ::core::ffi::c_uint,
+ ///!< The vGPU heterogeneous mode
+ pub mode: ::core::ffi::c_uint,
+}
+/// Structure to store the vGPU heterogeneous mode of device -- version 1
+pub type nvmlVgpuHeterogeneousMode_t = nvmlVgpuHeterogeneousMode_v1_t;
+/// Structure to store the placement ID of vGPU instance -- version 1
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuPlacementId_v1_t {
+ ///!< The version number of this struct
+ pub version: ::core::ffi::c_uint,
+ ///!< Placement ID of the active vGPU instance
+ pub placementId: ::core::ffi::c_uint,
+}
+/// Structure to store the placement ID of vGPU instance -- version 1
+pub type nvmlVgpuPlacementId_t = nvmlVgpuPlacementId_v1_t;
+/// Structure to store the list of vGPU placements -- version 1
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuPlacementList_v1_t {
+ ///!< The version number of this struct
+ pub version: ::core::ffi::c_uint,
+ ///!< The number of slots occupied by the vGPU type
+ pub placementSize: ::core::ffi::c_uint,
+ ///!< Count of placement IDs fetched
+ pub count: ::core::ffi::c_uint,
+ ///!< Placement IDs for the vGPU type
+ pub placementIds: *mut ::core::ffi::c_uint,
+}
+/// Structure to store the list of vGPU placements -- version 1
+pub type nvmlVgpuPlacementList_t = nvmlVgpuPlacementList_v1_t;
+/// Structure to store Utilization Value and vgpuInstance
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct nvmlVgpuInstanceUtilizationSample_st {
+ ///!< vGPU Instance
+ pub vgpuInstance: nvmlVgpuInstance_t,
+ ///!< CPU Timestamp in microseconds
+ pub timeStamp: ::core::ffi::c_ulonglong,
+ ///!< SM (3D/Compute) Util Value
+ pub smUtil: nvmlValue_t,
+ ///!< Frame Buffer Memory Util Value
+ pub memUtil: nvmlValue_t,
+ ///!< Encoder Util Value
+ pub encUtil: nvmlValue_t,
+ ///!< Decoder Util Value
+ pub decUtil: nvmlValue_t,
+}
+/// Structure to store Utilization Value and vgpuInstance
+pub type nvmlVgpuInstanceUtilizationSample_t = nvmlVgpuInstanceUtilizationSample_st;
+/// Structure to store Utilization Value and vgpuInstance Info -- Version 1
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct nvmlVgpuInstanceUtilizationInfo_v1_t {
+ ///!< CPU Timestamp in microseconds
+ pub timeStamp: ::core::ffi::c_ulonglong,
+ ///!< vGPU Instance
+ pub vgpuInstance: nvmlVgpuInstance_t,
+ ///!< SM (3D/Compute) Util Value
+ pub smUtil: nvmlValue_t,
+ ///!< Frame Buffer Memory Util Value
+ pub memUtil: nvmlValue_t,
+ ///!< Encoder Util Value
+ pub encUtil: nvmlValue_t,
+ ///!< Decoder Util Value
+ pub decUtil: nvmlValue_t,
+ ///!< Jpeg Util Value
+ pub jpgUtil: nvmlValue_t,
+ ///!< Ofa Util Value
+ pub ofaUtil: nvmlValue_t,
+}
+/// Structure to store recent utilization for vGPU instances running on a device -- version 1
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuInstancesUtilizationInfo_v1_t {
+ ///!< The version number of this struct
+ pub version: ::core::ffi::c_uint,
+ ///!< Hold the type of returned sample values
+ pub sampleValType: nvmlValueType_t,
+ ///!< Hold the number of vGPU instances
+ pub vgpuInstanceCount: ::core::ffi::c_uint,
+ ///!< Return only samples with timestamp greater than lastSeenTimeStamp
+ pub lastSeenTimeStamp: ::core::ffi::c_ulonglong,
+ ///!< The array (allocated by caller) in which vGPU utilization are returned
+ pub vgpuUtilArray: *mut nvmlVgpuInstanceUtilizationInfo_v1_t,
+}
+/// Structure to store recent utilization for vGPU instances running on a device -- version 1
+pub type nvmlVgpuInstancesUtilizationInfo_t = nvmlVgpuInstancesUtilizationInfo_v1_t;
+/// Structure to store Utilization Value, vgpuInstance and subprocess information
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuProcessUtilizationSample_st {
+ ///!< vGPU Instance
+ pub vgpuInstance: nvmlVgpuInstance_t,
+ ///!< PID of process running within the vGPU VM
+ pub pid: ::core::ffi::c_uint,
+ ///!< Name of process running within the vGPU VM
+ pub processName: [::core::ffi::c_char; 64usize],
+ ///!< CPU Timestamp in microseconds
+ pub timeStamp: ::core::ffi::c_ulonglong,
+ ///!< SM (3D/Compute) Util Value
+ pub smUtil: ::core::ffi::c_uint,
+ ///!< Frame Buffer Memory Util Value
+ pub memUtil: ::core::ffi::c_uint,
+ ///!< Encoder Util Value
+ pub encUtil: ::core::ffi::c_uint,
+ ///!< Decoder Util Value
+ pub decUtil: ::core::ffi::c_uint,
+}
+/// Structure to store Utilization Value, vgpuInstance and subprocess information
+pub type nvmlVgpuProcessUtilizationSample_t = nvmlVgpuProcessUtilizationSample_st;
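Editorial sketch, not part of the bindings: processName is a fixed, NUL-padded C buffer rather than a Rust string, so a caller has to cut it at the first NUL and convert lossily. The helper name is hypothetical.

// Illustrative only; assumes the generated nvml bindings are in scope.
fn process_name(sample: &nvmlVgpuProcessUtilizationSample_t) -> String {
    // Take bytes up to the first NUL, then convert lossily to UTF-8.
    let bytes: Vec<u8> = sample
        .processName
        .iter()
        .take_while(|&&c| c != 0)
        .map(|&c| c as u8)
        .collect();
    String::from_utf8_lossy(&bytes).into_owned()
}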
+/// Structure to store Utilization Value, vgpuInstance and subprocess information for process running on vGPU instance -- version 1
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuProcessUtilizationInfo_v1_t {
+ ///!< Name of process running within the vGPU VM
+ pub processName: [::core::ffi::c_char; 64usize],
+ ///!< CPU Timestamp in microseconds
+ pub timeStamp: ::core::ffi::c_ulonglong,
+ ///!< vGPU Instance
+ pub vgpuInstance: nvmlVgpuInstance_t,
+ ///!< PID of process running within the vGPU VM
+ pub pid: ::core::ffi::c_uint,
+ ///!< SM (3D/Compute) Util Value
+ pub smUtil: ::core::ffi::c_uint,
+ ///!< Frame Buffer Memory Util Value
+ pub memUtil: ::core::ffi::c_uint,
+ ///!< Encoder Util Value
+ pub encUtil: ::core::ffi::c_uint,
+ ///!< Decoder Util Value
+ pub decUtil: ::core::ffi::c_uint,
+ ///!< Jpeg Util Value
+ pub jpgUtil: ::core::ffi::c_uint,
+ ///!< Ofa Util Value
+ pub ofaUtil: ::core::ffi::c_uint,
+}
+/// Structure to store recent utilization, vgpuInstance and subprocess information for processes running on vGPU instances active on a device -- version 1
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuProcessesUtilizationInfo_v1_t {
+ ///!< The version number of this struct
+ pub version: ::core::ffi::c_uint,
+ ///!< Hold the number of processes running on vGPU instances
+ pub vgpuProcessCount: ::core::ffi::c_uint,
+ ///!< Return only samples with timestamp greater than lastSeenTimeStamp
+ pub lastSeenTimeStamp: ::core::ffi::c_ulonglong,
+ ///!< The array (allocated by caller) in which utilization of processes running on vGPU instances are returned
+ pub vgpuProcUtilArray: *mut nvmlVgpuProcessUtilizationInfo_v1_t,
+}
+/// Structure to store recent utilization, vgpuInstance and subprocess information for processes running on vGPU instances active on a device -- version 1
+pub type nvmlVgpuProcessesUtilizationInfo_t = nvmlVgpuProcessesUtilizationInfo_v1_t;
+/// Union to represent the vGPU Scheduler Parameters
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union nvmlVgpuSchedulerParams_t {
+ pub vgpuSchedDataWithARR: nvmlVgpuSchedulerParams_t__bindgen_ty_1,
+ pub vgpuSchedData: nvmlVgpuSchedulerParams_t__bindgen_ty_2,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuSchedulerParams_t__bindgen_ty_1 {
+ ///!< Average factor in compensating the timeslice for Adaptive Round Robin mode
+ pub avgFactor: ::core::ffi::c_uint,
+ ///!< The timeslice in ns for each software run list as configured, or the default value otherwise
+ pub timeslice: ::core::ffi::c_uint,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuSchedulerParams_t__bindgen_ty_2 {
+ ///!< The timeslice in ns for each software run list as configured, or the default value otherwise
+ pub timeslice: ::core::ffi::c_uint,
+}
+/// Structure to store the state and logs of a software runlist
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuSchedulerLogEntries_st {
+ ///!< Timestamp in ns when this software runlist was preempted
+ pub timestamp: ::core::ffi::c_ulonglong,
+ ///!< Total time in ns this software runlist has run
+ pub timeRunTotal: ::core::ffi::c_ulonglong,
+ ///!< Time in ns this software runlist ran before preemption
+ pub timeRun: ::core::ffi::c_ulonglong,
+ ///!< Software runlist Id
+ pub swRunlistId: ::core::ffi::c_uint,
+ ///!< The actual timeslice after deduction
+ pub targetTimeSlice: ::core::ffi::c_ulonglong,
+ ///!< Preemption time in ns for this SW runlist
+ pub cumulativePreemptionTime: ::core::ffi::c_ulonglong,
+}
+/// Structure to store the state and logs of a software runlist
+pub type nvmlVgpuSchedulerLogEntry_t = nvmlVgpuSchedulerLogEntries_st;
+/// Structure to store a vGPU software scheduler log
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct nvmlVgpuSchedulerLog_st {
+ ///!< Engine whose software runlist log entries are fetched
+ pub engineId: ::core::ffi::c_uint,
+ ///!< Scheduler policy
+ pub schedulerPolicy: ::core::ffi::c_uint,
+ ///!< Adaptive Round Robin scheduler mode. One of the NVML_VGPU_SCHEDULER_ARR_*.
+ pub arrMode: ::core::ffi::c_uint,
+ pub schedulerParams: nvmlVgpuSchedulerParams_t,
+ ///!< Count of log entries fetched
+ pub entriesCount: ::core::ffi::c_uint,
+ pub logEntries: [nvmlVgpuSchedulerLogEntry_t; 200usize],
+}
+/// Structure to store a vGPU software scheduler log
+pub type nvmlVgpuSchedulerLog_t = nvmlVgpuSchedulerLog_st;
+/// Structure to store the vGPU scheduler state
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct nvmlVgpuSchedulerGetState_st {
+ ///!< Scheduler policy
+ pub schedulerPolicy: ::core::ffi::c_uint,
+ ///!< Adaptive Round Robin scheduler mode. One of the NVML_VGPU_SCHEDULER_ARR_*.
+ pub arrMode: ::core::ffi::c_uint,
+ pub schedulerParams: nvmlVgpuSchedulerParams_t,
+}
+/// Structure to store the vGPU scheduler state
+pub type nvmlVgpuSchedulerGetState_t = nvmlVgpuSchedulerGetState_st;
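Editorial sketch, not part of the bindings: schedulerParams is a union, and which member is valid depends on arrMode (one of the NVML_VGPU_SCHEDULER_ARR_* constants defined elsewhere in the header). This sketch leaves that decision to the caller rather than assuming specific constant values; the helper name is hypothetical.

// Illustrative only; assumes the generated nvml bindings are in scope.
// The caller derives `arr_enabled` from `arrMode` using the
// NVML_VGPU_SCHEDULER_ARR_* constants defined elsewhere in the header.
fn configured_timeslice(state: &nvmlVgpuSchedulerGetState_t, arr_enabled: bool) -> u32 {
    unsafe {
        if arr_enabled {
            state.schedulerParams.vgpuSchedDataWithARR.timeslice
        } else {
            state.schedulerParams.vgpuSchedData.timeslice
        }
    }
}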
+/// Union to represent the vGPU Scheduler set Parameters
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union nvmlVgpuSchedulerSetParams_t {
+ pub vgpuSchedDataWithARR: nvmlVgpuSchedulerSetParams_t__bindgen_ty_1,
+ pub vgpuSchedData: nvmlVgpuSchedulerSetParams_t__bindgen_ty_2,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuSchedulerSetParams_t__bindgen_ty_1 {
+ ///!< Average factor in compensating the timeslice for Adaptive Round Robin mode
+ pub avgFactor: ::core::ffi::c_uint,
+ ///!< Frequency for Adaptive Round Robin mode
+ pub frequency: ::core::ffi::c_uint,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuSchedulerSetParams_t__bindgen_ty_2 {
+ ///!< The timeslice in ns(Nanoseconds) for each software run list as configured, or the default value otherwise
+ pub timeslice: ::core::ffi::c_uint,
+}
+/// Structure to set the vGPU scheduler state
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct nvmlVgpuSchedulerSetState_st {
+ ///!< Scheduler policy
+ pub schedulerPolicy: ::core::ffi::c_uint,
+ ///!< Adaptive Round Robin scheduler
+ pub enableARRMode: ::core::ffi::c_uint,
+ pub schedulerParams: nvmlVgpuSchedulerSetParams_t,
+}
+/// Structure to set the vGPU scheduler state
+pub type nvmlVgpuSchedulerSetState_t = nvmlVgpuSchedulerSetState_st;
+/// Structure to store the vGPU scheduler capabilities
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuSchedulerCapabilities_st {
+ ///!< List the supported vGPU schedulers on the device
+ pub supportedSchedulers: [::core::ffi::c_uint; 3usize],
+ ///!< Maximum timeslice value in ns
+ pub maxTimeslice: ::core::ffi::c_uint,
+ ///!< Minimum timeslice value in ns
+ pub minTimeslice: ::core::ffi::c_uint,
+ ///!< Flag indicating whether Adaptive Round Robin mode is supported.
+ pub isArrModeSupported: ::core::ffi::c_uint,
+ ///!< Maximum frequency for Adaptive Round Robin mode
+ pub maxFrequencyForARR: ::core::ffi::c_uint,
+ ///!< Minimum frequency for Adaptive Round Robin mode
+ pub minFrequencyForARR: ::core::ffi::c_uint,
+ ///!< Maximum averaging factor for Adaptive Round Robin mode
+ pub maxAvgFactorForARR: ::core::ffi::c_uint,
+ ///!< Minimum averaging factor for Adaptive Round Robin mode
+ pub minAvgFactorForARR: ::core::ffi::c_uint,
+}
+/// Structure to store the vGPU scheduler capabilities
+pub type nvmlVgpuSchedulerCapabilities_t = nvmlVgpuSchedulerCapabilities_st;
+/// Structure to store the vGPU license expiry details
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuLicenseExpiry_st {
+ ///!< Year of license expiry
+ pub year: ::core::ffi::c_uint,
+ ///!< Month of license expiry
+ pub month: ::core::ffi::c_ushort,
+ ///!< Day of license expiry
+ pub day: ::core::ffi::c_ushort,
+ ///!< Hour of license expiry
+ pub hour: ::core::ffi::c_ushort,
+ ///!< Minutes of license expiry
+ pub min: ::core::ffi::c_ushort,
+ ///!< Seconds of license expiry
+ pub sec: ::core::ffi::c_ushort,
+ ///!< License expiry status
+ pub status: ::core::ffi::c_uchar,
+}
+/// Structure to store the vGPU license expiry details
+pub type nvmlVgpuLicenseExpiry_t = nvmlVgpuLicenseExpiry_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuLicenseInfo_st {
+ ///!< License status
+ pub isLicensed: ::core::ffi::c_uchar,
+ ///!< License expiry information
+ pub licenseExpiry: nvmlVgpuLicenseExpiry_t,
+ ///!< Current license state
+ pub currentState: ::core::ffi::c_uint,
+}
+pub type nvmlVgpuLicenseInfo_t = nvmlVgpuLicenseInfo_st;
+/// Structure to store utilization value and process Id
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlProcessUtilizationSample_st {
+ ///!< PID of process
+ pub pid: ::core::ffi::c_uint,
+ ///!< CPU Timestamp in microseconds
+ pub timeStamp: ::core::ffi::c_ulonglong,
+ ///!< SM (3D/Compute) Util Value
+ pub smUtil: ::core::ffi::c_uint,
+ ///!< Frame Buffer Memory Util Value
+ pub memUtil: ::core::ffi::c_uint,
+ ///!< Encoder Util Value
+ pub encUtil: ::core::ffi::c_uint,
+ ///!< Decoder Util Value
+ pub decUtil: ::core::ffi::c_uint,
+}
+/// Structure to store utilization value and process Id
+pub type nvmlProcessUtilizationSample_t = nvmlProcessUtilizationSample_st;
+/// Structure to store utilization value and process Id -- version 1
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlProcessUtilizationInfo_v1_t {
+ ///!< CPU Timestamp in microseconds
+ pub timeStamp: ::core::ffi::c_ulonglong,
+ ///!< PID of process
+ pub pid: ::core::ffi::c_uint,
+ ///!< SM (3D/Compute) Util Value
+ pub smUtil: ::core::ffi::c_uint,
+ ///!< Frame Buffer Memory Util Value
+ pub memUtil: ::core::ffi::c_uint,
+ ///!< Encoder Util Value
+ pub encUtil: ::core::ffi::c_uint,
+ ///!< Decoder Util Value
+ pub decUtil: ::core::ffi::c_uint,
+ ///!< Jpeg Util Value
+ pub jpgUtil: ::core::ffi::c_uint,
+ ///!< Ofa Util Value
+ pub ofaUtil: ::core::ffi::c_uint,
+}
+/// Structure to store utilization and process ID for each running process -- version 1
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlProcessesUtilizationInfo_v1_t {
+ ///!< The version number of this struct
+ pub version: ::core::ffi::c_uint,
+ ///!< Caller-supplied array size, and returns number of processes running
+ pub processSamplesCount: ::core::ffi::c_uint,
+ ///!< Return only samples with timestamp greater than lastSeenTimeStamp
+ pub lastSeenTimeStamp: ::core::ffi::c_ulonglong,
+ ///!< The array (allocated by caller) of the utilization of GPU SM, framebuffer, video encoder, video decoder, JPEG, and OFA
+ pub procUtilArray: *mut nvmlProcessUtilizationInfo_v1_t,
+}
+/// Structure to store utilization and process ID for each running process -- version 1
+pub type nvmlProcessesUtilizationInfo_t = nvmlProcessesUtilizationInfo_v1_t;
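// Editorial sketch, not part of the generated bindings or of this patch: how a caller might
// prepare nvmlProcessesUtilizationInfo_v1_t for the usual NVML caller-allocated-buffer
// pattern (processSamplesCount carries the capacity on input and the number of processes on
// output). The version encoding below -- struct size in the low bits, version number in the
// top byte -- mirrors NVML's NVML_STRUCT_VERSION convention and is an assumption here, since
// the corresponding constant is not reproduced in this listing.
#[allow(dead_code)]
fn processes_utilization_request(
    buffer: &mut Vec<nvmlProcessUtilizationInfo_v1_t>,
    last_seen_time_stamp: ::core::ffi::c_ulonglong,
) -> nvmlProcessesUtilizationInfo_v1_t {
    nvmlProcessesUtilizationInfo_v1_t {
        version: ::core::mem::size_of::<nvmlProcessesUtilizationInfo_v1_t>() as u32 | (1u32 << 24),
        processSamplesCount: buffer.capacity() as ::core::ffi::c_uint,
        lastSeenTimeStamp: last_seen_time_stamp,
        // The driver writes up to `capacity` entries here; after a successful call the caller
        // would set the Vec's length to the returned processSamplesCount.
        procUtilArray: buffer.as_mut_ptr(),
    }
}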
+/// Structure to store license expiry date and time values
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGridLicenseExpiry_st {
+ ///!< Year value of license expiry
+ pub year: ::core::ffi::c_uint,
+ ///!< Month value of license expiry
+ pub month: ::core::ffi::c_ushort,
+ ///!< Day value of license expiry
+ pub day: ::core::ffi::c_ushort,
+ ///!< Hour value of license expiry
+ pub hour: ::core::ffi::c_ushort,
+ ///!< Minutes value of license expiry
+ pub min: ::core::ffi::c_ushort,
+ ///!< Seconds value of license expiry
+ pub sec: ::core::ffi::c_ushort,
+ ///!< License expiry status
+ pub status: ::core::ffi::c_uchar,
+}
+/// Structure to store license expiry date and time values
+pub type nvmlGridLicenseExpiry_t = nvmlGridLicenseExpiry_st;
+/// Structure containing vGPU software licensable feature information
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGridLicensableFeature_st {
+ ///!< Licensed feature code
+ pub featureCode: nvmlGridLicenseFeatureCode_t,
+ ///!< Non-zero if feature is currently licensed, otherwise zero
+ pub featureState: ::core::ffi::c_uint,
+ ///!< Deprecated.
+ pub licenseInfo: [::core::ffi::c_char; 128usize],
+ ///!< Product name of feature
+ pub productName: [::core::ffi::c_char; 128usize],
+ ///!< Non-zero if feature is enabled, otherwise zero
+ pub featureEnabled: ::core::ffi::c_uint,
+ ///!< License expiry structure containing date and time
+ pub licenseExpiry: nvmlGridLicenseExpiry_t,
+}
+/// Structure containing vGPU software licensable feature information
+pub type nvmlGridLicensableFeature_t = nvmlGridLicensableFeature_st;
+/// Structure to store vGPU software licensable features
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGridLicensableFeatures_st {
+ ///!< Non-zero if vGPU Software Licensing is supported on the system, otherwise zero
+ pub isGridLicenseSupported: ::core::ffi::c_int,
+ ///!< Entries returned in \a gridLicensableFeatures array
+ pub licensableFeaturesCount: ::core::ffi::c_uint,
+ ///!< Array of vGPU software licensable features.
+ pub gridLicensableFeatures: [nvmlGridLicensableFeature_t; 3usize],
+}
+/// Structure to store vGPU software licensable features
+pub type nvmlGridLicensableFeatures_t = nvmlGridLicensableFeatures_st;
+/// Structure to store SRAM uncorrectable error counters
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlEccSramErrorStatus_v1_t {
+ ///!< the API version number
+ pub version: ::core::ffi::c_uint,
+ ///!< aggregate uncorrectable parity error count
+ pub aggregateUncParity: ::core::ffi::c_ulonglong,
+ ///!< aggregate uncorrectable SEC-DED error count
+ pub aggregateUncSecDed: ::core::ffi::c_ulonglong,
+ ///!< aggregate correctable error count
+ pub aggregateCor: ::core::ffi::c_ulonglong,
+ ///!< volatile uncorrectable parity error count
+ pub volatileUncParity: ::core::ffi::c_ulonglong,
+ ///!< volatile uncorrectable SEC-DED error count
+ pub volatileUncSecDed: ::core::ffi::c_ulonglong,
+ ///!< volatile correctable error count
+ pub volatileCor: ::core::ffi::c_ulonglong,
+ ///!< aggregate uncorrectable error count for L2 cache bucket
+ pub aggregateUncBucketL2: ::core::ffi::c_ulonglong,
+ ///!< aggregate uncorrectable error count for SM bucket
+ pub aggregateUncBucketSm: ::core::ffi::c_ulonglong,
+ ///!< aggregate uncorrectable error count for PCIE bucket
+ pub aggregateUncBucketPcie: ::core::ffi::c_ulonglong,
+ ///!< aggregate uncorrectable error count for Microcontroller bucket
+ pub aggregateUncBucketMcu: ::core::ffi::c_ulonglong,
+ ///!< aggregate uncorrectable error count for Other bucket
+ pub aggregateUncBucketOther: ::core::ffi::c_ulonglong,
+ ///!< if the error threshold of field diag is exceeded
+ pub bThresholdExceeded: ::core::ffi::c_uint,
+}
+/// Structure to store SRAM uncorrectable error counters
+pub type nvmlEccSramErrorStatus_t = nvmlEccSramErrorStatus_v1_t;
+pub type nvmlDeviceArchitecture_t = ::core::ffi::c_uint;
+pub type nvmlBusType_t = ::core::ffi::c_uint;
+pub type nvmlFanControlPolicy_t = ::core::ffi::c_uint;
+pub type nvmlPowerSource_t = ::core::ffi::c_uint;
+impl nvmlGpuUtilizationDomainId_t {
+ ///!< Graphics engine domain
+ pub const NVML_GPU_UTILIZATION_DOMAIN_GPU: nvmlGpuUtilizationDomainId_t = nvmlGpuUtilizationDomainId_t(
+ 0,
+ );
+}
+impl nvmlGpuUtilizationDomainId_t {
+ ///!< Frame buffer domain
+ pub const NVML_GPU_UTILIZATION_DOMAIN_FB: nvmlGpuUtilizationDomainId_t = nvmlGpuUtilizationDomainId_t(
+ 1,
+ );
+}
+impl nvmlGpuUtilizationDomainId_t {
+ ///!< Video engine domain
+ pub const NVML_GPU_UTILIZATION_DOMAIN_VID: nvmlGpuUtilizationDomainId_t = nvmlGpuUtilizationDomainId_t(
+ 2,
+ );
+}
+impl nvmlGpuUtilizationDomainId_t {
+ ///!< Bus interface domain
+ pub const NVML_GPU_UTILIZATION_DOMAIN_BUS: nvmlGpuUtilizationDomainId_t = nvmlGpuUtilizationDomainId_t(
+ 3,
+ );
+}
+#[repr(transparent)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGpuUtilizationDomainId_t(pub ::core::ffi::c_uint);
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGpuDynamicPstatesInfo_st {
+ ///!< Reserved for future use
+ pub flags: ::core::ffi::c_uint,
+ pub utilization: [nvmlGpuDynamicPstatesInfo_st__bindgen_ty_1; 8usize],
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGpuDynamicPstatesInfo_st__bindgen_ty_1 {
+ ///!< Set if this utilization domain is present on this GPU
+ pub bIsPresent: ::core::ffi::c_uint,
+ ///!< Percentage of time where the domain is considered busy in the last 1-second interval
+ pub percentage: ::core::ffi::c_uint,
+ ///!< Utilization threshold that can trigger a perf-increasing P-State change when crossed
+ pub incThreshold: ::core::ffi::c_uint,
+ ///!< Utilization threshold that can trigger a perf-decreasing P-State change when crossed
+ pub decThreshold: ::core::ffi::c_uint,
+}
+pub type nvmlGpuDynamicPstatesInfo_t = nvmlGpuDynamicPstatesInfo_st;
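// Editorial sketch, not part of the generated bindings or of this patch: the utilization
// array above is indexed by the nvmlGpuUtilizationDomainId_t constants defined just before
// it, and a reading is only meaningful when the entry's bIsPresent flag is set.
#[allow(dead_code)]
fn domain_busy_percentage(
    info: &nvmlGpuDynamicPstatesInfo_t,
    domain: nvmlGpuUtilizationDomainId_t,
) -> Option<::core::ffi::c_uint> {
    let entry = info.utilization.get(domain.0 as usize)?;
    if entry.bIsPresent != 0 {
        Some(entry.percentage)
    } else {
        None
    }
}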
+/// Information for a Field Value Sample
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct nvmlFieldValue_st {
+ ///!< ID of the NVML field to retrieve. This must be set before any call that uses this struct. See the constants starting with NVML_FI_ above.
+ pub fieldId: ::core::ffi::c_uint,
+ ///!< Scope ID can represent data used by NVML depending on fieldId's context. For example, for NVLink throughput counter data, scopeId can represent linkId.
+ pub scopeId: ::core::ffi::c_uint,
+ ///!< CPU Timestamp of this value in microseconds since 1970
+ pub timestamp: ::core::ffi::c_longlong,
+ ///!< How long this field value took to update (in usec) within NVML. This may be averaged across several fields that are serviced by the same driver call.
+ pub latencyUsec: ::core::ffi::c_longlong,
+ ///!< Type of the value stored in value
+ pub valueType: nvmlValueType_t,
+ ///!< Return code for retrieving this value. This must be checked before looking at value, as value is undefined if nvmlReturn != NVML_SUCCESS
+ pub nvmlReturn: nvmlReturn_t,
+ ///!< Value for this field. This is only valid if nvmlReturn == NVML_SUCCESS
+ pub value: nvmlValue_t,
+}
+/// Information for a Field Value Sample
+pub type nvmlFieldValue_t = nvmlFieldValue_st;
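// Editorial sketch, not part of the generated bindings or of this patch: per the comments
// above, the caller only fills fieldId (and optionally scopeId) before a query, and value
// must not be read unless nvmlReturn reports success. The NVML_FI_* field identifiers are
// defined elsewhere in this file; zero-initialising the remaining members is sufficient for
// a request buffer.
#[allow(dead_code)]
fn field_value_requests(field_ids: &[::core::ffi::c_uint]) -> Vec<nvmlFieldValue_t> {
    field_ids
        .iter()
        .map(|&id| {
            // All-zero is a valid "unset" bit pattern for every member of this struct.
            let mut request: nvmlFieldValue_t = unsafe { ::core::mem::zeroed() };
            request.fieldId = id;
            request
        })
        .collect()
}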
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct nvmlUnit_st {
+ _unused: [u8; 0],
+}
+/// @defgroup nvmlUnitStructs Unit Structs
+pub type nvmlUnit_t = *mut nvmlUnit_st;
+/// Description of HWBC entry
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlHwbcEntry_st {
+ pub hwbcId: ::core::ffi::c_uint,
+ pub firmwareVersion: [::core::ffi::c_char; 32usize],
+}
+/// Description of HWBC entry
+pub type nvmlHwbcEntry_t = nvmlHwbcEntry_st;
+impl nvmlFanState_enum {
+ ///!< Fan is working properly
+ pub const NVML_FAN_NORMAL: nvmlFanState_enum = nvmlFanState_enum(0);
+}
+impl nvmlFanState_enum {
+ ///!< Fan has failed
+ pub const NVML_FAN_FAILED: nvmlFanState_enum = nvmlFanState_enum(1);
+}
+#[repr(transparent)]
+/// Fan state enum.
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlFanState_enum(pub ::core::ffi::c_uint);
+/// Fan state enum.
+pub use self::nvmlFanState_enum as nvmlFanState_t;
+impl nvmlLedColor_enum {
+ ///!< GREEN, indicates good health
+ pub const NVML_LED_COLOR_GREEN: nvmlLedColor_enum = nvmlLedColor_enum(0);
+}
+impl nvmlLedColor_enum {
+ ///!< AMBER, indicates problem
+ pub const NVML_LED_COLOR_AMBER: nvmlLedColor_enum = nvmlLedColor_enum(1);
+}
+#[repr(transparent)]
+/// Led color enum.
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlLedColor_enum(pub ::core::ffi::c_uint);
+/// Led color enum.
+pub use self::nvmlLedColor_enum as nvmlLedColor_t;
+/// LED states for an S-class unit.
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlLedState_st {
+ ///!< If amber, a text description of the cause
+ pub cause: [::core::ffi::c_char; 256usize],
+ ///!< GREEN or AMBER
+ pub color: nvmlLedColor_t,
+}
+/// LED states for an S-class unit.
+pub type nvmlLedState_t = nvmlLedState_st;
+/// Static S-class unit info.
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlUnitInfo_st {
+ ///!< Product name
+ pub name: [::core::ffi::c_char; 96usize],
+ ///!< Product identifier
+ pub id: [::core::ffi::c_char; 96usize],
+ ///!< Product serial number
+ pub serial: [::core::ffi::c_char; 96usize],
+ ///!< Firmware version
+ pub firmwareVersion: [::core::ffi::c_char; 96usize],
+}
+/// Static S-class unit info.
+pub type nvmlUnitInfo_t = nvmlUnitInfo_st;
+/** Power usage information for an S-class unit.
+ The power supply state is a human readable string that equals "Normal" or contains
+ a combination of "Abnormal" plus one or more of the following:
+
+ - High voltage
+ - Fan failure
+ - Heatsink temperature
+ - Current limit
+ - Voltage below UV alarm threshold
+ - Low-voltage
+ - SI2C remote off command
+ - MOD_DISABLE input
+ - Short pin transition*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlPSUInfo_st {
+ ///!< The power supply state
+ pub state: [::core::ffi::c_char; 256usize],
+ ///!< PSU current (A)
+ pub current: ::core::ffi::c_uint,
+ ///!< PSU voltage (V)
+ pub voltage: ::core::ffi::c_uint,
+ ///!< PSU power draw (W)
+ pub power: ::core::ffi::c_uint,
+}
+/** Power usage information for an S-class unit.
+ The power supply state is a human readable string that equals "Normal" or contains
+ a combination of "Abnormal" plus one or more of the following:
+
+ - High voltage
+ - Fan failure
+ - Heatsink temperature
+ - Current limit
+ - Voltage below UV alarm threshold
+ - Low-voltage
+ - SI2C remote off command
+ - MOD_DISABLE input
+ - Short pin transition*/
+pub type nvmlPSUInfo_t = nvmlPSUInfo_st;
+/// Fan speed reading for a single fan in an S-class unit.
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlUnitFanInfo_st {
+ ///!< Fan speed (RPM)
+ pub speed: ::core::ffi::c_uint,
+ ///!< Flag that indicates whether fan is working properly
+ pub state: nvmlFanState_t,
+}
+/// Fan speed reading for a single fan in an S-class unit.
+pub type nvmlUnitFanInfo_t = nvmlUnitFanInfo_st;
+/// Fan speed readings for an entire S-class unit.
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlUnitFanSpeeds_st {
+ ///!< Fan speed data for each fan
+ pub fans: [nvmlUnitFanInfo_t; 24usize],
+ ///!< Number of fans in unit
+ pub count: ::core::ffi::c_uint,
+}
+/// Fan speed readings for an entire S-class unit.
+pub type nvmlUnitFanSpeeds_t = nvmlUnitFanSpeeds_st;
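// Editorial sketch, not part of the generated bindings or of this patch: like several other
// NVML structs, nvmlUnitFanSpeeds_t pairs a fixed-size array with a count of valid entries,
// so only the first `count` elements should be read.
#[allow(dead_code)]
fn valid_fans(speeds: &nvmlUnitFanSpeeds_t) -> &[nvmlUnitFanInfo_t] {
    let count = (speeds.count as usize).min(speeds.fans.len());
    &speeds.fans[..count]
}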
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct nvmlEventSet_st {
+ _unused: [u8; 0],
+}
+/// Handle to an event set
+pub type nvmlEventSet_t = *mut nvmlEventSet_st;
+/// Information about occurred event
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlEventData_st {
+ ///!< Specific device where the event occurred
+ pub device: nvmlDevice_t,
+ ///!< Information about what specific event occurred
+ pub eventType: ::core::ffi::c_ulonglong,
+ ///!< Stores XID error for the device in the event of nvmlEventTypeXidCriticalError,
+ pub eventData: ::core::ffi::c_ulonglong,
+ ///!< If MIG is enabled and nvmlEventTypeXidCriticalError event is attributable to a GPU
+ pub gpuInstanceId: ::core::ffi::c_uint,
+ ///!< If MIG is enabled and nvmlEventTypeXidCriticalError event is attributable to a
+ pub computeInstanceId: ::core::ffi::c_uint,
+}
+/// Information about occurred event
+pub type nvmlEventData_t = nvmlEventData_st;
+/// Describes accounting statistics of a process.
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlAccountingStats_st {
+ ///!< Percent of time over the process's lifetime during which one or more kernels was executing on the GPU.
+ pub gpuUtilization: ::core::ffi::c_uint,
+ ///!< Percent of time over the process's lifetime during which global (device) memory was being read or written.
+ pub memoryUtilization: ::core::ffi::c_uint,
+ ///!< Maximum total memory in bytes that was ever allocated by the process.
+ pub maxMemoryUsage: ::core::ffi::c_ulonglong,
+ ///!< Amount of time in ms during which the compute context was active. The time is reported as 0 if the process is not terminated
+ pub time: ::core::ffi::c_ulonglong,
+ ///!< CPU Timestamp in usec representing start time for the process
+ pub startTime: ::core::ffi::c_ulonglong,
+ ///!< Flag to represent if the process is running (1 for running, 0 for terminated)
+ pub isRunning: ::core::ffi::c_uint,
+ ///!< Reserved for future use
+ pub reserved: [::core::ffi::c_uint; 5usize],
+}
+/// Describes accounting statistics of a process.
+pub type nvmlAccountingStats_t = nvmlAccountingStats_st;
+impl nvmlEncoderQueryType_enum {
+ ///!< H264 encoder
+ pub const NVML_ENCODER_QUERY_H264: nvmlEncoderQueryType_enum = nvmlEncoderQueryType_enum(
+ 0,
+ );
+}
+impl nvmlEncoderQueryType_enum {
+ ///!< HEVC encoder
+ pub const NVML_ENCODER_QUERY_HEVC: nvmlEncoderQueryType_enum = nvmlEncoderQueryType_enum(
+ 1,
+ );
+}
+impl nvmlEncoderQueryType_enum {
+ ///!< AV1 encoder
+ pub const NVML_ENCODER_QUERY_AV1: nvmlEncoderQueryType_enum = nvmlEncoderQueryType_enum(
+ 2,
+ );
+}
+impl nvmlEncoderQueryType_enum {
+ ///!< Unknown encoder
+ pub const NVML_ENCODER_QUERY_UNKNOWN: nvmlEncoderQueryType_enum = nvmlEncoderQueryType_enum(
+ 255,
+ );
+}
+#[repr(transparent)]
+/// Represents the type of encoder for which capacity can be queried
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlEncoderQueryType_enum(pub ::core::ffi::c_uint);
+/// Represents the type of encoder for which capacity can be queried
+pub use self::nvmlEncoderQueryType_enum as nvmlEncoderType_t;
+/// Structure to hold encoder session data
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlEncoderSessionInfo_st {
+ ///!< Unique session ID
+ pub sessionId: ::core::ffi::c_uint,
+ ///!< Owning process ID
+ pub pid: ::core::ffi::c_uint,
+ ///!< Owning vGPU instance ID (only valid on vGPU hosts, otherwise zero)
+ pub vgpuInstance: nvmlVgpuInstance_t,
+ ///!< Video encoder type
+ pub codecType: nvmlEncoderType_t,
+ ///!< Current encode horizontal resolution
+ pub hResolution: ::core::ffi::c_uint,
+ ///!< Current encode vertical resolution
+ pub vResolution: ::core::ffi::c_uint,
+ ///!< Moving average encode frames per second
+ pub averageFps: ::core::ffi::c_uint,
+ ///!< Moving average encode latency in microseconds
+ pub averageLatency: ::core::ffi::c_uint,
+}
+/// Structure to hold encoder session data
+pub type nvmlEncoderSessionInfo_t = nvmlEncoderSessionInfo_st;
+impl nvmlFBCSessionType_enum {
+ ///!< Unknown
+ pub const NVML_FBC_SESSION_TYPE_UNKNOWN: nvmlFBCSessionType_enum = nvmlFBCSessionType_enum(
+ 0,
+ );
+}
+impl nvmlFBCSessionType_enum {
+ ///!< ToSys
+ pub const NVML_FBC_SESSION_TYPE_TOSYS: nvmlFBCSessionType_enum = nvmlFBCSessionType_enum(
+ 1,
+ );
+}
+impl nvmlFBCSessionType_enum {
+ ///!< Cuda
+ pub const NVML_FBC_SESSION_TYPE_CUDA: nvmlFBCSessionType_enum = nvmlFBCSessionType_enum(
+ 2,
+ );
+}
+impl nvmlFBCSessionType_enum {
+ ///!< Vid
+ pub const NVML_FBC_SESSION_TYPE_VID: nvmlFBCSessionType_enum = nvmlFBCSessionType_enum(
+ 3,
+ );
+}
+impl nvmlFBCSessionType_enum {
+ ///!< HEnc
+ pub const NVML_FBC_SESSION_TYPE_HWENC: nvmlFBCSessionType_enum = nvmlFBCSessionType_enum(
+ 4,
+ );
+}
+#[repr(transparent)]
+/// Represents frame buffer capture session type
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlFBCSessionType_enum(pub ::core::ffi::c_uint);
+/// Represents frame buffer capture session type
+pub use self::nvmlFBCSessionType_enum as nvmlFBCSessionType_t;
+/// Structure to hold frame buffer capture sessions stats
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlFBCStats_st {
+ ///!< Total no of sessions
+ pub sessionsCount: ::core::ffi::c_uint,
+ ///!< Moving average new frames captured per second
+ pub averageFPS: ::core::ffi::c_uint,
+ ///!< Moving average new frame capture latency in microseconds
+ pub averageLatency: ::core::ffi::c_uint,
+}
+/// Structure to hold frame buffer capture sessions stats
+pub type nvmlFBCStats_t = nvmlFBCStats_st;
+/// Structure to hold FBC session data
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlFBCSessionInfo_st {
+ ///!< Unique session ID
+ pub sessionId: ::core::ffi::c_uint,
+ ///!< Owning process ID
+ pub pid: ::core::ffi::c_uint,
+ ///!< Owning vGPU instance ID (only valid on vGPU hosts, otherwise zero)
+ pub vgpuInstance: nvmlVgpuInstance_t,
+ ///!< Display identifier
+ pub displayOrdinal: ::core::ffi::c_uint,
+ ///!< Type of frame buffer capture session
+ pub sessionType: nvmlFBCSessionType_t,
+ ///!< Session flags (one or more of NVML_NVFBC_SESSION_FLAG_XXX).
+ pub sessionFlags: ::core::ffi::c_uint,
+ ///!< Max horizontal resolution supported by the capture session
+ pub hMaxResolution: ::core::ffi::c_uint,
+ ///!< Max vertical resolution supported by the capture session
+ pub vMaxResolution: ::core::ffi::c_uint,
+ ///!< Horizontal resolution requested by caller in capture call
+ pub hResolution: ::core::ffi::c_uint,
+ ///!< Vertical resolution requested by caller in capture call
+ pub vResolution: ::core::ffi::c_uint,
+ ///!< Moving average new frames captured per second
+ pub averageFPS: ::core::ffi::c_uint,
+ ///!< Moving average new frame capture latency in microseconds
+ pub averageLatency: ::core::ffi::c_uint,
+}
+/// Structure to hold FBC session data
+pub type nvmlFBCSessionInfo_t = nvmlFBCSessionInfo_st;
+impl nvmlDetachGpuState_enum {
+ pub const NVML_DETACH_GPU_KEEP: nvmlDetachGpuState_enum = nvmlDetachGpuState_enum(0);
+}
+impl nvmlDetachGpuState_enum {
+ pub const NVML_DETACH_GPU_REMOVE: nvmlDetachGpuState_enum = nvmlDetachGpuState_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Is the GPU device to be removed from the kernel by nvmlDeviceRemoveGpu()
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlDetachGpuState_enum(pub ::core::ffi::c_uint);
+/// Is the GPU device to be removed from the kernel by nvmlDeviceRemoveGpu()
+pub use self::nvmlDetachGpuState_enum as nvmlDetachGpuState_t;
+impl nvmlPcieLinkState_enum {
+ pub const NVML_PCIE_LINK_KEEP: nvmlPcieLinkState_enum = nvmlPcieLinkState_enum(0);
+}
+impl nvmlPcieLinkState_enum {
+ pub const NVML_PCIE_LINK_SHUT_DOWN: nvmlPcieLinkState_enum = nvmlPcieLinkState_enum(
+ 1,
+ );
+}
+#[repr(transparent)]
+/// Parent bridge PCIe link state requested by nvmlDeviceRemoveGpu()
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlPcieLinkState_enum(pub ::core::ffi::c_uint);
+/// Parent bridge PCIe link state requested by nvmlDeviceRemoveGpu()
+pub use self::nvmlPcieLinkState_enum as nvmlPcieLinkState_t;
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlConfComputeSystemCaps_st {
+ pub cpuCaps: ::core::ffi::c_uint,
+ pub gpusCaps: ::core::ffi::c_uint,
+}
+pub type nvmlConfComputeSystemCaps_t = nvmlConfComputeSystemCaps_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlConfComputeSystemState_st {
+ pub environment: ::core::ffi::c_uint,
+ pub ccFeature: ::core::ffi::c_uint,
+ pub devToolsMode: ::core::ffi::c_uint,
+}
+pub type nvmlConfComputeSystemState_t = nvmlConfComputeSystemState_st;
+/// Confidential Compute System settings
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlSystemConfComputeSettings_v1_t {
+ pub version: ::core::ffi::c_uint,
+ pub environment: ::core::ffi::c_uint,
+ pub ccFeature: ::core::ffi::c_uint,
+ pub devToolsMode: ::core::ffi::c_uint,
+ pub multiGpuMode: ::core::ffi::c_uint,
+}
+/// Confidential Compute System settings
+pub type nvmlSystemConfComputeSettings_t = nvmlSystemConfComputeSettings_v1_t;
+/// Protected memory size
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlConfComputeMemSizeInfo_st {
+ pub protectedMemSizeKib: ::core::ffi::c_ulonglong,
+ pub unprotectedMemSizeKib: ::core::ffi::c_ulonglong,
+}
+/// Protected memory size
+pub type nvmlConfComputeMemSizeInfo_t = nvmlConfComputeMemSizeInfo_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlConfComputeGpuCertificate_st {
+ pub certChainSize: ::core::ffi::c_uint,
+ pub attestationCertChainSize: ::core::ffi::c_uint,
+ pub certChain: [::core::ffi::c_uchar; 4096usize],
+ pub attestationCertChain: [::core::ffi::c_uchar; 5120usize],
+}
+pub type nvmlConfComputeGpuCertificate_t = nvmlConfComputeGpuCertificate_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlConfComputeGpuAttestationReport_st {
+ pub isCecAttestationReportPresent: ::core::ffi::c_uint,
+ pub attestationReportSize: ::core::ffi::c_uint,
+ pub cecAttestationReportSize: ::core::ffi::c_uint,
+ pub nonce: [::core::ffi::c_uchar; 32usize],
+ pub attestationReport: [::core::ffi::c_uchar; 8192usize],
+ pub cecAttestationReport: [::core::ffi::c_uchar; 4096usize],
+}
+pub type nvmlConfComputeGpuAttestationReport_t = nvmlConfComputeGpuAttestationReport_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlConfComputeSetKeyRotationThresholdInfo_st {
+ pub version: ::core::ffi::c_uint,
+ pub maxAttackerAdvantage: ::core::ffi::c_ulonglong,
+}
+pub type nvmlConfComputeSetKeyRotationThresholdInfo_v1_t = nvmlConfComputeSetKeyRotationThresholdInfo_st;
+pub type nvmlConfComputeSetKeyRotationThresholdInfo_t = nvmlConfComputeSetKeyRotationThresholdInfo_v1_t;
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlConfComputeGetKeyRotationThresholdInfo_st {
+ pub version: ::core::ffi::c_uint,
+ pub attackerAdvantage: ::core::ffi::c_ulonglong,
+}
+pub type nvmlConfComputeGetKeyRotationThresholdInfo_v1_t = nvmlConfComputeGetKeyRotationThresholdInfo_st;
+pub type nvmlConfComputeGetKeyRotationThresholdInfo_t = nvmlConfComputeGetKeyRotationThresholdInfo_v1_t;
+pub type nvmlGpuFabricState_t = ::core::ffi::c_uchar;
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGpuFabricInfo_t {
+ ///!< Uuid of the cluster to which this GPU belongs
+ pub clusterUuid: [::core::ffi::c_uchar; 16usize],
+ ///!< Error status, if any. Must be checked only if state returns "complete".
+ pub status: nvmlReturn_t,
+ ///!< ID of the fabric clique to which this GPU belongs
+ pub cliqueId: ::core::ffi::c_uint,
+ ///!< Current state of GPU registration process
+ pub state: nvmlGpuFabricState_t,
+}
+/** GPU Fabric information (v2).
+
+ Version 2 adds the \ref nvmlGpuFabricInfo_v2_t.version field
+ to the start of the structure, and the \ref nvmlGpuFabricInfo_v2_t.healthMask
+ field to the end. This structure is not backwards-compatible with
+ \ref nvmlGpuFabricInfo_t.*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGpuFabricInfo_v2_t {
+ ///!< Structure version identifier (set to \ref nvmlGpuFabricInfo_v2)
+ pub version: ::core::ffi::c_uint,
+ ///!< Uuid of the cluster to which this GPU belongs
+ pub clusterUuid: [::core::ffi::c_uchar; 16usize],
+ ///!< Error status, if any. Must be checked only if state returns "complete".
+ pub status: nvmlReturn_t,
+ ///!< ID of the fabric clique to which this GPU belongs
+ pub cliqueId: ::core::ffi::c_uint,
+ ///!< Current state of GPU registration process
+ pub state: nvmlGpuFabricState_t,
+ ///!< GPU Fabric health Status Mask
+ pub healthMask: ::core::ffi::c_uint,
+}
+/** GPU Fabric information (v2).
+
+ Version 2 adds the \ref nvmlGpuFabricInfo_v2_t.version field
+ to the start of the structure, and the \ref nvmlGpuFabricInfo_v2_t.healthMask
+ field to the end. This structure is not backwards-compatible with
+ \ref nvmlGpuFabricInfo_t.*/
+pub type nvmlGpuFabricInfoV_t = nvmlGpuFabricInfo_v2_t;
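// Editorial sketch, not part of the generated bindings or of this patch: versioned query
// structs such as nvmlGpuFabricInfo_v2_t expect the caller to stamp the version field before
// the call. The encoding below (struct size in the low bits, version in the top byte)
// follows NVML's NVML_STRUCT_VERSION convention and is an assumption, since the matching
// nvmlGpuFabricInfo_v2 constant is not reproduced in this listing.
#[allow(dead_code)]
fn gpu_fabric_info_request() -> nvmlGpuFabricInfo_v2_t {
    // All-zero is a valid initial bit pattern for every member of this struct.
    let mut info: nvmlGpuFabricInfo_v2_t = unsafe { ::core::mem::zeroed() };
    info.version = ::core::mem::size_of::<nvmlGpuFabricInfo_v2_t>() as u32 | (2u32 << 24);
    info
}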
+pub type nvmlPowerScopeType_t = ::core::ffi::c_uchar;
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlPowerValue_v2_t {
+ ///!< Structure format version (must be 1)
+ pub version: ::core::ffi::c_uint,
+ ///!< [in] Device type: GPU or Total Module
+ pub powerScope: nvmlPowerScopeType_t,
+ ///!< [out] Power value to retrieve or set in milliwatts
+ pub powerValueMw: ::core::ffi::c_uint,
+}
+pub type nvmlAffinityScope_t = ::core::ffi::c_uint;
+impl nvmlClockLimitId_enum {
+ pub const NVML_CLOCK_LIMIT_ID_RANGE_START: nvmlClockLimitId_enum = nvmlClockLimitId_enum(
+ 4294967040,
+ );
+}
+impl nvmlClockLimitId_enum {
+ pub const NVML_CLOCK_LIMIT_ID_TDP: nvmlClockLimitId_enum = nvmlClockLimitId_enum(
+ 4294967041,
+ );
+}
+impl nvmlClockLimitId_enum {
+ pub const NVML_CLOCK_LIMIT_ID_UNLIMITED: nvmlClockLimitId_enum = nvmlClockLimitId_enum(
+ 4294967042,
+ );
+}
+#[repr(transparent)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlClockLimitId_enum(pub ::core::ffi::c_uint);
+pub use self::nvmlClockLimitId_enum as nvmlClockLimitId_t;
+/// Structure representing range of vGPU versions.
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuVersion_st {
+ ///!< Minimum vGPU version.
+ pub minVersion: ::core::ffi::c_uint,
+ ///!< Maximum vGPU version.
+ pub maxVersion: ::core::ffi::c_uint,
+}
+/// Structure representing range of vGPU versions.
+pub type nvmlVgpuVersion_t = nvmlVgpuVersion_st;
+/// vGPU metadata structure.
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuMetadata_st {
+ ///!< Current version of the structure
+ pub version: ::core::ffi::c_uint,
+ ///!< Current revision of the structure
+ pub revision: ::core::ffi::c_uint,
+ ///!< Current state of Guest-dependent fields
+ pub guestInfoState: nvmlVgpuGuestInfoState_t,
+ ///!< Version of driver installed in guest
+ pub guestDriverVersion: [::core::ffi::c_char; 80usize],
+ ///!< Version of driver installed in host
+ pub hostDriverVersion: [::core::ffi::c_char; 80usize],
+ ///!< Reserved for internal use
+ pub reserved: [::core::ffi::c_uint; 6usize],
+ ///!< vGPU virtualization capabilities bitfield
+ pub vgpuVirtualizationCaps: ::core::ffi::c_uint,
+ ///!< vGPU version of guest driver
+ pub guestVgpuVersion: ::core::ffi::c_uint,
+ ///!< Size of opaque data field in bytes
+ pub opaqueDataSize: ::core::ffi::c_uint,
+ ///!< Opaque data
+ pub opaqueData: [::core::ffi::c_char; 4usize],
+}
+/// vGPU metadata structure.
+pub type nvmlVgpuMetadata_t = nvmlVgpuMetadata_st;
+/// Physical GPU metadata structure
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuPgpuMetadata_st {
+ ///!< Current version of the structure
+ pub version: ::core::ffi::c_uint,
+ ///!< Current revision of the structure
+ pub revision: ::core::ffi::c_uint,
+ ///!< Host driver version
+ pub hostDriverVersion: [::core::ffi::c_char; 80usize],
+ ///!< Pgpu virtualization capabilities bitfield
+ pub pgpuVirtualizationCaps: ::core::ffi::c_uint,
+ ///!< Reserved for internal use
+ pub reserved: [::core::ffi::c_uint; 5usize],
+ ///!< vGPU version range supported by host driver
+ pub hostSupportedVgpuRange: nvmlVgpuVersion_t,
+ ///!< Size of opaque data field in bytes
+ pub opaqueDataSize: ::core::ffi::c_uint,
+ ///!< Opaque data
+ pub opaqueData: [::core::ffi::c_char; 4usize],
+}
+/// Physical GPU metadata structure
+pub type nvmlVgpuPgpuMetadata_t = nvmlVgpuPgpuMetadata_st;
+impl nvmlVgpuVmCompatibility_enum {
+ ///!< vGPU is not runnable
+ pub const NVML_VGPU_VM_COMPATIBILITY_NONE: nvmlVgpuVmCompatibility_enum = nvmlVgpuVmCompatibility_enum(
+ 0,
+ );
+}
+impl nvmlVgpuVmCompatibility_enum {
+ ///!< vGPU is runnable from a cold / powered-off state (ACPI S5)
+ pub const NVML_VGPU_VM_COMPATIBILITY_COLD: nvmlVgpuVmCompatibility_enum = nvmlVgpuVmCompatibility_enum(
+ 1,
+ );
+}
+impl nvmlVgpuVmCompatibility_enum {
+ ///!< vGPU is runnable from a hibernated state (ACPI S4)
+ pub const NVML_VGPU_VM_COMPATIBILITY_HIBERNATE: nvmlVgpuVmCompatibility_enum = nvmlVgpuVmCompatibility_enum(
+ 2,
+ );
+}
+impl nvmlVgpuVmCompatibility_enum {
+ ///!< vGPU is runnable from a sleep state (ACPI S3)
+ pub const NVML_VGPU_VM_COMPATIBILITY_SLEEP: nvmlVgpuVmCompatibility_enum = nvmlVgpuVmCompatibility_enum(
+ 4,
+ );
+}
+impl nvmlVgpuVmCompatibility_enum {
+ ///!< vGPU is runnable from a live/paused state (ACPI S0)
+ pub const NVML_VGPU_VM_COMPATIBILITY_LIVE: nvmlVgpuVmCompatibility_enum = nvmlVgpuVmCompatibility_enum(
+ 8,
+ );
+}
+#[repr(transparent)]
+/// vGPU VM compatibility codes
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuVmCompatibility_enum(pub ::core::ffi::c_uint);
+/// vGPU VM compatibility codes
+pub use self::nvmlVgpuVmCompatibility_enum as nvmlVgpuVmCompatibility_t;
+impl nvmlVgpuPgpuCompatibilityLimitCode_enum {
+ ///!< Compatibility is not limited.
+ pub const NVML_VGPU_COMPATIBILITY_LIMIT_NONE: nvmlVgpuPgpuCompatibilityLimitCode_enum = nvmlVgpuPgpuCompatibilityLimitCode_enum(
+ 0,
+ );
+}
+impl nvmlVgpuPgpuCompatibilityLimitCode_enum {
+ ///!< Compatibility is limited by host driver version.
+ pub const NVML_VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER: nvmlVgpuPgpuCompatibilityLimitCode_enum = nvmlVgpuPgpuCompatibilityLimitCode_enum(
+ 1,
+ );
+}
+impl nvmlVgpuPgpuCompatibilityLimitCode_enum {
+ ///!< Compatibility is limited by guest driver version.
+ pub const NVML_VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER: nvmlVgpuPgpuCompatibilityLimitCode_enum = nvmlVgpuPgpuCompatibilityLimitCode_enum(
+ 2,
+ );
+}
+impl nvmlVgpuPgpuCompatibilityLimitCode_enum {
+ ///!< Compatibility is limited by GPU hardware.
+ pub const NVML_VGPU_COMPATIBILITY_LIMIT_GPU: nvmlVgpuPgpuCompatibilityLimitCode_enum = nvmlVgpuPgpuCompatibilityLimitCode_enum(
+ 4,
+ );
+}
+impl nvmlVgpuPgpuCompatibilityLimitCode_enum {
+ ///!< Compatibility is limited by an undefined factor.
+ pub const NVML_VGPU_COMPATIBILITY_LIMIT_OTHER: nvmlVgpuPgpuCompatibilityLimitCode_enum = nvmlVgpuPgpuCompatibilityLimitCode_enum(
+ 2147483648,
+ );
+}
+#[repr(transparent)]
+/// vGPU-pGPU compatibility limit codes
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuPgpuCompatibilityLimitCode_enum(pub ::core::ffi::c_uint);
+/// vGPU-pGPU compatibility limit codes
+pub use self::nvmlVgpuPgpuCompatibilityLimitCode_enum as nvmlVgpuPgpuCompatibilityLimitCode_t;
+/// vGPU-pGPU compatibility structure
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlVgpuPgpuCompatibility_st {
+ ///!< Compatibility of vGPU VM. See \ref nvmlVgpuVmCompatibility_t
+ pub vgpuVmCompatibility: nvmlVgpuVmCompatibility_t,
+ ///!< Limiting factor for vGPU-pGPU compatibility. See \ref nvmlVgpuPgpuCompatibilityLimitCode_t
+ pub compatibilityLimitCode: nvmlVgpuPgpuCompatibilityLimitCode_t,
+}
+/// vGPU-pGPU compatibility structure
+pub type nvmlVgpuPgpuCompatibility_t = nvmlVgpuPgpuCompatibility_st;
+/// Excluded GPU device information
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlExcludedDeviceInfo_st {
+ ///!< The PCI information for the excluded GPU
+ pub pciInfo: nvmlPciInfo_t,
+ ///!< The ASCII string UUID for the excluded GPU
+ pub uuid: [::core::ffi::c_char; 80usize],
+}
+/// Excluded GPU device information
+pub type nvmlExcludedDeviceInfo_t = nvmlExcludedDeviceInfo_st;
+/// GPU instance placement.
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGpuInstancePlacement_st {
+ ///!< Index of first occupied memory slice
+ pub start: ::core::ffi::c_uint,
+ ///!< Number of memory slices occupied
+ pub size: ::core::ffi::c_uint,
+}
+/// GPU instance placement.
+pub type nvmlGpuInstancePlacement_t = nvmlGpuInstancePlacement_st;
+/// GPU instance profile information.
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGpuInstanceProfileInfo_st {
+ ///!< Unique profile ID within the device
+ pub id: ::core::ffi::c_uint,
+ ///!< Peer-to-Peer support
+ pub isP2pSupported: ::core::ffi::c_uint,
+ ///!< GPU Slice count
+ pub sliceCount: ::core::ffi::c_uint,
+ ///!< GPU instance count
+ pub instanceCount: ::core::ffi::c_uint,
+ ///!< Streaming Multiprocessor count
+ pub multiprocessorCount: ::core::ffi::c_uint,
+ ///!< Copy Engine count
+ pub copyEngineCount: ::core::ffi::c_uint,
+ ///!< Decoder Engine count
+ pub decoderCount: ::core::ffi::c_uint,
+ ///!< Encoder Engine count
+ pub encoderCount: ::core::ffi::c_uint,
+ ///!< JPEG Engine count
+ pub jpegCount: ::core::ffi::c_uint,
+ ///!< OFA Engine count
+ pub ofaCount: ::core::ffi::c_uint,
+ ///!< Memory size in MBytes
+ pub memorySizeMB: ::core::ffi::c_ulonglong,
+}
+/// GPU instance profile information.
+pub type nvmlGpuInstanceProfileInfo_t = nvmlGpuInstanceProfileInfo_st;
+/** GPU instance profile information (v2).
+
+ Version 2 adds the \ref nvmlGpuInstanceProfileInfo_v2_t.version field
+ to the start of the structure, and the \ref nvmlGpuInstanceProfileInfo_v2_t.name
+ field to the end. This structure is not backwards-compatible with
+ \ref nvmlGpuInstanceProfileInfo_t.*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGpuInstanceProfileInfo_v2_st {
+ ///!< Structure version identifier (set to \ref nvmlGpuInstanceProfileInfo_v2)
+ pub version: ::core::ffi::c_uint,
+ ///!< Unique profile ID within the device
+ pub id: ::core::ffi::c_uint,
+ ///!< Peer-to-Peer support
+ pub isP2pSupported: ::core::ffi::c_uint,
+ ///!< GPU Slice count
+ pub sliceCount: ::core::ffi::c_uint,
+ ///!< GPU instance count
+ pub instanceCount: ::core::ffi::c_uint,
+ ///!< Streaming Multiprocessor count
+ pub multiprocessorCount: ::core::ffi::c_uint,
+ ///!< Copy Engine count
+ pub copyEngineCount: ::core::ffi::c_uint,
+ ///!< Decoder Engine count
+ pub decoderCount: ::core::ffi::c_uint,
+ ///!< Encoder Engine count
+ pub encoderCount: ::core::ffi::c_uint,
+ ///!< JPEG Engine count
+ pub jpegCount: ::core::ffi::c_uint,
+ ///!< OFA Engine count
+ pub ofaCount: ::core::ffi::c_uint,
+ ///!< Memory size in MBytes
+ pub memorySizeMB: ::core::ffi::c_ulonglong,
+ ///!< Profile name
+ pub name: [::core::ffi::c_char; 96usize],
+}
+/** GPU instance profile information (v2).
+
+ Version 2 adds the \ref nvmlGpuInstanceProfileInfo_v2_t.version field
+ to the start of the structure, and the \ref nvmlGpuInstanceProfileInfo_v2_t.name
+ field to the end. This structure is not backwards-compatible with
+ \ref nvmlGpuInstanceProfileInfo_t.*/
+pub type nvmlGpuInstanceProfileInfo_v2_t = nvmlGpuInstanceProfileInfo_v2_st;
+/** GPU instance profile information (v3).
+
+ Version 3 removes the isP2pSupported field and adds the \ref nvmlGpuInstanceProfileInfo_v3_t.capabilities
+ field to \ref nvmlGpuInstanceProfileInfo_t.*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGpuInstanceProfileInfo_v3_st {
+ ///!< Structure version identifier (set to \ref nvmlGpuInstanceProfileInfo_v3)
+ pub version: ::core::ffi::c_uint,
+ ///!< Unique profile ID within the device
+ pub id: ::core::ffi::c_uint,
+ ///!< GPU Slice count
+ pub sliceCount: ::core::ffi::c_uint,
+ ///!< GPU instance count
+ pub instanceCount: ::core::ffi::c_uint,
+ ///!< Streaming Multiprocessor count
+ pub multiprocessorCount: ::core::ffi::c_uint,
+ ///!< Copy Engine count
+ pub copyEngineCount: ::core::ffi::c_uint,
+ ///!< Decoder Engine count
+ pub decoderCount: ::core::ffi::c_uint,
+ ///!< Encoder Engine count
+ pub encoderCount: ::core::ffi::c_uint,
+ ///!< JPEG Engine count
+ pub jpegCount: ::core::ffi::c_uint,
+ ///!< OFA Engine count
+ pub ofaCount: ::core::ffi::c_uint,
+ ///!< Memory size in MBytes
+ pub memorySizeMB: ::core::ffi::c_ulonglong,
+ ///!< Profile name
+ pub name: [::core::ffi::c_char; 96usize],
+ ///!< Additional capabilities
+ pub capabilities: ::core::ffi::c_uint,
+}
+/** GPU instance profile information (v3).
+
+ Version 3 removes the isP2pSupported field and adds the \ref nvmlGpuInstanceProfileInfo_v3_t.capabilities
+ field to \ref nvmlGpuInstanceProfileInfo_t.*/
+pub type nvmlGpuInstanceProfileInfo_v3_t = nvmlGpuInstanceProfileInfo_v3_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGpuInstanceInfo_st {
+ ///!< Parent device
+ pub device: nvmlDevice_t,
+ ///!< Unique instance ID within the device
+ pub id: ::core::ffi::c_uint,
+ ///!< Unique profile ID within the device
+ pub profileId: ::core::ffi::c_uint,
+ ///!< Placement for this instance
+ pub placement: nvmlGpuInstancePlacement_t,
+}
+pub type nvmlGpuInstanceInfo_t = nvmlGpuInstanceInfo_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct nvmlGpuInstance_st {
+ _unused: [u8; 0],
+}
+pub type nvmlGpuInstance_t = *mut nvmlGpuInstance_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlComputeInstancePlacement_st {
+ ///!< Index of first occupied compute slice
+ pub start: ::core::ffi::c_uint,
+ ///!< Number of compute slices occupied
+ pub size: ::core::ffi::c_uint,
+}
+pub type nvmlComputeInstancePlacement_t = nvmlComputeInstancePlacement_st;
+/// Compute instance profile information.
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlComputeInstanceProfileInfo_st {
+ ///!< Unique profile ID within the GPU instance
+ pub id: ::core::ffi::c_uint,
+ ///!< GPU Slice count
+ pub sliceCount: ::core::ffi::c_uint,
+ ///!< Compute instance count
+ pub instanceCount: ::core::ffi::c_uint,
+ ///!< Streaming Multiprocessor count
+ pub multiprocessorCount: ::core::ffi::c_uint,
+ ///!< Shared Copy Engine count
+ pub sharedCopyEngineCount: ::core::ffi::c_uint,
+ ///!< Shared Decoder Engine count
+ pub sharedDecoderCount: ::core::ffi::c_uint,
+ ///!< Shared Encoder Engine count
+ pub sharedEncoderCount: ::core::ffi::c_uint,
+ ///!< Shared JPEG Engine count
+ pub sharedJpegCount: ::core::ffi::c_uint,
+ ///!< Shared OFA Engine count
+ pub sharedOfaCount: ::core::ffi::c_uint,
+}
+/// Compute instance profile information.
+pub type nvmlComputeInstanceProfileInfo_t = nvmlComputeInstanceProfileInfo_st;
+/** Compute instance profile information (v2).
+
+ Version 2 adds the \ref nvmlComputeInstanceProfileInfo_v2_t.version field
+ to the start of the structure, and the \ref nvmlComputeInstanceProfileInfo_v2_t.name
+ field to the end. This structure is not backwards-compatible with
+ \ref nvmlComputeInstanceProfileInfo_t.*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlComputeInstanceProfileInfo_v2_st {
+ ///!< Structure version identifier (set to \ref nvmlComputeInstanceProfileInfo_v2)
+ pub version: ::core::ffi::c_uint,
+ ///!< Unique profile ID within the GPU instance
+ pub id: ::core::ffi::c_uint,
+ ///!< GPU Slice count
+ pub sliceCount: ::core::ffi::c_uint,
+ ///!< Compute instance count
+ pub instanceCount: ::core::ffi::c_uint,
+ ///!< Streaming Multiprocessor count
+ pub multiprocessorCount: ::core::ffi::c_uint,
+ ///!< Shared Copy Engine count
+ pub sharedCopyEngineCount: ::core::ffi::c_uint,
+ ///!< Shared Decoder Engine count
+ pub sharedDecoderCount: ::core::ffi::c_uint,
+ ///!< Shared Encoder Engine count
+ pub sharedEncoderCount: ::core::ffi::c_uint,
+ ///!< Shared JPEG Engine count
+ pub sharedJpegCount: ::core::ffi::c_uint,
+ ///!< Shared OFA Engine count
+ pub sharedOfaCount: ::core::ffi::c_uint,
+ ///!< Profile name
+ pub name: [::core::ffi::c_char; 96usize],
+}
+/** Compute instance profile information (v2).
+
+ Version 2 adds the \ref nvmlComputeInstanceProfileInfo_v2_t.version field
+ to the start of the structure, and the \ref nvmlComputeInstanceProfileInfo_v2_t.name
+ field to the end. This structure is not backwards-compatible with
+ \ref nvmlComputeInstanceProfileInfo_t.*/
+pub type nvmlComputeInstanceProfileInfo_v2_t = nvmlComputeInstanceProfileInfo_v2_st;
+/** Compute instance profile information (v3).
+
+ Version 3 adds the \ref nvmlComputeInstanceProfileInfo_v3_t.capabilities field
+ to \ref nvmlComputeInstanceProfileInfo_t.*/
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlComputeInstanceProfileInfo_v3_st {
+ ///!< Structure version identifier (set to \ref nvmlComputeInstanceProfileInfo_v3)
+ pub version: ::core::ffi::c_uint,
+ ///!< Unique profile ID within the GPU instance
+ pub id: ::core::ffi::c_uint,
+ ///!< GPU Slice count
+ pub sliceCount: ::core::ffi::c_uint,
+ ///!< Compute instance count
+ pub instanceCount: ::core::ffi::c_uint,
+ ///!< Streaming Multiprocessor count
+ pub multiprocessorCount: ::core::ffi::c_uint,
+ ///!< Shared Copy Engine count
+ pub sharedCopyEngineCount: ::core::ffi::c_uint,
+ ///!< Shared Decoder Engine count
+ pub sharedDecoderCount: ::core::ffi::c_uint,
+ ///!< Shared Encoder Engine count
+ pub sharedEncoderCount: ::core::ffi::c_uint,
+ ///!< Shared JPEG Engine count
+ pub sharedJpegCount: ::core::ffi::c_uint,
+ ///!< Shared OFA Engine count
+ pub sharedOfaCount: ::core::ffi::c_uint,
+ ///!< Profile name
+ pub name: [::core::ffi::c_char; 96usize],
+ ///!< Additional capabilities
+ pub capabilities: ::core::ffi::c_uint,
+}
+/** Compute instance profile information (v3).
+
+ Version 3 adds the \ref nvmlComputeInstanceProfileInfo_v3_t.capabilities field
+ to \ref nvmlComputeInstanceProfileInfo_t.*/
+pub type nvmlComputeInstanceProfileInfo_v3_t = nvmlComputeInstanceProfileInfo_v3_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlComputeInstanceInfo_st {
+ ///!< Parent device
+ pub device: nvmlDevice_t,
+ ///!< Parent GPU instance
+ pub gpuInstance: nvmlGpuInstance_t,
+ ///!< Unique instance ID within the GPU instance
+ pub id: ::core::ffi::c_uint,
+ ///!< Unique profile ID within the GPU instance
+ pub profileId: ::core::ffi::c_uint,
+ ///!< Placement for this instance within the GPU instance's compute slice range {0, sliceCount}
+ pub placement: nvmlComputeInstancePlacement_t,
+}
+pub type nvmlComputeInstanceInfo_t = nvmlComputeInstanceInfo_st;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct nvmlComputeInstance_st {
+ _unused: [u8; 0],
+}
+pub type nvmlComputeInstance_t = *mut nvmlComputeInstance_st;
+impl nvmlGpmMetricId_t {
+ ///!< Percentage of time any compute/graphics app was active on the GPU. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_GRAPHICS_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(1);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percentage of SMs that were busy. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_SM_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(2);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percentage of warps that were active vs theoretical maximum. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_SM_OCCUPANCY: nvmlGpmMetricId_t = nvmlGpmMetricId_t(3);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percentage of time the GPU's SMs were doing integer operations. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_INTEGER_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(4);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percentage of time the GPU's SMs were doing ANY tensor operations. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_ANY_TENSOR_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(5);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percentage of time the GPU's SMs were doing DFMA tensor operations. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_DFMA_TENSOR_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(6);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percentage of time the GPU's SMs were doing HMMA tensor operations. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_HMMA_TENSOR_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(7);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percentage of time the GPU's SMs were doing IMMA tensor operations. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_IMMA_TENSOR_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(9);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percentage of DRAM bw used vs theoretical maximum. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_DRAM_BW_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(10);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percentage of time the GPU's SMs were doing non-tensor FP64 math. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_FP64_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(11);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percentage of time the GPU's SMs were doing non-tensor FP32 math. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_FP32_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(12);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percentage of time the GPU's SMs were doing non-tensor FP16 math. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_FP16_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(13);
+}
+impl nvmlGpmMetricId_t {
+ ///!< PCIe traffic from this GPU in MiB/sec
+ pub const NVML_GPM_METRIC_PCIE_TX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(20);
+}
+impl nvmlGpmMetricId_t {
+ ///!< PCIe traffic to this GPU in MiB/sec
+ pub const NVML_GPM_METRIC_PCIE_RX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(21);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percent utilization of NVDEC 0. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_NVDEC_0_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(30);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percent utilization of NVDEC 1. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_NVDEC_1_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(31);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percent utilization of NVDEC 2. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_NVDEC_2_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(32);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percent utilization of NVDEC 3. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_NVDEC_3_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(33);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percent utilization of NVDEC 4. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_NVDEC_4_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(34);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percent utilization of NVDEC 5. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_NVDEC_5_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(35);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percent utilization of NVDEC 6. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_NVDEC_6_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(36);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percent utilization of NVDEC 7. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_NVDEC_7_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(37);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percent utilization of NVJPG 0. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_NVJPG_0_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(40);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percent utilization of NVJPG 1. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_NVJPG_1_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(41);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percent utilization of NVJPG 2. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_NVJPG_2_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(42);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percent utilization of NVJPG 3. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_NVJPG_3_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(43);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percent utilization of NVJPG 4. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_NVJPG_4_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(44);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percent utilization of NVJPG 5. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_NVJPG_5_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(45);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percent utilization of NVJPG 6. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_NVJPG_6_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(46);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percent utilization of NVJPG 7. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_NVJPG_7_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(47);
+}
+impl nvmlGpmMetricId_t {
+ ///!< Percent utilization of NVOFA 0. 0.0 - 100.0
+ pub const NVML_GPM_METRIC_NVOFA_0_UTIL: nvmlGpmMetricId_t = nvmlGpmMetricId_t(50);
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink read bandwidth for all links in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 60,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink write bandwidth for all links in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 61,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink read bandwidth for link 0 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L0_RX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 62,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink write bandwidth for link 0 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L0_TX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 63,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink read bandwidth for link 1 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L1_RX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 64,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink write bandwidth for link 1 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L1_TX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 65,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink read bandwidth for link 2 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L2_RX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 66,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink write bandwidth for link 2 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L2_TX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 67,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink read bandwidth for link 3 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L3_RX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 68,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink write bandwidth for link 3 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L3_TX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 69,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink read bandwidth for link 4 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L4_RX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 70,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink write bandwidth for link 4 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L4_TX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 71,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink read bandwidth for link 5 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L5_RX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 72,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink write bandwidth for link 5 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L5_TX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 73,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink read bandwidth for link 6 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L6_RX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 74,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink write bandwidth for link 6 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L6_TX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 75,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink read bandwidth for link 7 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L7_RX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 76,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink write bandwidth for link 7 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L7_TX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 77,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink read bandwidth for link 8 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L8_RX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 78,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink write bandwidth for link 8 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L8_TX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 79,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink read bandwidth for link 9 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L9_RX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 80,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink write bandwidth for link 9 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L9_TX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 81,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink read bandwidth for link 10 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L10_RX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 82,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink write bandwidth for link 10 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L10_TX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 83,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink read bandwidth for link 11 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L11_RX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 84,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink write bandwidth for link 11 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L11_TX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 85,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink read bandwidth for link 12 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L12_RX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 86,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink write bandwidth for link 12 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L12_TX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 87,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink read bandwidth for link 13 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L13_RX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 88,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink write bandwidth for link 13 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L13_TX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 89,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink read bandwidth for link 14 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L14_RX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 90,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink write bandwidth for link 14 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L14_TX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 91,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink read bandwidth for link 15 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L15_RX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 92,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink write bandwidth for link 15 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L15_TX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 93,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink read bandwidth for link 16 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L16_RX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 94,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink write bandwidth for link 16 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L16_TX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 95,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink read bandwidth for link 17 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L17_RX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 96,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< NvLink write bandwidth for link 17 in MiB/sec
+ pub const NVML_GPM_METRIC_NVLINK_L17_TX_PER_SEC: nvmlGpmMetricId_t = nvmlGpmMetricId_t(
+ 97,
+ );
+}
+impl nvmlGpmMetricId_t {
+ ///!< One greater than the largest metric ID. Note that changing this should also change NVML_GPM_METRICS_GET_VERSION due to the struct size change
+ pub const NVML_GPM_METRIC_MAX: nvmlGpmMetricId_t = nvmlGpmMetricId_t(98);
+}
+#[repr(transparent)]
+/// GPM Metric Identifiers
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGpmMetricId_t(pub ::core::ffi::c_uint);
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct nvmlGpmSample_st {
+ _unused: [u8; 0],
+}
+/// Handle to an allocated GPM sample allocated with nvmlGpmSampleAlloc(). Free this with nvmlGpmSampleFree().
+pub type nvmlGpmSample_t = *mut nvmlGpmSample_st;
+/// GPM metric information.
+#[repr(C)]
+#[derive(Debug, Copy, Clone, PartialEq)]
+pub struct nvmlGpmMetric_t {
+ ///!< IN: NVML_GPM_METRIC_? #define of which metric to retrieve
+ pub metricId: ::core::ffi::c_uint,
+ ///!< OUT: Status of this metric. If this is nonzero, then value is not valid
+ pub nvmlReturn: nvmlReturn_t,
+ ///!< OUT: Value of this metric. Is only valid if nvmlReturn is 0 (NVML_SUCCESS)
+ pub value: f64,
+ ///!< OUT: Metric name and unit. Those can be NULL if not defined
+ pub metricInfo: nvmlGpmMetric_t__bindgen_ty_1,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGpmMetric_t__bindgen_ty_1 {
+ pub shortName: *mut ::core::ffi::c_char,
+ pub longName: *mut ::core::ffi::c_char,
+ pub unit: *mut ::core::ffi::c_char,
+}
+/// GPM buffer information.
+#[repr(C)]
+#[derive(Debug, Copy, Clone, PartialEq)]
+pub struct nvmlGpmMetricsGet_t {
+ ///!< IN: Set to NVML_GPM_METRICS_GET_VERSION
+ pub version: ::core::ffi::c_uint,
+ ///!< IN: How many metrics to retrieve in metrics[]
+ pub numMetrics: ::core::ffi::c_uint,
+ ///!< IN: Sample buffer
+ pub sample1: nvmlGpmSample_t,
+ ///!< IN: Sample buffer
+ pub sample2: nvmlGpmSample_t,
+ ///!< IN/OUT: Array of metrics. Set metricId on call. See nvmlReturn and value on return
+ pub metrics: [nvmlGpmMetric_t; 98usize],
+}
+/// GPM device information.
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlGpmSupport_t {
+ ///!< IN: Set to NVML_GPM_SUPPORT_VERSION
+ pub version: ::core::ffi::c_uint,
+ ///!< OUT: Indicates device support
+ pub isSupportedDevice: ::core::ffi::c_uint,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct nvmlNvLinkPowerThres_st {
+ ///!< Low power threshold (in units of 100us)
+ pub lowPwrThreshold: ::core::ffi::c_uint,
+}
+pub type nvmlNvLinkPowerThres_t = nvmlNvLinkPowerThres_st;
+impl nvmlError_t {
+ pub const UNINITIALIZED: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(1)
+ });
+ pub const INVALID_ARGUMENT: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(2)
+ });
+ pub const NOT_SUPPORTED: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(3)
+ });
+ pub const NO_PERMISSION: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(4)
+ });
+ pub const ALREADY_INITIALIZED: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(5)
+ });
+ pub const NOT_FOUND: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(6)
+ });
+ pub const INSUFFICIENT_SIZE: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(7)
+ });
+ pub const INSUFFICIENT_POWER: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(8)
+ });
+ pub const DRIVER_NOT_LOADED: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(9)
+ });
+ pub const TIMEOUT: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(10)
+ });
+ pub const IRQ_ISSUE: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(11)
+ });
+ pub const LIBRARY_NOT_FOUND: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(12)
+ });
+ pub const FUNCTION_NOT_FOUND: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(13)
+ });
+ pub const CORRUPTED_INFOROM: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(14)
+ });
+ pub const GPU_IS_LOST: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(15)
+ });
+ pub const RESET_REQUIRED: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(16)
+ });
+ pub const OPERATING_SYSTEM: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(17)
+ });
+ pub const LIB_RM_VERSION_MISMATCH: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(18)
+ });
+ pub const IN_USE: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(19)
+ });
+ pub const MEMORY: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(20)
+ });
+ pub const NO_DATA: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(21)
+ });
+ pub const VGPU_ECC_NOT_SUPPORTED: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(22)
+ });
+ pub const INSUFFICIENT_RESOURCES: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(23)
+ });
+ pub const FREQ_NOT_SUPPORTED: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(24)
+ });
+ pub const ARGUMENT_VERSION_MISMATCH: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(25)
+ });
+ pub const DEPRECATED: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(26)
+ });
+ pub const NOT_READY: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(27)
+ });
+ pub const GPU_NOT_FOUND: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(28)
+ });
+ pub const INVALID_STATE: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(29)
+ });
+ pub const UNKNOWN: nvmlError_t = nvmlError_t(unsafe {
+ ::core::num::NonZeroU32::new_unchecked(999)
+ });
+}
+#[repr(transparent)]
+#[derive(Debug, Hash, Copy, Clone, PartialEq, Eq)]
+pub struct nvmlError_t(pub ::core::num::NonZeroU32);
+pub trait nvmlReturn_tConsts {
+ const SUCCESS: nvmlReturn_t = nvmlReturn_t::Ok(());
+ const ERROR_UNINITIALIZED: nvmlReturn_t = nvmlReturn_t::Err(
+ nvmlError_t::UNINITIALIZED,
+ );
+ const ERROR_INVALID_ARGUMENT: nvmlReturn_t = nvmlReturn_t::Err(
+ nvmlError_t::INVALID_ARGUMENT,
+ );
+ const ERROR_NOT_SUPPORTED: nvmlReturn_t = nvmlReturn_t::Err(
+ nvmlError_t::NOT_SUPPORTED,
+ );
+ const ERROR_NO_PERMISSION: nvmlReturn_t = nvmlReturn_t::Err(
+ nvmlError_t::NO_PERMISSION,
+ );
+ const ERROR_ALREADY_INITIALIZED: nvmlReturn_t = nvmlReturn_t::Err(
+ nvmlError_t::ALREADY_INITIALIZED,
+ );
+ const ERROR_NOT_FOUND: nvmlReturn_t = nvmlReturn_t::Err(nvmlError_t::NOT_FOUND);
+ const ERROR_INSUFFICIENT_SIZE: nvmlReturn_t = nvmlReturn_t::Err(
+ nvmlError_t::INSUFFICIENT_SIZE,
+ );
+ const ERROR_INSUFFICIENT_POWER: nvmlReturn_t = nvmlReturn_t::Err(
+ nvmlError_t::INSUFFICIENT_POWER,
+ );
+ const ERROR_DRIVER_NOT_LOADED: nvmlReturn_t = nvmlReturn_t::Err(
+ nvmlError_t::DRIVER_NOT_LOADED,
+ );
+ const ERROR_TIMEOUT: nvmlReturn_t = nvmlReturn_t::Err(nvmlError_t::TIMEOUT);
+ const ERROR_IRQ_ISSUE: nvmlReturn_t = nvmlReturn_t::Err(nvmlError_t::IRQ_ISSUE);
+ const ERROR_LIBRARY_NOT_FOUND: nvmlReturn_t = nvmlReturn_t::Err(
+ nvmlError_t::LIBRARY_NOT_FOUND,
+ );
+ const ERROR_FUNCTION_NOT_FOUND: nvmlReturn_t = nvmlReturn_t::Err(
+ nvmlError_t::FUNCTION_NOT_FOUND,
+ );
+ const ERROR_CORRUPTED_INFOROM: nvmlReturn_t = nvmlReturn_t::Err(
+ nvmlError_t::CORRUPTED_INFOROM,
+ );
+ const ERROR_GPU_IS_LOST: nvmlReturn_t = nvmlReturn_t::Err(nvmlError_t::GPU_IS_LOST);
+ const ERROR_RESET_REQUIRED: nvmlReturn_t = nvmlReturn_t::Err(
+ nvmlError_t::RESET_REQUIRED,
+ );
+ const ERROR_OPERATING_SYSTEM: nvmlReturn_t = nvmlReturn_t::Err(
+ nvmlError_t::OPERATING_SYSTEM,
+ );
+ const ERROR_LIB_RM_VERSION_MISMATCH: nvmlReturn_t = nvmlReturn_t::Err(
+ nvmlError_t::LIB_RM_VERSION_MISMATCH,
+ );
+ const ERROR_IN_USE: nvmlReturn_t = nvmlReturn_t::Err(nvmlError_t::IN_USE);
+ const ERROR_MEMORY: nvmlReturn_t = nvmlReturn_t::Err(nvmlError_t::MEMORY);
+ const ERROR_NO_DATA: nvmlReturn_t = nvmlReturn_t::Err(nvmlError_t::NO_DATA);
+ const ERROR_VGPU_ECC_NOT_SUPPORTED: nvmlReturn_t = nvmlReturn_t::Err(
+ nvmlError_t::VGPU_ECC_NOT_SUPPORTED,
+ );
+ const ERROR_INSUFFICIENT_RESOURCES: nvmlReturn_t = nvmlReturn_t::Err(
+ nvmlError_t::INSUFFICIENT_RESOURCES,
+ );
+ const ERROR_FREQ_NOT_SUPPORTED: nvmlReturn_t = nvmlReturn_t::Err(
+ nvmlError_t::FREQ_NOT_SUPPORTED,
+ );
+ const ERROR_ARGUMENT_VERSION_MISMATCH: nvmlReturn_t = nvmlReturn_t::Err(
+ nvmlError_t::ARGUMENT_VERSION_MISMATCH,
+ );
+ const ERROR_DEPRECATED: nvmlReturn_t = nvmlReturn_t::Err(nvmlError_t::DEPRECATED);
+ const ERROR_NOT_READY: nvmlReturn_t = nvmlReturn_t::Err(nvmlError_t::NOT_READY);
+ const ERROR_GPU_NOT_FOUND: nvmlReturn_t = nvmlReturn_t::Err(
+ nvmlError_t::GPU_NOT_FOUND,
+ );
+ const ERROR_INVALID_STATE: nvmlReturn_t = nvmlReturn_t::Err(
+ nvmlError_t::INVALID_STATE,
+ );
+ const ERROR_UNKNOWN: nvmlReturn_t = nvmlReturn_t::Err(nvmlError_t::UNKNOWN);
+}
+impl nvmlReturn_tConsts for nvmlReturn_t {}
+#[must_use]
+pub type nvmlReturn_t = ::core::result::Result<(), nvmlError_t>;
+const _: fn() = || {
+ let _ = std::mem::transmute::<nvmlReturn_t, u32>;
+};
diff --git a/ptx/lib/zluda_ptx_impl.bc b/ptx/lib/zluda_ptx_impl.bc
index 4b5a5d8..6cefc81 100644
--- a/ptx/lib/zluda_ptx_impl.bc
+++ b/ptx/lib/zluda_ptx_impl.bc
Binary files differ
diff --git a/ptx/lib/zluda_ptx_impl.cpp b/ptx/lib/zluda_ptx_impl.cpp
index f86a7fd..7af9729 100644
--- a/ptx/lib/zluda_ptx_impl.cpp
+++ b/ptx/lib/zluda_ptx_impl.cpp
@@ -1,8 +1,10 @@
-// Every time this file changes it must te rebuilt, you need `rocm-llvm-dev` and `llvm-17`:
-// /opt/rocm/llvm/bin/clang -Wall -Wextra -Wsign-compare -Wconversion -x hip zluda_ptx_impl.cpp -nogpulib -O3 -mno-wavefrontsize64 -o zluda_ptx_impl.bc -emit-llvm -c --offload-device-only --offload-arch=gfx1010 && /opt/rocm/llvm/bin/llvm-dis zluda_ptx_impl.bc -o - | sed '/@llvm.used/d' | sed '/wchar_size/d' | sed '/llvm.module.flags/d' | sed 's/define hidden/define linkonce_odr/g' | sed 's/\"target-cpu\"=\"gfx1010\"//g' | sed -E 's/\"target-features\"=\"[^\"]+\"//g' | sed 's/ nneg / /g' | sed 's/ disjoint / /g' | llvm-as-17 - -o zluda_ptx_impl.bc && /opt/rocm/llvm/bin/llvm-dis zluda_ptx_impl.bc
+// Every time this file changes it must be rebuilt; you need `rocm-llvm-dev` and `llvm-17`
+// `fdenormal-fp-math=dynamic` is required to make functions eligible for inlining
+// /opt/rocm/llvm/bin/clang -Xclang -fdenormal-fp-math=dynamic -Wall -Wextra -Wsign-compare -Wconversion -x hip zluda_ptx_impl.cpp -nogpulib -O3 -mno-wavefrontsize64 -o zluda_ptx_impl.bc -emit-llvm -c --offload-device-only --offload-arch=gfx1010 && /opt/rocm/llvm/bin/llvm-dis zluda_ptx_impl.bc -o - | sed '/@llvm.used/d' | sed '/wchar_size/d' | sed '/llvm.module.flags/d' | sed 's/define hidden/define linkonce_odr/g' | sed 's/\"target-cpu\"=\"gfx1010\"//g' | sed -E 's/\"target-features\"=\"[^\"]+\"//g' | sed 's/ nneg / /g' | sed 's/ disjoint / /g' | llvm-as-17 - -o zluda_ptx_impl.bc && /opt/rocm/llvm/bin/llvm-dis zluda_ptx_impl.bc
#include <cstddef>
#include <cstdint>
+#include <hip/amd_detail/amd_device_functions.h>
#define FUNC(NAME) __device__ __attribute__((retain)) __zluda_ptx_impl_##NAME
@@ -37,7 +39,7 @@ extern "C"
return (uint32_t)__ockl_get_num_groups(member);
}
- uint32_t __ockl_bfe_u32(uint32_t, uint32_t, uint32_t) __attribute__((device));
+ uint32_t __ockl_bfe_u32(uint32_t, uint32_t, uint32_t) __device__;
uint32_t FUNC(bfe_u32)(uint32_t base, uint32_t pos_32, uint32_t len_32)
{
uint32_t pos = pos_32 & 0xFFU;
@@ -65,7 +67,7 @@ extern "C"
return (base >> pos) & ((1UL << len) - 1UL);
}
- int32_t __ockl_bfe_i32(int32_t, uint32_t, uint32_t) __attribute__((device));
+ int32_t __ockl_bfe_i32(int32_t, uint32_t, uint32_t) __device__;
int32_t FUNC(bfe_s32)(int32_t base, uint32_t pos_32, uint32_t len_32)
{
uint32_t pos = pos_32 & 0xFFU;
@@ -120,7 +122,7 @@ extern "C"
return (base << (64U - pos - len)) >> (64U - len);
}
- uint32_t __ockl_bfm_u32(uint32_t count, uint32_t offset) __attribute__((device));
+ uint32_t __ockl_bfm_u32(uint32_t count, uint32_t offset) __device__;
uint32_t FUNC(bfi_b32)(uint32_t insert, uint32_t base, uint32_t pos_32, uint32_t len_32)
{
uint32_t pos = pos_32 & 0xFFU;
@@ -148,4 +150,20 @@ extern "C"
mask = ((1UL << len) - 1UL) << (pos);
return (~mask & base) | (mask & (insert << pos));
}
+
+ void FUNC(bar_sync)(uint32_t)
+ {
+ __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup");
+ __builtin_amdgcn_s_barrier();
+ }
+
+ void FUNC(__assertfail)(uint64_t message,
+ uint64_t file,
+ uint32_t line,
+ uint64_t function,
+ uint64_t char_size)
+ {
+ (void)char_size;
+ __assert_fail((const char *)message, (const char *)file, line, (const char *)function);
+ }
}
diff --git a/ptx/src/pass/emit_llvm.rs b/ptx/src/pass/emit_llvm.rs
index fa011a3..2d1269d 100644
--- a/ptx/src/pass/emit_llvm.rs
+++ b/ptx/src/pass/emit_llvm.rs
@@ -96,10 +96,6 @@ impl Module {
let memory_buffer = unsafe { LLVMWriteBitcodeToMemoryBuffer(self.get()) };
MemoryBuffer(memory_buffer)
}
-
- fn write_to_stderr(&self) {
- unsafe { LLVMDumpModule(self.get()) };
- }
}
impl Drop for Module {
@@ -183,7 +179,6 @@ pub(super) fn run<'input>(
Directive2::Method(method) => emit_ctx.emit_method(method)?,
}
}
- module.write_to_stderr();
if let Err(err) = module.verify() {
panic!("{:?}", err);
}
@@ -246,6 +241,9 @@ impl<'a, 'input> ModuleEmitContext<'a, 'input> {
.map(|v| get_input_argument_type(self.context, &v.v_type, v.state_space)),
)?;
fn_ = unsafe { LLVMAddFunction(self.module, name.as_ptr(), fn_type) };
+ self.emit_fn_attribute(fn_, "amdgpu-unsafe-fp-atomics", "true");
+ self.emit_fn_attribute(fn_, "uniform-work-group-size", "true");
+ self.emit_fn_attribute(fn_, "no-trapping-math", "true");
}
if let ast::MethodName::Func(name) = func_decl.name {
self.resolver.register(name, fn_);
@@ -404,6 +402,19 @@ impl<'a, 'input> ModuleEmitContext<'a, 'input> {
ptx_parser::ScalarType::BF16x2 => todo!(),
})
}
+
+ fn emit_fn_attribute(&self, llvm_object: LLVMValueRef, key: &str, value: &str) {
+ let attribute = unsafe {
+ LLVMCreateStringAttribute(
+ self.context,
+ key.as_ptr() as _,
+ key.len() as u32,
+ value.as_ptr() as _,
+ value.len() as u32,
+ )
+ };
+ unsafe { LLVMAddAttributeAtIndex(llvm_object, LLVMAttributeFunctionIndex, attribute) };
+ }
}
fn get_input_argument_type(
@@ -529,7 +540,7 @@ impl<'a> MethodEmitContext<'a> {
ast::Instruction::Shl { data, arguments } => self.emit_shl(data, arguments),
ast::Instruction::Ret { data } => Ok(self.emit_ret(data)),
ast::Instruction::Cvta { data, arguments } => self.emit_cvta(data, arguments),
- ast::Instruction::Abs { .. } => todo!(),
+ ast::Instruction::Abs { data, arguments } => self.emit_abs(data, arguments),
ast::Instruction::Mad { data, arguments } => self.emit_mad(data, arguments),
ast::Instruction::Fma { data, arguments } => self.emit_fma(data, arguments),
ast::Instruction::Sub { data, arguments } => self.emit_sub(data, arguments),
@@ -539,7 +550,6 @@ impl<'a> MethodEmitContext<'a> {
ast::Instruction::Sqrt { data, arguments } => self.emit_sqrt(data, arguments),
ast::Instruction::Rsqrt { data, arguments } => self.emit_rsqrt(data, arguments),
ast::Instruction::Selp { data, arguments } => self.emit_selp(data, arguments),
- ast::Instruction::Bar { .. } => todo!(),
ast::Instruction::Atom { data, arguments } => self.emit_atom(data, arguments),
ast::Instruction::AtomCas { data, arguments } => self.emit_atom_cas(data, arguments),
ast::Instruction::Div { data, arguments } => self.emit_div(data, arguments),
@@ -559,6 +569,7 @@ impl<'a> MethodEmitContext<'a> {
ast::Instruction::Trap {} => todo!(),
// replaced by a function call
ast::Instruction::Bfe { .. }
+ | ast::Instruction::Bar { .. }
| ast::Instruction::Bfi { .. }
| ast::Instruction::Activemask { .. } => return Err(error_unreachable()),
}
@@ -1570,8 +1581,12 @@ impl<'a> MethodEmitContext<'a> {
Some(LLVMBuildFPToUI),
)
}
- ptx_parser::CvtMode::FPFromSigned(_) => todo!(),
- ptx_parser::CvtMode::FPFromUnsigned(_) => todo!(),
+ ptx_parser::CvtMode::FPFromSigned(_) => {
+ return self.emit_cvt_int_to_float(data.to, arguments, LLVMBuildSIToFP)
+ }
+ ptx_parser::CvtMode::FPFromUnsigned(_) => {
+ return self.emit_cvt_int_to_float(data.to, arguments, LLVMBuildUIToFP)
+ }
};
let src = self.resolver.value(arguments.src)?;
self.resolver.with_result(arguments.dst, |dst| unsafe {
@@ -1726,6 +1741,25 @@ impl<'a> MethodEmitContext<'a> {
Ok(())
}
+ fn emit_cvt_int_to_float(
+ &mut self,
+ to: ptx_parser::ScalarType,
+ arguments: ptx_parser::CvtArgs<SpirvWord>,
+ llvm_func: unsafe extern "C" fn(
+ arg1: LLVMBuilderRef,
+ Val: LLVMValueRef,
+ DestTy: LLVMTypeRef,
+ Name: *const i8,
+ ) -> LLVMValueRef,
+ ) -> Result<(), TranslateError> {
+ let type_ = get_scalar_type(self.context, to);
+ let src = self.resolver.value(arguments.src)?;
+ self.resolver.with_result(arguments.dst, |dst| unsafe {
+ llvm_func(self.builder, src, type_, dst)
+ });
+ Ok(())
+ }
+
fn emit_rsqrt(
&mut self,
data: ptx_parser::TypeFtz,
@@ -1994,7 +2028,7 @@ impl<'a> MethodEmitContext<'a> {
ptx_parser::MinMaxDetails::Float(ptx_parser::MinMaxFloat { nan: true, .. }) => {
return Err(error_todo())
}
- ptx_parser::MinMaxDetails::Float(ptx_parser::MinMaxFloat { .. }) => "llvm.maxnum",
+ ptx_parser::MinMaxDetails::Float(ptx_parser::MinMaxFloat { .. }) => "llvm.minnum",
};
let intrinsic = format!("{}.{}\0", llvm_prefix, LLVMTypeDisplay(data.type_()));
let llvm_type = get_scalar_type(self.context, data.type_());
@@ -2021,7 +2055,7 @@ impl<'a> MethodEmitContext<'a> {
ptx_parser::MinMaxDetails::Float(ptx_parser::MinMaxFloat { nan: true, .. }) => {
return Err(error_todo())
}
- ptx_parser::MinMaxDetails::Float(ptx_parser::MinMaxFloat { .. }) => "llvm.minnum",
+ ptx_parser::MinMaxDetails::Float(ptx_parser::MinMaxFloat { .. }) => "llvm.maxnum",
};
let intrinsic = format!("{}.{}\0", llvm_prefix, LLVMTypeDisplay(data.type_()));
let llvm_type = get_scalar_type(self.context, data.type_());
@@ -2149,6 +2183,30 @@ impl<'a> MethodEmitContext<'a> {
Ok(())
}
+ fn emit_abs(
+ &mut self,
+ data: ast::TypeFtz,
+ arguments: ptx_parser::AbsArgs<SpirvWord>,
+ ) -> Result<(), TranslateError> {
+ let llvm_type = get_scalar_type(self.context, data.type_);
+ let src = self.resolver.value(arguments.src)?;
+ let (prefix, intrinsic_arguments) = if data.type_.kind() == ast::ScalarKind::Float {
+ ("llvm.fabs", vec![(src, llvm_type)])
+ } else {
+ let pred = get_scalar_type(self.context, ast::ScalarType::Pred);
+ let zero = unsafe { LLVMConstInt(pred, 0, 0) };
+ ("llvm.abs", vec![(src, llvm_type), (zero, pred)])
+ };
+ let llvm_intrinsic = format!("{}.{}\0", prefix, LLVMTypeDisplay(data.type_));
+ self.emit_intrinsic(
+ unsafe { CStr::from_bytes_with_nul_unchecked(llvm_intrinsic.as_bytes()) },
+ Some(arguments.dst),
+ &data.type_.into(),
+ intrinsic_arguments,
+ )?;
+ Ok(())
+ }
+
/*
// Currently unused, LLVM 18 (ROCm 6.2) does not support `llvm.set.rounding`
// Should be available in LLVM 19
diff --git a/ptx/src/pass/insert_explicit_load_store.rs b/ptx/src/pass/insert_explicit_load_store.rs
index 60c4a14..702f733 100644
--- a/ptx/src/pass/insert_explicit_load_store.rs
+++ b/ptx/src/pass/insert_explicit_load_store.rs
@@ -122,6 +122,13 @@ fn run_statement<'a, 'input>(
result.push(Statement::Instruction(instruction));
result.extend(visitor.post.drain(..).map(Statement::Instruction));
}
+ Statement::PtrAccess(ptr_access) => {
+ let statement = Statement::PtrAccess(visitor.visit_ptr_access(ptr_access)?);
+ let statement = statement.visit_map(visitor)?;
+ result.extend(visitor.pre.drain(..).map(Statement::Instruction));
+ result.push(statement);
+ result.extend(visitor.post.drain(..).map(Statement::Instruction));
+ }
s => {
let new_statement = s.visit_map(visitor)?;
result.extend(visitor.pre.drain(..).map(Statement::Instruction));
@@ -259,6 +266,41 @@ impl<'a, 'input> InsertMemSSAVisitor<'a, 'input> {
Ok(ast::Instruction::Ld { data, arguments })
}
+ fn visit_ptr_access(
+ &mut self,
+ ptr_access: PtrAccess<SpirvWord>,
+ ) -> Result<PtrAccess<SpirvWord>, TranslateError> {
+ let (old_space, new_space, name) = match self.variables.get(&ptr_access.ptr_src) {
+ Some(RemapAction::LDStSpaceChange {
+ old_space,
+ new_space,
+ name,
+ }) => (*old_space, *new_space, *name),
+ Some(RemapAction::PreLdPostSt { .. }) | None => return Ok(ptr_access),
+ };
+ if ptr_access.state_space != old_space {
+ return Err(error_mismatched_type());
+ }
+ // Propagate space changes in dst
+ let new_dst = self
+ .resolver
+ .register_unnamed(Some((ptr_access.underlying_type.clone(), new_space)));
+ self.variables.insert(
+ ptr_access.dst,
+ RemapAction::LDStSpaceChange {
+ old_space,
+ new_space,
+ name: new_dst,
+ },
+ );
+ Ok(PtrAccess {
+ ptr_src: name,
+ dst: new_dst,
+ state_space: new_space,
+ ..ptr_access
+ })
+ }
+
fn visit_variable(&mut self, var: &mut ast::Variable<SpirvWord>) -> Result<(), TranslateError> {
let old_space = match var.state_space {
space @ (ptx_parser::StateSpace::Reg | ptx_parser::StateSpace::Param) => space,
diff --git a/ptx/src/pass/mod.rs b/ptx/src/pass/mod.rs
index ef131b4..c32cc39 100644
--- a/ptx/src/pass/mod.rs
+++ b/ptx/src/pass/mod.rs
@@ -22,6 +22,7 @@ mod normalize_identifiers2;
mod normalize_predicates2;
mod replace_instructions_with_function_calls;
mod resolve_function_pointers;
+mod replace_known_functions;
static ZLUDA_PTX_IMPL: &'static [u8] = include_bytes!("../../lib/zluda_ptx_impl.bc");
const ZLUDA_PTX_PREFIX: &'static str = "__zluda_ptx_impl_";
@@ -42,9 +43,10 @@ pub fn to_llvm_module<'input>(ast: ast::Module<'input>) -> Result<Module, Transl
let mut scoped_resolver = ScopedResolver::new(&mut flat_resolver);
let sreg_map = SpecialRegistersMap2::new(&mut scoped_resolver)?;
let directives = normalize_identifiers2::run(&mut scoped_resolver, ast.directives)?;
+ let directives = replace_known_functions::run(&flat_resolver, directives);
let directives = normalize_predicates2::run(&mut flat_resolver, directives)?;
let directives = resolve_function_pointers::run(directives)?;
- let directives = fix_special_registers2::run(&mut flat_resolver, &sreg_map, directives)?;
+ let directives: Vec<Directive2<'_, ptx_parser::Instruction<ptx_parser::ParsedOperand<SpirvWord>>, ptx_parser::ParsedOperand<SpirvWord>>> = fix_special_registers2::run(&mut flat_resolver, &sreg_map, directives)?;
let directives = expand_operands::run(&mut flat_resolver, directives)?;
let directives = deparamize_functions::run(&mut flat_resolver, directives)?;
let directives = insert_explicit_load_store::run(&mut flat_resolver, directives)?;
diff --git a/ptx/src/pass/replace_instructions_with_function_calls.rs b/ptx/src/pass/replace_instructions_with_function_calls.rs
index 70d77d3..668cc21 100644
--- a/ptx/src/pass/replace_instructions_with_function_calls.rs
+++ b/ptx/src/pass/replace_instructions_with_function_calls.rs
@@ -104,6 +104,9 @@ fn run_instruction<'input>(
let name = ["bfi_", scalar_to_ptx_name(data)].concat();
to_call(resolver, fn_declarations, name.into(), i)?
}
+ i @ ptx_parser::Instruction::Bar { .. } => {
+ to_call(resolver, fn_declarations, "bar_sync".into(), i)?
+ }
i => i,
})
}
diff --git a/ptx/src/pass/replace_known_functions.rs b/ptx/src/pass/replace_known_functions.rs
new file mode 100644
index 0000000..56bb7e6
--- /dev/null
+++ b/ptx/src/pass/replace_known_functions.rs
@@ -0,0 +1,38 @@
+use super::{GlobalStringIdentResolver2, NormalizedDirective2, SpirvWord};
+
+pub(crate) fn run<'input>(
+ resolver: &GlobalStringIdentResolver2<'input>,
+ mut directives: Vec<NormalizedDirective2<'input>>,
+) -> Vec<NormalizedDirective2<'input>> {
+ for directive in directives.iter_mut() {
+ match directive {
+ NormalizedDirective2::Method(func) => {
+ func.import_as =
+ replace_with_ptx_impl(resolver, &func.func_decl.name, func.import_as.take());
+ }
+ _ => {}
+ }
+ }
+ directives
+}
+
+fn replace_with_ptx_impl<'input>(
+ resolver: &GlobalStringIdentResolver2<'input>,
+ fn_name: &ptx_parser::MethodName<'input, SpirvWord>,
+ name: Option<String>,
+) -> Option<String> {
+ let known_names = ["__assertfail"];
+ match name {
+ Some(name) if known_names.contains(&&*name) => Some(format!("__zluda_ptx_impl_{}", name)),
+ Some(name) => Some(name),
+ None => match fn_name {
+ ptx_parser::MethodName::Func(name) => match resolver.ident_map.get(name) {
+ Some(super::IdentEntry {
+ name: Some(name), ..
+ }) => Some(format!("__zluda_ptx_impl_{}", name)),
+ _ => None,
+ },
+ ptx_parser::MethodName::Kernel(..) => None,
+ },
+ }
+}
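
As a standalone sketch (not part of the patch), the simplest branch of the rule this pass applies: when a method already has an import name, it is redirected to the ZLUDA implementation only if it is on the known list (currently just __assertfail), by prepending the __zluda_ptx_impl_ prefix; any other existing import name is kept as-is.

fn redirect(name: &str) -> String {
    const KNOWN_NAMES: &[&str] = &["__assertfail"];
    if KNOWN_NAMES.contains(&name) {
        // Matches the __zluda_ptx_impl_ prefix used by the PTX passes.
        format!("__zluda_ptx_impl_{}", name)
    } else {
        name.to_string()
    }
}

fn main() {
    assert_eq!(redirect("__assertfail"), "__zluda_ptx_impl___assertfail");
    assert_eq!(redirect("some_other_fn"), "some_other_fn");
}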
diff --git a/ptx/src/test/spirv_run/mod.rs b/ptx/src/test/spirv_run/mod.rs
index f4b7921..e4171cd 100644
--- a/ptx/src/test/spirv_run/mod.rs
+++ b/ptx/src/test/spirv_run/mod.rs
@@ -298,7 +298,7 @@ fn run_hip<Input: From<u8> + Copy + Debug, Output: From<u8> + Copy + Debug + Def
let mut result = vec![0u8.into(); output.len()];
{
let dev = 0;
- let mut stream = ptr::null_mut();
+ let mut stream = unsafe { mem::zeroed() };
unsafe { hipStreamCreate(&mut stream) }.unwrap();
let mut dev_props = unsafe { mem::zeroed() };
unsafe { hipGetDevicePropertiesR0600(&mut dev_props, dev) }.unwrap();
@@ -308,9 +308,9 @@ fn run_hip<Input: From<u8> + Copy + Debug, Output: From<u8> + Copy + Debug + Def
module.linked_bitcode(),
)
.unwrap();
- let mut module = ptr::null_mut();
+ let mut module = unsafe { mem::zeroed() };
unsafe { hipModuleLoadData(&mut module, elf_module.as_ptr() as _) }.unwrap();
- let mut kernel = ptr::null_mut();
+ let mut kernel = unsafe { mem::zeroed() };
unsafe { hipModuleGetFunction(&mut kernel, module, name.as_ptr()) }.unwrap();
let mut inp_b = ptr::null_mut();
unsafe { hipMalloc(&mut inp_b, input.len() * mem::size_of::<Input>()) }.unwrap();
diff --git a/zluda/src/impl/context.rs b/zluda/src/impl/context.rs
index 973febc..e812e85 100644
--- a/zluda/src/impl/context.rs
+++ b/zluda/src/impl/context.rs
@@ -1,5 +1,5 @@
use super::{driver, FromCuda, ZludaObject};
-use cuda_types::*;
+use cuda_types::cuda::*;
use hip_runtime_sys::*;
use rustc_hash::FxHashSet;
use std::{cell::RefCell, ptr, sync::Mutex};
diff --git a/zluda/src/impl/device.rs b/zluda/src/impl/device.rs
index 8836c1e..13bce63 100644
--- a/zluda/src/impl/device.rs
+++ b/zluda/src/impl/device.rs
@@ -1,4 +1,4 @@
-use cuda_types::*;
+use cuda_types::cuda::*;
use hip_runtime_sys::*;
use std::{mem, ptr};
@@ -70,6 +70,16 @@ pub(crate) fn get_attribute(
attrib: CUdevice_attribute,
dev_idx: hipDevice_t,
) -> hipError_t {
+ fn get_device_prop(
+ pi: &mut i32,
+ dev_idx: hipDevice_t,
+ f: impl FnOnce(&hipDeviceProp_tR0600) -> i32,
+ ) -> hipError_t {
+ let mut props = unsafe { mem::zeroed() };
+ unsafe { hipGetDevicePropertiesR0600(&mut props, dev_idx)? };
+ *pi = f(&props);
+ Ok(())
+ }
match attrib {
CUdevice_attribute::CU_DEVICE_ATTRIBUTE_WARP_SIZE => {
*pi = 32;
@@ -79,6 +89,110 @@ pub(crate) fn get_attribute(
*pi = 0;
return Ok(());
}
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH => {
+ return get_device_prop(pi, dev_idx, |props| props.maxTexture2DLayered[0])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT => {
+ return get_device_prop(pi, dev_idx, |props| props.maxTexture2DLayered[1])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS => {
+ return get_device_prop(pi, dev_idx, |props| props.maxTexture2DLayered[2])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH => {
+ return get_device_prop(pi, dev_idx, |props| props.maxTexture1DLayered[0])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS => {
+ return get_device_prop(pi, dev_idx, |props| props.maxTexture1DLayered[1])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER => {
+ return get_device_prop(pi, dev_idx, |props| {
+ (props.maxTexture2DGather[0] > 0 && props.maxTexture2DGather[1] > 0) as i32
+ })
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH => {
+ return get_device_prop(pi, dev_idx, |props| props.maxTexture2DGather[0])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT => {
+ return get_device_prop(pi, dev_idx, |props| props.maxTexture2DGather[1])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE => {
+ return get_device_prop(pi, dev_idx, |props| props.maxTexture3DAlt[0])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE => {
+ return get_device_prop(pi, dev_idx, |props| props.maxTexture3DAlt[1])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE => {
+ return get_device_prop(pi, dev_idx, |props| props.maxTexture3DAlt[2])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH => {
+ return get_device_prop(pi, dev_idx, |props| props.maxTextureCubemap)
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH => {
+ return get_device_prop(pi, dev_idx, |props| props.maxTextureCubemapLayered[0])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS => {
+ return get_device_prop(pi, dev_idx, |props| props.maxTextureCubemapLayered[1])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH => {
+ return get_device_prop(pi, dev_idx, |props| props.maxSurface1D)
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH => {
+ return get_device_prop(pi, dev_idx, |props| props.maxSurface2D[0])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT => {
+ return get_device_prop(pi, dev_idx, |props| props.maxSurface2D[1])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH => {
+ return get_device_prop(pi, dev_idx, |props| props.maxSurface3D[0])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT => {
+ return get_device_prop(pi, dev_idx, |props| props.maxSurface3D[1])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH => {
+ return get_device_prop(pi, dev_idx, |props| props.maxSurface3D[2])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH => {
+ return get_device_prop(pi, dev_idx, |props| props.maxSurface1DLayered[0])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS => {
+ return get_device_prop(pi, dev_idx, |props| props.maxSurface1DLayered[1])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH => {
+ return get_device_prop(pi, dev_idx, |props| props.maxSurface2DLayered[0])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT => {
+ return get_device_prop(pi, dev_idx, |props| props.maxSurface2DLayered[1])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS => {
+ return get_device_prop(pi, dev_idx, |props| props.maxSurface2DLayered[2])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH => {
+ return get_device_prop(pi, dev_idx, |props| props.maxSurfaceCubemap)
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH => {
+ return get_device_prop(pi, dev_idx, |props| props.maxSurfaceCubemapLayered[0])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS => {
+ return get_device_prop(pi, dev_idx, |props| props.maxSurfaceCubemapLayered[1])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH => {
+ return get_device_prop(pi, dev_idx, |props| props.maxTexture1DLinear)
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH => {
+ return get_device_prop(pi, dev_idx, |props| props.maxTexture2DLinear[0])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT => {
+ return get_device_prop(pi, dev_idx, |props| props.maxTexture2DLinear[1])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH => {
+ return get_device_prop(pi, dev_idx, |props| props.maxTexture2DLinear[2])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH => {
+ return get_device_prop(pi, dev_idx, |props| props.maxTexture2DMipmap[0])
+ }
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT => {
+ return get_device_prop(pi, dev_idx, |props| props.maxTexture2DMipmap[1])
+ }
CUdevice_attribute::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR => {
*pi = COMPUTE_CAPABILITY_MAJOR;
return Ok(());
@@ -87,6 +201,9 @@ pub(crate) fn get_attribute(
*pi = COMPUTE_CAPABILITY_MINOR;
return Ok(());
}
+ CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH => {
+ return get_device_prop(pi, dev_idx, |props| props.maxTexture1DMipmap)
+ }
_ => {}
}
let attrib = remap_attribute! {
@@ -260,7 +377,7 @@ pub(crate) fn get_name(
name: *mut ::core::ffi::c_char,
len: ::core::ffi::c_int,
dev: hipDevice_t,
-) -> cuda_types::CUresult {
+) -> CUresult {
unsafe { hipDeviceGetName(name, len, dev) }?;
let len = len as usize;
let buffer = unsafe { std::slice::from_raw_parts(name, len) };
@@ -287,7 +404,7 @@ pub(crate) fn total_mem_v2(bytes: *mut usize, dev: hipDevice_t) -> hipError_t {
unsafe { hipDeviceTotalMem(bytes, dev) }
}
-pub(crate) fn get_properties(prop: &mut cuda_types::CUdevprop, dev: hipDevice_t) -> hipError_t {
+pub(crate) fn get_properties(prop: &mut CUdevprop, dev: hipDevice_t) -> hipError_t {
let mut hip_props = unsafe { mem::zeroed() };
unsafe { hipGetDevicePropertiesR0600(&mut hip_props, dev) }?;
prop.maxThreadsPerBlock = hip_props.maxThreadsPerBlock;
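
A self-contained sketch of the helper pattern introduced in get_attribute above, with Props and query_props as stand-ins for hipDeviceProp_tR0600 and hipGetDevicePropertiesR0600 (not the real HIP API): the local function performs the property query once, and each match arm only supplies a closure that selects the field it needs.

struct Props {
    max_texture_1d_mipmap: i32,
    max_surface_cubemap: i32,
}

// Stand-in for the real property query; always succeeds with fixed values.
fn query_props(_dev: i32) -> Result<Props, i32> {
    Ok(Props { max_texture_1d_mipmap: 32768, max_surface_cubemap: 16384 })
}

// Mirrors get_device_prop: run the query, then let the caller pick a field.
fn get_prop(pi: &mut i32, dev: i32, f: impl FnOnce(&Props) -> i32) -> Result<(), i32> {
    let props = query_props(dev)?;
    *pi = f(&props);
    Ok(())
}

fn main() -> Result<(), i32> {
    let mut value = 0;
    get_prop(&mut value, 0, |props| props.max_texture_1d_mipmap)?;
    assert_eq!(value, 32768);
    get_prop(&mut value, 0, |props| props.max_surface_cubemap)?;
    assert_eq!(value, 16384);
    Ok(())
}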
diff --git a/zluda/src/impl/driver.rs b/zluda/src/impl/driver.rs
index 7ff2f54..5b15afb 100644
--- a/zluda/src/impl/driver.rs
+++ b/zluda/src/impl/driver.rs
@@ -1,4 +1,4 @@
-use cuda_types::*;
+use cuda_types::cuda::*;
use hip_runtime_sys::*;
use std::{
ffi::{CStr, CString},
@@ -74,6 +74,6 @@ pub(crate) fn init(flags: ::core::ffi::c_uint) -> CUresult {
}
pub(crate) fn get_version(version: &mut ::core::ffi::c_int) -> CUresult {
- *version = cuda_types::CUDA_VERSION as i32;
+ *version = cuda_types::cuda::CUDA_VERSION as i32;
Ok(())
}
diff --git a/zluda/src/impl/memory.rs b/zluda/src/impl/memory.rs
index 3843776..18e58e7 100644
--- a/zluda/src/impl/memory.rs
+++ b/zluda/src/impl/memory.rs
@@ -1,4 +1,5 @@
use hip_runtime_sys::*;
+use std::mem;
pub(crate) fn alloc_v2(dptr: *mut hipDeviceptr_t, bytesize: usize) -> hipError_t {
unsafe { hipMalloc(dptr.cast(), bytesize) }?;
@@ -33,3 +34,11 @@ pub(crate) fn get_address_range_v2(
) -> hipError_t {
unsafe { hipMemGetAddressRange(pbase, psize, dptr) }
}
+
+pub(crate) fn set_d32_v2(dst: hipDeviceptr_t, ui: ::core::ffi::c_uint, n: usize) -> hipError_t {
+ unsafe { hipMemsetD32(dst, mem::transmute(ui), n) }
+}
+
+pub(crate) fn set_d8_v2(dst: hipDeviceptr_t, value: ::core::ffi::c_uchar, n: usize) -> hipError_t {
+ unsafe { hipMemsetD8(dst, value, n) }
+}
diff --git a/zluda/src/impl/mod.rs b/zluda/src/impl/mod.rs
index 766b4a5..4d8bc83 100644
--- a/zluda/src/impl/mod.rs
+++ b/zluda/src/impl/mod.rs
@@ -1,4 +1,4 @@
-use cuda_types::*;
+use cuda_types::cuda::*;
use hip_runtime_sys::*;
use std::mem::{self, ManuallyDrop, MaybeUninit};
@@ -107,10 +107,11 @@ from_cuda_nop!(
*const ::core::ffi::c_char,
*mut ::core::ffi::c_void,
*mut *mut ::core::ffi::c_void,
+ u8,
i32,
u32,
usize,
- cuda_types::CUdevprop,
+ cuda_types::cuda::CUdevprop,
CUdevice_attribute
);
from_cuda_transmute!(
@@ -136,7 +137,7 @@ impl<'a> FromCuda<'a, CUlimit> for hipLimit_t {
pub(crate) trait ZludaObject: Sized + Send + Sync {
const COOKIE: usize;
- const LIVENESS_FAIL: CUerror = cuda_types::CUerror::INVALID_VALUE;
+ const LIVENESS_FAIL: CUerror = cuda_types::cuda::CUerror::INVALID_VALUE;
type CudaHandle: Sized;
diff --git a/zluda/src/impl/module.rs b/zluda/src/impl/module.rs
index 8b19c1b..b469a89 100644
--- a/zluda/src/impl/module.rs
+++ b/zluda/src/impl/module.rs
@@ -1,5 +1,5 @@
use super::ZludaObject;
-use cuda_types::*;
+use cuda_types::cuda::*;
use hip_runtime_sys::*;
use std::{ffi::CStr, mem};
diff --git a/zluda/src/impl/pointer.rs b/zluda/src/impl/pointer.rs
index 6b458a0..e620bab 100644
--- a/zluda/src/impl/pointer.rs
+++ b/zluda/src/impl/pointer.rs
@@ -1,4 +1,4 @@
-use cuda_types::*;
+use cuda_types::cuda::*;
use hip_runtime_sys::*;
use std::{ffi::c_void, ptr};
diff --git a/zluda/src/lib.rs b/zluda/src/lib.rs
index 1568f47..e058bd7 100644
--- a/zluda/src/lib.rs
+++ b/zluda/src/lib.rs
@@ -1,7 +1,7 @@
pub(crate) mod r#impl;
macro_rules! unimplemented {
- ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:path;)*) => {
+ ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:ty;)*) => {
$(
#[cfg_attr(not(test), no_mangle)]
#[allow(improper_ctypes)]
@@ -14,7 +14,7 @@ macro_rules! unimplemented {
}
macro_rules! implemented {
- ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:path;)*) => {
+ ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:ty;)*) => {
$(
#[cfg_attr(not(test), no_mangle)]
#[allow(improper_ctypes)]
@@ -28,7 +28,7 @@ macro_rules! implemented {
}
macro_rules! implemented_in_function {
- ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:path;)*) => {
+ ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:ty;)*) => {
$(
#[cfg_attr(not(test), no_mangle)]
#[allow(improper_ctypes)]
@@ -72,6 +72,8 @@ cuda_base::cuda_function_declarations!(
cuModuleUnload,
cuPointerGetAttribute,
cuMemGetAddressRange_v2,
+ cuMemsetD32_v2,
+ cuMemsetD8_v2
],
implemented_in_function <= [
cuLaunchKernel,
diff --git a/zluda_bindgen/src/main.rs b/zluda_bindgen/src/main.rs
index 7332254..bfa9d49 100644
--- a/zluda_bindgen/src/main.rs
+++ b/zluda_bindgen/src/main.rs
@@ -5,7 +5,7 @@ use std::{collections::hash_map, fs::File, io::Write, iter, path::PathBuf, str::
use syn::{
parse_quote, punctuated::Punctuated, visit_mut::VisitMut, Abi, Fields, FieldsUnnamed, FnArg,
ForeignItem, ForeignItemFn, Ident, Item, ItemConst, ItemForeignMod, ItemUse, LitStr, Path,
- PathArguments, Signature, Type, TypePath, UseTree,
+ PathArguments, Signature, Type, TypePath, UseTree, PathSegment
};
fn main() {
@@ -14,6 +14,11 @@ fn main() {
&crate_root,
&["..", "ext", "hip_runtime-sys", "src", "lib.rs"],
);
+ generate_ml(&crate_root);
+ generate_cuda(&crate_root);
+}
+
+fn generate_cuda(crate_root: &PathBuf) {
let cuda_header = bindgen::Builder::default()
.use_core()
.rust_target(bindgen::RustTarget::Stable_1_77)
@@ -42,16 +47,91 @@ fn main() {
.unwrap()
.to_string();
let module: syn::File = syn::parse_str(&cuda_header).unwrap();
- generate_functions(&crate_root, &["..", "cuda_base", "src", "cuda.rs"], &module);
- generate_types(&crate_root, &["..", "cuda_types", "src", "lib.rs"], &module);
+ generate_functions(
+ &crate_root,
+ "cuda",
+ &["..", "cuda_base", "src", "cuda.rs"],
+ &module,
+ );
+ generate_types_cuda(
+ &crate_root,
+ &["..", "cuda_types", "src", "cuda.rs"],
+ &module,
+ );
generate_display(
&crate_root,
&["..", "zluda_dump", "src", "format_generated.rs"],
- "cuda_types",
+ &["cuda_types", "cuda"],
&module,
)
}
+fn generate_ml(crate_root: &PathBuf) {
+ let ml_header = bindgen::Builder::default()
+ .use_core()
+ .rust_target(bindgen::RustTarget::Stable_1_77)
+ .layout_tests(false)
+ .default_enum_style(bindgen::EnumVariation::NewType {
+ is_bitfield: false,
+ is_global: false,
+ })
+ .derive_hash(true)
+ .derive_eq(true)
+ .header("/usr/local/cuda/include/nvml.h")
+ .allowlist_type("^nvml.*")
+ .allowlist_function("^nvml.*")
+ .allowlist_var("^NVML.*")
+ .must_use_type("nvmlReturn_t")
+ .constified_enum("nvmlReturn_enum")
+ .generate()
+ .unwrap()
+ .to_string();
+ let mut module: syn::File = syn::parse_str(&ml_header).unwrap();
+ let mut converter = ConvertIntoRustResult {
+ type_: "nvmlReturn_t",
+ underlying_type: "nvmlReturn_enum",
+ new_error_type: "nvmlError_t",
+ error_prefix: ("NVML_ERROR_", "ERROR_"),
+ success: ("NVML_SUCCESS", "SUCCESS"),
+ constants: Vec::new(),
+ };
+ module.items = module
+ .items
+ .into_iter()
+ .filter_map(|item| match item {
+ Item::Const(const_) => converter.get_const(const_).map(Item::Const),
+ Item::Use(use_) => converter.get_use(use_).map(Item::Use),
+ Item::Type(type_) => converter.get_type(type_).map(Item::Type),
+ item => Some(item),
+ })
+ .collect::<Vec<_>>();
+ converter.flush(&mut module.items);
+ generate_functions(
+ &crate_root,
+ "nvml",
+ &["..", "cuda_base", "src", "nvml.rs"],
+ &module,
+ );
+ generate_types(
+ &crate_root,
+ &["..", "cuda_types", "src", "nvml.rs"],
+ &module,
+ );
+}
+
+fn generate_types(crate_root: &PathBuf, path: &[&str], module: &syn::File) {
+ let non_fn = module.items.iter().filter_map(|item| match item {
+ Item::ForeignMod(_) => None,
+ _ => Some(item),
+ });
+ let module: syn::File = parse_quote! {
+ #(#non_fn)*
+ };
+ let mut output = crate_root.clone();
+ output.extend(path);
+ write_rust_to_file(output, &prettyplease::unparse(&module))
+}
+
fn generate_hip_runtime(output: &PathBuf, path: &[&str]) {
let hiprt_header = bindgen::Builder::default()
.use_core()
@@ -125,7 +205,7 @@ fn add_send_sync(items: &mut Vec<Item>, arg: &[&str]) {
}
}
-fn generate_functions(output: &PathBuf, path: &[&str], module: &syn::File) {
+fn generate_functions(output: &PathBuf, submodule: &str, path: &[&str], module: &syn::File) {
let fns_ = module.items.iter().filter_map(|item| match item {
Item::ForeignMod(extern_) => match &*extern_.items {
[ForeignItem::Fn(fn_)] => Some(fn_),
@@ -138,7 +218,8 @@ fn generate_functions(output: &PathBuf, path: &[&str], module: &syn::File) {
#(#fns_)*
}
};
- syn::visit_mut::visit_file_mut(&mut PrependCudaPath, &mut module);
+ let submodule = Ident::new(submodule, Span::call_site());
+ syn::visit_mut::visit_file_mut(&mut PrependCudaPath { module: submodule }, &mut module);
syn::visit_mut::visit_file_mut(&mut RemoveVisibility, &mut module);
syn::visit_mut::visit_file_mut(&mut ExplicitReturnType, &mut module);
let mut output = output.clone();
@@ -146,7 +227,7 @@ fn generate_functions(output: &PathBuf, path: &[&str], module: &syn::File) {
write_rust_to_file(output, &prettyplease::unparse(&module))
}
-fn generate_types(output: &PathBuf, path: &[&str], module: &syn::File) {
+fn generate_types_cuda(output: &PathBuf, path: &[&str], module: &syn::File) {
let mut module = module.clone();
let mut converter = ConvertIntoRustResult {
type_: "CUresult",
@@ -314,7 +395,9 @@ impl VisitMut for FixAbi {
}
}
-struct PrependCudaPath;
+struct PrependCudaPath {
+ module: Ident,
+}
impl VisitMut for PrependCudaPath {
fn visit_type_path_mut(&mut self, type_: &mut TypePath) {
@@ -322,7 +405,8 @@ impl VisitMut for PrependCudaPath {
match &*type_.path.segments[0].ident.to_string() {
"usize" | "f64" | "f32" => {}
_ => {
- *type_ = parse_quote! { cuda_types :: #type_ };
+ let module = &self.module;
+ *type_ = parse_quote! { cuda_types :: #module :: #type_ };
}
}
}
@@ -350,7 +434,7 @@ impl VisitMut for ExplicitReturnType {
fn generate_display(
output: &PathBuf,
path: &[&str],
- types_crate: &'static str,
+ types_crate: &[&'static str],
module: &syn::File,
) {
let ignore_types = [
@@ -419,7 +503,7 @@ fn generate_display(
}
struct DeriveDisplayState<'a> {
- types_crate: &'static str,
+ types_crate: Path,
ignore_types: FxHashSet<Ident>,
ignore_fns: FxHashSet<Ident>,
enums: FxHashMap<&'a Ident, Vec<&'a Ident>>,
@@ -430,12 +514,22 @@ struct DeriveDisplayState<'a> {
impl<'a> DeriveDisplayState<'a> {
fn new(
ignore_types: &[&'static str],
- types_crate: &'static str,
+ types_crate: &[&'static str],
ignore_fns: &[&'static str],
count_selectors: &[(&'static str, usize, usize)],
) -> Self {
+ let segments = types_crate
+ .iter()
+ .map(|seg| PathSegment {
+ ident: Ident::new(seg, Span::call_site()),
+ arguments: PathArguments::None,
+ })
+ .collect::<Punctuated<_, _>>();
DeriveDisplayState {
- types_crate,
+ types_crate: Path {
+ leading_colon: None,
+ segments,
+ },
ignore_types: ignore_types
.into_iter()
.map(|x| Ident::new(x, Span::call_site()))
@@ -469,8 +563,11 @@ fn cuda_derive_display_trait_for_item<'a>(
state: &mut DeriveDisplayState<'a>,
item: &'a Item,
) -> Option<syn::Item> {
- let path_prefix = Path::from(Ident::new(state.types_crate, Span::call_site()));
+ let path_prefix = & state.types_crate;
let path_prefix_iter = iter::repeat(&path_prefix);
+ let mut prepend_path = PrependCudaPath {
+ module: Ident::new("cuda", Span::call_site()),
+ };
match item {
Item::Const(const_) => {
if const_.ty.to_token_stream().to_string() == "cudaError_enum" {
@@ -490,7 +587,7 @@ fn cuda_derive_display_trait_for_item<'a>(
.iter()
.map(|fn_arg| {
let mut fn_arg = fn_arg.clone();
- syn::visit_mut::visit_fn_arg_mut(&mut PrependCudaPath, &mut fn_arg);
+ syn::visit_mut::visit_fn_arg_mut(&mut prepend_path, &mut fn_arg);
fn_arg
})
.collect::<Vec<_>>();
@@ -686,7 +783,7 @@ fn curesult_display_trait(derive_state: &DeriveDisplayState) -> syn::Item {
})
});
parse_quote! {
- impl crate::format::CudaDisplay for cuda_types::CUresult {
+ impl crate::format::CudaDisplay for cuda_types::cuda::CUresult {
fn write(&self, _fn_name: &'static str, _index: usize, writer: &mut (impl std::io::Write + ?Sized)) -> std::io::Result<()> {
match self {
Ok(()) => writer.write_all(b"CUDA_SUCCESS"),
diff --git a/zluda_dump/src/dark_api.rs b/zluda_dump/src/dark_api.rs
index 623f96f..58a0ac2 100644
--- a/zluda_dump/src/dark_api.rs
+++ b/zluda_dump/src/dark_api.rs
@@ -1,7 +1,7 @@
use crate::format;
use crate::{log, os, trace::StateTracker};
use crate::{log::UInt, GlobalDelayedState};
-use cuda_types::{CUmodule, CUresult, CUuuid};
+use cuda_types::cuda::*;
use std::borrow::Cow;
use std::hash::Hash;
use std::{
diff --git a/zluda_dump/src/format.rs b/zluda_dump/src/format.rs
index c1aac61..776c493 100644
--- a/zluda_dump/src/format.rs
+++ b/zluda_dump/src/format.rs
@@ -1,4 +1,4 @@
-use cuda_types::{CUGLDeviceList, CUdevice};
+use cuda_types::cuda::*;
use std::{
ffi::{c_void, CStr},
fmt::LowerHex,
@@ -14,7 +14,7 @@ pub(crate) trait CudaDisplay {
) -> std::io::Result<()>;
}
-impl CudaDisplay for cuda_types::CUuuid {
+impl CudaDisplay for CUuuid {
fn write(
&self,
_fn_name: &'static str,
@@ -26,7 +26,7 @@ impl CudaDisplay for cuda_types::CUuuid {
}
}
-impl CudaDisplay for cuda_types::CUdeviceptr_v1 {
+impl CudaDisplay for CUdeviceptr_v1 {
fn write(
&self,
_fn_name: &'static str,
@@ -125,7 +125,7 @@ pub fn write_handle<T: LowerHex>(
Ok(())
}
-impl CudaDisplay for cuda_types::CUipcMemHandle {
+impl CudaDisplay for CUipcMemHandle {
fn write(
&self,
_fn_name: &'static str,
@@ -136,7 +136,7 @@ impl CudaDisplay for cuda_types::CUipcMemHandle {
}
}
-impl CudaDisplay for cuda_types::CUipcEventHandle {
+impl CudaDisplay for CUipcEventHandle {
fn write(
&self,
_fn_name: &'static str,
@@ -147,7 +147,7 @@ impl CudaDisplay for cuda_types::CUipcEventHandle {
}
}
-impl CudaDisplay for cuda_types::CUmemPoolPtrExportData_v1 {
+impl CudaDisplay for CUmemPoolPtrExportData_v1 {
fn write(
&self,
_fn_name: &'static str,
@@ -223,7 +223,7 @@ impl CudaDisplay for *mut i8 {
}
}
-impl CudaDisplay for cuda_types::CUstreamBatchMemOpParams {
+impl CudaDisplay for CUstreamBatchMemOpParams {
fn write(
&self,
_fn_name: &'static str,
@@ -236,15 +236,15 @@ impl CudaDisplay for cuda_types::CUstreamBatchMemOpParams {
// distinct operations with nominally distinct union variants, but
// in reality they are structurally different, so we take a little
// shortcut here
- cuda_types::CUstreamBatchMemOpType::CU_STREAM_MEM_OP_WAIT_VALUE_32
- | cuda_types::CUstreamBatchMemOpType::CU_STREAM_MEM_OP_WRITE_VALUE_32 => {
+ CUstreamBatchMemOpType::CU_STREAM_MEM_OP_WAIT_VALUE_32
+ | CUstreamBatchMemOpType::CU_STREAM_MEM_OP_WRITE_VALUE_32 => {
write_wait_value(&self.waitValue, writer, false)
}
- cuda_types::CUstreamBatchMemOpType::CU_STREAM_MEM_OP_WAIT_VALUE_64
- | cuda_types::CUstreamBatchMemOpType::CU_STREAM_MEM_OP_WRITE_VALUE_64 => {
+ CUstreamBatchMemOpType::CU_STREAM_MEM_OP_WAIT_VALUE_64
+ | CUstreamBatchMemOpType::CU_STREAM_MEM_OP_WRITE_VALUE_64 => {
write_wait_value(&self.waitValue, writer, true)
}
- cuda_types::CUstreamBatchMemOpType::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES => {
+ CUstreamBatchMemOpType::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES => {
CudaDisplay::write(&self.flushRemoteWrites, "", 0, writer)
}
_ => {
@@ -258,7 +258,7 @@ impl CudaDisplay for cuda_types::CUstreamBatchMemOpParams {
}
pub fn write_wait_value(
- this: &cuda_types::CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st,
+ this: &CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st,
writer: &mut (impl std::io::Write + ?Sized),
is_64_bit: bool,
) -> std::io::Result<()> {
@@ -275,7 +275,7 @@ pub fn write_wait_value(
}
pub fn write_wait_value_32_or_64(
- this: &cuda_types::CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st__bindgen_ty_1,
+ this: &CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st__bindgen_ty_1,
writer: &mut (impl std::io::Write + ?Sized),
is_64_bit: bool,
) -> std::io::Result<()> {
@@ -288,7 +288,7 @@ pub fn write_wait_value_32_or_64(
}
}
-impl CudaDisplay for cuda_types::CUDA_RESOURCE_DESC_st {
+impl CudaDisplay for CUDA_RESOURCE_DESC_st {
fn write(
&self,
_fn_name: &'static str,
@@ -298,28 +298,28 @@ impl CudaDisplay for cuda_types::CUDA_RESOURCE_DESC_st {
writer.write_all(b"{ resType: ")?;
CudaDisplay::write(&self.resType, "", 0, writer)?;
match self.resType {
- cuda_types::CUresourcetype::CU_RESOURCE_TYPE_ARRAY => {
+ CUresourcetype::CU_RESOURCE_TYPE_ARRAY => {
writer.write_all(b", res: ")?;
CudaDisplay::write(unsafe { &self.res.array }, "", 0, writer)?;
writer.write_all(b", flags: ")?;
CudaDisplay::write(&self.flags, "", 0, writer)?;
writer.write_all(b" }")
}
- cuda_types::CUresourcetype::CU_RESOURCE_TYPE_MIPMAPPED_ARRAY => {
+ CUresourcetype::CU_RESOURCE_TYPE_MIPMAPPED_ARRAY => {
writer.write_all(b", res: ")?;
CudaDisplay::write(unsafe { &self.res.mipmap }, "", 0, writer)?;
writer.write_all(b", flags: ")?;
CudaDisplay::write(&self.flags, "", 0, writer)?;
writer.write_all(b" }")
}
- cuda_types::CUresourcetype::CU_RESOURCE_TYPE_LINEAR => {
+ CUresourcetype::CU_RESOURCE_TYPE_LINEAR => {
writer.write_all(b", res: ")?;
CudaDisplay::write(unsafe { &self.res.linear }, "", 0, writer)?;
writer.write_all(b", flags: ")?;
CudaDisplay::write(&self.flags, "", 0, writer)?;
writer.write_all(b" }")
}
- cuda_types::CUresourcetype::CU_RESOURCE_TYPE_PITCH2D => {
+ CUresourcetype::CU_RESOURCE_TYPE_PITCH2D => {
writer.write_all(b", res: ")?;
CudaDisplay::write(unsafe { &self.res.pitch2D }, "", 0, writer)?;
writer.write_all(b", flags: ")?;
@@ -335,7 +335,7 @@ impl CudaDisplay for cuda_types::CUDA_RESOURCE_DESC_st {
}
}
-impl CudaDisplay for cuda_types::CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st {
+impl CudaDisplay for CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st {
fn write(
&self,
_fn_name: &'static str,
@@ -345,22 +345,22 @@ impl CudaDisplay for cuda_types::CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st {
writer.write_all(b"{ type: ")?;
CudaDisplay::write(&self.type_, "", 0, writer)?;
match self.type_ {
- cuda_types::CUexternalMemoryHandleType::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD => {
+ CUexternalMemoryHandleType::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD => {
writer.write_all(b", handle: ")?;
CudaDisplay::write(unsafe { &self.handle.fd }, "", 0,writer)?;
}
- cuda_types::CUexternalMemoryHandleType::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32
- | cuda_types::CUexternalMemoryHandleType::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP
- | cuda_types::CUexternalMemoryHandleType::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE
- |cuda_types::CUexternalMemoryHandleType::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE => {
+ CUexternalMemoryHandleType::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32
+ | CUexternalMemoryHandleType::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP
+ | CUexternalMemoryHandleType::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE
+ |CUexternalMemoryHandleType::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE => {
write_win32_handle(unsafe { self.handle.win32 }, writer)?;
}
- cuda_types::CUexternalMemoryHandleType::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT
- | cuda_types::CUexternalMemoryHandleType::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT => {
+ CUexternalMemoryHandleType::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT
+ | CUexternalMemoryHandleType::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT => {
writer.write_all(b", handle: ")?;
CudaDisplay::write(unsafe { &self.handle.win32.handle }, "", 0,writer)?;
}
- cuda_types::CUexternalMemoryHandleType::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF => {
+ CUexternalMemoryHandleType::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF => {
writer.write_all(b", handle: ")?;
CudaDisplay::write(unsafe { &self.handle.nvSciBufObject }, "", 0,writer)?;
}
@@ -381,7 +381,7 @@ impl CudaDisplay for cuda_types::CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st {
}
pub fn write_win32_handle(
- win32: cuda_types::CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1,
+ win32: CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1,
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
if win32.handle != ptr::null_mut() {
@@ -400,7 +400,7 @@ pub fn write_win32_handle(
Ok(())
}
-impl CudaDisplay for cuda_types::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st {
+impl CudaDisplay for CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st {
fn write(
&self,
_fn_name: &'static str,
@@ -410,22 +410,22 @@ impl CudaDisplay for cuda_types::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st {
writer.write_all(b"{ type: ")?;
CudaDisplay::write(&self.type_, "", 0, writer)?;
match self.type_ {
- cuda_types::CUexternalSemaphoreHandleType::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD => {
+ CUexternalSemaphoreHandleType::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD => {
writer.write_all(b", handle: ")?;
CudaDisplay::write(unsafe { &self.handle.fd }, "", 0,writer)?;
}
- cuda_types::CUexternalSemaphoreHandleType::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32
- | cuda_types::CUexternalSemaphoreHandleType::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE
- | cuda_types::CUexternalSemaphoreHandleType::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE
- | cuda_types::CUexternalSemaphoreHandleType::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX
- | cuda_types::CUexternalSemaphoreHandleType::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT => {
+ CUexternalSemaphoreHandleType::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32
+ | CUexternalSemaphoreHandleType::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE
+ | CUexternalSemaphoreHandleType::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE
+ | CUexternalSemaphoreHandleType::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX
+ | CUexternalSemaphoreHandleType::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT => {
write_win32_handle(unsafe { mem::transmute(self.handle.win32) }, writer)?;
}
- cuda_types::CUexternalSemaphoreHandleType::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT => {
+ CUexternalSemaphoreHandleType::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT => {
writer.write_all(b", handle: ")?;
CudaDisplay::write(unsafe { &self.handle.win32.handle }, "", 0,writer)?;
}
- cuda_types::CUexternalSemaphoreHandleType::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC => {
+ CUexternalSemaphoreHandleType::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC => {
writer.write_all(b", handle: ")?;
CudaDisplay::write(unsafe { &self.handle.nvSciSyncObj }, "", 0,writer)?;
}
@@ -442,7 +442,7 @@ impl CudaDisplay for cuda_types::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st {
}
impl CudaDisplay
- for cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_2
+ for CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_2
{
fn write(
&self,
@@ -457,7 +457,7 @@ impl CudaDisplay
}
impl CudaDisplay
- for cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_2
+ for CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_2
{
fn write(
&self,
@@ -471,7 +471,7 @@ impl CudaDisplay
}
}
-impl CudaDisplay for cuda_types::CUgraphNodeParams_st {
+impl CudaDisplay for CUgraphNodeParams_st {
fn write(
&self,
_fn_name: &'static str,
@@ -482,7 +482,7 @@ impl CudaDisplay for cuda_types::CUgraphNodeParams_st {
}
}
-impl CudaDisplay for cuda_types::CUlaunchConfig_st {
+impl CudaDisplay for CUlaunchConfig_st {
fn write(
&self,
_fn_name: &'static str,
@@ -493,7 +493,7 @@ impl CudaDisplay for cuda_types::CUlaunchConfig_st {
}
}
-impl CudaDisplay for cuda_types::CUeglFrame_st {
+impl CudaDisplay for CUeglFrame_st {
fn write(
&self,
_fn_name: &'static str,
@@ -504,7 +504,7 @@ impl CudaDisplay for cuda_types::CUeglFrame_st {
}
}
-impl CudaDisplay for cuda_types::CUdevResource_st {
+impl CudaDisplay for CUdevResource_st {
fn write(
&self,
_fn_name: &'static str,
@@ -514,7 +514,7 @@ impl CudaDisplay for cuda_types::CUdevResource_st {
todo!()
}
}
-impl CudaDisplay for cuda_types::CUlaunchAttribute_st {
+impl CudaDisplay for CUlaunchAttribute_st {
fn write(
&self,
_fn_name: &'static str,
@@ -574,7 +574,7 @@ impl<T: CudaDisplay, const N: usize> CudaDisplay for [T; N] {
}
}
-impl CudaDisplay for cuda_types::CUarrayMapInfo_st {
+impl CudaDisplay for CUarrayMapInfo_st {
fn write(
&self,
_fn_name: &'static str,
@@ -585,7 +585,7 @@ impl CudaDisplay for cuda_types::CUarrayMapInfo_st {
}
}
-impl CudaDisplay for cuda_types::CUexecAffinityParam_st {
+impl CudaDisplay for CUexecAffinityParam_st {
fn write(
&self,
_fn_name: &'static str,
@@ -599,9 +599,9 @@ impl CudaDisplay for cuda_types::CUexecAffinityParam_st {
#[allow(non_snake_case)]
pub fn write_cuGraphKernelNodeGetAttribute(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- attr: cuda_types::CUkernelNodeAttrID,
- value_out: *mut cuda_types::CUkernelNodeAttrValue,
+ hNode: CUgraphNode,
+ attr: CUkernelNodeAttrID,
+ value_out: *mut CUkernelNodeAttrValue,
) -> std::io::Result<()> {
writer.write_all(b"(hNode: ")?;
CudaDisplay::write(&hNode, "cuGraphKernelNodeGetAttribute", 0, writer)?;
@@ -614,9 +614,9 @@ pub fn write_cuGraphKernelNodeGetAttribute(
#[allow(non_snake_case)]
pub fn write_cuGraphKernelNodeSetAttribute(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- attr: cuda_types::CUkernelNodeAttrID,
- value_out: *const cuda_types::CUkernelNodeAttrValue,
+ hNode: CUgraphNode,
+ attr: CUkernelNodeAttrID,
+ value_out: *const CUkernelNodeAttrValue,
) -> std::io::Result<()> {
write_cuGraphKernelNodeGetAttribute(writer, hNode, attr, value_out as *mut _)
}
@@ -624,9 +624,9 @@ pub fn write_cuGraphKernelNodeSetAttribute(
#[allow(non_snake_case)]
pub fn write_cuStreamGetAttribute(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- attr: cuda_types::CUstreamAttrID,
- value_out: *mut cuda_types::CUstreamAttrValue,
+ hStream: CUstream,
+ attr: CUstreamAttrID,
+ value_out: *mut CUstreamAttrValue,
) -> std::io::Result<()> {
writer.write_all(b"(hStream: ")?;
CudaDisplay::write(&hStream, "cuStreamGetAttribute", 0, writer)?;
@@ -640,11 +640,11 @@ fn write_launch_attribute(
writer: &mut (impl std::io::Write + ?Sized),
fn_name: &'static str,
index: usize,
- attribute: cuda_types::CUlaunchAttributeID,
- value_out: *mut cuda_types::CUstreamAttrValue,
+ attribute: CUlaunchAttributeID,
+ value_out: *mut CUstreamAttrValue,
) -> std::io::Result<()> {
match attribute {
- cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW => {
+ CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW => {
writer.write_all(b", value_out: ")?;
CudaDisplay::write(
unsafe { &(*value_out).accessPolicyWindow },
@@ -653,47 +653,47 @@ fn write_launch_attribute(
writer,
)
}
- cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_COOPERATIVE => {
+ CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_COOPERATIVE => {
writer.write_all(b", value_out: ")?;
CudaDisplay::write(unsafe { &(*value_out).cooperative }, fn_name, index, writer)
}
- cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY => {
+ CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY => {
writer.write_all(b", value_out: ")?;
CudaDisplay::write(unsafe { &(*value_out).syncPolicy }, fn_name, index, writer)
}
- cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION => {
+ CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION => {
writer.write_all(b", value_out: ")?;
CudaDisplay::write(unsafe { &(*value_out).clusterDim }, fn_name, index, writer)
}
- cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE => {
+ CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE => {
writer.write_all(b", value_out: ")?;
CudaDisplay::write(unsafe { &(*value_out).clusterSchedulingPolicyPreference }, fn_name, index, writer)
}
- cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION => {
+ CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION => {
writer.write_all(b", value_out: ")?;
CudaDisplay::write(unsafe { &(*value_out).programmaticStreamSerializationAllowed }, fn_name, index, writer)
}
- cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT => {
+ CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT => {
writer.write_all(b", value_out: ")?;
CudaDisplay::write(unsafe { &(*value_out).programmaticEvent }, fn_name, index, writer)
}
- cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_PRIORITY => {
+ CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_PRIORITY => {
writer.write_all(b", value_out: ")?;
CudaDisplay::write(unsafe { &(*value_out).priority }, fn_name, index, writer)
}
- cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP => {
+ CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP => {
writer.write_all(b", value_out: ")?;
CudaDisplay::write(unsafe { &(*value_out).memSyncDomainMap }, fn_name, index, writer)
}
- cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN => {
+ CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN => {
writer.write_all(b", value_out: ")?;
CudaDisplay::write(unsafe { &(*value_out).memSyncDomain }, fn_name, index, writer)
}
- cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT => {
+ CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT => {
writer.write_all(b", value_out: ")?;
CudaDisplay::write(unsafe { &(*value_out).launchCompletionEvent }, fn_name, index, writer)
}
- cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE => {
+ CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE => {
writer.write_all(b", value_out: ")?;
CudaDisplay::write(unsafe { &(*value_out).deviceUpdatableKernelNode }, fn_name, index, writer)
}
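The write_launch_attribute hunks above all follow one dispatch pattern: match on the attribute ID, read the CUstreamAttrValue union field that this ID marks as active, and format it. A minimal self-contained sketch of that pattern follows; AttrId, AttrValue and write_attribute are hypothetical stand-ins, not the real CUDA types.

    use std::io::{self, Write};

    // Hypothetical stand-ins for CUlaunchAttributeID / CUstreamAttrValue.
    #[derive(Clone, Copy)]
    enum AttrId {
        Priority,
        Cooperative,
    }

    #[repr(C)]
    union AttrValue {
        priority: i32,
        cooperative: i32,
    }

    // Same shape as write_launch_attribute above: the attribute ID decides
    // which union field is valid to read and print.
    fn write_attribute(
        writer: &mut (impl Write + ?Sized),
        attr: AttrId,
        value_out: *mut AttrValue,
    ) -> io::Result<()> {
        writer.write_all(b", value_out: ")?;
        match attr {
            AttrId::Priority => write!(writer, "{}", unsafe { (*value_out).priority }),
            AttrId::Cooperative => write!(writer, "{}", unsafe { (*value_out).cooperative }),
        }
    }

    fn main() -> io::Result<()> {
        let mut value = AttrValue { priority: 7 };
        let mut out = Vec::new();
        write_attribute(&mut out, AttrId::Priority, &mut value)?;
        assert_eq!(String::from_utf8(out).unwrap(), ", value_out: 7");

        let mut value2 = AttrValue { cooperative: 1 };
        let mut out2 = Vec::new();
        write_attribute(&mut out2, AttrId::Cooperative, &mut value2)?;
        assert_eq!(String::from_utf8(out2).unwrap(), ", value_out: 1");
        Ok(())
    }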
@@ -704,9 +704,9 @@ fn write_launch_attribute(
#[allow(non_snake_case)]
pub fn write_cuStreamGetAttribute_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- attr: cuda_types::CUstreamAttrID,
- value_out: *mut cuda_types::CUstreamAttrValue,
+ hStream: CUstream,
+ attr: CUstreamAttrID,
+ value_out: *mut CUstreamAttrValue,
) -> std::io::Result<()> {
write_cuStreamGetAttribute(writer, hStream, attr, value_out)
}
@@ -714,9 +714,9 @@ pub fn write_cuStreamGetAttribute_ptsz(
#[allow(non_snake_case)]
pub fn write_cuStreamSetAttribute(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- attr: cuda_types::CUstreamAttrID,
- value_out: *const cuda_types::CUstreamAttrValue,
+ hStream: CUstream,
+ attr: CUstreamAttrID,
+ value_out: *const CUstreamAttrValue,
) -> std::io::Result<()> {
write_cuStreamGetAttribute(writer, hStream, attr, value_out as *mut _)
}
@@ -724,9 +724,9 @@ pub fn write_cuStreamSetAttribute(
#[allow(non_snake_case)]
pub fn write_cuStreamSetAttribute_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- attr: cuda_types::CUstreamAttrID,
- value_out: *const cuda_types::CUstreamAttrValue,
+ hStream: CUstream,
+ attr: CUstreamAttrID,
+ value_out: *const CUstreamAttrValue,
) -> std::io::Result<()> {
write_cuStreamSetAttribute(writer, hStream, attr, value_out)
}
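The hunks above in zluda_dump/src/format.rs now name the driver-API types without the cuda_types:: prefix, which only works if the file brings them into scope; presumably an import along the lines of use cuda_types::cuda::*; was added near the top of the file, outside this excerpt. The sketch below shows the idea with a hypothetical stand-in module, and a CudaDisplay trait shape reconstructed from the impls and call sites in this diff; it is illustrative, not the real crate.

    // Hypothetical stand-in for the cuda_types crate; the real crate exposes
    // the driver-API types from a `cuda` submodule, which is what lets
    // format.rs drop the long prefixes after a glob import.
    mod cuda_types {
        pub mod cuda {
            #[repr(transparent)]
            pub struct CUstream(pub *mut core::ffi::c_void);
        }
    }

    use cuda_types::cuda::*;

    // Trait shape reconstructed from the call sites above (assumed, not
    // copied from the real zluda_dump/src/format.rs).
    pub trait CudaDisplay {
        fn write(
            &self,
            fn_name: &'static str,
            index: usize,
            writer: &mut (impl std::io::Write + ?Sized),
        ) -> std::io::Result<()>;
    }

    impl CudaDisplay for CUstream {
        fn write(
            &self,
            _fn_name: &'static str,
            _index: usize,
            writer: &mut (impl std::io::Write + ?Sized),
        ) -> std::io::Result<()> {
            write!(writer, "{:p}", self.0)
        }
    }

    fn main() -> std::io::Result<()> {
        let stream = CUstream(std::ptr::null_mut());
        let mut out = Vec::new();
        CudaDisplay::write(&stream, "cuStreamCreate", 0, &mut out)?;
        println!("{}", String::from_utf8_lossy(&out));
        Ok(())
    }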
diff --git a/zluda_dump/src/format_generated.rs b/zluda_dump/src/format_generated.rs
index 742fb57..ed5eb49 100644
--- a/zluda_dump/src/format_generated.rs
+++ b/zluda_dump/src/format_generated.rs
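From this point the diff only touches the auto-generated formatter, and every change is the same path adjustment: cuda_types::X becomes cuda_types::cuda::X, implying that cuda_types now exposes these types from a cuda submodule. The generated impls follow one recurring pattern for CUDA enums, which these bindings appear to model as transparent newtypes over the raw integer (the `_ => write!(writer, "{}", self.0)` fallback arms only type-check for a newtype, not a native Rust enum): match the known associated constants and print their names via stringify!, then fall back to the raw value for anything unrecognized. A small self-contained sketch of that pattern, using a hypothetical DemoFlags type rather than a real CUDA enum:

    use std::io::{self, Write};

    // Hypothetical newtype-style enum in the same shape the bindings below
    // use: a transparent wrapper over the raw integer plus associated
    // constants for the known values.
    #[repr(transparent)]
    #[derive(Clone, Copy, PartialEq, Eq)]
    pub struct DemoFlags(pub u32);

    impl DemoFlags {
        pub const DEMO_DEFAULT: DemoFlags = DemoFlags(0);
        pub const DEMO_NON_BLOCKING: DemoFlags = DemoFlags(1);
    }

    fn write_flags(flags: DemoFlags, writer: &mut (impl Write + ?Sized)) -> io::Result<()> {
        match flags {
            // Known constants print their symbolic name...
            DemoFlags::DEMO_DEFAULT => {
                writer.write_all(stringify!(DEMO_DEFAULT).as_bytes())
            }
            DemoFlags::DEMO_NON_BLOCKING => {
                writer.write_all(stringify!(DEMO_NON_BLOCKING).as_bytes())
            }
            // ...and unknown values fall back to the raw integer, as in the
            // catch-all arms of the hunks below.
            _ => write!(writer, "{}", flags.0),
        }
    }

    fn main() -> io::Result<()> {
        let mut out = Vec::new();
        write_flags(DemoFlags::DEMO_NON_BLOCKING, &mut out)?;
        out.push(b' ');
        write_flags(DemoFlags(42), &mut out)?;
        assert_eq!(String::from_utf8(out).unwrap(), "DEMO_NON_BLOCKING 42");
        Ok(())
    }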
@@ -1,7 +1,7 @@
// Generated automatically by zluda_bindgen
// DO NOT EDIT MANUALLY
#![allow(warnings)]
-impl crate::format::CudaDisplay for cuda_types::CUdeviceptr_v2 {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUdeviceptr_v2 {
fn write(
&self,
_fn_name: &'static str,
@@ -11,7 +11,7 @@ impl crate::format::CudaDisplay for cuda_types::CUdeviceptr_v2 {
write!(writer, "{:p}", self.0)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUcontext {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUcontext {
fn write(
&self,
_fn_name: &'static str,
@@ -21,7 +21,7 @@ impl crate::format::CudaDisplay for cuda_types::CUcontext {
write!(writer, "{:p}", self.0)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmodule {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmodule {
fn write(
&self,
_fn_name: &'static str,
@@ -31,7 +31,7 @@ impl crate::format::CudaDisplay for cuda_types::CUmodule {
write!(writer, "{:p}", self.0)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUfunction {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUfunction {
fn write(
&self,
_fn_name: &'static str,
@@ -41,7 +41,7 @@ impl crate::format::CudaDisplay for cuda_types::CUfunction {
write!(writer, "{:p}", self.0)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUlibrary {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUlibrary {
fn write(
&self,
_fn_name: &'static str,
@@ -51,7 +51,7 @@ impl crate::format::CudaDisplay for cuda_types::CUlibrary {
write!(writer, "{:p}", self.0)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUkernel {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUkernel {
fn write(
&self,
_fn_name: &'static str,
@@ -61,7 +61,7 @@ impl crate::format::CudaDisplay for cuda_types::CUkernel {
write!(writer, "{:p}", *self)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUarray {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUarray {
fn write(
&self,
_fn_name: &'static str,
@@ -71,7 +71,7 @@ impl crate::format::CudaDisplay for cuda_types::CUarray {
write!(writer, "{:p}", *self)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmipmappedArray {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmipmappedArray {
fn write(
&self,
_fn_name: &'static str,
@@ -81,7 +81,7 @@ impl crate::format::CudaDisplay for cuda_types::CUmipmappedArray {
write!(writer, "{:p}", *self)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUtexref {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUtexref {
fn write(
&self,
_fn_name: &'static str,
@@ -91,7 +91,7 @@ impl crate::format::CudaDisplay for cuda_types::CUtexref {
write!(writer, "{:p}", *self)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUsurfref {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUsurfref {
fn write(
&self,
_fn_name: &'static str,
@@ -101,7 +101,7 @@ impl crate::format::CudaDisplay for cuda_types::CUsurfref {
write!(writer, "{:p}", *self)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUevent {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUevent {
fn write(
&self,
_fn_name: &'static str,
@@ -111,7 +111,7 @@ impl crate::format::CudaDisplay for cuda_types::CUevent {
write!(writer, "{:p}", *self)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUstream {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUstream {
fn write(
&self,
_fn_name: &'static str,
@@ -121,7 +121,7 @@ impl crate::format::CudaDisplay for cuda_types::CUstream {
write!(writer, "{:p}", self.0)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUgraphicsResource {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUgraphicsResource {
fn write(
&self,
_fn_name: &'static str,
@@ -131,7 +131,7 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphicsResource {
write!(writer, "{:p}", *self)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUexternalMemory {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUexternalMemory {
fn write(
&self,
_fn_name: &'static str,
@@ -141,7 +141,7 @@ impl crate::format::CudaDisplay for cuda_types::CUexternalMemory {
write!(writer, "{:p}", *self)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUexternalSemaphore {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUexternalSemaphore {
fn write(
&self,
_fn_name: &'static str,
@@ -151,7 +151,7 @@ impl crate::format::CudaDisplay for cuda_types::CUexternalSemaphore {
write!(writer, "{:p}", *self)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUgraph {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUgraph {
fn write(
&self,
_fn_name: &'static str,
@@ -161,7 +161,7 @@ impl crate::format::CudaDisplay for cuda_types::CUgraph {
write!(writer, "{:p}", *self)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUgraphNode {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUgraphNode {
fn write(
&self,
_fn_name: &'static str,
@@ -171,7 +171,7 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphNode {
write!(writer, "{:p}", *self)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUgraphExec {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUgraphExec {
fn write(
&self,
_fn_name: &'static str,
@@ -181,7 +181,7 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphExec {
write!(writer, "{:p}", *self)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmemoryPool {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmemoryPool {
fn write(
&self,
_fn_name: &'static str,
@@ -191,7 +191,7 @@ impl crate::format::CudaDisplay for cuda_types::CUmemoryPool {
write!(writer, "{:p}", *self)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUuserObject {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUuserObject {
fn write(
&self,
_fn_name: &'static str,
@@ -201,7 +201,7 @@ impl crate::format::CudaDisplay for cuda_types::CUuserObject {
write!(writer, "{:p}", *self)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUgraphDeviceNode {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUgraphDeviceNode {
fn write(
&self,
_fn_name: &'static str,
@@ -211,7 +211,7 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphDeviceNode {
write!(writer, "{:p}", *self)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUasyncCallbackHandle {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUasyncCallbackHandle {
fn write(
&self,
_fn_name: &'static str,
@@ -221,7 +221,7 @@ impl crate::format::CudaDisplay for cuda_types::CUasyncCallbackHandle {
write!(writer, "{:p}", *self)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmemFabricHandle_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmemFabricHandle_st {
fn write(
&self,
_fn_name: &'static str,
@@ -233,7 +233,7 @@ impl crate::format::CudaDisplay for cuda_types::CUmemFabricHandle_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUipcMem_flags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUipcMem_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -241,7 +241,7 @@ impl crate::format::CudaDisplay for cuda_types::CUipcMem_flags_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUipcMem_flags_enum::CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS => {
+ &cuda_types::cuda::CUipcMem_flags_enum::CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS => {
writer
.write_all(stringify!(CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS).as_bytes())
}
@@ -249,7 +249,7 @@ impl crate::format::CudaDisplay for cuda_types::CUipcMem_flags_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmemAttach_flags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmemAttach_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -257,20 +257,20 @@ impl crate::format::CudaDisplay for cuda_types::CUmemAttach_flags_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUmemAttach_flags_enum::CU_MEM_ATTACH_GLOBAL => {
+ &cuda_types::cuda::CUmemAttach_flags_enum::CU_MEM_ATTACH_GLOBAL => {
writer.write_all(stringify!(CU_MEM_ATTACH_GLOBAL).as_bytes())
}
- &cuda_types::CUmemAttach_flags_enum::CU_MEM_ATTACH_HOST => {
+ &cuda_types::cuda::CUmemAttach_flags_enum::CU_MEM_ATTACH_HOST => {
writer.write_all(stringify!(CU_MEM_ATTACH_HOST).as_bytes())
}
- &cuda_types::CUmemAttach_flags_enum::CU_MEM_ATTACH_SINGLE => {
+ &cuda_types::cuda::CUmemAttach_flags_enum::CU_MEM_ATTACH_SINGLE => {
writer.write_all(stringify!(CU_MEM_ATTACH_SINGLE).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUctx_flags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUctx_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -278,47 +278,47 @@ impl crate::format::CudaDisplay for cuda_types::CUctx_flags_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUctx_flags_enum::CU_CTX_SCHED_AUTO => {
+ &cuda_types::cuda::CUctx_flags_enum::CU_CTX_SCHED_AUTO => {
writer.write_all(stringify!(CU_CTX_SCHED_AUTO).as_bytes())
}
- &cuda_types::CUctx_flags_enum::CU_CTX_SCHED_SPIN => {
+ &cuda_types::cuda::CUctx_flags_enum::CU_CTX_SCHED_SPIN => {
writer.write_all(stringify!(CU_CTX_SCHED_SPIN).as_bytes())
}
- &cuda_types::CUctx_flags_enum::CU_CTX_SCHED_YIELD => {
+ &cuda_types::cuda::CUctx_flags_enum::CU_CTX_SCHED_YIELD => {
writer.write_all(stringify!(CU_CTX_SCHED_YIELD).as_bytes())
}
- &cuda_types::CUctx_flags_enum::CU_CTX_SCHED_BLOCKING_SYNC => {
+ &cuda_types::cuda::CUctx_flags_enum::CU_CTX_SCHED_BLOCKING_SYNC => {
writer.write_all(stringify!(CU_CTX_SCHED_BLOCKING_SYNC).as_bytes())
}
- &cuda_types::CUctx_flags_enum::CU_CTX_BLOCKING_SYNC => {
+ &cuda_types::cuda::CUctx_flags_enum::CU_CTX_BLOCKING_SYNC => {
writer.write_all(stringify!(CU_CTX_BLOCKING_SYNC).as_bytes())
}
- &cuda_types::CUctx_flags_enum::CU_CTX_SCHED_MASK => {
+ &cuda_types::cuda::CUctx_flags_enum::CU_CTX_SCHED_MASK => {
writer.write_all(stringify!(CU_CTX_SCHED_MASK).as_bytes())
}
- &cuda_types::CUctx_flags_enum::CU_CTX_MAP_HOST => {
+ &cuda_types::cuda::CUctx_flags_enum::CU_CTX_MAP_HOST => {
writer.write_all(stringify!(CU_CTX_MAP_HOST).as_bytes())
}
- &cuda_types::CUctx_flags_enum::CU_CTX_LMEM_RESIZE_TO_MAX => {
+ &cuda_types::cuda::CUctx_flags_enum::CU_CTX_LMEM_RESIZE_TO_MAX => {
writer.write_all(stringify!(CU_CTX_LMEM_RESIZE_TO_MAX).as_bytes())
}
- &cuda_types::CUctx_flags_enum::CU_CTX_COREDUMP_ENABLE => {
+ &cuda_types::cuda::CUctx_flags_enum::CU_CTX_COREDUMP_ENABLE => {
writer.write_all(stringify!(CU_CTX_COREDUMP_ENABLE).as_bytes())
}
- &cuda_types::CUctx_flags_enum::CU_CTX_USER_COREDUMP_ENABLE => {
+ &cuda_types::cuda::CUctx_flags_enum::CU_CTX_USER_COREDUMP_ENABLE => {
writer.write_all(stringify!(CU_CTX_USER_COREDUMP_ENABLE).as_bytes())
}
- &cuda_types::CUctx_flags_enum::CU_CTX_SYNC_MEMOPS => {
+ &cuda_types::cuda::CUctx_flags_enum::CU_CTX_SYNC_MEMOPS => {
writer.write_all(stringify!(CU_CTX_SYNC_MEMOPS).as_bytes())
}
- &cuda_types::CUctx_flags_enum::CU_CTX_FLAGS_MASK => {
+ &cuda_types::cuda::CUctx_flags_enum::CU_CTX_FLAGS_MASK => {
writer.write_all(stringify!(CU_CTX_FLAGS_MASK).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUevent_sched_flags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUevent_sched_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -326,23 +326,23 @@ impl crate::format::CudaDisplay for cuda_types::CUevent_sched_flags_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUevent_sched_flags_enum::CU_EVENT_SCHED_AUTO => {
+ &cuda_types::cuda::CUevent_sched_flags_enum::CU_EVENT_SCHED_AUTO => {
writer.write_all(stringify!(CU_EVENT_SCHED_AUTO).as_bytes())
}
- &cuda_types::CUevent_sched_flags_enum::CU_EVENT_SCHED_SPIN => {
+ &cuda_types::cuda::CUevent_sched_flags_enum::CU_EVENT_SCHED_SPIN => {
writer.write_all(stringify!(CU_EVENT_SCHED_SPIN).as_bytes())
}
- &cuda_types::CUevent_sched_flags_enum::CU_EVENT_SCHED_YIELD => {
+ &cuda_types::cuda::CUevent_sched_flags_enum::CU_EVENT_SCHED_YIELD => {
writer.write_all(stringify!(CU_EVENT_SCHED_YIELD).as_bytes())
}
- &cuda_types::CUevent_sched_flags_enum::CU_EVENT_SCHED_BLOCKING_SYNC => {
+ &cuda_types::cuda::CUevent_sched_flags_enum::CU_EVENT_SCHED_BLOCKING_SYNC => {
writer.write_all(stringify!(CU_EVENT_SCHED_BLOCKING_SYNC).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUstream_flags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUstream_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -350,17 +350,17 @@ impl crate::format::CudaDisplay for cuda_types::CUstream_flags_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUstream_flags_enum::CU_STREAM_DEFAULT => {
+ &cuda_types::cuda::CUstream_flags_enum::CU_STREAM_DEFAULT => {
writer.write_all(stringify!(CU_STREAM_DEFAULT).as_bytes())
}
- &cuda_types::CUstream_flags_enum::CU_STREAM_NON_BLOCKING => {
+ &cuda_types::cuda::CUstream_flags_enum::CU_STREAM_NON_BLOCKING => {
writer.write_all(stringify!(CU_STREAM_NON_BLOCKING).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUevent_flags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUevent_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -368,23 +368,23 @@ impl crate::format::CudaDisplay for cuda_types::CUevent_flags_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUevent_flags_enum::CU_EVENT_DEFAULT => {
+ &cuda_types::cuda::CUevent_flags_enum::CU_EVENT_DEFAULT => {
writer.write_all(stringify!(CU_EVENT_DEFAULT).as_bytes())
}
- &cuda_types::CUevent_flags_enum::CU_EVENT_BLOCKING_SYNC => {
+ &cuda_types::cuda::CUevent_flags_enum::CU_EVENT_BLOCKING_SYNC => {
writer.write_all(stringify!(CU_EVENT_BLOCKING_SYNC).as_bytes())
}
- &cuda_types::CUevent_flags_enum::CU_EVENT_DISABLE_TIMING => {
+ &cuda_types::cuda::CUevent_flags_enum::CU_EVENT_DISABLE_TIMING => {
writer.write_all(stringify!(CU_EVENT_DISABLE_TIMING).as_bytes())
}
- &cuda_types::CUevent_flags_enum::CU_EVENT_INTERPROCESS => {
+ &cuda_types::cuda::CUevent_flags_enum::CU_EVENT_INTERPROCESS => {
writer.write_all(stringify!(CU_EVENT_INTERPROCESS).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUevent_record_flags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUevent_record_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -392,17 +392,17 @@ impl crate::format::CudaDisplay for cuda_types::CUevent_record_flags_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUevent_record_flags_enum::CU_EVENT_RECORD_DEFAULT => {
+ &cuda_types::cuda::CUevent_record_flags_enum::CU_EVENT_RECORD_DEFAULT => {
writer.write_all(stringify!(CU_EVENT_RECORD_DEFAULT).as_bytes())
}
- &cuda_types::CUevent_record_flags_enum::CU_EVENT_RECORD_EXTERNAL => {
+ &cuda_types::cuda::CUevent_record_flags_enum::CU_EVENT_RECORD_EXTERNAL => {
writer.write_all(stringify!(CU_EVENT_RECORD_EXTERNAL).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUevent_wait_flags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUevent_wait_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -410,17 +410,17 @@ impl crate::format::CudaDisplay for cuda_types::CUevent_wait_flags_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUevent_wait_flags_enum::CU_EVENT_WAIT_DEFAULT => {
+ &cuda_types::cuda::CUevent_wait_flags_enum::CU_EVENT_WAIT_DEFAULT => {
writer.write_all(stringify!(CU_EVENT_WAIT_DEFAULT).as_bytes())
}
- &cuda_types::CUevent_wait_flags_enum::CU_EVENT_WAIT_EXTERNAL => {
+ &cuda_types::cuda::CUevent_wait_flags_enum::CU_EVENT_WAIT_EXTERNAL => {
writer.write_all(stringify!(CU_EVENT_WAIT_EXTERNAL).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUstreamWaitValue_flags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUstreamWaitValue_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -428,26 +428,26 @@ impl crate::format::CudaDisplay for cuda_types::CUstreamWaitValue_flags_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUstreamWaitValue_flags_enum::CU_STREAM_WAIT_VALUE_GEQ => {
+ &cuda_types::cuda::CUstreamWaitValue_flags_enum::CU_STREAM_WAIT_VALUE_GEQ => {
writer.write_all(stringify!(CU_STREAM_WAIT_VALUE_GEQ).as_bytes())
}
- &cuda_types::CUstreamWaitValue_flags_enum::CU_STREAM_WAIT_VALUE_EQ => {
+ &cuda_types::cuda::CUstreamWaitValue_flags_enum::CU_STREAM_WAIT_VALUE_EQ => {
writer.write_all(stringify!(CU_STREAM_WAIT_VALUE_EQ).as_bytes())
}
- &cuda_types::CUstreamWaitValue_flags_enum::CU_STREAM_WAIT_VALUE_AND => {
+ &cuda_types::cuda::CUstreamWaitValue_flags_enum::CU_STREAM_WAIT_VALUE_AND => {
writer.write_all(stringify!(CU_STREAM_WAIT_VALUE_AND).as_bytes())
}
- &cuda_types::CUstreamWaitValue_flags_enum::CU_STREAM_WAIT_VALUE_NOR => {
+ &cuda_types::cuda::CUstreamWaitValue_flags_enum::CU_STREAM_WAIT_VALUE_NOR => {
writer.write_all(stringify!(CU_STREAM_WAIT_VALUE_NOR).as_bytes())
}
- &cuda_types::CUstreamWaitValue_flags_enum::CU_STREAM_WAIT_VALUE_FLUSH => {
+ &cuda_types::cuda::CUstreamWaitValue_flags_enum::CU_STREAM_WAIT_VALUE_FLUSH => {
writer.write_all(stringify!(CU_STREAM_WAIT_VALUE_FLUSH).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUstreamWriteValue_flags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUstreamWriteValue_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -455,10 +455,10 @@ impl crate::format::CudaDisplay for cuda_types::CUstreamWriteValue_flags_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUstreamWriteValue_flags_enum::CU_STREAM_WRITE_VALUE_DEFAULT => {
+ &cuda_types::cuda::CUstreamWriteValue_flags_enum::CU_STREAM_WRITE_VALUE_DEFAULT => {
writer.write_all(stringify!(CU_STREAM_WRITE_VALUE_DEFAULT).as_bytes())
}
- &cuda_types::CUstreamWriteValue_flags_enum::CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER => {
+ &cuda_types::cuda::CUstreamWriteValue_flags_enum::CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER => {
writer
.write_all(
stringify!(CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER).as_bytes(),
@@ -468,7 +468,7 @@ impl crate::format::CudaDisplay for cuda_types::CUstreamWriteValue_flags_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUstreamBatchMemOpType_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUstreamBatchMemOpType_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -476,22 +476,22 @@ impl crate::format::CudaDisplay for cuda_types::CUstreamBatchMemOpType_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUstreamBatchMemOpType_enum::CU_STREAM_MEM_OP_WAIT_VALUE_32 => {
+ &cuda_types::cuda::CUstreamBatchMemOpType_enum::CU_STREAM_MEM_OP_WAIT_VALUE_32 => {
writer.write_all(stringify!(CU_STREAM_MEM_OP_WAIT_VALUE_32).as_bytes())
}
- &cuda_types::CUstreamBatchMemOpType_enum::CU_STREAM_MEM_OP_WRITE_VALUE_32 => {
+ &cuda_types::cuda::CUstreamBatchMemOpType_enum::CU_STREAM_MEM_OP_WRITE_VALUE_32 => {
writer.write_all(stringify!(CU_STREAM_MEM_OP_WRITE_VALUE_32).as_bytes())
}
- &cuda_types::CUstreamBatchMemOpType_enum::CU_STREAM_MEM_OP_WAIT_VALUE_64 => {
+ &cuda_types::cuda::CUstreamBatchMemOpType_enum::CU_STREAM_MEM_OP_WAIT_VALUE_64 => {
writer.write_all(stringify!(CU_STREAM_MEM_OP_WAIT_VALUE_64).as_bytes())
}
- &cuda_types::CUstreamBatchMemOpType_enum::CU_STREAM_MEM_OP_WRITE_VALUE_64 => {
+ &cuda_types::cuda::CUstreamBatchMemOpType_enum::CU_STREAM_MEM_OP_WRITE_VALUE_64 => {
writer.write_all(stringify!(CU_STREAM_MEM_OP_WRITE_VALUE_64).as_bytes())
}
- &cuda_types::CUstreamBatchMemOpType_enum::CU_STREAM_MEM_OP_BARRIER => {
+ &cuda_types::cuda::CUstreamBatchMemOpType_enum::CU_STREAM_MEM_OP_BARRIER => {
writer.write_all(stringify!(CU_STREAM_MEM_OP_BARRIER).as_bytes())
}
- &cuda_types::CUstreamBatchMemOpType_enum::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES => {
+ &cuda_types::cuda::CUstreamBatchMemOpType_enum::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES => {
writer
.write_all(
stringify!(CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES).as_bytes(),
@@ -501,7 +501,7 @@ impl crate::format::CudaDisplay for cuda_types::CUstreamBatchMemOpType_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUstreamMemoryBarrier_flags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUstreamMemoryBarrier_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -509,11 +509,11 @@ impl crate::format::CudaDisplay for cuda_types::CUstreamMemoryBarrier_flags_enum
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUstreamMemoryBarrier_flags_enum::CU_STREAM_MEMORY_BARRIER_TYPE_SYS => {
+ &cuda_types::cuda::CUstreamMemoryBarrier_flags_enum::CU_STREAM_MEMORY_BARRIER_TYPE_SYS => {
writer
.write_all(stringify!(CU_STREAM_MEMORY_BARRIER_TYPE_SYS).as_bytes())
}
- &cuda_types::CUstreamMemoryBarrier_flags_enum::CU_STREAM_MEMORY_BARRIER_TYPE_GPU => {
+ &cuda_types::cuda::CUstreamMemoryBarrier_flags_enum::CU_STREAM_MEMORY_BARRIER_TYPE_GPU => {
writer
.write_all(stringify!(CU_STREAM_MEMORY_BARRIER_TYPE_GPU).as_bytes())
}
@@ -522,7 +522,7 @@ impl crate::format::CudaDisplay for cuda_types::CUstreamMemoryBarrier_flags_enum
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUstreamBatchMemOpParams_union_CUstreamMemOpFlushRemoteWritesParams_st {
+for cuda_types::cuda::CUstreamBatchMemOpParams_union_CUstreamMemOpFlushRemoteWritesParams_st {
fn write(
&self,
_fn_name: &'static str,
@@ -537,7 +537,7 @@ for cuda_types::CUstreamBatchMemOpParams_union_CUstreamMemOpFlushRemoteWritesPar
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUstreamBatchMemOpParams_union_CUstreamMemOpMemoryBarrierParams_st {
+for cuda_types::cuda::CUstreamBatchMemOpParams_union_CUstreamMemOpMemoryBarrierParams_st {
fn write(
&self,
_fn_name: &'static str,
@@ -551,7 +551,8 @@ for cuda_types::CUstreamBatchMemOpParams_union_CUstreamMemOpMemoryBarrierParams_
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st {
+impl crate::format::CudaDisplay
+for cuda_types::cuda::CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st {
fn write(
&self,
_fn_name: &'static str,
@@ -569,7 +570,8 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS_v1
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st {
+impl crate::format::CudaDisplay
+for cuda_types::cuda::CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st {
fn write(
&self,
_fn_name: &'static str,
@@ -587,7 +589,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS_v2
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUoccupancy_flags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUoccupancy_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -595,10 +597,10 @@ impl crate::format::CudaDisplay for cuda_types::CUoccupancy_flags_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUoccupancy_flags_enum::CU_OCCUPANCY_DEFAULT => {
+ &cuda_types::cuda::CUoccupancy_flags_enum::CU_OCCUPANCY_DEFAULT => {
writer.write_all(stringify!(CU_OCCUPANCY_DEFAULT).as_bytes())
}
- &cuda_types::CUoccupancy_flags_enum::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE => {
+ &cuda_types::cuda::CUoccupancy_flags_enum::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE => {
writer
.write_all(
stringify!(CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE).as_bytes(),
@@ -609,7 +611,7 @@ impl crate::format::CudaDisplay for cuda_types::CUoccupancy_flags_enum {
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUstreamUpdateCaptureDependencies_flags_enum {
+for cuda_types::cuda::CUstreamUpdateCaptureDependencies_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -617,11 +619,11 @@ for cuda_types::CUstreamUpdateCaptureDependencies_flags_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUstreamUpdateCaptureDependencies_flags_enum::CU_STREAM_ADD_CAPTURE_DEPENDENCIES => {
+ &cuda_types::cuda::CUstreamUpdateCaptureDependencies_flags_enum::CU_STREAM_ADD_CAPTURE_DEPENDENCIES => {
writer
.write_all(stringify!(CU_STREAM_ADD_CAPTURE_DEPENDENCIES).as_bytes())
}
- &cuda_types::CUstreamUpdateCaptureDependencies_flags_enum::CU_STREAM_SET_CAPTURE_DEPENDENCIES => {
+ &cuda_types::cuda::CUstreamUpdateCaptureDependencies_flags_enum::CU_STREAM_SET_CAPTURE_DEPENDENCIES => {
writer
.write_all(stringify!(CU_STREAM_SET_CAPTURE_DEPENDENCIES).as_bytes())
}
@@ -629,7 +631,7 @@ for cuda_types::CUstreamUpdateCaptureDependencies_flags_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUasyncNotificationType_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUasyncNotificationType_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -637,7 +639,7 @@ impl crate::format::CudaDisplay for cuda_types::CUasyncNotificationType_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUasyncNotificationType_enum::CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET => {
+ &cuda_types::cuda::CUasyncNotificationType_enum::CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET => {
writer
.write_all(
stringify!(CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET).as_bytes(),
@@ -648,7 +650,7 @@ impl crate::format::CudaDisplay for cuda_types::CUasyncNotificationType_enum {
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUasyncNotificationInfo_st__bindgen_ty_1__bindgen_ty_1 {
+for cuda_types::cuda::CUasyncNotificationInfo_st__bindgen_ty_1__bindgen_ty_1 {
fn write(
&self,
_fn_name: &'static str,
@@ -660,7 +662,7 @@ for cuda_types::CUasyncNotificationInfo_st__bindgen_ty_1__bindgen_ty_1 {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUasyncCallback {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUasyncCallback {
fn write(
&self,
_fn_name: &'static str,
@@ -672,14 +674,14 @@ impl crate::format::CudaDisplay for cuda_types::CUasyncCallback {
"{:p}",
unsafe {
std::mem::transmute::<
- cuda_types::CUasyncCallback,
+ cuda_types::cuda::CUasyncCallback,
*mut ::std::ffi::c_void,
>(*self)
},
)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUarray_format_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUarray_format_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -687,116 +689,116 @@ impl crate::format::CudaDisplay for cuda_types::CUarray_format_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_UNSIGNED_INT8 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_UNSIGNED_INT8 => {
writer.write_all(stringify!(CU_AD_FORMAT_UNSIGNED_INT8).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_UNSIGNED_INT16 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_UNSIGNED_INT16 => {
writer.write_all(stringify!(CU_AD_FORMAT_UNSIGNED_INT16).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_UNSIGNED_INT32 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_UNSIGNED_INT32 => {
writer.write_all(stringify!(CU_AD_FORMAT_UNSIGNED_INT32).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_SIGNED_INT8 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_SIGNED_INT8 => {
writer.write_all(stringify!(CU_AD_FORMAT_SIGNED_INT8).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_SIGNED_INT16 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_SIGNED_INT16 => {
writer.write_all(stringify!(CU_AD_FORMAT_SIGNED_INT16).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_SIGNED_INT32 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_SIGNED_INT32 => {
writer.write_all(stringify!(CU_AD_FORMAT_SIGNED_INT32).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_HALF => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_HALF => {
writer.write_all(stringify!(CU_AD_FORMAT_HALF).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_FLOAT => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_FLOAT => {
writer.write_all(stringify!(CU_AD_FORMAT_FLOAT).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_NV12 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_NV12 => {
writer.write_all(stringify!(CU_AD_FORMAT_NV12).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_UNORM_INT8X1 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_UNORM_INT8X1 => {
writer.write_all(stringify!(CU_AD_FORMAT_UNORM_INT8X1).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_UNORM_INT8X2 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_UNORM_INT8X2 => {
writer.write_all(stringify!(CU_AD_FORMAT_UNORM_INT8X2).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_UNORM_INT8X4 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_UNORM_INT8X4 => {
writer.write_all(stringify!(CU_AD_FORMAT_UNORM_INT8X4).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_UNORM_INT16X1 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_UNORM_INT16X1 => {
writer.write_all(stringify!(CU_AD_FORMAT_UNORM_INT16X1).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_UNORM_INT16X2 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_UNORM_INT16X2 => {
writer.write_all(stringify!(CU_AD_FORMAT_UNORM_INT16X2).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_UNORM_INT16X4 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_UNORM_INT16X4 => {
writer.write_all(stringify!(CU_AD_FORMAT_UNORM_INT16X4).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_SNORM_INT8X1 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_SNORM_INT8X1 => {
writer.write_all(stringify!(CU_AD_FORMAT_SNORM_INT8X1).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_SNORM_INT8X2 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_SNORM_INT8X2 => {
writer.write_all(stringify!(CU_AD_FORMAT_SNORM_INT8X2).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_SNORM_INT8X4 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_SNORM_INT8X4 => {
writer.write_all(stringify!(CU_AD_FORMAT_SNORM_INT8X4).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_SNORM_INT16X1 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_SNORM_INT16X1 => {
writer.write_all(stringify!(CU_AD_FORMAT_SNORM_INT16X1).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_SNORM_INT16X2 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_SNORM_INT16X2 => {
writer.write_all(stringify!(CU_AD_FORMAT_SNORM_INT16X2).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_SNORM_INT16X4 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_SNORM_INT16X4 => {
writer.write_all(stringify!(CU_AD_FORMAT_SNORM_INT16X4).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC1_UNORM => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_BC1_UNORM => {
writer.write_all(stringify!(CU_AD_FORMAT_BC1_UNORM).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC1_UNORM_SRGB => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_BC1_UNORM_SRGB => {
writer.write_all(stringify!(CU_AD_FORMAT_BC1_UNORM_SRGB).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC2_UNORM => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_BC2_UNORM => {
writer.write_all(stringify!(CU_AD_FORMAT_BC2_UNORM).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC2_UNORM_SRGB => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_BC2_UNORM_SRGB => {
writer.write_all(stringify!(CU_AD_FORMAT_BC2_UNORM_SRGB).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC3_UNORM => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_BC3_UNORM => {
writer.write_all(stringify!(CU_AD_FORMAT_BC3_UNORM).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC3_UNORM_SRGB => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_BC3_UNORM_SRGB => {
writer.write_all(stringify!(CU_AD_FORMAT_BC3_UNORM_SRGB).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC4_UNORM => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_BC4_UNORM => {
writer.write_all(stringify!(CU_AD_FORMAT_BC4_UNORM).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC4_SNORM => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_BC4_SNORM => {
writer.write_all(stringify!(CU_AD_FORMAT_BC4_SNORM).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC5_UNORM => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_BC5_UNORM => {
writer.write_all(stringify!(CU_AD_FORMAT_BC5_UNORM).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC5_SNORM => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_BC5_SNORM => {
writer.write_all(stringify!(CU_AD_FORMAT_BC5_SNORM).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC6H_UF16 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_BC6H_UF16 => {
writer.write_all(stringify!(CU_AD_FORMAT_BC6H_UF16).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC6H_SF16 => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_BC6H_SF16 => {
writer.write_all(stringify!(CU_AD_FORMAT_BC6H_SF16).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC7_UNORM => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_BC7_UNORM => {
writer.write_all(stringify!(CU_AD_FORMAT_BC7_UNORM).as_bytes())
}
- &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC7_UNORM_SRGB => {
+ &cuda_types::cuda::CUarray_format_enum::CU_AD_FORMAT_BC7_UNORM_SRGB => {
writer.write_all(stringify!(CU_AD_FORMAT_BC7_UNORM_SRGB).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUaddress_mode_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUaddress_mode_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -804,23 +806,23 @@ impl crate::format::CudaDisplay for cuda_types::CUaddress_mode_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUaddress_mode_enum::CU_TR_ADDRESS_MODE_WRAP => {
+ &cuda_types::cuda::CUaddress_mode_enum::CU_TR_ADDRESS_MODE_WRAP => {
writer.write_all(stringify!(CU_TR_ADDRESS_MODE_WRAP).as_bytes())
}
- &cuda_types::CUaddress_mode_enum::CU_TR_ADDRESS_MODE_CLAMP => {
+ &cuda_types::cuda::CUaddress_mode_enum::CU_TR_ADDRESS_MODE_CLAMP => {
writer.write_all(stringify!(CU_TR_ADDRESS_MODE_CLAMP).as_bytes())
}
- &cuda_types::CUaddress_mode_enum::CU_TR_ADDRESS_MODE_MIRROR => {
+ &cuda_types::cuda::CUaddress_mode_enum::CU_TR_ADDRESS_MODE_MIRROR => {
writer.write_all(stringify!(CU_TR_ADDRESS_MODE_MIRROR).as_bytes())
}
- &cuda_types::CUaddress_mode_enum::CU_TR_ADDRESS_MODE_BORDER => {
+ &cuda_types::cuda::CUaddress_mode_enum::CU_TR_ADDRESS_MODE_BORDER => {
writer.write_all(stringify!(CU_TR_ADDRESS_MODE_BORDER).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUfilter_mode_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUfilter_mode_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -828,17 +830,17 @@ impl crate::format::CudaDisplay for cuda_types::CUfilter_mode_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUfilter_mode_enum::CU_TR_FILTER_MODE_POINT => {
+ &cuda_types::cuda::CUfilter_mode_enum::CU_TR_FILTER_MODE_POINT => {
writer.write_all(stringify!(CU_TR_FILTER_MODE_POINT).as_bytes())
}
- &cuda_types::CUfilter_mode_enum::CU_TR_FILTER_MODE_LINEAR => {
+ &cuda_types::cuda::CUfilter_mode_enum::CU_TR_FILTER_MODE_LINEAR => {
writer.write_all(stringify!(CU_TR_FILTER_MODE_LINEAR).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUdevice_attribute_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -846,304 +848,304 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X => {
writer
.write_all(stringify!(CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y => {
writer
.write_all(stringify!(CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z => {
writer
.write_all(stringify!(CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_WARP_SIZE => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_WARP_SIZE => {
writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_WARP_SIZE).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_PITCH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_PITCH => {
writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_MAX_PITCH).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CLOCK_RATE => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CLOCK_RATE => {
writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_CLOCK_RATE).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GPU_OVERLAP => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GPU_OVERLAP => {
writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_GPU_OVERLAP).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_INTEGRATED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_INTEGRATED => {
writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_INTEGRATED).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE => {
writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_COMPUTE_MODE).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_ECC_ENABLED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_ECC_ENABLED => {
writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_ECC_ENABLED).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_PCI_BUS_ID => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_PCI_BUS_ID => {
writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_PCI_BUS_ID).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID => {
writer
.write_all(stringify!(CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_TCC_DRIVER => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_TCC_DRIVER => {
writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_TCC_DRIVER).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE => {
writer
.write_all(stringify!(CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE => {
writer
.write_all(
stringify!(
@@ -1152,32 +1154,32 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID => {
writer
.write_all(stringify!(CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH => {
writer
.write_all(
stringify!(
@@ -1186,7 +1188,7 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS => {
writer
.write_all(
stringify!(
@@ -1195,91 +1197,91 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH => {
writer
.write_all(
stringify!(
@@ -1288,7 +1290,7 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS => {
writer
.write_all(
stringify!(
@@ -1297,42 +1299,42 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT => {
writer
.write_all(
stringify!(
@@ -1341,49 +1343,49 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR => {
writer
.write_all(
stringify!(
@@ -1392,38 +1394,38 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY => {
writer
.write_all(stringify!(CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO => {
writer
.write_all(
stringify!(
@@ -1432,27 +1434,27 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM => {
writer
.write_all(
stringify!(
@@ -1461,62 +1463,62 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1 => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1 => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1 => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1 => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1 => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1 => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES => {
writer
.write_all(
stringify!(
@@ -1525,7 +1527,7 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST => {
writer
.write_all(
stringify!(
@@ -1534,7 +1536,7 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED => {
writer
.write_all(
stringify!(
@@ -1543,7 +1545,7 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED => {
writer
.write_all(
stringify!(
@@ -1552,7 +1554,7 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED => {
writer
.write_all(
stringify!(
@@ -1561,7 +1563,7 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED => {
writer
.write_all(
stringify!(
@@ -1570,7 +1572,7 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED => {
writer
.write_all(
stringify!(
@@ -1579,35 +1581,35 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED => {
writer
.write_all(
stringify!(
@@ -1616,28 +1618,28 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED => {
writer
.write_all(
stringify!(
@@ -1646,20 +1648,20 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS => {
writer
.write_all(
stringify!(
@@ -1668,25 +1670,25 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH => {
writer
.write_all(stringify!(CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED => {
writer
.write_all(
stringify!(
@@ -1695,85 +1697,85 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS)
.as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_NUMA_CONFIG => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_NUMA_CONFIG => {
writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_NUMA_CONFIG).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_NUMA_ID => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_NUMA_ID => {
writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_NUMA_ID).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED => {
writer
.write_all(
stringify!(CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED).as_bytes(),
)
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MPS_ENABLED => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MPS_ENABLED => {
writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_MPS_ENABLED).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID => {
writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID).as_bytes())
}
- &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX => {
+ &cuda_types::cuda::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX => {
writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_MAX).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUdevprop_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUdevprop_st {
fn write(
&self,
_fn_name: &'static str,
@@ -1805,7 +1807,7 @@ impl crate::format::CudaDisplay for cuda_types::CUdevprop_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUpointer_attribute_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUpointer_attribute_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -1813,93 +1815,93 @@ impl crate::format::CudaDisplay for cuda_types::CUpointer_attribute_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_CONTEXT => {
+ &cuda_types::cuda::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_CONTEXT => {
writer.write_all(stringify!(CU_POINTER_ATTRIBUTE_CONTEXT).as_bytes())
}
- &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_MEMORY_TYPE => {
+ &cuda_types::cuda::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_MEMORY_TYPE => {
writer.write_all(stringify!(CU_POINTER_ATTRIBUTE_MEMORY_TYPE).as_bytes())
}
- &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_DEVICE_POINTER => {
+ &cuda_types::cuda::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_DEVICE_POINTER => {
writer
.write_all(
stringify!(CU_POINTER_ATTRIBUTE_DEVICE_POINTER).as_bytes(),
)
}
- &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_HOST_POINTER => {
+ &cuda_types::cuda::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_HOST_POINTER => {
writer
.write_all(stringify!(CU_POINTER_ATTRIBUTE_HOST_POINTER).as_bytes())
}
- &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_P2P_TOKENS => {
+ &cuda_types::cuda::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_P2P_TOKENS => {
writer.write_all(stringify!(CU_POINTER_ATTRIBUTE_P2P_TOKENS).as_bytes())
}
- &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS => {
+ &cuda_types::cuda::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS => {
writer.write_all(stringify!(CU_POINTER_ATTRIBUTE_SYNC_MEMOPS).as_bytes())
}
- &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_BUFFER_ID => {
+ &cuda_types::cuda::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_BUFFER_ID => {
writer.write_all(stringify!(CU_POINTER_ATTRIBUTE_BUFFER_ID).as_bytes())
}
- &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_IS_MANAGED => {
+ &cuda_types::cuda::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_IS_MANAGED => {
writer.write_all(stringify!(CU_POINTER_ATTRIBUTE_IS_MANAGED).as_bytes())
}
- &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL => {
+ &cuda_types::cuda::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL => {
writer
.write_all(
stringify!(CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL).as_bytes(),
)
}
- &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE => {
+ &cuda_types::cuda::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE => {
writer
.write_all(
stringify!(CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE)
.as_bytes(),
)
}
- &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_RANGE_START_ADDR => {
+ &cuda_types::cuda::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_RANGE_START_ADDR => {
writer
.write_all(
stringify!(CU_POINTER_ATTRIBUTE_RANGE_START_ADDR).as_bytes(),
)
}
- &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_RANGE_SIZE => {
+ &cuda_types::cuda::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_RANGE_SIZE => {
writer.write_all(stringify!(CU_POINTER_ATTRIBUTE_RANGE_SIZE).as_bytes())
}
- &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_MAPPED => {
+ &cuda_types::cuda::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_MAPPED => {
writer.write_all(stringify!(CU_POINTER_ATTRIBUTE_MAPPED).as_bytes())
}
- &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES => {
+ &cuda_types::cuda::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES => {
writer
.write_all(
stringify!(CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES).as_bytes(),
)
}
- &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE => {
+ &cuda_types::cuda::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE => {
writer
.write_all(
stringify!(CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE)
.as_bytes(),
)
}
- &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_ACCESS_FLAGS => {
+ &cuda_types::cuda::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_ACCESS_FLAGS => {
writer
.write_all(stringify!(CU_POINTER_ATTRIBUTE_ACCESS_FLAGS).as_bytes())
}
- &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE => {
+ &cuda_types::cuda::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE => {
writer
.write_all(
stringify!(CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE).as_bytes(),
)
}
- &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_MAPPING_SIZE => {
+ &cuda_types::cuda::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_MAPPING_SIZE => {
writer
.write_all(stringify!(CU_POINTER_ATTRIBUTE_MAPPING_SIZE).as_bytes())
}
- &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR => {
+ &cuda_types::cuda::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR => {
writer
.write_all(
stringify!(CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR).as_bytes(),
)
}
- &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID => {
+ &cuda_types::cuda::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID => {
writer
.write_all(
stringify!(CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID).as_bytes(),
@@ -1909,7 +1911,7 @@ impl crate::format::CudaDisplay for cuda_types::CUpointer_attribute_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUfunction_attribute_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUfunction_attribute_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -1917,84 +1919,84 @@ impl crate::format::CudaDisplay for cuda_types::CUfunction_attribute_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK => {
+ &cuda_types::cuda::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK => {
writer
.write_all(
stringify!(CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK).as_bytes(),
)
}
- &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES => {
+ &cuda_types::cuda::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES => {
writer
.write_all(
stringify!(CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES).as_bytes(),
)
}
- &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES => {
+ &cuda_types::cuda::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES => {
writer
.write_all(stringify!(CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES).as_bytes())
}
- &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES => {
+ &cuda_types::cuda::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES => {
writer
.write_all(stringify!(CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES).as_bytes())
}
- &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_NUM_REGS => {
+ &cuda_types::cuda::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_NUM_REGS => {
writer.write_all(stringify!(CU_FUNC_ATTRIBUTE_NUM_REGS).as_bytes())
}
- &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_PTX_VERSION => {
+ &cuda_types::cuda::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_PTX_VERSION => {
writer.write_all(stringify!(CU_FUNC_ATTRIBUTE_PTX_VERSION).as_bytes())
}
- &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_BINARY_VERSION => {
+ &cuda_types::cuda::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_BINARY_VERSION => {
writer.write_all(stringify!(CU_FUNC_ATTRIBUTE_BINARY_VERSION).as_bytes())
}
- &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_CACHE_MODE_CA => {
+ &cuda_types::cuda::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_CACHE_MODE_CA => {
writer.write_all(stringify!(CU_FUNC_ATTRIBUTE_CACHE_MODE_CA).as_bytes())
}
- &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES => {
+ &cuda_types::cuda::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES => {
writer
.write_all(
stringify!(CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES)
.as_bytes(),
)
}
- &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT => {
+ &cuda_types::cuda::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT => {
writer
.write_all(
stringify!(CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT)
.as_bytes(),
)
}
- &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET => {
+ &cuda_types::cuda::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET => {
writer
.write_all(
stringify!(CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET).as_bytes(),
)
}
- &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH => {
+ &cuda_types::cuda::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH => {
writer
.write_all(
stringify!(CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH).as_bytes(),
)
}
- &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT => {
+ &cuda_types::cuda::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT => {
writer
.write_all(
stringify!(CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT).as_bytes(),
)
}
- &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH => {
+ &cuda_types::cuda::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH => {
writer
.write_all(
stringify!(CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH).as_bytes(),
)
}
- &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED => {
+ &cuda_types::cuda::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED => {
writer
.write_all(
stringify!(CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED)
.as_bytes(),
)
}
- &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE => {
+ &cuda_types::cuda::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE => {
writer
.write_all(
stringify!(
@@ -2003,14 +2005,14 @@ impl crate::format::CudaDisplay for cuda_types::CUfunction_attribute_enum {
.as_bytes(),
)
}
- &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_MAX => {
+ &cuda_types::cuda::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_MAX => {
writer.write_all(stringify!(CU_FUNC_ATTRIBUTE_MAX).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUfunc_cache_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUfunc_cache_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -2018,23 +2020,23 @@ impl crate::format::CudaDisplay for cuda_types::CUfunc_cache_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUfunc_cache_enum::CU_FUNC_CACHE_PREFER_NONE => {
+ &cuda_types::cuda::CUfunc_cache_enum::CU_FUNC_CACHE_PREFER_NONE => {
writer.write_all(stringify!(CU_FUNC_CACHE_PREFER_NONE).as_bytes())
}
- &cuda_types::CUfunc_cache_enum::CU_FUNC_CACHE_PREFER_SHARED => {
+ &cuda_types::cuda::CUfunc_cache_enum::CU_FUNC_CACHE_PREFER_SHARED => {
writer.write_all(stringify!(CU_FUNC_CACHE_PREFER_SHARED).as_bytes())
}
- &cuda_types::CUfunc_cache_enum::CU_FUNC_CACHE_PREFER_L1 => {
+ &cuda_types::cuda::CUfunc_cache_enum::CU_FUNC_CACHE_PREFER_L1 => {
writer.write_all(stringify!(CU_FUNC_CACHE_PREFER_L1).as_bytes())
}
- &cuda_types::CUfunc_cache_enum::CU_FUNC_CACHE_PREFER_EQUAL => {
+ &cuda_types::cuda::CUfunc_cache_enum::CU_FUNC_CACHE_PREFER_EQUAL => {
writer.write_all(stringify!(CU_FUNC_CACHE_PREFER_EQUAL).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUsharedconfig_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUsharedconfig_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -2042,19 +2044,19 @@ impl crate::format::CudaDisplay for cuda_types::CUsharedconfig_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUsharedconfig_enum::CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE => {
+ &cuda_types::cuda::CUsharedconfig_enum::CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE => {
writer
.write_all(
stringify!(CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE).as_bytes(),
)
}
- &cuda_types::CUsharedconfig_enum::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE => {
+ &cuda_types::cuda::CUsharedconfig_enum::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE => {
writer
.write_all(
stringify!(CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE).as_bytes(),
)
}
- &cuda_types::CUsharedconfig_enum::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE => {
+ &cuda_types::cuda::CUsharedconfig_enum::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE => {
writer
.write_all(
stringify!(CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE).as_bytes(),
@@ -2064,7 +2066,7 @@ impl crate::format::CudaDisplay for cuda_types::CUsharedconfig_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUshared_carveout_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUshared_carveout_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -2072,20 +2074,20 @@ impl crate::format::CudaDisplay for cuda_types::CUshared_carveout_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUshared_carveout_enum::CU_SHAREDMEM_CARVEOUT_DEFAULT => {
+ &cuda_types::cuda::CUshared_carveout_enum::CU_SHAREDMEM_CARVEOUT_DEFAULT => {
writer.write_all(stringify!(CU_SHAREDMEM_CARVEOUT_DEFAULT).as_bytes())
}
- &cuda_types::CUshared_carveout_enum::CU_SHAREDMEM_CARVEOUT_MAX_SHARED => {
+ &cuda_types::cuda::CUshared_carveout_enum::CU_SHAREDMEM_CARVEOUT_MAX_SHARED => {
writer.write_all(stringify!(CU_SHAREDMEM_CARVEOUT_MAX_SHARED).as_bytes())
}
- &cuda_types::CUshared_carveout_enum::CU_SHAREDMEM_CARVEOUT_MAX_L1 => {
+ &cuda_types::cuda::CUshared_carveout_enum::CU_SHAREDMEM_CARVEOUT_MAX_L1 => {
writer.write_all(stringify!(CU_SHAREDMEM_CARVEOUT_MAX_L1).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmemorytype_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmemorytype_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -2093,23 +2095,23 @@ impl crate::format::CudaDisplay for cuda_types::CUmemorytype_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUmemorytype_enum::CU_MEMORYTYPE_HOST => {
+ &cuda_types::cuda::CUmemorytype_enum::CU_MEMORYTYPE_HOST => {
writer.write_all(stringify!(CU_MEMORYTYPE_HOST).as_bytes())
}
- &cuda_types::CUmemorytype_enum::CU_MEMORYTYPE_DEVICE => {
+ &cuda_types::cuda::CUmemorytype_enum::CU_MEMORYTYPE_DEVICE => {
writer.write_all(stringify!(CU_MEMORYTYPE_DEVICE).as_bytes())
}
- &cuda_types::CUmemorytype_enum::CU_MEMORYTYPE_ARRAY => {
+ &cuda_types::cuda::CUmemorytype_enum::CU_MEMORYTYPE_ARRAY => {
writer.write_all(stringify!(CU_MEMORYTYPE_ARRAY).as_bytes())
}
- &cuda_types::CUmemorytype_enum::CU_MEMORYTYPE_UNIFIED => {
+ &cuda_types::cuda::CUmemorytype_enum::CU_MEMORYTYPE_UNIFIED => {
writer.write_all(stringify!(CU_MEMORYTYPE_UNIFIED).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUcomputemode_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUcomputemode_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -2117,20 +2119,20 @@ impl crate::format::CudaDisplay for cuda_types::CUcomputemode_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUcomputemode_enum::CU_COMPUTEMODE_DEFAULT => {
+ &cuda_types::cuda::CUcomputemode_enum::CU_COMPUTEMODE_DEFAULT => {
writer.write_all(stringify!(CU_COMPUTEMODE_DEFAULT).as_bytes())
}
- &cuda_types::CUcomputemode_enum::CU_COMPUTEMODE_PROHIBITED => {
+ &cuda_types::cuda::CUcomputemode_enum::CU_COMPUTEMODE_PROHIBITED => {
writer.write_all(stringify!(CU_COMPUTEMODE_PROHIBITED).as_bytes())
}
- &cuda_types::CUcomputemode_enum::CU_COMPUTEMODE_EXCLUSIVE_PROCESS => {
+ &cuda_types::cuda::CUcomputemode_enum::CU_COMPUTEMODE_EXCLUSIVE_PROCESS => {
writer.write_all(stringify!(CU_COMPUTEMODE_EXCLUSIVE_PROCESS).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmem_advise_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmem_advise_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -2138,35 +2140,35 @@ impl crate::format::CudaDisplay for cuda_types::CUmem_advise_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUmem_advise_enum::CU_MEM_ADVISE_SET_READ_MOSTLY => {
+ &cuda_types::cuda::CUmem_advise_enum::CU_MEM_ADVISE_SET_READ_MOSTLY => {
writer.write_all(stringify!(CU_MEM_ADVISE_SET_READ_MOSTLY).as_bytes())
}
- &cuda_types::CUmem_advise_enum::CU_MEM_ADVISE_UNSET_READ_MOSTLY => {
+ &cuda_types::cuda::CUmem_advise_enum::CU_MEM_ADVISE_UNSET_READ_MOSTLY => {
writer.write_all(stringify!(CU_MEM_ADVISE_UNSET_READ_MOSTLY).as_bytes())
}
- &cuda_types::CUmem_advise_enum::CU_MEM_ADVISE_SET_PREFERRED_LOCATION => {
+ &cuda_types::cuda::CUmem_advise_enum::CU_MEM_ADVISE_SET_PREFERRED_LOCATION => {
writer
.write_all(
stringify!(CU_MEM_ADVISE_SET_PREFERRED_LOCATION).as_bytes(),
)
}
- &cuda_types::CUmem_advise_enum::CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION => {
+ &cuda_types::cuda::CUmem_advise_enum::CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION => {
writer
.write_all(
stringify!(CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION).as_bytes(),
)
}
- &cuda_types::CUmem_advise_enum::CU_MEM_ADVISE_SET_ACCESSED_BY => {
+ &cuda_types::cuda::CUmem_advise_enum::CU_MEM_ADVISE_SET_ACCESSED_BY => {
writer.write_all(stringify!(CU_MEM_ADVISE_SET_ACCESSED_BY).as_bytes())
}
- &cuda_types::CUmem_advise_enum::CU_MEM_ADVISE_UNSET_ACCESSED_BY => {
+ &cuda_types::cuda::CUmem_advise_enum::CU_MEM_ADVISE_UNSET_ACCESSED_BY => {
writer.write_all(stringify!(CU_MEM_ADVISE_UNSET_ACCESSED_BY).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmem_range_attribute_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmem_range_attribute_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -2174,49 +2176,49 @@ impl crate::format::CudaDisplay for cuda_types::CUmem_range_attribute_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY => {
+ &cuda_types::cuda::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY => {
writer
.write_all(stringify!(CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY).as_bytes())
}
- &cuda_types::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION => {
+ &cuda_types::cuda::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION => {
writer
.write_all(
stringify!(CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION).as_bytes(),
)
}
- &cuda_types::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY => {
+ &cuda_types::cuda::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY => {
writer
.write_all(stringify!(CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY).as_bytes())
}
- &cuda_types::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION => {
+ &cuda_types::cuda::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION => {
writer
.write_all(
stringify!(CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION)
.as_bytes(),
)
}
- &cuda_types::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE => {
+ &cuda_types::cuda::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE => {
writer
.write_all(
stringify!(CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE)
.as_bytes(),
)
}
- &cuda_types::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID => {
+ &cuda_types::cuda::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID => {
writer
.write_all(
stringify!(CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID)
.as_bytes(),
)
}
- &cuda_types::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE => {
+ &cuda_types::cuda::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE => {
writer
.write_all(
stringify!(CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE)
.as_bytes(),
)
}
- &cuda_types::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID => {
+ &cuda_types::cuda::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID => {
writer
.write_all(
stringify!(CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID)
@@ -2227,7 +2229,7 @@ impl crate::format::CudaDisplay for cuda_types::CUmem_range_attribute_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUjit_option_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUjit_option_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -2235,121 +2237,121 @@ impl crate::format::CudaDisplay for cuda_types::CUjit_option_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUjit_option_enum::CU_JIT_MAX_REGISTERS => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_MAX_REGISTERS => {
writer.write_all(stringify!(CU_JIT_MAX_REGISTERS).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_THREADS_PER_BLOCK => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_THREADS_PER_BLOCK => {
writer.write_all(stringify!(CU_JIT_THREADS_PER_BLOCK).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_WALL_TIME => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_WALL_TIME => {
writer.write_all(stringify!(CU_JIT_WALL_TIME).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_INFO_LOG_BUFFER => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_INFO_LOG_BUFFER => {
writer.write_all(stringify!(CU_JIT_INFO_LOG_BUFFER).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES => {
writer
.write_all(stringify!(CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_ERROR_LOG_BUFFER => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_ERROR_LOG_BUFFER => {
writer.write_all(stringify!(CU_JIT_ERROR_LOG_BUFFER).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES => {
writer
.write_all(stringify!(CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_OPTIMIZATION_LEVEL => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_OPTIMIZATION_LEVEL => {
writer.write_all(stringify!(CU_JIT_OPTIMIZATION_LEVEL).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_TARGET_FROM_CUCONTEXT => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_TARGET_FROM_CUCONTEXT => {
writer.write_all(stringify!(CU_JIT_TARGET_FROM_CUCONTEXT).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_TARGET => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_TARGET => {
writer.write_all(stringify!(CU_JIT_TARGET).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_FALLBACK_STRATEGY => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_FALLBACK_STRATEGY => {
writer.write_all(stringify!(CU_JIT_FALLBACK_STRATEGY).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_GENERATE_DEBUG_INFO => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_GENERATE_DEBUG_INFO => {
writer.write_all(stringify!(CU_JIT_GENERATE_DEBUG_INFO).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_LOG_VERBOSE => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_LOG_VERBOSE => {
writer.write_all(stringify!(CU_JIT_LOG_VERBOSE).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_GENERATE_LINE_INFO => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_GENERATE_LINE_INFO => {
writer.write_all(stringify!(CU_JIT_GENERATE_LINE_INFO).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_CACHE_MODE => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_CACHE_MODE => {
writer.write_all(stringify!(CU_JIT_CACHE_MODE).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_NEW_SM3X_OPT => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_NEW_SM3X_OPT => {
writer.write_all(stringify!(CU_JIT_NEW_SM3X_OPT).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_FAST_COMPILE => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_FAST_COMPILE => {
writer.write_all(stringify!(CU_JIT_FAST_COMPILE).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_GLOBAL_SYMBOL_NAMES => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_GLOBAL_SYMBOL_NAMES => {
writer.write_all(stringify!(CU_JIT_GLOBAL_SYMBOL_NAMES).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_GLOBAL_SYMBOL_ADDRESSES => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_GLOBAL_SYMBOL_ADDRESSES => {
writer.write_all(stringify!(CU_JIT_GLOBAL_SYMBOL_ADDRESSES).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_GLOBAL_SYMBOL_COUNT => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_GLOBAL_SYMBOL_COUNT => {
writer.write_all(stringify!(CU_JIT_GLOBAL_SYMBOL_COUNT).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_LTO => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_LTO => {
writer.write_all(stringify!(CU_JIT_LTO).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_FTZ => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_FTZ => {
writer.write_all(stringify!(CU_JIT_FTZ).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_PREC_DIV => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_PREC_DIV => {
writer.write_all(stringify!(CU_JIT_PREC_DIV).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_PREC_SQRT => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_PREC_SQRT => {
writer.write_all(stringify!(CU_JIT_PREC_SQRT).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_FMA => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_FMA => {
writer.write_all(stringify!(CU_JIT_FMA).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_REFERENCED_KERNEL_NAMES => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_REFERENCED_KERNEL_NAMES => {
writer.write_all(stringify!(CU_JIT_REFERENCED_KERNEL_NAMES).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_REFERENCED_KERNEL_COUNT => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_REFERENCED_KERNEL_COUNT => {
writer.write_all(stringify!(CU_JIT_REFERENCED_KERNEL_COUNT).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_REFERENCED_VARIABLE_NAMES => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_REFERENCED_VARIABLE_NAMES => {
writer.write_all(stringify!(CU_JIT_REFERENCED_VARIABLE_NAMES).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_REFERENCED_VARIABLE_COUNT => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_REFERENCED_VARIABLE_COUNT => {
writer.write_all(stringify!(CU_JIT_REFERENCED_VARIABLE_COUNT).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES => {
writer
.write_all(
stringify!(CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES).as_bytes(),
)
}
- &cuda_types::CUjit_option_enum::CU_JIT_POSITION_INDEPENDENT_CODE => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_POSITION_INDEPENDENT_CODE => {
writer.write_all(stringify!(CU_JIT_POSITION_INDEPENDENT_CODE).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_MIN_CTA_PER_SM => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_MIN_CTA_PER_SM => {
writer.write_all(stringify!(CU_JIT_MIN_CTA_PER_SM).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_MAX_THREADS_PER_BLOCK => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_MAX_THREADS_PER_BLOCK => {
writer.write_all(stringify!(CU_JIT_MAX_THREADS_PER_BLOCK).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_OVERRIDE_DIRECTIVE_VALUES => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_OVERRIDE_DIRECTIVE_VALUES => {
writer.write_all(stringify!(CU_JIT_OVERRIDE_DIRECTIVE_VALUES).as_bytes())
}
- &cuda_types::CUjit_option_enum::CU_JIT_NUM_OPTIONS => {
+ &cuda_types::cuda::CUjit_option_enum::CU_JIT_NUM_OPTIONS => {
writer.write_all(stringify!(CU_JIT_NUM_OPTIONS).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUjit_target_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUjit_target_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -2357,68 +2359,68 @@ impl crate::format::CudaDisplay for cuda_types::CUjit_target_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_30 => {
+ &cuda_types::cuda::CUjit_target_enum::CU_TARGET_COMPUTE_30 => {
writer.write_all(stringify!(CU_TARGET_COMPUTE_30).as_bytes())
}
- &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_32 => {
+ &cuda_types::cuda::CUjit_target_enum::CU_TARGET_COMPUTE_32 => {
writer.write_all(stringify!(CU_TARGET_COMPUTE_32).as_bytes())
}
- &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_35 => {
+ &cuda_types::cuda::CUjit_target_enum::CU_TARGET_COMPUTE_35 => {
writer.write_all(stringify!(CU_TARGET_COMPUTE_35).as_bytes())
}
- &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_37 => {
+ &cuda_types::cuda::CUjit_target_enum::CU_TARGET_COMPUTE_37 => {
writer.write_all(stringify!(CU_TARGET_COMPUTE_37).as_bytes())
}
- &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_50 => {
+ &cuda_types::cuda::CUjit_target_enum::CU_TARGET_COMPUTE_50 => {
writer.write_all(stringify!(CU_TARGET_COMPUTE_50).as_bytes())
}
- &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_52 => {
+ &cuda_types::cuda::CUjit_target_enum::CU_TARGET_COMPUTE_52 => {
writer.write_all(stringify!(CU_TARGET_COMPUTE_52).as_bytes())
}
- &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_53 => {
+ &cuda_types::cuda::CUjit_target_enum::CU_TARGET_COMPUTE_53 => {
writer.write_all(stringify!(CU_TARGET_COMPUTE_53).as_bytes())
}
- &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_60 => {
+ &cuda_types::cuda::CUjit_target_enum::CU_TARGET_COMPUTE_60 => {
writer.write_all(stringify!(CU_TARGET_COMPUTE_60).as_bytes())
}
- &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_61 => {
+ &cuda_types::cuda::CUjit_target_enum::CU_TARGET_COMPUTE_61 => {
writer.write_all(stringify!(CU_TARGET_COMPUTE_61).as_bytes())
}
- &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_62 => {
+ &cuda_types::cuda::CUjit_target_enum::CU_TARGET_COMPUTE_62 => {
writer.write_all(stringify!(CU_TARGET_COMPUTE_62).as_bytes())
}
- &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_70 => {
+ &cuda_types::cuda::CUjit_target_enum::CU_TARGET_COMPUTE_70 => {
writer.write_all(stringify!(CU_TARGET_COMPUTE_70).as_bytes())
}
- &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_72 => {
+ &cuda_types::cuda::CUjit_target_enum::CU_TARGET_COMPUTE_72 => {
writer.write_all(stringify!(CU_TARGET_COMPUTE_72).as_bytes())
}
- &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_75 => {
+ &cuda_types::cuda::CUjit_target_enum::CU_TARGET_COMPUTE_75 => {
writer.write_all(stringify!(CU_TARGET_COMPUTE_75).as_bytes())
}
- &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_80 => {
+ &cuda_types::cuda::CUjit_target_enum::CU_TARGET_COMPUTE_80 => {
writer.write_all(stringify!(CU_TARGET_COMPUTE_80).as_bytes())
}
- &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_86 => {
+ &cuda_types::cuda::CUjit_target_enum::CU_TARGET_COMPUTE_86 => {
writer.write_all(stringify!(CU_TARGET_COMPUTE_86).as_bytes())
}
- &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_87 => {
+ &cuda_types::cuda::CUjit_target_enum::CU_TARGET_COMPUTE_87 => {
writer.write_all(stringify!(CU_TARGET_COMPUTE_87).as_bytes())
}
- &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_89 => {
+ &cuda_types::cuda::CUjit_target_enum::CU_TARGET_COMPUTE_89 => {
writer.write_all(stringify!(CU_TARGET_COMPUTE_89).as_bytes())
}
- &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_90 => {
+ &cuda_types::cuda::CUjit_target_enum::CU_TARGET_COMPUTE_90 => {
writer.write_all(stringify!(CU_TARGET_COMPUTE_90).as_bytes())
}
- &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_90A => {
+ &cuda_types::cuda::CUjit_target_enum::CU_TARGET_COMPUTE_90A => {
writer.write_all(stringify!(CU_TARGET_COMPUTE_90A).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUjit_fallback_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUjit_fallback_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -2426,17 +2428,17 @@ impl crate::format::CudaDisplay for cuda_types::CUjit_fallback_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUjit_fallback_enum::CU_PREFER_PTX => {
+ &cuda_types::cuda::CUjit_fallback_enum::CU_PREFER_PTX => {
writer.write_all(stringify!(CU_PREFER_PTX).as_bytes())
}
- &cuda_types::CUjit_fallback_enum::CU_PREFER_BINARY => {
+ &cuda_types::cuda::CUjit_fallback_enum::CU_PREFER_BINARY => {
writer.write_all(stringify!(CU_PREFER_BINARY).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUjit_cacheMode_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUjit_cacheMode_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -2444,20 +2446,20 @@ impl crate::format::CudaDisplay for cuda_types::CUjit_cacheMode_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUjit_cacheMode_enum::CU_JIT_CACHE_OPTION_NONE => {
+ &cuda_types::cuda::CUjit_cacheMode_enum::CU_JIT_CACHE_OPTION_NONE => {
writer.write_all(stringify!(CU_JIT_CACHE_OPTION_NONE).as_bytes())
}
- &cuda_types::CUjit_cacheMode_enum::CU_JIT_CACHE_OPTION_CG => {
+ &cuda_types::cuda::CUjit_cacheMode_enum::CU_JIT_CACHE_OPTION_CG => {
writer.write_all(stringify!(CU_JIT_CACHE_OPTION_CG).as_bytes())
}
- &cuda_types::CUjit_cacheMode_enum::CU_JIT_CACHE_OPTION_CA => {
+ &cuda_types::cuda::CUjit_cacheMode_enum::CU_JIT_CACHE_OPTION_CA => {
writer.write_all(stringify!(CU_JIT_CACHE_OPTION_CA).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUjitInputType_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUjitInputType_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -2465,32 +2467,32 @@ impl crate::format::CudaDisplay for cuda_types::CUjitInputType_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUjitInputType_enum::CU_JIT_INPUT_CUBIN => {
+ &cuda_types::cuda::CUjitInputType_enum::CU_JIT_INPUT_CUBIN => {
writer.write_all(stringify!(CU_JIT_INPUT_CUBIN).as_bytes())
}
- &cuda_types::CUjitInputType_enum::CU_JIT_INPUT_PTX => {
+ &cuda_types::cuda::CUjitInputType_enum::CU_JIT_INPUT_PTX => {
writer.write_all(stringify!(CU_JIT_INPUT_PTX).as_bytes())
}
- &cuda_types::CUjitInputType_enum::CU_JIT_INPUT_FATBINARY => {
+ &cuda_types::cuda::CUjitInputType_enum::CU_JIT_INPUT_FATBINARY => {
writer.write_all(stringify!(CU_JIT_INPUT_FATBINARY).as_bytes())
}
- &cuda_types::CUjitInputType_enum::CU_JIT_INPUT_OBJECT => {
+ &cuda_types::cuda::CUjitInputType_enum::CU_JIT_INPUT_OBJECT => {
writer.write_all(stringify!(CU_JIT_INPUT_OBJECT).as_bytes())
}
- &cuda_types::CUjitInputType_enum::CU_JIT_INPUT_LIBRARY => {
+ &cuda_types::cuda::CUjitInputType_enum::CU_JIT_INPUT_LIBRARY => {
writer.write_all(stringify!(CU_JIT_INPUT_LIBRARY).as_bytes())
}
- &cuda_types::CUjitInputType_enum::CU_JIT_INPUT_NVVM => {
+ &cuda_types::cuda::CUjitInputType_enum::CU_JIT_INPUT_NVVM => {
writer.write_all(stringify!(CU_JIT_INPUT_NVVM).as_bytes())
}
- &cuda_types::CUjitInputType_enum::CU_JIT_NUM_INPUT_TYPES => {
+ &cuda_types::cuda::CUjitInputType_enum::CU_JIT_NUM_INPUT_TYPES => {
writer.write_all(stringify!(CU_JIT_NUM_INPUT_TYPES).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUlinkState {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUlinkState {
fn write(
&self,
_fn_name: &'static str,
@@ -2500,7 +2502,7 @@ impl crate::format::CudaDisplay for cuda_types::CUlinkState {
write!(writer, "{:p}", *self)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUgraphicsRegisterFlags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUgraphicsRegisterFlags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -2508,28 +2510,28 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphicsRegisterFlags_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUgraphicsRegisterFlags_enum::CU_GRAPHICS_REGISTER_FLAGS_NONE => {
+ &cuda_types::cuda::CUgraphicsRegisterFlags_enum::CU_GRAPHICS_REGISTER_FLAGS_NONE => {
writer.write_all(stringify!(CU_GRAPHICS_REGISTER_FLAGS_NONE).as_bytes())
}
- &cuda_types::CUgraphicsRegisterFlags_enum::CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY => {
+ &cuda_types::cuda::CUgraphicsRegisterFlags_enum::CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY => {
writer
.write_all(
stringify!(CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY).as_bytes(),
)
}
- &cuda_types::CUgraphicsRegisterFlags_enum::CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD => {
+ &cuda_types::cuda::CUgraphicsRegisterFlags_enum::CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD => {
writer
.write_all(
stringify!(CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD).as_bytes(),
)
}
- &cuda_types::CUgraphicsRegisterFlags_enum::CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST => {
+ &cuda_types::cuda::CUgraphicsRegisterFlags_enum::CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST => {
writer
.write_all(
stringify!(CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST).as_bytes(),
)
}
- &cuda_types::CUgraphicsRegisterFlags_enum::CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER => {
+ &cuda_types::cuda::CUgraphicsRegisterFlags_enum::CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER => {
writer
.write_all(
stringify!(CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER).as_bytes(),
@@ -2539,7 +2541,7 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphicsRegisterFlags_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUgraphicsMapResourceFlags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUgraphicsMapResourceFlags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -2547,19 +2549,19 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphicsMapResourceFlags_enum
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUgraphicsMapResourceFlags_enum::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE => {
+ &cuda_types::cuda::CUgraphicsMapResourceFlags_enum::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE => {
writer
.write_all(
stringify!(CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE).as_bytes(),
)
}
- &cuda_types::CUgraphicsMapResourceFlags_enum::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY => {
+ &cuda_types::cuda::CUgraphicsMapResourceFlags_enum::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY => {
writer
.write_all(
stringify!(CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY).as_bytes(),
)
}
- &cuda_types::CUgraphicsMapResourceFlags_enum::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD => {
+ &cuda_types::cuda::CUgraphicsMapResourceFlags_enum::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD => {
writer
.write_all(
stringify!(CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD)
@@ -2570,7 +2572,7 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphicsMapResourceFlags_enum
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUarray_cubemap_face_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUarray_cubemap_face_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -2578,29 +2580,29 @@ impl crate::format::CudaDisplay for cuda_types::CUarray_cubemap_face_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUarray_cubemap_face_enum::CU_CUBEMAP_FACE_POSITIVE_X => {
+ &cuda_types::cuda::CUarray_cubemap_face_enum::CU_CUBEMAP_FACE_POSITIVE_X => {
writer.write_all(stringify!(CU_CUBEMAP_FACE_POSITIVE_X).as_bytes())
}
- &cuda_types::CUarray_cubemap_face_enum::CU_CUBEMAP_FACE_NEGATIVE_X => {
+ &cuda_types::cuda::CUarray_cubemap_face_enum::CU_CUBEMAP_FACE_NEGATIVE_X => {
writer.write_all(stringify!(CU_CUBEMAP_FACE_NEGATIVE_X).as_bytes())
}
- &cuda_types::CUarray_cubemap_face_enum::CU_CUBEMAP_FACE_POSITIVE_Y => {
+ &cuda_types::cuda::CUarray_cubemap_face_enum::CU_CUBEMAP_FACE_POSITIVE_Y => {
writer.write_all(stringify!(CU_CUBEMAP_FACE_POSITIVE_Y).as_bytes())
}
- &cuda_types::CUarray_cubemap_face_enum::CU_CUBEMAP_FACE_NEGATIVE_Y => {
+ &cuda_types::cuda::CUarray_cubemap_face_enum::CU_CUBEMAP_FACE_NEGATIVE_Y => {
writer.write_all(stringify!(CU_CUBEMAP_FACE_NEGATIVE_Y).as_bytes())
}
- &cuda_types::CUarray_cubemap_face_enum::CU_CUBEMAP_FACE_POSITIVE_Z => {
+ &cuda_types::cuda::CUarray_cubemap_face_enum::CU_CUBEMAP_FACE_POSITIVE_Z => {
writer.write_all(stringify!(CU_CUBEMAP_FACE_POSITIVE_Z).as_bytes())
}
- &cuda_types::CUarray_cubemap_face_enum::CU_CUBEMAP_FACE_NEGATIVE_Z => {
+ &cuda_types::cuda::CUarray_cubemap_face_enum::CU_CUBEMAP_FACE_NEGATIVE_Z => {
writer.write_all(stringify!(CU_CUBEMAP_FACE_NEGATIVE_Z).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUlimit_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUlimit_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -2608,40 +2610,40 @@ impl crate::format::CudaDisplay for cuda_types::CUlimit_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUlimit_enum::CU_LIMIT_STACK_SIZE => {
+ &cuda_types::cuda::CUlimit_enum::CU_LIMIT_STACK_SIZE => {
writer.write_all(stringify!(CU_LIMIT_STACK_SIZE).as_bytes())
}
- &cuda_types::CUlimit_enum::CU_LIMIT_PRINTF_FIFO_SIZE => {
+ &cuda_types::cuda::CUlimit_enum::CU_LIMIT_PRINTF_FIFO_SIZE => {
writer.write_all(stringify!(CU_LIMIT_PRINTF_FIFO_SIZE).as_bytes())
}
- &cuda_types::CUlimit_enum::CU_LIMIT_MALLOC_HEAP_SIZE => {
+ &cuda_types::cuda::CUlimit_enum::CU_LIMIT_MALLOC_HEAP_SIZE => {
writer.write_all(stringify!(CU_LIMIT_MALLOC_HEAP_SIZE).as_bytes())
}
- &cuda_types::CUlimit_enum::CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH => {
+ &cuda_types::cuda::CUlimit_enum::CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH => {
writer.write_all(stringify!(CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH).as_bytes())
}
- &cuda_types::CUlimit_enum::CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT => {
+ &cuda_types::cuda::CUlimit_enum::CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT => {
writer
.write_all(
stringify!(CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT).as_bytes(),
)
}
- &cuda_types::CUlimit_enum::CU_LIMIT_MAX_L2_FETCH_GRANULARITY => {
+ &cuda_types::cuda::CUlimit_enum::CU_LIMIT_MAX_L2_FETCH_GRANULARITY => {
writer
.write_all(stringify!(CU_LIMIT_MAX_L2_FETCH_GRANULARITY).as_bytes())
}
- &cuda_types::CUlimit_enum::CU_LIMIT_PERSISTING_L2_CACHE_SIZE => {
+ &cuda_types::cuda::CUlimit_enum::CU_LIMIT_PERSISTING_L2_CACHE_SIZE => {
writer
.write_all(stringify!(CU_LIMIT_PERSISTING_L2_CACHE_SIZE).as_bytes())
}
- &cuda_types::CUlimit_enum::CU_LIMIT_MAX => {
+ &cuda_types::cuda::CUlimit_enum::CU_LIMIT_MAX => {
writer.write_all(stringify!(CU_LIMIT_MAX).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUresourcetype_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUresourcetype_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -2649,23 +2651,23 @@ impl crate::format::CudaDisplay for cuda_types::CUresourcetype_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUresourcetype_enum::CU_RESOURCE_TYPE_ARRAY => {
+ &cuda_types::cuda::CUresourcetype_enum::CU_RESOURCE_TYPE_ARRAY => {
writer.write_all(stringify!(CU_RESOURCE_TYPE_ARRAY).as_bytes())
}
- &cuda_types::CUresourcetype_enum::CU_RESOURCE_TYPE_MIPMAPPED_ARRAY => {
+ &cuda_types::cuda::CUresourcetype_enum::CU_RESOURCE_TYPE_MIPMAPPED_ARRAY => {
writer.write_all(stringify!(CU_RESOURCE_TYPE_MIPMAPPED_ARRAY).as_bytes())
}
- &cuda_types::CUresourcetype_enum::CU_RESOURCE_TYPE_LINEAR => {
+ &cuda_types::cuda::CUresourcetype_enum::CU_RESOURCE_TYPE_LINEAR => {
writer.write_all(stringify!(CU_RESOURCE_TYPE_LINEAR).as_bytes())
}
- &cuda_types::CUresourcetype_enum::CU_RESOURCE_TYPE_PITCH2D => {
+ &cuda_types::cuda::CUresourcetype_enum::CU_RESOURCE_TYPE_PITCH2D => {
writer.write_all(stringify!(CU_RESOURCE_TYPE_PITCH2D).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUhostFn {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUhostFn {
fn write(
&self,
_fn_name: &'static str,
@@ -2677,14 +2679,14 @@ impl crate::format::CudaDisplay for cuda_types::CUhostFn {
"{:p}",
unsafe {
std::mem::transmute::<
- cuda_types::CUhostFn,
+ cuda_types::cuda::CUhostFn,
*mut ::std::ffi::c_void,
>(*self)
},
)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUaccessProperty_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUaccessProperty_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -2692,20 +2694,20 @@ impl crate::format::CudaDisplay for cuda_types::CUaccessProperty_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUaccessProperty_enum::CU_ACCESS_PROPERTY_NORMAL => {
+ &cuda_types::cuda::CUaccessProperty_enum::CU_ACCESS_PROPERTY_NORMAL => {
writer.write_all(stringify!(CU_ACCESS_PROPERTY_NORMAL).as_bytes())
}
- &cuda_types::CUaccessProperty_enum::CU_ACCESS_PROPERTY_STREAMING => {
+ &cuda_types::cuda::CUaccessProperty_enum::CU_ACCESS_PROPERTY_STREAMING => {
writer.write_all(stringify!(CU_ACCESS_PROPERTY_STREAMING).as_bytes())
}
- &cuda_types::CUaccessProperty_enum::CU_ACCESS_PROPERTY_PERSISTING => {
+ &cuda_types::cuda::CUaccessProperty_enum::CU_ACCESS_PROPERTY_PERSISTING => {
writer.write_all(stringify!(CU_ACCESS_PROPERTY_PERSISTING).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUaccessPolicyWindow_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUaccessPolicyWindow_st {
fn write(
&self,
_fn_name: &'static str,
@@ -2725,7 +2727,7 @@ impl crate::format::CudaDisplay for cuda_types::CUaccessPolicyWindow_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_KERNEL_NODE_PARAMS_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS_st {
fn write(
&self,
_fn_name: &'static str,
@@ -2755,7 +2757,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_KERNEL_NODE_PARAMS_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_KERNEL_NODE_PARAMS_v2_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS_v2_st {
fn write(
&self,
_fn_name: &'static str,
@@ -2789,7 +2791,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_KERNEL_NODE_PARAMS_v2_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_KERNEL_NODE_PARAMS_v3_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS_v3_st {
fn write(
&self,
_fn_name: &'static str,
@@ -2823,7 +2825,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_KERNEL_NODE_PARAMS_v3_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_MEMSET_NODE_PARAMS_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_MEMSET_NODE_PARAMS_st {
fn write(
&self,
_fn_name: &'static str,
@@ -2845,7 +2847,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_MEMSET_NODE_PARAMS_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_MEMSET_NODE_PARAMS_v2_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_MEMSET_NODE_PARAMS_v2_st {
fn write(
&self,
_fn_name: &'static str,
@@ -2869,7 +2871,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_MEMSET_NODE_PARAMS_v2_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_HOST_NODE_PARAMS_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_HOST_NODE_PARAMS_st {
fn write(
&self,
_fn_name: &'static str,
@@ -2883,7 +2885,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_HOST_NODE_PARAMS_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_HOST_NODE_PARAMS_v2_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_HOST_NODE_PARAMS_v2_st {
fn write(
&self,
_fn_name: &'static str,
@@ -2897,7 +2899,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_HOST_NODE_PARAMS_v2_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUgraphConditionalNodeType_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUgraphConditionalNodeType_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -2905,17 +2907,17 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphConditionalNodeType_enum
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUgraphConditionalNodeType_enum::CU_GRAPH_COND_TYPE_IF => {
+ &cuda_types::cuda::CUgraphConditionalNodeType_enum::CU_GRAPH_COND_TYPE_IF => {
writer.write_all(stringify!(CU_GRAPH_COND_TYPE_IF).as_bytes())
}
- &cuda_types::CUgraphConditionalNodeType_enum::CU_GRAPH_COND_TYPE_WHILE => {
+ &cuda_types::cuda::CUgraphConditionalNodeType_enum::CU_GRAPH_COND_TYPE_WHILE => {
writer.write_all(stringify!(CU_GRAPH_COND_TYPE_WHILE).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_CONDITIONAL_NODE_PARAMS {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_CONDITIONAL_NODE_PARAMS {
fn write(
&self,
_fn_name: &'static str,
@@ -2935,7 +2937,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_CONDITIONAL_NODE_PARAMS {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUgraphNodeType_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUgraphNodeType_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -2943,57 +2945,57 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphNodeType_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_KERNEL => {
+ &cuda_types::cuda::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_KERNEL => {
writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_KERNEL).as_bytes())
}
- &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_MEMCPY => {
+ &cuda_types::cuda::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_MEMCPY => {
writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_MEMCPY).as_bytes())
}
- &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_MEMSET => {
+ &cuda_types::cuda::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_MEMSET => {
writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_MEMSET).as_bytes())
}
- &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_HOST => {
+ &cuda_types::cuda::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_HOST => {
writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_HOST).as_bytes())
}
- &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_GRAPH => {
+ &cuda_types::cuda::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_GRAPH => {
writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_GRAPH).as_bytes())
}
- &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_EMPTY => {
+ &cuda_types::cuda::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_EMPTY => {
writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_EMPTY).as_bytes())
}
- &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_WAIT_EVENT => {
+ &cuda_types::cuda::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_WAIT_EVENT => {
writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_WAIT_EVENT).as_bytes())
}
- &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_EVENT_RECORD => {
+ &cuda_types::cuda::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_EVENT_RECORD => {
writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_EVENT_RECORD).as_bytes())
}
- &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL => {
+ &cuda_types::cuda::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL => {
writer
.write_all(
stringify!(CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL).as_bytes(),
)
}
- &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT => {
+ &cuda_types::cuda::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT => {
writer
.write_all(stringify!(CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT).as_bytes())
}
- &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_MEM_ALLOC => {
+ &cuda_types::cuda::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_MEM_ALLOC => {
writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_MEM_ALLOC).as_bytes())
}
- &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_MEM_FREE => {
+ &cuda_types::cuda::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_MEM_FREE => {
writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_MEM_FREE).as_bytes())
}
- &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_BATCH_MEM_OP => {
+ &cuda_types::cuda::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_BATCH_MEM_OP => {
writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_BATCH_MEM_OP).as_bytes())
}
- &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_CONDITIONAL => {
+ &cuda_types::cuda::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_CONDITIONAL => {
writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_CONDITIONAL).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUgraphDependencyType_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUgraphDependencyType_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -3001,10 +3003,10 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphDependencyType_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUgraphDependencyType_enum::CU_GRAPH_DEPENDENCY_TYPE_DEFAULT => {
+ &cuda_types::cuda::CUgraphDependencyType_enum::CU_GRAPH_DEPENDENCY_TYPE_DEFAULT => {
writer.write_all(stringify!(CU_GRAPH_DEPENDENCY_TYPE_DEFAULT).as_bytes())
}
- &cuda_types::CUgraphDependencyType_enum::CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC => {
+ &cuda_types::cuda::CUgraphDependencyType_enum::CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC => {
writer
.write_all(
stringify!(CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC).as_bytes(),
@@ -3014,7 +3016,7 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphDependencyType_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUgraphEdgeData_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUgraphEdgeData_st {
fn write(
&self,
_fn_name: &'static str,
@@ -3030,7 +3032,7 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphEdgeData_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUgraphInstantiateResult_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUgraphInstantiateResult_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -3038,26 +3040,26 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphInstantiateResult_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUgraphInstantiateResult_enum::CUDA_GRAPH_INSTANTIATE_SUCCESS => {
+ &cuda_types::cuda::CUgraphInstantiateResult_enum::CUDA_GRAPH_INSTANTIATE_SUCCESS => {
writer.write_all(stringify!(CUDA_GRAPH_INSTANTIATE_SUCCESS).as_bytes())
}
- &cuda_types::CUgraphInstantiateResult_enum::CUDA_GRAPH_INSTANTIATE_ERROR => {
+ &cuda_types::cuda::CUgraphInstantiateResult_enum::CUDA_GRAPH_INSTANTIATE_ERROR => {
writer.write_all(stringify!(CUDA_GRAPH_INSTANTIATE_ERROR).as_bytes())
}
- &cuda_types::CUgraphInstantiateResult_enum::CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE => {
+ &cuda_types::cuda::CUgraphInstantiateResult_enum::CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE => {
writer
.write_all(
stringify!(CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE).as_bytes(),
)
}
- &cuda_types::CUgraphInstantiateResult_enum::CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED => {
+ &cuda_types::cuda::CUgraphInstantiateResult_enum::CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED => {
writer
.write_all(
stringify!(CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED)
.as_bytes(),
)
}
- &cuda_types::CUgraphInstantiateResult_enum::CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED => {
+ &cuda_types::cuda::CUgraphInstantiateResult_enum::CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED => {
writer
.write_all(
stringify!(CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED)
@@ -3068,7 +3070,7 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphInstantiateResult_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_GRAPH_INSTANTIATE_PARAMS_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_GRAPH_INSTANTIATE_PARAMS_st {
fn write(
&self,
_fn_name: &'static str,
@@ -3086,7 +3088,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_GRAPH_INSTANTIATE_PARAMS_st
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUsynchronizationPolicy_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUsynchronizationPolicy_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -3094,23 +3096,23 @@ impl crate::format::CudaDisplay for cuda_types::CUsynchronizationPolicy_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUsynchronizationPolicy_enum::CU_SYNC_POLICY_AUTO => {
+ &cuda_types::cuda::CUsynchronizationPolicy_enum::CU_SYNC_POLICY_AUTO => {
writer.write_all(stringify!(CU_SYNC_POLICY_AUTO).as_bytes())
}
- &cuda_types::CUsynchronizationPolicy_enum::CU_SYNC_POLICY_SPIN => {
+ &cuda_types::cuda::CUsynchronizationPolicy_enum::CU_SYNC_POLICY_SPIN => {
writer.write_all(stringify!(CU_SYNC_POLICY_SPIN).as_bytes())
}
- &cuda_types::CUsynchronizationPolicy_enum::CU_SYNC_POLICY_YIELD => {
+ &cuda_types::cuda::CUsynchronizationPolicy_enum::CU_SYNC_POLICY_YIELD => {
writer.write_all(stringify!(CU_SYNC_POLICY_YIELD).as_bytes())
}
- &cuda_types::CUsynchronizationPolicy_enum::CU_SYNC_POLICY_BLOCKING_SYNC => {
+ &cuda_types::cuda::CUsynchronizationPolicy_enum::CU_SYNC_POLICY_BLOCKING_SYNC => {
writer.write_all(stringify!(CU_SYNC_POLICY_BLOCKING_SYNC).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUclusterSchedulingPolicy_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUclusterSchedulingPolicy_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -3118,19 +3120,19 @@ impl crate::format::CudaDisplay for cuda_types::CUclusterSchedulingPolicy_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUclusterSchedulingPolicy_enum::CU_CLUSTER_SCHEDULING_POLICY_DEFAULT => {
+ &cuda_types::cuda::CUclusterSchedulingPolicy_enum::CU_CLUSTER_SCHEDULING_POLICY_DEFAULT => {
writer
.write_all(
stringify!(CU_CLUSTER_SCHEDULING_POLICY_DEFAULT).as_bytes(),
)
}
- &cuda_types::CUclusterSchedulingPolicy_enum::CU_CLUSTER_SCHEDULING_POLICY_SPREAD => {
+ &cuda_types::cuda::CUclusterSchedulingPolicy_enum::CU_CLUSTER_SCHEDULING_POLICY_SPREAD => {
writer
.write_all(
stringify!(CU_CLUSTER_SCHEDULING_POLICY_SPREAD).as_bytes(),
)
}
- &cuda_types::CUclusterSchedulingPolicy_enum::CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING => {
+ &cuda_types::cuda::CUclusterSchedulingPolicy_enum::CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING => {
writer
.write_all(
stringify!(CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING)
@@ -3141,7 +3143,7 @@ impl crate::format::CudaDisplay for cuda_types::CUclusterSchedulingPolicy_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUlaunchMemSyncDomain_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUlaunchMemSyncDomain_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -3149,18 +3151,18 @@ impl crate::format::CudaDisplay for cuda_types::CUlaunchMemSyncDomain_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUlaunchMemSyncDomain_enum::CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT => {
+ &cuda_types::cuda::CUlaunchMemSyncDomain_enum::CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT => {
writer
.write_all(stringify!(CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT).as_bytes())
}
- &cuda_types::CUlaunchMemSyncDomain_enum::CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE => {
+ &cuda_types::cuda::CUlaunchMemSyncDomain_enum::CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE => {
writer.write_all(stringify!(CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUlaunchMemSyncDomainMap_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUlaunchMemSyncDomainMap_st {
fn write(
&self,
_fn_name: &'static str,
@@ -3174,7 +3176,7 @@ impl crate::format::CudaDisplay for cuda_types::CUlaunchMemSyncDomainMap_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUlaunchAttributeID_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUlaunchAttributeID_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -3182,31 +3184,31 @@ impl crate::format::CudaDisplay for cuda_types::CUlaunchAttributeID_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_IGNORE => {
+ &cuda_types::cuda::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_IGNORE => {
writer.write_all(stringify!(CU_LAUNCH_ATTRIBUTE_IGNORE).as_bytes())
}
- &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW => {
+ &cuda_types::cuda::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW => {
writer
.write_all(
stringify!(CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW).as_bytes(),
)
}
- &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_COOPERATIVE => {
+ &cuda_types::cuda::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_COOPERATIVE => {
writer.write_all(stringify!(CU_LAUNCH_ATTRIBUTE_COOPERATIVE).as_bytes())
}
- &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY => {
+ &cuda_types::cuda::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY => {
writer
.write_all(
stringify!(CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY).as_bytes(),
)
}
- &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION => {
+ &cuda_types::cuda::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION => {
writer
.write_all(
stringify!(CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION).as_bytes(),
)
}
- &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE => {
+ &cuda_types::cuda::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE => {
writer
.write_all(
stringify!(
@@ -3215,49 +3217,49 @@ impl crate::format::CudaDisplay for cuda_types::CUlaunchAttributeID_enum {
.as_bytes(),
)
}
- &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION => {
+ &cuda_types::cuda::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION => {
writer
.write_all(
stringify!(CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION)
.as_bytes(),
)
}
- &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT => {
+ &cuda_types::cuda::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT => {
writer
.write_all(
stringify!(CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT).as_bytes(),
)
}
- &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_PRIORITY => {
+ &cuda_types::cuda::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_PRIORITY => {
writer.write_all(stringify!(CU_LAUNCH_ATTRIBUTE_PRIORITY).as_bytes())
}
- &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP => {
+ &cuda_types::cuda::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP => {
writer
.write_all(
stringify!(CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP).as_bytes(),
)
}
- &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN => {
+ &cuda_types::cuda::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN => {
writer
.write_all(
stringify!(CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN).as_bytes(),
)
}
- &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT => {
+ &cuda_types::cuda::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT => {
writer
.write_all(
stringify!(CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT)
.as_bytes(),
)
}
- &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE => {
+ &cuda_types::cuda::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE => {
writer
.write_all(
stringify!(CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE)
.as_bytes(),
)
}
- &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_MAX => {
+ &cuda_types::cuda::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_MAX => {
writer.write_all(stringify!(CU_LAUNCH_ATTRIBUTE_MAX).as_bytes())
}
_ => write!(writer, "{}", self.0),
@@ -3265,7 +3267,7 @@ impl crate::format::CudaDisplay for cuda_types::CUlaunchAttributeID_enum {
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUlaunchAttributeValue_union__bindgen_ty_1 {
+for cuda_types::cuda::CUlaunchAttributeValue_union__bindgen_ty_1 {
fn write(
&self,
_fn_name: &'static str,
@@ -3282,7 +3284,7 @@ for cuda_types::CUlaunchAttributeValue_union__bindgen_ty_1 {
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUlaunchAttributeValue_union__bindgen_ty_2 {
+for cuda_types::cuda::CUlaunchAttributeValue_union__bindgen_ty_2 {
fn write(
&self,
_fn_name: &'static str,
@@ -3300,7 +3302,7 @@ for cuda_types::CUlaunchAttributeValue_union__bindgen_ty_2 {
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUlaunchAttributeValue_union__bindgen_ty_3 {
+for cuda_types::cuda::CUlaunchAttributeValue_union__bindgen_ty_3 {
fn write(
&self,
_fn_name: &'static str,
@@ -3315,7 +3317,7 @@ for cuda_types::CUlaunchAttributeValue_union__bindgen_ty_3 {
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUlaunchAttributeValue_union__bindgen_ty_4 {
+for cuda_types::cuda::CUlaunchAttributeValue_union__bindgen_ty_4 {
fn write(
&self,
_fn_name: &'static str,
@@ -3329,7 +3331,7 @@ for cuda_types::CUlaunchAttributeValue_union__bindgen_ty_4 {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUstreamCaptureStatus_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUstreamCaptureStatus_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -3337,13 +3339,13 @@ impl crate::format::CudaDisplay for cuda_types::CUstreamCaptureStatus_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUstreamCaptureStatus_enum::CU_STREAM_CAPTURE_STATUS_NONE => {
+ &cuda_types::cuda::CUstreamCaptureStatus_enum::CU_STREAM_CAPTURE_STATUS_NONE => {
writer.write_all(stringify!(CU_STREAM_CAPTURE_STATUS_NONE).as_bytes())
}
- &cuda_types::CUstreamCaptureStatus_enum::CU_STREAM_CAPTURE_STATUS_ACTIVE => {
+ &cuda_types::cuda::CUstreamCaptureStatus_enum::CU_STREAM_CAPTURE_STATUS_ACTIVE => {
writer.write_all(stringify!(CU_STREAM_CAPTURE_STATUS_ACTIVE).as_bytes())
}
- &cuda_types::CUstreamCaptureStatus_enum::CU_STREAM_CAPTURE_STATUS_INVALIDATED => {
+ &cuda_types::cuda::CUstreamCaptureStatus_enum::CU_STREAM_CAPTURE_STATUS_INVALIDATED => {
writer
.write_all(
stringify!(CU_STREAM_CAPTURE_STATUS_INVALIDATED).as_bytes(),
@@ -3353,7 +3355,7 @@ impl crate::format::CudaDisplay for cuda_types::CUstreamCaptureStatus_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUstreamCaptureMode_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUstreamCaptureMode_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -3361,23 +3363,23 @@ impl crate::format::CudaDisplay for cuda_types::CUstreamCaptureMode_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUstreamCaptureMode_enum::CU_STREAM_CAPTURE_MODE_GLOBAL => {
+ &cuda_types::cuda::CUstreamCaptureMode_enum::CU_STREAM_CAPTURE_MODE_GLOBAL => {
writer.write_all(stringify!(CU_STREAM_CAPTURE_MODE_GLOBAL).as_bytes())
}
- &cuda_types::CUstreamCaptureMode_enum::CU_STREAM_CAPTURE_MODE_THREAD_LOCAL => {
+ &cuda_types::cuda::CUstreamCaptureMode_enum::CU_STREAM_CAPTURE_MODE_THREAD_LOCAL => {
writer
.write_all(
stringify!(CU_STREAM_CAPTURE_MODE_THREAD_LOCAL).as_bytes(),
)
}
- &cuda_types::CUstreamCaptureMode_enum::CU_STREAM_CAPTURE_MODE_RELAXED => {
+ &cuda_types::cuda::CUstreamCaptureMode_enum::CU_STREAM_CAPTURE_MODE_RELAXED => {
writer.write_all(stringify!(CU_STREAM_CAPTURE_MODE_RELAXED).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUdriverProcAddress_flags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUdriverProcAddress_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -3385,14 +3387,14 @@ impl crate::format::CudaDisplay for cuda_types::CUdriverProcAddress_flags_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUdriverProcAddress_flags_enum::CU_GET_PROC_ADDRESS_DEFAULT => {
+ &cuda_types::cuda::CUdriverProcAddress_flags_enum::CU_GET_PROC_ADDRESS_DEFAULT => {
writer.write_all(stringify!(CU_GET_PROC_ADDRESS_DEFAULT).as_bytes())
}
- &cuda_types::CUdriverProcAddress_flags_enum::CU_GET_PROC_ADDRESS_LEGACY_STREAM => {
+ &cuda_types::cuda::CUdriverProcAddress_flags_enum::CU_GET_PROC_ADDRESS_LEGACY_STREAM => {
writer
.write_all(stringify!(CU_GET_PROC_ADDRESS_LEGACY_STREAM).as_bytes())
}
- &cuda_types::CUdriverProcAddress_flags_enum::CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM => {
+ &cuda_types::cuda::CUdriverProcAddress_flags_enum::CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM => {
writer
.write_all(
stringify!(CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM)
@@ -3403,7 +3405,8 @@ impl crate::format::CudaDisplay for cuda_types::CUdriverProcAddress_flags_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUdriverProcAddressQueryResult_enum {
+impl crate::format::CudaDisplay
+for cuda_types::cuda::CUdriverProcAddressQueryResult_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -3411,16 +3414,16 @@ impl crate::format::CudaDisplay for cuda_types::CUdriverProcAddressQueryResult_e
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUdriverProcAddressQueryResult_enum::CU_GET_PROC_ADDRESS_SUCCESS => {
+ &cuda_types::cuda::CUdriverProcAddressQueryResult_enum::CU_GET_PROC_ADDRESS_SUCCESS => {
writer.write_all(stringify!(CU_GET_PROC_ADDRESS_SUCCESS).as_bytes())
}
- &cuda_types::CUdriverProcAddressQueryResult_enum::CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND => {
+ &cuda_types::cuda::CUdriverProcAddressQueryResult_enum::CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND => {
writer
.write_all(
stringify!(CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND).as_bytes(),
)
}
- &cuda_types::CUdriverProcAddressQueryResult_enum::CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT => {
+ &cuda_types::cuda::CUdriverProcAddressQueryResult_enum::CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT => {
writer
.write_all(
stringify!(CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT).as_bytes(),
@@ -3430,7 +3433,7 @@ impl crate::format::CudaDisplay for cuda_types::CUdriverProcAddressQueryResult_e
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUexecAffinityType_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUexecAffinityType_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -3438,17 +3441,17 @@ impl crate::format::CudaDisplay for cuda_types::CUexecAffinityType_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUexecAffinityType_enum::CU_EXEC_AFFINITY_TYPE_SM_COUNT => {
+ &cuda_types::cuda::CUexecAffinityType_enum::CU_EXEC_AFFINITY_TYPE_SM_COUNT => {
writer.write_all(stringify!(CU_EXEC_AFFINITY_TYPE_SM_COUNT).as_bytes())
}
- &cuda_types::CUexecAffinityType_enum::CU_EXEC_AFFINITY_TYPE_MAX => {
+ &cuda_types::cuda::CUexecAffinityType_enum::CU_EXEC_AFFINITY_TYPE_MAX => {
writer.write_all(stringify!(CU_EXEC_AFFINITY_TYPE_MAX).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUexecAffinitySmCount_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUexecAffinitySmCount_st {
fn write(
&self,
_fn_name: &'static str,
@@ -3460,7 +3463,7 @@ impl crate::format::CudaDisplay for cuda_types::CUexecAffinitySmCount_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUlibraryOption_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUlibraryOption_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -3468,17 +3471,17 @@ impl crate::format::CudaDisplay for cuda_types::CUlibraryOption_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUlibraryOption_enum::CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE => {
+ &cuda_types::cuda::CUlibraryOption_enum::CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE => {
writer
.write_all(
stringify!(CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE)
.as_bytes(),
)
}
- &cuda_types::CUlibraryOption_enum::CU_LIBRARY_BINARY_IS_PRESERVED => {
+ &cuda_types::cuda::CUlibraryOption_enum::CU_LIBRARY_BINARY_IS_PRESERVED => {
writer.write_all(stringify!(CU_LIBRARY_BINARY_IS_PRESERVED).as_bytes())
}
- &cuda_types::CUlibraryOption_enum::CU_LIBRARY_NUM_OPTIONS => {
+ &cuda_types::cuda::CUlibraryOption_enum::CU_LIBRARY_NUM_OPTIONS => {
writer.write_all(stringify!(CU_LIBRARY_NUM_OPTIONS).as_bytes())
}
_ => write!(writer, "{}", self.0),
@@ -3486,7 +3489,7 @@ impl crate::format::CudaDisplay for cuda_types::CUlibraryOption_enum {
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUlibraryHostUniversalFunctionAndDataTable_st {
+for cuda_types::cuda::CUlibraryHostUniversalFunctionAndDataTable_st {
fn write(
&self,
_fn_name: &'static str,
@@ -3505,7 +3508,7 @@ for cuda_types::CUlibraryHostUniversalFunctionAndDataTable_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUdevice_P2PAttribute_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUdevice_P2PAttribute_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -3513,33 +3516,33 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_P2PAttribute_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUdevice_P2PAttribute_enum::CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK => {
+ &cuda_types::cuda::CUdevice_P2PAttribute_enum::CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK => {
writer
.write_all(
stringify!(CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK).as_bytes(),
)
}
- &cuda_types::CUdevice_P2PAttribute_enum::CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_P2PAttribute_enum::CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED => {
writer
.write_all(
stringify!(CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED).as_bytes(),
)
}
- &cuda_types::CUdevice_P2PAttribute_enum::CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_P2PAttribute_enum::CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED => {
writer
.write_all(
stringify!(CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED)
.as_bytes(),
)
}
- &cuda_types::CUdevice_P2PAttribute_enum::CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_P2PAttribute_enum::CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED => {
writer
.write_all(
stringify!(CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED)
.as_bytes(),
)
}
- &cuda_types::CUdevice_P2PAttribute_enum::CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED => {
+ &cuda_types::cuda::CUdevice_P2PAttribute_enum::CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED => {
writer
.write_all(
stringify!(CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED)
@@ -3550,7 +3553,7 @@ impl crate::format::CudaDisplay for cuda_types::CUdevice_P2PAttribute_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUstreamCallback {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUstreamCallback {
fn write(
&self,
_fn_name: &'static str,
@@ -3562,14 +3565,14 @@ impl crate::format::CudaDisplay for cuda_types::CUstreamCallback {
"{:p}",
unsafe {
std::mem::transmute::<
- cuda_types::CUstreamCallback,
+ cuda_types::cuda::CUstreamCallback,
*mut ::std::ffi::c_void,
>(*self)
},
)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUoccupancyB2DSize {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUoccupancyB2DSize {
fn write(
&self,
_fn_name: &'static str,
@@ -3581,14 +3584,14 @@ impl crate::format::CudaDisplay for cuda_types::CUoccupancyB2DSize {
"{:p}",
unsafe {
std::mem::transmute::<
- cuda_types::CUoccupancyB2DSize,
+ cuda_types::cuda::CUoccupancyB2DSize,
*mut ::std::ffi::c_void,
>(*self)
},
)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_MEMCPY2D_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_MEMCPY2D_st {
fn write(
&self,
_fn_name: &'static str,
@@ -3630,7 +3633,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_MEMCPY2D_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_MEMCPY3D_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_MEMCPY3D_st {
fn write(
&self,
_fn_name: &'static str,
@@ -3686,7 +3689,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_MEMCPY3D_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_MEMCPY3D_PEER_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_MEMCPY3D_PEER_st {
fn write(
&self,
_fn_name: &'static str,
@@ -3746,7 +3749,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_MEMCPY3D_PEER_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_MEMCPY_NODE_PARAMS_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_MEMCPY_NODE_PARAMS_st {
fn write(
&self,
_fn_name: &'static str,
@@ -3762,7 +3765,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_MEMCPY_NODE_PARAMS_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_ARRAY_DESCRIPTOR_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR_st {
fn write(
&self,
_fn_name: &'static str,
@@ -3780,7 +3783,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_ARRAY_DESCRIPTOR_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_ARRAY3D_DESCRIPTOR_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_ARRAY3D_DESCRIPTOR_st {
fn write(
&self,
_fn_name: &'static str,
@@ -3802,7 +3805,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_ARRAY3D_DESCRIPTOR_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_ARRAY_SPARSE_PROPERTIES_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_ARRAY_SPARSE_PROPERTIES_st {
fn write(
&self,
_fn_name: &'static str,
@@ -3821,7 +3824,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_ARRAY_SPARSE_PROPERTIES_st
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUDA_ARRAY_SPARSE_PROPERTIES_st__bindgen_ty_1 {
+for cuda_types::cuda::CUDA_ARRAY_SPARSE_PROPERTIES_st__bindgen_ty_1 {
fn write(
&self,
_fn_name: &'static str,
@@ -3837,7 +3840,7 @@ for cuda_types::CUDA_ARRAY_SPARSE_PROPERTIES_st__bindgen_ty_1 {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_ARRAY_MEMORY_REQUIREMENTS_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_ARRAY_MEMORY_REQUIREMENTS_st {
fn write(
&self,
_fn_name: &'static str,
@@ -3852,7 +3855,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_ARRAY_MEMORY_REQUIREMENTS_s
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_1 {
+for cuda_types::cuda::CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_1 {
fn write(
&self,
_fn_name: &'static str,
@@ -3865,7 +3868,7 @@ for cuda_types::CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_1 {
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_2 {
+for cuda_types::cuda::CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_2 {
fn write(
&self,
_fn_name: &'static str,
@@ -3878,7 +3881,7 @@ for cuda_types::CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_2 {
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_3 {
+for cuda_types::cuda::CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_3 {
fn write(
&self,
_fn_name: &'static str,
@@ -3897,7 +3900,7 @@ for cuda_types::CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_3 {
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_4 {
+for cuda_types::cuda::CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_4 {
fn write(
&self,
_fn_name: &'static str,
@@ -3919,7 +3922,7 @@ for cuda_types::CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_4 {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_TEXTURE_DESC_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_TEXTURE_DESC_st {
fn write(
&self,
_fn_name: &'static str,
@@ -3949,7 +3952,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_TEXTURE_DESC_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUresourceViewFormat_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUresourceViewFormat_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -3957,116 +3960,116 @@ impl crate::format::CudaDisplay for cuda_types::CUresourceViewFormat_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_NONE => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_NONE => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_NONE).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_1X8 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_1X8 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UINT_1X8).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_2X8 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_2X8 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UINT_2X8).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_4X8 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_4X8 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UINT_4X8).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_1X8 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_1X8 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SINT_1X8).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_2X8 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_2X8 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SINT_2X8).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_4X8 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_4X8 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SINT_4X8).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_1X16 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_1X16 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UINT_1X16).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_2X16 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_2X16 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UINT_2X16).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_4X16 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_4X16 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UINT_4X16).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_1X16 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_1X16 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SINT_1X16).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_2X16 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_2X16 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SINT_2X16).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_4X16 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_4X16 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SINT_4X16).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_1X32 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_1X32 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UINT_1X32).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_2X32 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_2X32 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UINT_2X32).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_4X32 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_4X32 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UINT_4X32).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_1X32 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_1X32 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SINT_1X32).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_2X32 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_2X32 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SINT_2X32).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_4X32 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_4X32 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SINT_4X32).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_FLOAT_1X16 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_FLOAT_1X16 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_FLOAT_1X16).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_FLOAT_2X16 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_FLOAT_2X16 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_FLOAT_2X16).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_FLOAT_4X16 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_FLOAT_4X16 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_FLOAT_4X16).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_FLOAT_1X32 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_FLOAT_1X32 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_FLOAT_1X32).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_FLOAT_2X32 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_FLOAT_2X32 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_FLOAT_2X32).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_FLOAT_4X32 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_FLOAT_4X32 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_FLOAT_4X32).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC1 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC1 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UNSIGNED_BC1).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC2 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC2 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UNSIGNED_BC2).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC3 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC3 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UNSIGNED_BC3).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC4 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC4 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UNSIGNED_BC4).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SIGNED_BC4 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SIGNED_BC4 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SIGNED_BC4).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC5 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC5 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UNSIGNED_BC5).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SIGNED_BC5 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SIGNED_BC5 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SIGNED_BC5).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC6H => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC6H => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UNSIGNED_BC6H).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SIGNED_BC6H => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SIGNED_BC6H => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SIGNED_BC6H).as_bytes())
}
- &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC7 => {
+ &cuda_types::cuda::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC7 => {
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UNSIGNED_BC7).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_RESOURCE_VIEW_DESC_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_RESOURCE_VIEW_DESC_st {
fn write(
&self,
_fn_name: &'static str,
@@ -4092,7 +4095,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_RESOURCE_VIEW_DESC_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUtensorMap_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUtensorMap_st {
fn write(
&self,
_fn_name: &'static str,
@@ -4104,7 +4107,7 @@ impl crate::format::CudaDisplay for cuda_types::CUtensorMap_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUtensorMapDataType_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUtensorMapDataType_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -4112,46 +4115,46 @@ impl crate::format::CudaDisplay for cuda_types::CUtensorMapDataType_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_UINT8 => {
+ &cuda_types::cuda::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_UINT8 => {
writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_UINT8).as_bytes())
}
- &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_UINT16 => {
+ &cuda_types::cuda::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_UINT16 => {
writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_UINT16).as_bytes())
}
- &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_UINT32 => {
+ &cuda_types::cuda::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_UINT32 => {
writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_UINT32).as_bytes())
}
- &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_INT32 => {
+ &cuda_types::cuda::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_INT32 => {
writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_INT32).as_bytes())
}
- &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_UINT64 => {
+ &cuda_types::cuda::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_UINT64 => {
writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_UINT64).as_bytes())
}
- &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_INT64 => {
+ &cuda_types::cuda::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_INT64 => {
writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_INT64).as_bytes())
}
- &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_FLOAT16 => {
+ &cuda_types::cuda::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_FLOAT16 => {
writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_FLOAT16).as_bytes())
}
- &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_FLOAT32 => {
+ &cuda_types::cuda::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_FLOAT32 => {
writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_FLOAT32).as_bytes())
}
- &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_FLOAT64 => {
+ &cuda_types::cuda::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_FLOAT64 => {
writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_FLOAT64).as_bytes())
}
- &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_BFLOAT16 => {
+ &cuda_types::cuda::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_BFLOAT16 => {
writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_BFLOAT16).as_bytes())
}
- &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ => {
+ &cuda_types::cuda::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ => {
writer
.write_all(
stringify!(CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ).as_bytes(),
)
}
- &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_TFLOAT32 => {
+ &cuda_types::cuda::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_TFLOAT32 => {
writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_TFLOAT32).as_bytes())
}
- &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ => {
+ &cuda_types::cuda::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ => {
writer
.write_all(
stringify!(CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ).as_bytes(),
@@ -4161,7 +4164,7 @@ impl crate::format::CudaDisplay for cuda_types::CUtensorMapDataType_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUtensorMapInterleave_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUtensorMapInterleave_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -4169,20 +4172,20 @@ impl crate::format::CudaDisplay for cuda_types::CUtensorMapInterleave_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUtensorMapInterleave_enum::CU_TENSOR_MAP_INTERLEAVE_NONE => {
+ &cuda_types::cuda::CUtensorMapInterleave_enum::CU_TENSOR_MAP_INTERLEAVE_NONE => {
writer.write_all(stringify!(CU_TENSOR_MAP_INTERLEAVE_NONE).as_bytes())
}
- &cuda_types::CUtensorMapInterleave_enum::CU_TENSOR_MAP_INTERLEAVE_16B => {
+ &cuda_types::cuda::CUtensorMapInterleave_enum::CU_TENSOR_MAP_INTERLEAVE_16B => {
writer.write_all(stringify!(CU_TENSOR_MAP_INTERLEAVE_16B).as_bytes())
}
- &cuda_types::CUtensorMapInterleave_enum::CU_TENSOR_MAP_INTERLEAVE_32B => {
+ &cuda_types::cuda::CUtensorMapInterleave_enum::CU_TENSOR_MAP_INTERLEAVE_32B => {
writer.write_all(stringify!(CU_TENSOR_MAP_INTERLEAVE_32B).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUtensorMapSwizzle_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUtensorMapSwizzle_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -4190,23 +4193,23 @@ impl crate::format::CudaDisplay for cuda_types::CUtensorMapSwizzle_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUtensorMapSwizzle_enum::CU_TENSOR_MAP_SWIZZLE_NONE => {
+ &cuda_types::cuda::CUtensorMapSwizzle_enum::CU_TENSOR_MAP_SWIZZLE_NONE => {
writer.write_all(stringify!(CU_TENSOR_MAP_SWIZZLE_NONE).as_bytes())
}
- &cuda_types::CUtensorMapSwizzle_enum::CU_TENSOR_MAP_SWIZZLE_32B => {
+ &cuda_types::cuda::CUtensorMapSwizzle_enum::CU_TENSOR_MAP_SWIZZLE_32B => {
writer.write_all(stringify!(CU_TENSOR_MAP_SWIZZLE_32B).as_bytes())
}
- &cuda_types::CUtensorMapSwizzle_enum::CU_TENSOR_MAP_SWIZZLE_64B => {
+ &cuda_types::cuda::CUtensorMapSwizzle_enum::CU_TENSOR_MAP_SWIZZLE_64B => {
writer.write_all(stringify!(CU_TENSOR_MAP_SWIZZLE_64B).as_bytes())
}
- &cuda_types::CUtensorMapSwizzle_enum::CU_TENSOR_MAP_SWIZZLE_128B => {
+ &cuda_types::cuda::CUtensorMapSwizzle_enum::CU_TENSOR_MAP_SWIZZLE_128B => {
writer.write_all(stringify!(CU_TENSOR_MAP_SWIZZLE_128B).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUtensorMapL2promotion_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUtensorMapL2promotion_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -4214,18 +4217,18 @@ impl crate::format::CudaDisplay for cuda_types::CUtensorMapL2promotion_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUtensorMapL2promotion_enum::CU_TENSOR_MAP_L2_PROMOTION_NONE => {
+ &cuda_types::cuda::CUtensorMapL2promotion_enum::CU_TENSOR_MAP_L2_PROMOTION_NONE => {
writer.write_all(stringify!(CU_TENSOR_MAP_L2_PROMOTION_NONE).as_bytes())
}
- &cuda_types::CUtensorMapL2promotion_enum::CU_TENSOR_MAP_L2_PROMOTION_L2_64B => {
+ &cuda_types::cuda::CUtensorMapL2promotion_enum::CU_TENSOR_MAP_L2_PROMOTION_L2_64B => {
writer
.write_all(stringify!(CU_TENSOR_MAP_L2_PROMOTION_L2_64B).as_bytes())
}
- &cuda_types::CUtensorMapL2promotion_enum::CU_TENSOR_MAP_L2_PROMOTION_L2_128B => {
+ &cuda_types::cuda::CUtensorMapL2promotion_enum::CU_TENSOR_MAP_L2_PROMOTION_L2_128B => {
writer
.write_all(stringify!(CU_TENSOR_MAP_L2_PROMOTION_L2_128B).as_bytes())
}
- &cuda_types::CUtensorMapL2promotion_enum::CU_TENSOR_MAP_L2_PROMOTION_L2_256B => {
+ &cuda_types::cuda::CUtensorMapL2promotion_enum::CU_TENSOR_MAP_L2_PROMOTION_L2_256B => {
writer
.write_all(stringify!(CU_TENSOR_MAP_L2_PROMOTION_L2_256B).as_bytes())
}
@@ -4233,7 +4236,7 @@ impl crate::format::CudaDisplay for cuda_types::CUtensorMapL2promotion_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUtensorMapFloatOOBfill_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUtensorMapFloatOOBfill_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -4241,11 +4244,11 @@ impl crate::format::CudaDisplay for cuda_types::CUtensorMapFloatOOBfill_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUtensorMapFloatOOBfill_enum::CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE => {
+ &cuda_types::cuda::CUtensorMapFloatOOBfill_enum::CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE => {
writer
.write_all(stringify!(CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE).as_bytes())
}
- &cuda_types::CUtensorMapFloatOOBfill_enum::CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA => {
+ &cuda_types::cuda::CUtensorMapFloatOOBfill_enum::CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA => {
writer
.write_all(
stringify!(CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA)
@@ -4256,7 +4259,8 @@ impl crate::format::CudaDisplay for cuda_types::CUtensorMapFloatOOBfill_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st {
+impl crate::format::CudaDisplay
+for cuda_types::cuda::CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st {
fn write(
&self,
_fn_name: &'static str,
@@ -4271,7 +4275,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_POINTER_ATTRIBUTE_P2P_TOKEN
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum {
+for cuda_types::cuda::CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -4279,19 +4283,19 @@ for cuda_types::CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum::CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE => {
+ &cuda_types::cuda::CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum::CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE => {
writer
.write_all(
stringify!(CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE).as_bytes(),
)
}
- &cuda_types::CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum::CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ => {
+ &cuda_types::cuda::CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum::CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ => {
writer
.write_all(
stringify!(CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ).as_bytes(),
)
}
- &cuda_types::CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum::CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE => {
+ &cuda_types::cuda::CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum::CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE => {
writer
.write_all(
stringify!(CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE).as_bytes(),
@@ -4301,7 +4305,7 @@ for cuda_types::CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_LAUNCH_PARAMS_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_LAUNCH_PARAMS_st {
fn write(
&self,
_fn_name: &'static str,
@@ -4331,7 +4335,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_LAUNCH_PARAMS_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUexternalMemoryHandleType_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUexternalMemoryHandleType_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -4339,54 +4343,54 @@ impl crate::format::CudaDisplay for cuda_types::CUexternalMemoryHandleType_enum
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD => {
+ &cuda_types::cuda::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD => {
writer
.write_all(
stringify!(CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD).as_bytes(),
)
}
- &cuda_types::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 => {
+ &cuda_types::cuda::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 => {
writer
.write_all(
stringify!(CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32)
.as_bytes(),
)
}
- &cuda_types::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT => {
+ &cuda_types::cuda::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT => {
writer
.write_all(
stringify!(CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT)
.as_bytes(),
)
}
- &cuda_types::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP => {
+ &cuda_types::cuda::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP => {
writer
.write_all(
stringify!(CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP).as_bytes(),
)
}
- &cuda_types::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE => {
+ &cuda_types::cuda::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE => {
writer
.write_all(
stringify!(CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE)
.as_bytes(),
)
}
- &cuda_types::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE => {
+ &cuda_types::cuda::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE => {
writer
.write_all(
stringify!(CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE)
.as_bytes(),
)
}
- &cuda_types::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT => {
+ &cuda_types::cuda::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT => {
writer
.write_all(
stringify!(CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT)
.as_bytes(),
)
}
- &cuda_types::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF => {
+ &cuda_types::cuda::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF => {
writer
.write_all(
stringify!(CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF).as_bytes(),
@@ -4397,7 +4401,7 @@ impl crate::format::CudaDisplay for cuda_types::CUexternalMemoryHandleType_enum
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1 {
+for cuda_types::cuda::CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1 {
fn write(
&self,
_fn_name: &'static str,
@@ -4411,7 +4415,8 @@ for cuda_types::CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st {
+impl crate::format::CudaDisplay
+for cuda_types::cuda::CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st {
fn write(
&self,
_fn_name: &'static str,
@@ -4428,7 +4433,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_EXTERNAL_MEMORY_BUFFER_DESC
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st {
+for cuda_types::cuda::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st {
fn write(
&self,
_fn_name: &'static str,
@@ -4444,7 +4449,8 @@ for cuda_types::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUexternalSemaphoreHandleType_enum {
+impl crate::format::CudaDisplay
+for cuda_types::cuda::CUexternalSemaphoreHandleType_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -4452,56 +4458,56 @@ impl crate::format::CudaDisplay for cuda_types::CUexternalSemaphoreHandleType_en
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD => {
+ &cuda_types::cuda::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD => {
writer
.write_all(
stringify!(CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD)
.as_bytes(),
)
}
- &cuda_types::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 => {
+ &cuda_types::cuda::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 => {
writer
.write_all(
stringify!(CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32)
.as_bytes(),
)
}
- &cuda_types::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT => {
+ &cuda_types::cuda::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT => {
writer
.write_all(
stringify!(CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT)
.as_bytes(),
)
}
- &cuda_types::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE => {
+ &cuda_types::cuda::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE => {
writer
.write_all(
stringify!(CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE)
.as_bytes(),
)
}
- &cuda_types::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE => {
+ &cuda_types::cuda::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE => {
writer
.write_all(
stringify!(CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE)
.as_bytes(),
)
}
- &cuda_types::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC => {
+ &cuda_types::cuda::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC => {
writer
.write_all(
stringify!(CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC)
.as_bytes(),
)
}
- &cuda_types::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX => {
+ &cuda_types::cuda::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX => {
writer
.write_all(
stringify!(CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX)
.as_bytes(),
)
}
- &cuda_types::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT => {
+ &cuda_types::cuda::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT => {
writer
.write_all(
stringify!(
@@ -4510,7 +4516,7 @@ impl crate::format::CudaDisplay for cuda_types::CUexternalSemaphoreHandleType_en
.as_bytes(),
)
}
- &cuda_types::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD => {
+ &cuda_types::cuda::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD => {
writer
.write_all(
stringify!(
@@ -4519,7 +4525,7 @@ impl crate::format::CudaDisplay for cuda_types::CUexternalSemaphoreHandleType_en
.as_bytes(),
)
}
- &cuda_types::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 => {
+ &cuda_types::cuda::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 => {
writer
.write_all(
stringify!(
@@ -4533,7 +4539,7 @@ impl crate::format::CudaDisplay for cuda_types::CUexternalSemaphoreHandleType_en
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1 {
+for cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1 {
fn write(
&self,
_fn_name: &'static str,
@@ -4548,7 +4554,7 @@ for cuda_types::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st {
+for cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st {
fn write(
&self,
_fn_name: &'static str,
@@ -4563,7 +4569,7 @@ for cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st {
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1 {
+for cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1 {
fn write(
&self,
_fn_name: &'static str,
@@ -4580,7 +4586,7 @@ for cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1 {
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_1 {
+for cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_1 {
fn write(
&self,
_fn_name: &'static str,
@@ -4593,7 +4599,7 @@ for cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_3 {
+for cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_3 {
fn write(
&self,
_fn_name: &'static str,
@@ -4605,7 +4611,8 @@ for cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st {
+impl crate::format::CudaDisplay
+for cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st {
fn write(
&self,
_fn_name: &'static str,
@@ -4620,7 +4627,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PAR
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1 {
+for cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1 {
fn write(
&self,
_fn_name: &'static str,
@@ -4637,7 +4644,7 @@ for cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1 {
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_1 {
+for cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_1 {
fn write(
&self,
_fn_name: &'static str,
@@ -4650,7 +4657,7 @@ for cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_3 {
+for cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_3 {
fn write(
&self,
_fn_name: &'static str,
@@ -4664,7 +4671,8 @@ for cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st {
+impl crate::format::CudaDisplay
+for cuda_types::cuda::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st {
fn write(
&self,
_fn_name: &'static str,
@@ -4680,7 +4688,8 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st {
+impl crate::format::CudaDisplay
+for cuda_types::cuda::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st {
fn write(
&self,
_fn_name: &'static str,
@@ -4696,7 +4705,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_EXT_SEM_WAIT_NODE_PARAMS_st {
fn write(
&self,
_fn_name: &'static str,
@@ -4712,7 +4721,8 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS_st
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st {
+impl crate::format::CudaDisplay
+for cuda_types::cuda::CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st {
fn write(
&self,
_fn_name: &'static str,
@@ -4728,7 +4738,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmemAllocationHandleType_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmemAllocationHandleType_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -4736,32 +4746,32 @@ impl crate::format::CudaDisplay for cuda_types::CUmemAllocationHandleType_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUmemAllocationHandleType_enum::CU_MEM_HANDLE_TYPE_NONE => {
+ &cuda_types::cuda::CUmemAllocationHandleType_enum::CU_MEM_HANDLE_TYPE_NONE => {
writer.write_all(stringify!(CU_MEM_HANDLE_TYPE_NONE).as_bytes())
}
- &cuda_types::CUmemAllocationHandleType_enum::CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR => {
+ &cuda_types::cuda::CUmemAllocationHandleType_enum::CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR => {
writer
.write_all(
stringify!(CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR).as_bytes(),
)
}
- &cuda_types::CUmemAllocationHandleType_enum::CU_MEM_HANDLE_TYPE_WIN32 => {
+ &cuda_types::cuda::CUmemAllocationHandleType_enum::CU_MEM_HANDLE_TYPE_WIN32 => {
writer.write_all(stringify!(CU_MEM_HANDLE_TYPE_WIN32).as_bytes())
}
- &cuda_types::CUmemAllocationHandleType_enum::CU_MEM_HANDLE_TYPE_WIN32_KMT => {
+ &cuda_types::cuda::CUmemAllocationHandleType_enum::CU_MEM_HANDLE_TYPE_WIN32_KMT => {
writer.write_all(stringify!(CU_MEM_HANDLE_TYPE_WIN32_KMT).as_bytes())
}
- &cuda_types::CUmemAllocationHandleType_enum::CU_MEM_HANDLE_TYPE_FABRIC => {
+ &cuda_types::cuda::CUmemAllocationHandleType_enum::CU_MEM_HANDLE_TYPE_FABRIC => {
writer.write_all(stringify!(CU_MEM_HANDLE_TYPE_FABRIC).as_bytes())
}
- &cuda_types::CUmemAllocationHandleType_enum::CU_MEM_HANDLE_TYPE_MAX => {
+ &cuda_types::cuda::CUmemAllocationHandleType_enum::CU_MEM_HANDLE_TYPE_MAX => {
writer.write_all(stringify!(CU_MEM_HANDLE_TYPE_MAX).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmemAccess_flags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmemAccess_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -4769,24 +4779,24 @@ impl crate::format::CudaDisplay for cuda_types::CUmemAccess_flags_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUmemAccess_flags_enum::CU_MEM_ACCESS_FLAGS_PROT_NONE => {
+ &cuda_types::cuda::CUmemAccess_flags_enum::CU_MEM_ACCESS_FLAGS_PROT_NONE => {
writer.write_all(stringify!(CU_MEM_ACCESS_FLAGS_PROT_NONE).as_bytes())
}
- &cuda_types::CUmemAccess_flags_enum::CU_MEM_ACCESS_FLAGS_PROT_READ => {
+ &cuda_types::cuda::CUmemAccess_flags_enum::CU_MEM_ACCESS_FLAGS_PROT_READ => {
writer.write_all(stringify!(CU_MEM_ACCESS_FLAGS_PROT_READ).as_bytes())
}
- &cuda_types::CUmemAccess_flags_enum::CU_MEM_ACCESS_FLAGS_PROT_READWRITE => {
+ &cuda_types::cuda::CUmemAccess_flags_enum::CU_MEM_ACCESS_FLAGS_PROT_READWRITE => {
writer
.write_all(stringify!(CU_MEM_ACCESS_FLAGS_PROT_READWRITE).as_bytes())
}
- &cuda_types::CUmemAccess_flags_enum::CU_MEM_ACCESS_FLAGS_PROT_MAX => {
+ &cuda_types::cuda::CUmemAccess_flags_enum::CU_MEM_ACCESS_FLAGS_PROT_MAX => {
writer.write_all(stringify!(CU_MEM_ACCESS_FLAGS_PROT_MAX).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmemLocationType_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmemLocationType_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -4794,32 +4804,32 @@ impl crate::format::CudaDisplay for cuda_types::CUmemLocationType_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUmemLocationType_enum::CU_MEM_LOCATION_TYPE_INVALID => {
+ &cuda_types::cuda::CUmemLocationType_enum::CU_MEM_LOCATION_TYPE_INVALID => {
writer.write_all(stringify!(CU_MEM_LOCATION_TYPE_INVALID).as_bytes())
}
- &cuda_types::CUmemLocationType_enum::CU_MEM_LOCATION_TYPE_DEVICE => {
+ &cuda_types::cuda::CUmemLocationType_enum::CU_MEM_LOCATION_TYPE_DEVICE => {
writer.write_all(stringify!(CU_MEM_LOCATION_TYPE_DEVICE).as_bytes())
}
- &cuda_types::CUmemLocationType_enum::CU_MEM_LOCATION_TYPE_HOST => {
+ &cuda_types::cuda::CUmemLocationType_enum::CU_MEM_LOCATION_TYPE_HOST => {
writer.write_all(stringify!(CU_MEM_LOCATION_TYPE_HOST).as_bytes())
}
- &cuda_types::CUmemLocationType_enum::CU_MEM_LOCATION_TYPE_HOST_NUMA => {
+ &cuda_types::cuda::CUmemLocationType_enum::CU_MEM_LOCATION_TYPE_HOST_NUMA => {
writer.write_all(stringify!(CU_MEM_LOCATION_TYPE_HOST_NUMA).as_bytes())
}
- &cuda_types::CUmemLocationType_enum::CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT => {
+ &cuda_types::cuda::CUmemLocationType_enum::CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT => {
writer
.write_all(
stringify!(CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT).as_bytes(),
)
}
- &cuda_types::CUmemLocationType_enum::CU_MEM_LOCATION_TYPE_MAX => {
+ &cuda_types::cuda::CUmemLocationType_enum::CU_MEM_LOCATION_TYPE_MAX => {
writer.write_all(stringify!(CU_MEM_LOCATION_TYPE_MAX).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmemAllocationType_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmemAllocationType_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -4827,20 +4837,21 @@ impl crate::format::CudaDisplay for cuda_types::CUmemAllocationType_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUmemAllocationType_enum::CU_MEM_ALLOCATION_TYPE_INVALID => {
+ &cuda_types::cuda::CUmemAllocationType_enum::CU_MEM_ALLOCATION_TYPE_INVALID => {
writer.write_all(stringify!(CU_MEM_ALLOCATION_TYPE_INVALID).as_bytes())
}
- &cuda_types::CUmemAllocationType_enum::CU_MEM_ALLOCATION_TYPE_PINNED => {
+ &cuda_types::cuda::CUmemAllocationType_enum::CU_MEM_ALLOCATION_TYPE_PINNED => {
writer.write_all(stringify!(CU_MEM_ALLOCATION_TYPE_PINNED).as_bytes())
}
- &cuda_types::CUmemAllocationType_enum::CU_MEM_ALLOCATION_TYPE_MAX => {
+ &cuda_types::cuda::CUmemAllocationType_enum::CU_MEM_ALLOCATION_TYPE_MAX => {
writer.write_all(stringify!(CU_MEM_ALLOCATION_TYPE_MAX).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmemAllocationGranularity_flags_enum {
+impl crate::format::CudaDisplay
+for cuda_types::cuda::CUmemAllocationGranularity_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -4848,10 +4859,10 @@ impl crate::format::CudaDisplay for cuda_types::CUmemAllocationGranularity_flags
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUmemAllocationGranularity_flags_enum::CU_MEM_ALLOC_GRANULARITY_MINIMUM => {
+ &cuda_types::cuda::CUmemAllocationGranularity_flags_enum::CU_MEM_ALLOC_GRANULARITY_MINIMUM => {
writer.write_all(stringify!(CU_MEM_ALLOC_GRANULARITY_MINIMUM).as_bytes())
}
- &cuda_types::CUmemAllocationGranularity_flags_enum::CU_MEM_ALLOC_GRANULARITY_RECOMMENDED => {
+ &cuda_types::cuda::CUmemAllocationGranularity_flags_enum::CU_MEM_ALLOC_GRANULARITY_RECOMMENDED => {
writer
.write_all(
stringify!(CU_MEM_ALLOC_GRANULARITY_RECOMMENDED).as_bytes(),
@@ -4861,7 +4872,7 @@ impl crate::format::CudaDisplay for cuda_types::CUmemAllocationGranularity_flags
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmemRangeHandleType_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmemRangeHandleType_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -4869,20 +4880,20 @@ impl crate::format::CudaDisplay for cuda_types::CUmemRangeHandleType_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUmemRangeHandleType_enum::CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD => {
+ &cuda_types::cuda::CUmemRangeHandleType_enum::CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD => {
writer
.write_all(
stringify!(CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD).as_bytes(),
)
}
- &cuda_types::CUmemRangeHandleType_enum::CU_MEM_RANGE_HANDLE_TYPE_MAX => {
+ &cuda_types::cuda::CUmemRangeHandleType_enum::CU_MEM_RANGE_HANDLE_TYPE_MAX => {
writer.write_all(stringify!(CU_MEM_RANGE_HANDLE_TYPE_MAX).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUarraySparseSubresourceType_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUarraySparseSubresourceType_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -4890,14 +4901,14 @@ impl crate::format::CudaDisplay for cuda_types::CUarraySparseSubresourceType_enu
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUarraySparseSubresourceType_enum::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL => {
+ &cuda_types::cuda::CUarraySparseSubresourceType_enum::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL => {
writer
.write_all(
stringify!(CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL)
.as_bytes(),
)
}
- &cuda_types::CUarraySparseSubresourceType_enum::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL => {
+ &cuda_types::cuda::CUarraySparseSubresourceType_enum::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL => {
writer
.write_all(
stringify!(CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL).as_bytes(),
@@ -4907,7 +4918,7 @@ impl crate::format::CudaDisplay for cuda_types::CUarraySparseSubresourceType_enu
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmemOperationType_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmemOperationType_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -4915,17 +4926,17 @@ impl crate::format::CudaDisplay for cuda_types::CUmemOperationType_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUmemOperationType_enum::CU_MEM_OPERATION_TYPE_MAP => {
+ &cuda_types::cuda::CUmemOperationType_enum::CU_MEM_OPERATION_TYPE_MAP => {
writer.write_all(stringify!(CU_MEM_OPERATION_TYPE_MAP).as_bytes())
}
- &cuda_types::CUmemOperationType_enum::CU_MEM_OPERATION_TYPE_UNMAP => {
+ &cuda_types::cuda::CUmemOperationType_enum::CU_MEM_OPERATION_TYPE_UNMAP => {
writer.write_all(stringify!(CU_MEM_OPERATION_TYPE_UNMAP).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmemHandleType_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmemHandleType_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -4933,7 +4944,7 @@ impl crate::format::CudaDisplay for cuda_types::CUmemHandleType_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUmemHandleType_enum::CU_MEM_HANDLE_TYPE_GENERIC => {
+ &cuda_types::cuda::CUmemHandleType_enum::CU_MEM_HANDLE_TYPE_GENERIC => {
writer.write_all(stringify!(CU_MEM_HANDLE_TYPE_GENERIC).as_bytes())
}
_ => write!(writer, "{}", self.0),
@@ -4941,7 +4952,7 @@ impl crate::format::CudaDisplay for cuda_types::CUmemHandleType_enum {
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_1 {
+for cuda_types::cuda::CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_1 {
fn write(
&self,
_fn_name: &'static str,
@@ -4968,7 +4979,7 @@ for cuda_types::CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_1 {
}
}
impl crate::format::CudaDisplay
-for cuda_types::CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_2 {
+for cuda_types::cuda::CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_2 {
fn write(
&self,
_fn_name: &'static str,
@@ -4984,7 +4995,7 @@ for cuda_types::CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_2 {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmemLocation_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmemLocation_st {
fn write(
&self,
_fn_name: &'static str,
@@ -4998,7 +5009,7 @@ impl crate::format::CudaDisplay for cuda_types::CUmemLocation_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmemAllocationCompType_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmemAllocationCompType_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -5006,17 +5017,17 @@ impl crate::format::CudaDisplay for cuda_types::CUmemAllocationCompType_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUmemAllocationCompType_enum::CU_MEM_ALLOCATION_COMP_NONE => {
+ &cuda_types::cuda::CUmemAllocationCompType_enum::CU_MEM_ALLOCATION_COMP_NONE => {
writer.write_all(stringify!(CU_MEM_ALLOCATION_COMP_NONE).as_bytes())
}
- &cuda_types::CUmemAllocationCompType_enum::CU_MEM_ALLOCATION_COMP_GENERIC => {
+ &cuda_types::cuda::CUmemAllocationCompType_enum::CU_MEM_ALLOCATION_COMP_GENERIC => {
writer.write_all(stringify!(CU_MEM_ALLOCATION_COMP_GENERIC).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmemAllocationProp_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmemAllocationProp_st {
fn write(
&self,
_fn_name: &'static str,
@@ -5040,7 +5051,8 @@ impl crate::format::CudaDisplay for cuda_types::CUmemAllocationProp_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmemAllocationProp_st__bindgen_ty_1 {
+impl crate::format::CudaDisplay
+for cuda_types::cuda::CUmemAllocationProp_st__bindgen_ty_1 {
fn write(
&self,
_fn_name: &'static str,
@@ -5059,7 +5071,7 @@ impl crate::format::CudaDisplay for cuda_types::CUmemAllocationProp_st__bindgen_
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmulticastGranularity_flags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmulticastGranularity_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -5067,10 +5079,10 @@ impl crate::format::CudaDisplay for cuda_types::CUmulticastGranularity_flags_enu
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUmulticastGranularity_flags_enum::CU_MULTICAST_GRANULARITY_MINIMUM => {
+ &cuda_types::cuda::CUmulticastGranularity_flags_enum::CU_MULTICAST_GRANULARITY_MINIMUM => {
writer.write_all(stringify!(CU_MULTICAST_GRANULARITY_MINIMUM).as_bytes())
}
- &cuda_types::CUmulticastGranularity_flags_enum::CU_MULTICAST_GRANULARITY_RECOMMENDED => {
+ &cuda_types::cuda::CUmulticastGranularity_flags_enum::CU_MULTICAST_GRANULARITY_RECOMMENDED => {
writer
.write_all(
stringify!(CU_MULTICAST_GRANULARITY_RECOMMENDED).as_bytes(),
@@ -5080,7 +5092,7 @@ impl crate::format::CudaDisplay for cuda_types::CUmulticastGranularity_flags_enu
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmulticastObjectProp_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmulticastObjectProp_st {
fn write(
&self,
_fn_name: &'static str,
@@ -5098,7 +5110,7 @@ impl crate::format::CudaDisplay for cuda_types::CUmulticastObjectProp_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmemAccessDesc_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmemAccessDesc_st {
fn write(
&self,
_fn_name: &'static str,
@@ -5112,7 +5124,7 @@ impl crate::format::CudaDisplay for cuda_types::CUmemAccessDesc_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUgraphExecUpdateResult_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUgraphExecUpdateResult_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -5120,47 +5132,47 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphExecUpdateResult_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_SUCCESS => {
+ &cuda_types::cuda::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_SUCCESS => {
writer.write_all(stringify!(CU_GRAPH_EXEC_UPDATE_SUCCESS).as_bytes())
}
- &cuda_types::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR => {
+ &cuda_types::cuda::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR => {
writer.write_all(stringify!(CU_GRAPH_EXEC_UPDATE_ERROR).as_bytes())
}
- &cuda_types::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED => {
+ &cuda_types::cuda::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED => {
writer
.write_all(
stringify!(CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED)
.as_bytes(),
)
}
- &cuda_types::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED => {
+ &cuda_types::cuda::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED => {
writer
.write_all(
stringify!(CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED)
.as_bytes(),
)
}
- &cuda_types::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED => {
+ &cuda_types::cuda::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED => {
writer
.write_all(
stringify!(CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED)
.as_bytes(),
)
}
- &cuda_types::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED => {
+ &cuda_types::cuda::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED => {
writer
.write_all(
stringify!(CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED)
.as_bytes(),
)
}
- &cuda_types::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED => {
+ &cuda_types::cuda::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED => {
writer
.write_all(
stringify!(CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED).as_bytes(),
)
}
- &cuda_types::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE => {
+ &cuda_types::cuda::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE => {
writer
.write_all(
stringify!(
@@ -5169,7 +5181,7 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphExecUpdateResult_enum {
.as_bytes(),
)
}
- &cuda_types::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED => {
+ &cuda_types::cuda::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED => {
writer
.write_all(
stringify!(CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED)
@@ -5180,7 +5192,7 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphExecUpdateResult_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUgraphExecUpdateResultInfo_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUgraphExecUpdateResultInfo_st {
fn write(
&self,
_fn_name: &'static str,
@@ -5196,7 +5208,7 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphExecUpdateResultInfo_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmemPool_attribute_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmemPool_attribute_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -5204,51 +5216,51 @@ impl crate::format::CudaDisplay for cuda_types::CUmemPool_attribute_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES => {
+ &cuda_types::cuda::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES => {
writer
.write_all(
stringify!(CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES)
.as_bytes(),
)
}
- &cuda_types::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC => {
+ &cuda_types::cuda::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC => {
writer
.write_all(
stringify!(CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC).as_bytes(),
)
}
- &cuda_types::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES => {
+ &cuda_types::cuda::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES => {
writer
.write_all(
stringify!(CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES)
.as_bytes(),
)
}
- &cuda_types::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_RELEASE_THRESHOLD => {
+ &cuda_types::cuda::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_RELEASE_THRESHOLD => {
writer
.write_all(stringify!(CU_MEMPOOL_ATTR_RELEASE_THRESHOLD).as_bytes())
}
- &cuda_types::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT => {
+ &cuda_types::cuda::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT => {
writer
.write_all(
stringify!(CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT).as_bytes(),
)
}
- &cuda_types::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH => {
+ &cuda_types::cuda::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH => {
writer
.write_all(stringify!(CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH).as_bytes())
}
- &cuda_types::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_USED_MEM_CURRENT => {
+ &cuda_types::cuda::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_USED_MEM_CURRENT => {
writer.write_all(stringify!(CU_MEMPOOL_ATTR_USED_MEM_CURRENT).as_bytes())
}
- &cuda_types::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_USED_MEM_HIGH => {
+ &cuda_types::cuda::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_USED_MEM_HIGH => {
writer.write_all(stringify!(CU_MEMPOOL_ATTR_USED_MEM_HIGH).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUmemPoolProps_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmemPoolProps_st {
fn write(
&self,
_fn_name: &'static str,
@@ -5271,7 +5283,7 @@ impl crate::format::CudaDisplay for cuda_types::CUmemPoolProps_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_MEM_ALLOC_NODE_PARAMS_v1_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_MEM_ALLOC_NODE_PARAMS_v1_st {
fn write(
&self,
_fn_name: &'static str,
@@ -5291,7 +5303,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_MEM_ALLOC_NODE_PARAMS_v1_st
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_MEM_ALLOC_NODE_PARAMS_v2_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_MEM_ALLOC_NODE_PARAMS_v2_st {
fn write(
&self,
_fn_name: &'static str,
@@ -5311,7 +5323,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_MEM_ALLOC_NODE_PARAMS_v2_st
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_MEM_FREE_NODE_PARAMS_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_MEM_FREE_NODE_PARAMS_st {
fn write(
&self,
_fn_name: &'static str,
@@ -5323,7 +5335,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_MEM_FREE_NODE_PARAMS_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUgraphMem_attribute_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUgraphMem_attribute_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -5331,20 +5343,20 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphMem_attribute_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUgraphMem_attribute_enum::CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT => {
+ &cuda_types::cuda::CUgraphMem_attribute_enum::CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT => {
writer
.write_all(stringify!(CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT).as_bytes())
}
- &cuda_types::CUgraphMem_attribute_enum::CU_GRAPH_MEM_ATTR_USED_MEM_HIGH => {
+ &cuda_types::cuda::CUgraphMem_attribute_enum::CU_GRAPH_MEM_ATTR_USED_MEM_HIGH => {
writer.write_all(stringify!(CU_GRAPH_MEM_ATTR_USED_MEM_HIGH).as_bytes())
}
- &cuda_types::CUgraphMem_attribute_enum::CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT => {
+ &cuda_types::cuda::CUgraphMem_attribute_enum::CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT => {
writer
.write_all(
stringify!(CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT).as_bytes(),
)
}
- &cuda_types::CUgraphMem_attribute_enum::CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH => {
+ &cuda_types::cuda::CUgraphMem_attribute_enum::CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH => {
writer
.write_all(
stringify!(CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH).as_bytes(),
@@ -5354,7 +5366,7 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphMem_attribute_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_CHILD_GRAPH_NODE_PARAMS_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_CHILD_GRAPH_NODE_PARAMS_st {
fn write(
&self,
_fn_name: &'static str,
@@ -5366,7 +5378,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_CHILD_GRAPH_NODE_PARAMS_st
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_EVENT_RECORD_NODE_PARAMS_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_EVENT_RECORD_NODE_PARAMS_st {
fn write(
&self,
_fn_name: &'static str,
@@ -5378,7 +5390,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_EVENT_RECORD_NODE_PARAMS_st
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_EVENT_WAIT_NODE_PARAMS_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_EVENT_WAIT_NODE_PARAMS_st {
fn write(
&self,
_fn_name: &'static str,
@@ -5390,7 +5402,8 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_EVENT_WAIT_NODE_PARAMS_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUflushGPUDirectRDMAWritesOptions_enum {
+impl crate::format::CudaDisplay
+for cuda_types::cuda::CUflushGPUDirectRDMAWritesOptions_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -5398,14 +5411,14 @@ impl crate::format::CudaDisplay for cuda_types::CUflushGPUDirectRDMAWritesOption
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUflushGPUDirectRDMAWritesOptions_enum::CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST => {
+ &cuda_types::cuda::CUflushGPUDirectRDMAWritesOptions_enum::CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST => {
writer
.write_all(
stringify!(CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST)
.as_bytes(),
)
}
- &cuda_types::CUflushGPUDirectRDMAWritesOptions_enum::CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS => {
+ &cuda_types::cuda::CUflushGPUDirectRDMAWritesOptions_enum::CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS => {
writer
.write_all(
stringify!(CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS)
@@ -5416,7 +5429,8 @@ impl crate::format::CudaDisplay for cuda_types::CUflushGPUDirectRDMAWritesOption
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUGPUDirectRDMAWritesOrdering_enum {
+impl crate::format::CudaDisplay
+for cuda_types::cuda::CUGPUDirectRDMAWritesOrdering_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -5424,19 +5438,19 @@ impl crate::format::CudaDisplay for cuda_types::CUGPUDirectRDMAWritesOrdering_en
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUGPUDirectRDMAWritesOrdering_enum::CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE => {
+ &cuda_types::cuda::CUGPUDirectRDMAWritesOrdering_enum::CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE => {
writer
.write_all(
stringify!(CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE).as_bytes(),
)
}
- &cuda_types::CUGPUDirectRDMAWritesOrdering_enum::CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER => {
+ &cuda_types::cuda::CUGPUDirectRDMAWritesOrdering_enum::CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER => {
writer
.write_all(
stringify!(CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER).as_bytes(),
)
}
- &cuda_types::CUGPUDirectRDMAWritesOrdering_enum::CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES => {
+ &cuda_types::cuda::CUGPUDirectRDMAWritesOrdering_enum::CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES => {
writer
.write_all(
stringify!(CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES)
@@ -5447,7 +5461,8 @@ impl crate::format::CudaDisplay for cuda_types::CUGPUDirectRDMAWritesOrdering_en
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUflushGPUDirectRDMAWritesScope_enum {
+impl crate::format::CudaDisplay
+for cuda_types::cuda::CUflushGPUDirectRDMAWritesScope_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -5455,13 +5470,13 @@ impl crate::format::CudaDisplay for cuda_types::CUflushGPUDirectRDMAWritesScope_
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUflushGPUDirectRDMAWritesScope_enum::CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER => {
+ &cuda_types::cuda::CUflushGPUDirectRDMAWritesScope_enum::CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER => {
writer
.write_all(
stringify!(CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER).as_bytes(),
)
}
- &cuda_types::CUflushGPUDirectRDMAWritesScope_enum::CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES => {
+ &cuda_types::cuda::CUflushGPUDirectRDMAWritesScope_enum::CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES => {
writer
.write_all(
stringify!(CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES)
@@ -5472,7 +5487,8 @@ impl crate::format::CudaDisplay for cuda_types::CUflushGPUDirectRDMAWritesScope_
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUflushGPUDirectRDMAWritesTarget_enum {
+impl crate::format::CudaDisplay
+for cuda_types::cuda::CUflushGPUDirectRDMAWritesTarget_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -5480,7 +5496,7 @@ impl crate::format::CudaDisplay for cuda_types::CUflushGPUDirectRDMAWritesTarget
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUflushGPUDirectRDMAWritesTarget_enum::CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX => {
+ &cuda_types::cuda::CUflushGPUDirectRDMAWritesTarget_enum::CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX => {
writer
.write_all(
stringify!(CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX)
@@ -5491,7 +5507,7 @@ impl crate::format::CudaDisplay for cuda_types::CUflushGPUDirectRDMAWritesTarget
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUgraphDebugDot_flags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUgraphDebugDot_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -5499,100 +5515,100 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphDebugDot_flags_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE => {
+ &cuda_types::cuda::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE => {
writer.write_all(stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE).as_bytes())
}
- &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES => {
+ &cuda_types::cuda::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES => {
writer
.write_all(
stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES).as_bytes(),
)
}
- &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS => {
+ &cuda_types::cuda::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS => {
writer
.write_all(
stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS)
.as_bytes(),
)
}
- &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS => {
+ &cuda_types::cuda::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS => {
writer
.write_all(
stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS)
.as_bytes(),
)
}
- &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS => {
+ &cuda_types::cuda::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS => {
writer
.write_all(
stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS)
.as_bytes(),
)
}
- &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS => {
+ &cuda_types::cuda::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS => {
writer
.write_all(
stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS).as_bytes(),
)
}
- &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS => {
+ &cuda_types::cuda::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS => {
writer
.write_all(
stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS).as_bytes(),
)
}
- &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS => {
+ &cuda_types::cuda::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS => {
writer
.write_all(
stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS)
.as_bytes(),
)
}
- &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS => {
+ &cuda_types::cuda::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS => {
writer
.write_all(
stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS)
.as_bytes(),
)
}
- &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES => {
+ &cuda_types::cuda::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES => {
writer
.write_all(
stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES)
.as_bytes(),
)
}
- &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES => {
+ &cuda_types::cuda::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES => {
writer.write_all(stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES).as_bytes())
}
- &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS => {
+ &cuda_types::cuda::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS => {
writer
.write_all(
stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS)
.as_bytes(),
)
}
- &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS => {
+ &cuda_types::cuda::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS => {
writer
.write_all(
stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS)
.as_bytes(),
)
}
- &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS => {
+ &cuda_types::cuda::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS => {
writer
.write_all(
stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS)
.as_bytes(),
)
}
- &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO => {
+ &cuda_types::cuda::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO => {
writer
.write_all(
stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO).as_bytes(),
)
}
- &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS => {
+ &cuda_types::cuda::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS => {
writer
.write_all(
stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS)
@@ -5603,7 +5619,7 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphDebugDot_flags_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUuserObject_flags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUuserObject_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -5611,7 +5627,7 @@ impl crate::format::CudaDisplay for cuda_types::CUuserObject_flags_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUuserObject_flags_enum::CU_USER_OBJECT_NO_DESTRUCTOR_SYNC => {
+ &cuda_types::cuda::CUuserObject_flags_enum::CU_USER_OBJECT_NO_DESTRUCTOR_SYNC => {
writer
.write_all(stringify!(CU_USER_OBJECT_NO_DESTRUCTOR_SYNC).as_bytes())
}
@@ -5619,7 +5635,7 @@ impl crate::format::CudaDisplay for cuda_types::CUuserObject_flags_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUuserObjectRetain_flags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUuserObjectRetain_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -5627,14 +5643,14 @@ impl crate::format::CudaDisplay for cuda_types::CUuserObjectRetain_flags_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUuserObjectRetain_flags_enum::CU_GRAPH_USER_OBJECT_MOVE => {
+ &cuda_types::cuda::CUuserObjectRetain_flags_enum::CU_GRAPH_USER_OBJECT_MOVE => {
writer.write_all(stringify!(CU_GRAPH_USER_OBJECT_MOVE).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUgraphInstantiate_flags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUgraphInstantiate_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -5642,24 +5658,24 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphInstantiate_flags_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUgraphInstantiate_flags_enum::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH => {
+ &cuda_types::cuda::CUgraphInstantiate_flags_enum::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH => {
writer
.write_all(
stringify!(CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH)
.as_bytes(),
)
}
- &cuda_types::CUgraphInstantiate_flags_enum::CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD => {
+ &cuda_types::cuda::CUgraphInstantiate_flags_enum::CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD => {
writer
.write_all(stringify!(CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD).as_bytes())
}
- &cuda_types::CUgraphInstantiate_flags_enum::CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH => {
+ &cuda_types::cuda::CUgraphInstantiate_flags_enum::CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH => {
writer
.write_all(
stringify!(CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH).as_bytes(),
)
}
- &cuda_types::CUgraphInstantiate_flags_enum::CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY => {
+ &cuda_types::cuda::CUgraphInstantiate_flags_enum::CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY => {
writer
.write_all(
stringify!(CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY)
@@ -5670,7 +5686,7 @@ impl crate::format::CudaDisplay for cuda_types::CUgraphInstantiate_flags_enum {
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUdeviceNumaConfig_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUdeviceNumaConfig_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -5678,10 +5694,10 @@ impl crate::format::CudaDisplay for cuda_types::CUdeviceNumaConfig_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUdeviceNumaConfig_enum::CU_DEVICE_NUMA_CONFIG_NONE => {
+ &cuda_types::cuda::CUdeviceNumaConfig_enum::CU_DEVICE_NUMA_CONFIG_NONE => {
writer.write_all(stringify!(CU_DEVICE_NUMA_CONFIG_NONE).as_bytes())
}
- &cuda_types::CUdeviceNumaConfig_enum::CU_DEVICE_NUMA_CONFIG_NUMA_NODE => {
+ &cuda_types::cuda::CUdeviceNumaConfig_enum::CU_DEVICE_NUMA_CONFIG_NUMA_NODE => {
writer.write_all(stringify!(CU_DEVICE_NUMA_CONFIG_NUMA_NODE).as_bytes())
}
_ => write!(writer, "{}", self.0),
@@ -5690,7 +5706,7 @@ impl crate::format::CudaDisplay for cuda_types::CUdeviceNumaConfig_enum {
}
pub fn write_cuGetErrorString(
writer: &mut (impl std::io::Write + ?Sized),
- error: cuda_types::CUresult,
+ error: cuda_types::cuda::CUresult,
pStr: *mut *const ::core::ffi::c_char,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -5705,7 +5721,7 @@ pub fn write_cuGetErrorString(
}
pub fn write_cuGetErrorName(
writer: &mut (impl std::io::Write + ?Sized),
- error: cuda_types::CUresult,
+ error: cuda_types::cuda::CUresult,
pStr: *mut *const ::core::ffi::c_char,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -5745,7 +5761,7 @@ pub fn write_cuDriverGetVersion(
}
pub fn write_cuDeviceGet(
writer: &mut (impl std::io::Write + ?Sized),
- device: *mut cuda_types::CUdevice,
+ device: *mut cuda_types::cuda::CUdevice,
ordinal: ::core::ffi::c_int,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -5772,7 +5788,7 @@ pub fn write_cuDeviceGetName(
writer: &mut (impl std::io::Write + ?Sized),
name: *mut ::core::ffi::c_char,
len: ::core::ffi::c_int,
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -5790,8 +5806,8 @@ pub fn write_cuDeviceGetName(
}
pub fn write_cuDeviceGetUuid(
writer: &mut (impl std::io::Write + ?Sized),
- uuid: *mut cuda_types::CUuuid,
- dev: cuda_types::CUdevice,
+ uuid: *mut cuda_types::cuda::CUuuid,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -5805,8 +5821,8 @@ pub fn write_cuDeviceGetUuid(
}
pub fn write_cuDeviceGetUuid_v2(
writer: &mut (impl std::io::Write + ?Sized),
- uuid: *mut cuda_types::CUuuid,
- dev: cuda_types::CUdevice,
+ uuid: *mut cuda_types::cuda::CUuuid,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -5822,7 +5838,7 @@ pub fn write_cuDeviceGetLuid(
writer: &mut (impl std::io::Write + ?Sized),
luid: *mut ::core::ffi::c_char,
deviceNodeMask: *mut ::core::ffi::c_uint,
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -5846,7 +5862,7 @@ pub fn write_cuDeviceGetLuid(
pub fn write_cuDeviceTotalMem_v2(
writer: &mut (impl std::io::Write + ?Sized),
bytes: *mut usize,
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -5861,9 +5877,9 @@ pub fn write_cuDeviceTotalMem_v2(
pub fn write_cuDeviceGetTexture1DLinearMaxWidth(
writer: &mut (impl std::io::Write + ?Sized),
maxWidthInElements: *mut usize,
- format: cuda_types::CUarray_format,
+ format: cuda_types::cuda::CUarray_format,
numChannels: ::core::ffi::c_uint,
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -5906,8 +5922,8 @@ pub fn write_cuDeviceGetTexture1DLinearMaxWidth(
pub fn write_cuDeviceGetAttribute(
writer: &mut (impl std::io::Write + ?Sized),
pi: *mut ::core::ffi::c_int,
- attrib: cuda_types::CUdevice_attribute,
- dev: cuda_types::CUdevice,
+ attrib: cuda_types::cuda::CUdevice_attribute,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -5926,7 +5942,7 @@ pub fn write_cuDeviceGetAttribute(
pub fn write_cuDeviceGetNvSciSyncAttributes(
writer: &mut (impl std::io::Write + ?Sized),
nvSciSyncAttrList: *mut ::core::ffi::c_void,
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
flags: ::core::ffi::c_int,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -5960,8 +5976,8 @@ pub fn write_cuDeviceGetNvSciSyncAttributes(
}
pub fn write_cuDeviceSetMemPool(
writer: &mut (impl std::io::Write + ?Sized),
- dev: cuda_types::CUdevice,
- pool: cuda_types::CUmemoryPool,
+ dev: cuda_types::cuda::CUdevice,
+ pool: cuda_types::cuda::CUmemoryPool,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -5975,8 +5991,8 @@ pub fn write_cuDeviceSetMemPool(
}
pub fn write_cuDeviceGetMemPool(
writer: &mut (impl std::io::Write + ?Sized),
- pool: *mut cuda_types::CUmemoryPool,
- dev: cuda_types::CUdevice,
+ pool: *mut cuda_types::cuda::CUmemoryPool,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -5990,8 +6006,8 @@ pub fn write_cuDeviceGetMemPool(
}
pub fn write_cuDeviceGetDefaultMemPool(
writer: &mut (impl std::io::Write + ?Sized),
- pool_out: *mut cuda_types::CUmemoryPool,
- dev: cuda_types::CUdevice,
+ pool_out: *mut cuda_types::cuda::CUmemoryPool,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6016,8 +6032,8 @@ pub fn write_cuDeviceGetDefaultMemPool(
pub fn write_cuDeviceGetExecAffinitySupport(
writer: &mut (impl std::io::Write + ?Sized),
pi: *mut ::core::ffi::c_int,
- type_: cuda_types::CUexecAffinityType,
- dev: cuda_types::CUdevice,
+ type_: cuda_types::cuda::CUexecAffinityType,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6050,8 +6066,8 @@ pub fn write_cuDeviceGetExecAffinitySupport(
}
pub fn write_cuFlushGPUDirectRDMAWrites(
writer: &mut (impl std::io::Write + ?Sized),
- target: cuda_types::CUflushGPUDirectRDMAWritesTarget,
- scope: cuda_types::CUflushGPUDirectRDMAWritesScope,
+ target: cuda_types::cuda::CUflushGPUDirectRDMAWritesTarget,
+ scope: cuda_types::cuda::CUflushGPUDirectRDMAWritesScope,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6075,8 +6091,8 @@ pub fn write_cuFlushGPUDirectRDMAWrites(
}
pub fn write_cuDeviceGetProperties(
writer: &mut (impl std::io::Write + ?Sized),
- prop: *mut cuda_types::CUdevprop,
- dev: cuda_types::CUdevice,
+ prop: *mut cuda_types::cuda::CUdevprop,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6092,7 +6108,7 @@ pub fn write_cuDeviceComputeCapability(
writer: &mut (impl std::io::Write + ?Sized),
major: *mut ::core::ffi::c_int,
minor: *mut ::core::ffi::c_int,
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6125,8 +6141,8 @@ pub fn write_cuDeviceComputeCapability(
}
pub fn write_cuDevicePrimaryCtxRetain(
writer: &mut (impl std::io::Write + ?Sized),
- pctx: *mut cuda_types::CUcontext,
- dev: cuda_types::CUdevice,
+ pctx: *mut cuda_types::cuda::CUcontext,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6150,7 +6166,7 @@ pub fn write_cuDevicePrimaryCtxRetain(
}
pub fn write_cuDevicePrimaryCtxRelease_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6165,7 +6181,7 @@ pub fn write_cuDevicePrimaryCtxRelease_v2(
}
pub fn write_cuDevicePrimaryCtxSetFlags_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -6190,7 +6206,7 @@ pub fn write_cuDevicePrimaryCtxSetFlags_v2(
}
pub fn write_cuDevicePrimaryCtxGetState(
writer: &mut (impl std::io::Write + ?Sized),
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
flags: *mut ::core::ffi::c_uint,
active: *mut ::core::ffi::c_int,
) -> std::io::Result<()> {
@@ -6225,7 +6241,7 @@ pub fn write_cuDevicePrimaryCtxGetState(
}
pub fn write_cuDevicePrimaryCtxReset_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6240,9 +6256,9 @@ pub fn write_cuDevicePrimaryCtxReset_v2(
}
pub fn write_cuCtxCreate_v2(
writer: &mut (impl std::io::Write + ?Sized),
- pctx: *mut cuda_types::CUcontext,
+ pctx: *mut cuda_types::cuda::CUcontext,
flags: ::core::ffi::c_uint,
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6260,11 +6276,11 @@ pub fn write_cuCtxCreate_v2(
}
pub fn write_cuCtxCreate_v3(
writer: &mut (impl std::io::Write + ?Sized),
- pctx: *mut cuda_types::CUcontext,
- paramsArray: *mut cuda_types::CUexecAffinityParam,
+ pctx: *mut cuda_types::cuda::CUcontext,
+ paramsArray: *mut cuda_types::cuda::CUexecAffinityParam,
numParams: ::core::ffi::c_int,
flags: ::core::ffi::c_uint,
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6302,7 +6318,7 @@ pub fn write_cuCtxCreate_v3(
}
pub fn write_cuCtxDestroy_v2(
writer: &mut (impl std::io::Write + ?Sized),
- ctx: cuda_types::CUcontext,
+ ctx: cuda_types::cuda::CUcontext,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6312,7 +6328,7 @@ pub fn write_cuCtxDestroy_v2(
}
pub fn write_cuCtxPushCurrent_v2(
writer: &mut (impl std::io::Write + ?Sized),
- ctx: cuda_types::CUcontext,
+ ctx: cuda_types::cuda::CUcontext,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6322,7 +6338,7 @@ pub fn write_cuCtxPushCurrent_v2(
}
pub fn write_cuCtxPopCurrent_v2(
writer: &mut (impl std::io::Write + ?Sized),
- pctx: *mut cuda_types::CUcontext,
+ pctx: *mut cuda_types::cuda::CUcontext,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6332,7 +6348,7 @@ pub fn write_cuCtxPopCurrent_v2(
}
pub fn write_cuCtxSetCurrent(
writer: &mut (impl std::io::Write + ?Sized),
- ctx: cuda_types::CUcontext,
+ ctx: cuda_types::cuda::CUcontext,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6342,7 +6358,7 @@ pub fn write_cuCtxSetCurrent(
}
pub fn write_cuCtxGetCurrent(
writer: &mut (impl std::io::Write + ?Sized),
- pctx: *mut cuda_types::CUcontext,
+ pctx: *mut cuda_types::cuda::CUcontext,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6352,7 +6368,7 @@ pub fn write_cuCtxGetCurrent(
}
pub fn write_cuCtxGetDevice(
writer: &mut (impl std::io::Write + ?Sized),
- device: *mut cuda_types::CUdevice,
+ device: *mut cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6382,7 +6398,7 @@ pub fn write_cuCtxSetFlags(
}
pub fn write_cuCtxGetId(
writer: &mut (impl std::io::Write + ?Sized),
- ctx: cuda_types::CUcontext,
+ ctx: cuda_types::cuda::CUcontext,
ctxId: *mut ::core::ffi::c_ulonglong,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -6402,7 +6418,7 @@ pub fn write_cuCtxSynchronize(
}
pub fn write_cuCtxSetLimit(
writer: &mut (impl std::io::Write + ?Sized),
- limit: cuda_types::CUlimit,
+ limit: cuda_types::cuda::CUlimit,
value: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -6418,7 +6434,7 @@ pub fn write_cuCtxSetLimit(
pub fn write_cuCtxGetLimit(
writer: &mut (impl std::io::Write + ?Sized),
pvalue: *mut usize,
- limit: cuda_types::CUlimit,
+ limit: cuda_types::cuda::CUlimit,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6432,7 +6448,7 @@ pub fn write_cuCtxGetLimit(
}
pub fn write_cuCtxGetCacheConfig(
writer: &mut (impl std::io::Write + ?Sized),
- pconfig: *mut cuda_types::CUfunc_cache,
+ pconfig: *mut cuda_types::cuda::CUfunc_cache,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6442,7 +6458,7 @@ pub fn write_cuCtxGetCacheConfig(
}
pub fn write_cuCtxSetCacheConfig(
writer: &mut (impl std::io::Write + ?Sized),
- config: cuda_types::CUfunc_cache,
+ config: cuda_types::cuda::CUfunc_cache,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6452,7 +6468,7 @@ pub fn write_cuCtxSetCacheConfig(
}
pub fn write_cuCtxGetApiVersion(
writer: &mut (impl std::io::Write + ?Sized),
- ctx: cuda_types::CUcontext,
+ ctx: cuda_types::cuda::CUcontext,
version: *mut ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -6497,8 +6513,8 @@ pub fn write_cuCtxResetPersistingL2Cache(
}
pub fn write_cuCtxGetExecAffinity(
writer: &mut (impl std::io::Write + ?Sized),
- pExecAffinity: *mut cuda_types::CUexecAffinityParam,
- type_: cuda_types::CUexecAffinityType,
+ pExecAffinity: *mut cuda_types::cuda::CUexecAffinityParam,
+ type_: cuda_types::cuda::CUexecAffinityType,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6517,7 +6533,7 @@ pub fn write_cuCtxGetExecAffinity(
}
pub fn write_cuCtxAttach(
writer: &mut (impl std::io::Write + ?Sized),
- pctx: *mut cuda_types::CUcontext,
+ pctx: *mut cuda_types::cuda::CUcontext,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -6532,7 +6548,7 @@ pub fn write_cuCtxAttach(
}
pub fn write_cuCtxDetach(
writer: &mut (impl std::io::Write + ?Sized),
- ctx: cuda_types::CUcontext,
+ ctx: cuda_types::cuda::CUcontext,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6542,7 +6558,7 @@ pub fn write_cuCtxDetach(
}
pub fn write_cuCtxGetSharedMemConfig(
writer: &mut (impl std::io::Write + ?Sized),
- pConfig: *mut cuda_types::CUsharedconfig,
+ pConfig: *mut cuda_types::cuda::CUsharedconfig,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6557,7 +6573,7 @@ pub fn write_cuCtxGetSharedMemConfig(
}
pub fn write_cuCtxSetSharedMemConfig(
writer: &mut (impl std::io::Write + ?Sized),
- config: cuda_types::CUsharedconfig,
+ config: cuda_types::cuda::CUsharedconfig,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6572,7 +6588,7 @@ pub fn write_cuCtxSetSharedMemConfig(
}
pub fn write_cuModuleLoad(
writer: &mut (impl std::io::Write + ?Sized),
- module: *mut cuda_types::CUmodule,
+ module: *mut cuda_types::cuda::CUmodule,
fname: *const ::core::ffi::c_char,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -6587,7 +6603,7 @@ pub fn write_cuModuleLoad(
}
pub fn write_cuModuleLoadData(
writer: &mut (impl std::io::Write + ?Sized),
- module: *mut cuda_types::CUmodule,
+ module: *mut cuda_types::cuda::CUmodule,
image: *const ::core::ffi::c_void,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -6602,10 +6618,10 @@ pub fn write_cuModuleLoadData(
}
pub fn write_cuModuleLoadDataEx(
writer: &mut (impl std::io::Write + ?Sized),
- module: *mut cuda_types::CUmodule,
+ module: *mut cuda_types::cuda::CUmodule,
image: *const ::core::ffi::c_void,
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -6642,7 +6658,7 @@ pub fn write_cuModuleLoadDataEx(
}
pub fn write_cuModuleLoadFatBinary(
writer: &mut (impl std::io::Write + ?Sized),
- module: *mut cuda_types::CUmodule,
+ module: *mut cuda_types::cuda::CUmodule,
fatCubin: *const ::core::ffi::c_void,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -6667,7 +6683,7 @@ pub fn write_cuModuleLoadFatBinary(
}
pub fn write_cuModuleUnload(
writer: &mut (impl std::io::Write + ?Sized),
- hmod: cuda_types::CUmodule,
+ hmod: cuda_types::cuda::CUmodule,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6675,7 +6691,7 @@ pub fn write_cuModuleUnload(
crate::format::CudaDisplay::write(&hmod, "cuModuleUnload", arg_idx, writer)?;
writer.write_all(b")")
}
-impl crate::format::CudaDisplay for cuda_types::CUmoduleLoadingMode_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUmoduleLoadingMode_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -6683,10 +6699,10 @@ impl crate::format::CudaDisplay for cuda_types::CUmoduleLoadingMode_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUmoduleLoadingMode_enum::CU_MODULE_EAGER_LOADING => {
+ &cuda_types::cuda::CUmoduleLoadingMode_enum::CU_MODULE_EAGER_LOADING => {
writer.write_all(stringify!(CU_MODULE_EAGER_LOADING).as_bytes())
}
- &cuda_types::CUmoduleLoadingMode_enum::CU_MODULE_LAZY_LOADING => {
+ &cuda_types::cuda::CUmoduleLoadingMode_enum::CU_MODULE_LAZY_LOADING => {
writer.write_all(stringify!(CU_MODULE_LAZY_LOADING).as_bytes())
}
_ => write!(writer, "{}", self.0),
@@ -6695,7 +6711,7 @@ impl crate::format::CudaDisplay for cuda_types::CUmoduleLoadingMode_enum {
}
pub fn write_cuModuleGetLoadingMode(
writer: &mut (impl std::io::Write + ?Sized),
- mode: *mut cuda_types::CUmoduleLoadingMode,
+ mode: *mut cuda_types::cuda::CUmoduleLoadingMode,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6705,8 +6721,8 @@ pub fn write_cuModuleGetLoadingMode(
}
pub fn write_cuModuleGetFunction(
writer: &mut (impl std::io::Write + ?Sized),
- hfunc: *mut cuda_types::CUfunction,
- hmod: cuda_types::CUmodule,
+ hfunc: *mut cuda_types::cuda::CUfunction,
+ hmod: cuda_types::cuda::CUmodule,
name: *const ::core::ffi::c_char,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -6726,7 +6742,7 @@ pub fn write_cuModuleGetFunction(
pub fn write_cuModuleGetFunctionCount(
writer: &mut (impl std::io::Write + ?Sized),
count: *mut ::core::ffi::c_uint,
- mod_: cuda_types::CUmodule,
+ mod_: cuda_types::cuda::CUmodule,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6750,9 +6766,9 @@ pub fn write_cuModuleGetFunctionCount(
}
pub fn write_cuModuleEnumerateFunctions(
writer: &mut (impl std::io::Write + ?Sized),
- functions: *mut cuda_types::CUfunction,
+ functions: *mut cuda_types::cuda::CUfunction,
numFunctions: ::core::ffi::c_uint,
- mod_: cuda_types::CUmodule,
+ mod_: cuda_types::cuda::CUmodule,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6785,9 +6801,9 @@ pub fn write_cuModuleEnumerateFunctions(
}
pub fn write_cuModuleGetGlobal_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytes: *mut usize,
- hmod: cuda_types::CUmodule,
+ hmod: cuda_types::cuda::CUmodule,
name: *const ::core::ffi::c_char,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -6811,9 +6827,9 @@ pub fn write_cuModuleGetGlobal_v2(
pub fn write_cuLinkCreate_v2(
writer: &mut (impl std::io::Write + ?Sized),
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
- stateOut: *mut cuda_types::CUlinkState,
+ stateOut: *mut cuda_types::cuda::CUlinkState,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6840,13 +6856,13 @@ pub fn write_cuLinkCreate_v2(
}
pub fn write_cuLinkAddData_v2(
writer: &mut (impl std::io::Write + ?Sized),
- state: cuda_types::CUlinkState,
- type_: cuda_types::CUjitInputType,
+ state: cuda_types::cuda::CUlinkState,
+ type_: cuda_types::cuda::CUjitInputType,
data: *mut ::core::ffi::c_void,
size: usize,
name: *const ::core::ffi::c_char,
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -6890,11 +6906,11 @@ pub fn write_cuLinkAddData_v2(
}
pub fn write_cuLinkAddFile_v2(
writer: &mut (impl std::io::Write + ?Sized),
- state: cuda_types::CUlinkState,
- type_: cuda_types::CUjitInputType,
+ state: cuda_types::cuda::CUlinkState,
+ type_: cuda_types::cuda::CUjitInputType,
path: *const ::core::ffi::c_char,
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -6930,7 +6946,7 @@ pub fn write_cuLinkAddFile_v2(
}
pub fn write_cuLinkComplete(
writer: &mut (impl std::io::Write + ?Sized),
- state: cuda_types::CUlinkState,
+ state: cuda_types::cuda::CUlinkState,
cubinOut: *mut *mut ::core::ffi::c_void,
sizeOut: *mut usize,
) -> std::io::Result<()> {
@@ -6950,7 +6966,7 @@ pub fn write_cuLinkComplete(
}
pub fn write_cuLinkDestroy(
writer: &mut (impl std::io::Write + ?Sized),
- state: cuda_types::CUlinkState,
+ state: cuda_types::cuda::CUlinkState,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -6960,8 +6976,8 @@ pub fn write_cuLinkDestroy(
}
pub fn write_cuModuleGetTexRef(
writer: &mut (impl std::io::Write + ?Sized),
- pTexRef: *mut cuda_types::CUtexref,
- hmod: cuda_types::CUmodule,
+ pTexRef: *mut cuda_types::cuda::CUtexref,
+ hmod: cuda_types::cuda::CUmodule,
name: *const ::core::ffi::c_char,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -6980,8 +6996,8 @@ pub fn write_cuModuleGetTexRef(
}
pub fn write_cuModuleGetSurfRef(
writer: &mut (impl std::io::Write + ?Sized),
- pSurfRef: *mut cuda_types::CUsurfref,
- hmod: cuda_types::CUmodule,
+ pSurfRef: *mut cuda_types::cuda::CUsurfref,
+ hmod: cuda_types::cuda::CUmodule,
name: *const ::core::ffi::c_char,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -7000,12 +7016,12 @@ pub fn write_cuModuleGetSurfRef(
}
pub fn write_cuLibraryLoadData(
writer: &mut (impl std::io::Write + ?Sized),
- library: *mut cuda_types::CUlibrary,
+ library: *mut cuda_types::cuda::CUlibrary,
code: *const ::core::ffi::c_void,
- jitOptions: *mut cuda_types::CUjit_option,
+ jitOptions: *mut cuda_types::cuda::CUjit_option,
jitOptionsValues: *mut *mut ::core::ffi::c_void,
numJitOptions: ::core::ffi::c_uint,
- libraryOptions: *mut cuda_types::CUlibraryOption,
+ libraryOptions: *mut cuda_types::cuda::CUlibraryOption,
libraryOptionValues: *mut *mut ::core::ffi::c_void,
numLibraryOptions: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -7075,12 +7091,12 @@ pub fn write_cuLibraryLoadData(
}
pub fn write_cuLibraryLoadFromFile(
writer: &mut (impl std::io::Write + ?Sized),
- library: *mut cuda_types::CUlibrary,
+ library: *mut cuda_types::cuda::CUlibrary,
fileName: *const ::core::ffi::c_char,
- jitOptions: *mut cuda_types::CUjit_option,
+ jitOptions: *mut cuda_types::cuda::CUjit_option,
jitOptionsValues: *mut *mut ::core::ffi::c_void,
numJitOptions: ::core::ffi::c_uint,
- libraryOptions: *mut cuda_types::CUlibraryOption,
+ libraryOptions: *mut cuda_types::cuda::CUlibraryOption,
libraryOptionValues: *mut *mut ::core::ffi::c_void,
numLibraryOptions: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -7160,7 +7176,7 @@ pub fn write_cuLibraryLoadFromFile(
}
pub fn write_cuLibraryUnload(
writer: &mut (impl std::io::Write + ?Sized),
- library: cuda_types::CUlibrary,
+ library: cuda_types::cuda::CUlibrary,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -7170,8 +7186,8 @@ pub fn write_cuLibraryUnload(
}
pub fn write_cuLibraryGetKernel(
writer: &mut (impl std::io::Write + ?Sized),
- pKernel: *mut cuda_types::CUkernel,
- library: cuda_types::CUlibrary,
+ pKernel: *mut cuda_types::cuda::CUkernel,
+ library: cuda_types::cuda::CUlibrary,
name: *const ::core::ffi::c_char,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -7191,7 +7207,7 @@ pub fn write_cuLibraryGetKernel(
pub fn write_cuLibraryGetKernelCount(
writer: &mut (impl std::io::Write + ?Sized),
count: *mut ::core::ffi::c_uint,
- lib: cuda_types::CUlibrary,
+ lib: cuda_types::cuda::CUlibrary,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -7210,9 +7226,9 @@ pub fn write_cuLibraryGetKernelCount(
}
pub fn write_cuLibraryEnumerateKernels(
writer: &mut (impl std::io::Write + ?Sized),
- kernels: *mut cuda_types::CUkernel,
+ kernels: *mut cuda_types::cuda::CUkernel,
numKernels: ::core::ffi::c_uint,
- lib: cuda_types::CUlibrary,
+ lib: cuda_types::cuda::CUlibrary,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -7245,8 +7261,8 @@ pub fn write_cuLibraryEnumerateKernels(
}
pub fn write_cuLibraryGetModule(
writer: &mut (impl std::io::Write + ?Sized),
- pMod: *mut cuda_types::CUmodule,
- library: cuda_types::CUlibrary,
+ pMod: *mut cuda_types::cuda::CUmodule,
+ library: cuda_types::cuda::CUlibrary,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -7260,8 +7276,8 @@ pub fn write_cuLibraryGetModule(
}
pub fn write_cuKernelGetFunction(
writer: &mut (impl std::io::Write + ?Sized),
- pFunc: *mut cuda_types::CUfunction,
- kernel: cuda_types::CUkernel,
+ pFunc: *mut cuda_types::cuda::CUfunction,
+ kernel: cuda_types::cuda::CUkernel,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -7275,9 +7291,9 @@ pub fn write_cuKernelGetFunction(
}
pub fn write_cuLibraryGetGlobal(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytes: *mut usize,
- library: cuda_types::CUlibrary,
+ library: cuda_types::cuda::CUlibrary,
name: *const ::core::ffi::c_char,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -7300,9 +7316,9 @@ pub fn write_cuLibraryGetGlobal(
}
pub fn write_cuLibraryGetManaged(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytes: *mut usize,
- library: cuda_types::CUlibrary,
+ library: cuda_types::cuda::CUlibrary,
name: *const ::core::ffi::c_char,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -7326,7 +7342,7 @@ pub fn write_cuLibraryGetManaged(
pub fn write_cuLibraryGetUnifiedFunction(
writer: &mut (impl std::io::Write + ?Sized),
fptr: *mut *mut ::core::ffi::c_void,
- library: cuda_types::CUlibrary,
+ library: cuda_types::cuda::CUlibrary,
symbol: *const ::core::ffi::c_char,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -7361,9 +7377,9 @@ pub fn write_cuLibraryGetUnifiedFunction(
pub fn write_cuKernelGetAttribute(
writer: &mut (impl std::io::Write + ?Sized),
pi: *mut ::core::ffi::c_int,
- attrib: cuda_types::CUfunction_attribute,
- kernel: cuda_types::CUkernel,
- dev: cuda_types::CUdevice,
+ attrib: cuda_types::cuda::CUfunction_attribute,
+ kernel: cuda_types::cuda::CUkernel,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -7385,10 +7401,10 @@ pub fn write_cuKernelGetAttribute(
}
pub fn write_cuKernelSetAttribute(
writer: &mut (impl std::io::Write + ?Sized),
- attrib: cuda_types::CUfunction_attribute,
+ attrib: cuda_types::cuda::CUfunction_attribute,
val: ::core::ffi::c_int,
- kernel: cuda_types::CUkernel,
- dev: cuda_types::CUdevice,
+ kernel: cuda_types::cuda::CUkernel,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -7410,9 +7426,9 @@ pub fn write_cuKernelSetAttribute(
}
pub fn write_cuKernelSetCacheConfig(
writer: &mut (impl std::io::Write + ?Sized),
- kernel: cuda_types::CUkernel,
- config: cuda_types::CUfunc_cache,
- dev: cuda_types::CUdevice,
+ kernel: cuda_types::cuda::CUkernel,
+ config: cuda_types::cuda::CUfunc_cache,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -7441,7 +7457,7 @@ pub fn write_cuKernelSetCacheConfig(
pub fn write_cuKernelGetName(
writer: &mut (impl std::io::Write + ?Sized),
name: *mut *const ::core::ffi::c_char,
- hfunc: cuda_types::CUkernel,
+ hfunc: cuda_types::cuda::CUkernel,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -7455,7 +7471,7 @@ pub fn write_cuKernelGetName(
}
pub fn write_cuKernelGetParamInfo(
writer: &mut (impl std::io::Write + ?Sized),
- kernel: cuda_types::CUkernel,
+ kernel: cuda_types::cuda::CUkernel,
paramIndex: usize,
paramOffset: *mut usize,
paramSize: *mut usize,
@@ -7510,7 +7526,7 @@ pub fn write_cuMemGetInfo_v2(
}
pub fn write_cuMemAlloc_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytesize: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -7525,7 +7541,7 @@ pub fn write_cuMemAlloc_v2(
}
pub fn write_cuMemAllocPitch_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
pPitch: *mut usize,
WidthInBytes: usize,
Height: usize,
@@ -7565,7 +7581,7 @@ pub fn write_cuMemAllocPitch_v2(
}
pub fn write_cuMemFree_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: cuda_types::CUdeviceptr,
+ dptr: cuda_types::cuda::CUdeviceptr,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -7575,9 +7591,9 @@ pub fn write_cuMemFree_v2(
}
pub fn write_cuMemGetAddressRange_v2(
writer: &mut (impl std::io::Write + ?Sized),
- pbase: *mut cuda_types::CUdeviceptr,
+ pbase: *mut cuda_types::cuda::CUdeviceptr,
psize: *mut usize,
- dptr: cuda_types::CUdeviceptr,
+ dptr: cuda_types::cuda::CUdeviceptr,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -7655,7 +7671,7 @@ pub fn write_cuMemHostAlloc(
}
pub fn write_cuMemHostGetDevicePointer_v2(
writer: &mut (impl std::io::Write + ?Sized),
- pdptr: *mut cuda_types::CUdeviceptr,
+ pdptr: *mut cuda_types::cuda::CUdeviceptr,
p: *mut ::core::ffi::c_void,
Flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -7705,7 +7721,7 @@ pub fn write_cuMemHostGetFlags(
}
pub fn write_cuMemAllocManaged(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytesize: usize,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -7725,10 +7741,10 @@ pub fn write_cuMemAllocManaged(
}
pub fn write_cuDeviceRegisterAsyncNotification(
writer: &mut (impl std::io::Write + ?Sized),
- device: cuda_types::CUdevice,
- callbackFunc: cuda_types::CUasyncCallback,
+ device: cuda_types::cuda::CUdevice,
+ callbackFunc: cuda_types::cuda::CUasyncCallback,
userData: *mut ::core::ffi::c_void,
- callback: *mut cuda_types::CUasyncCallbackHandle,
+ callback: *mut cuda_types::cuda::CUasyncCallbackHandle,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -7770,8 +7786,8 @@ pub fn write_cuDeviceRegisterAsyncNotification(
}
pub fn write_cuDeviceUnregisterAsyncNotification(
writer: &mut (impl std::io::Write + ?Sized),
- device: cuda_types::CUdevice,
- callback: cuda_types::CUasyncCallbackHandle,
+ device: cuda_types::cuda::CUdevice,
+ callback: cuda_types::cuda::CUasyncCallbackHandle,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -7795,7 +7811,7 @@ pub fn write_cuDeviceUnregisterAsyncNotification(
}
pub fn write_cuDeviceGetByPCIBusId(
writer: &mut (impl std::io::Write + ?Sized),
- dev: *mut cuda_types::CUdevice,
+ dev: *mut cuda_types::cuda::CUdevice,
pciBusId: *const ::core::ffi::c_char,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -7817,7 +7833,7 @@ pub fn write_cuDeviceGetPCIBusId(
writer: &mut (impl std::io::Write + ?Sized),
pciBusId: *mut ::core::ffi::c_char,
len: ::core::ffi::c_int,
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -7840,8 +7856,8 @@ pub fn write_cuDeviceGetPCIBusId(
}
pub fn write_cuIpcGetEventHandle(
writer: &mut (impl std::io::Write + ?Sized),
- pHandle: *mut cuda_types::CUipcEventHandle,
- event: cuda_types::CUevent,
+ pHandle: *mut cuda_types::cuda::CUipcEventHandle,
+ event: cuda_types::cuda::CUevent,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -7855,8 +7871,8 @@ pub fn write_cuIpcGetEventHandle(
}
pub fn write_cuIpcOpenEventHandle(
writer: &mut (impl std::io::Write + ?Sized),
- phEvent: *mut cuda_types::CUevent,
- handle: cuda_types::CUipcEventHandle,
+ phEvent: *mut cuda_types::cuda::CUevent,
+ handle: cuda_types::cuda::CUipcEventHandle,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -7875,8 +7891,8 @@ pub fn write_cuIpcOpenEventHandle(
}
pub fn write_cuIpcGetMemHandle(
writer: &mut (impl std::io::Write + ?Sized),
- pHandle: *mut cuda_types::CUipcMemHandle,
- dptr: cuda_types::CUdeviceptr,
+ pHandle: *mut cuda_types::cuda::CUipcMemHandle,
+ dptr: cuda_types::cuda::CUdeviceptr,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -7890,8 +7906,8 @@ pub fn write_cuIpcGetMemHandle(
}
pub fn write_cuIpcOpenMemHandle_v2(
writer: &mut (impl std::io::Write + ?Sized),
- pdptr: *mut cuda_types::CUdeviceptr,
- handle: cuda_types::CUipcMemHandle,
+ pdptr: *mut cuda_types::cuda::CUdeviceptr,
+ handle: cuda_types::cuda::CUipcMemHandle,
Flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -7915,7 +7931,7 @@ pub fn write_cuIpcOpenMemHandle_v2(
}
pub fn write_cuIpcCloseMemHandle(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: cuda_types::CUdeviceptr,
+ dptr: cuda_types::cuda::CUdeviceptr,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -7960,8 +7976,8 @@ pub fn write_cuMemHostUnregister(
}
pub fn write_cuMemcpy_ptds(
writer: &mut (impl std::io::Write + ?Sized),
- dst: cuda_types::CUdeviceptr,
- src: cuda_types::CUdeviceptr,
+ dst: cuda_types::cuda::CUdeviceptr,
+ src: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -7980,10 +7996,10 @@ pub fn write_cuMemcpy_ptds(
}
pub fn write_cuMemcpyPeer_ptds(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
- dstContext: cuda_types::CUcontext,
- srcDevice: cuda_types::CUdeviceptr,
- srcContext: cuda_types::CUcontext,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ dstContext: cuda_types::cuda::CUcontext,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
+ srcContext: cuda_types::cuda::CUcontext,
ByteCount: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -8020,7 +8036,7 @@ pub fn write_cuMemcpyPeer_ptds(
}
pub fn write_cuMemcpyHtoD_v2_ptds(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
) -> std::io::Result<()> {
@@ -8056,7 +8072,7 @@ pub fn write_cuMemcpyHtoD_v2_ptds(
pub fn write_cuMemcpyDtoH_v2_ptds(
writer: &mut (impl std::io::Write + ?Sized),
dstHost: *mut ::core::ffi::c_void,
- srcDevice: cuda_types::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -8090,8 +8106,8 @@ pub fn write_cuMemcpyDtoH_v2_ptds(
}
pub fn write_cuMemcpyDtoD_v2_ptds(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
- srcDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -8125,9 +8141,9 @@ pub fn write_cuMemcpyDtoD_v2_ptds(
}
pub fn write_cuMemcpyDtoA_v2_ptds(
writer: &mut (impl std::io::Write + ?Sized),
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
- srcDevice: cuda_types::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -8170,8 +8186,8 @@ pub fn write_cuMemcpyDtoA_v2_ptds(
}
pub fn write_cuMemcpyAtoD_v2_ptds(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
- srcArray: cuda_types::CUarray,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
) -> std::io::Result<()> {
@@ -8215,7 +8231,7 @@ pub fn write_cuMemcpyAtoD_v2_ptds(
}
pub fn write_cuMemcpyHtoA_v2_ptds(
writer: &mut (impl std::io::Write + ?Sized),
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
@@ -8261,7 +8277,7 @@ pub fn write_cuMemcpyHtoA_v2_ptds(
pub fn write_cuMemcpyAtoH_v2_ptds(
writer: &mut (impl std::io::Write + ?Sized),
dstHost: *mut ::core::ffi::c_void,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
) -> std::io::Result<()> {
@@ -8305,9 +8321,9 @@ pub fn write_cuMemcpyAtoH_v2_ptds(
}
pub fn write_cuMemcpyAtoA_v2_ptds(
writer: &mut (impl std::io::Write + ?Sized),
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
) -> std::io::Result<()> {
@@ -8360,7 +8376,7 @@ pub fn write_cuMemcpyAtoA_v2_ptds(
}
pub fn write_cuMemcpy2D_v2_ptds(
writer: &mut (impl std::io::Write + ?Sized),
- pCopy: *const cuda_types::CUDA_MEMCPY2D,
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -8370,7 +8386,7 @@ pub fn write_cuMemcpy2D_v2_ptds(
}
pub fn write_cuMemcpy2DUnaligned_v2_ptds(
writer: &mut (impl std::io::Write + ?Sized),
- pCopy: *const cuda_types::CUDA_MEMCPY2D,
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -8385,7 +8401,7 @@ pub fn write_cuMemcpy2DUnaligned_v2_ptds(
}
pub fn write_cuMemcpy3D_v2_ptds(
writer: &mut (impl std::io::Write + ?Sized),
- pCopy: *const cuda_types::CUDA_MEMCPY3D,
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -8395,7 +8411,7 @@ pub fn write_cuMemcpy3D_v2_ptds(
}
pub fn write_cuMemcpy3DPeer_ptds(
writer: &mut (impl std::io::Write + ?Sized),
- pCopy: *const cuda_types::CUDA_MEMCPY3D_PEER,
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D_PEER,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -8405,10 +8421,10 @@ pub fn write_cuMemcpy3DPeer_ptds(
}
pub fn write_cuMemcpyAsync_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- dst: cuda_types::CUdeviceptr,
- src: cuda_types::CUdeviceptr,
+ dst: cuda_types::cuda::CUdeviceptr,
+ src: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -8435,12 +8451,12 @@ pub fn write_cuMemcpyAsync_ptsz(
}
pub fn write_cuMemcpyPeerAsync_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
- dstContext: cuda_types::CUcontext,
- srcDevice: cuda_types::CUdeviceptr,
- srcContext: cuda_types::CUcontext,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ dstContext: cuda_types::cuda::CUcontext,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
+ srcContext: cuda_types::cuda::CUcontext,
ByteCount: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -8500,10 +8516,10 @@ pub fn write_cuMemcpyPeerAsync_ptsz(
}
pub fn write_cuMemcpyHtoDAsync_v2_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -8546,9 +8562,9 @@ pub fn write_cuMemcpyHtoDAsync_v2_ptsz(
pub fn write_cuMemcpyDtoHAsync_v2_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
dstHost: *mut ::core::ffi::c_void,
- srcDevice: cuda_types::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -8590,10 +8606,10 @@ pub fn write_cuMemcpyDtoHAsync_v2_ptsz(
}
pub fn write_cuMemcpyDtoDAsync_v2_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
- srcDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -8635,11 +8651,11 @@ pub fn write_cuMemcpyDtoDAsync_v2_ptsz(
}
pub fn write_cuMemcpyHtoAAsync_v2_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -8691,10 +8707,10 @@ pub fn write_cuMemcpyHtoAAsync_v2_ptsz(
pub fn write_cuMemcpyAtoHAsync_v2_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
dstHost: *mut ::core::ffi::c_void,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -8745,8 +8761,8 @@ pub fn write_cuMemcpyAtoHAsync_v2_ptsz(
}
pub fn write_cuMemcpy2DAsync_v2_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- pCopy: *const cuda_types::CUDA_MEMCPY2D,
- hStream: cuda_types::CUstream,
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -8770,8 +8786,8 @@ pub fn write_cuMemcpy2DAsync_v2_ptsz(
}
pub fn write_cuMemcpy3DAsync_v2_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- pCopy: *const cuda_types::CUDA_MEMCPY3D,
- hStream: cuda_types::CUstream,
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -8795,8 +8811,8 @@ pub fn write_cuMemcpy3DAsync_v2_ptsz(
}
pub fn write_cuMemcpy3DPeerAsync_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- pCopy: *const cuda_types::CUDA_MEMCPY3D_PEER,
- hStream: cuda_types::CUstream,
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D_PEER,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -8820,7 +8836,7 @@ pub fn write_cuMemcpy3DPeerAsync_ptsz(
}
pub fn write_cuMemsetD8_v2_ptds(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
uc: ::core::ffi::c_uchar,
N: usize,
) -> std::io::Result<()> {
@@ -8845,7 +8861,7 @@ pub fn write_cuMemsetD8_v2_ptds(
}
pub fn write_cuMemsetD16_v2_ptds(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
us: ::core::ffi::c_ushort,
N: usize,
) -> std::io::Result<()> {
@@ -8870,7 +8886,7 @@ pub fn write_cuMemsetD16_v2_ptds(
}
pub fn write_cuMemsetD32_v2_ptds(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
ui: ::core::ffi::c_uint,
N: usize,
) -> std::io::Result<()> {
@@ -8895,7 +8911,7 @@ pub fn write_cuMemsetD32_v2_ptds(
}
pub fn write_cuMemsetD2D8_v2_ptds(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
uc: ::core::ffi::c_uchar,
Width: usize,
@@ -8935,7 +8951,7 @@ pub fn write_cuMemsetD2D8_v2_ptds(
}
pub fn write_cuMemsetD2D16_v2_ptds(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
us: ::core::ffi::c_ushort,
Width: usize,
@@ -8980,7 +8996,7 @@ pub fn write_cuMemsetD2D16_v2_ptds(
}
pub fn write_cuMemsetD2D32_v2_ptds(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
ui: ::core::ffi::c_uint,
Width: usize,
@@ -9025,10 +9041,10 @@ pub fn write_cuMemsetD2D32_v2_ptds(
}
pub fn write_cuMemsetD8Async_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
uc: ::core::ffi::c_uchar,
N: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -9060,10 +9076,10 @@ pub fn write_cuMemsetD8Async_ptsz(
}
pub fn write_cuMemsetD16Async_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
us: ::core::ffi::c_ushort,
N: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -9095,10 +9111,10 @@ pub fn write_cuMemsetD16Async_ptsz(
}
pub fn write_cuMemsetD32Async_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
ui: ::core::ffi::c_uint,
N: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -9130,12 +9146,12 @@ pub fn write_cuMemsetD32Async_ptsz(
}
pub fn write_cuMemsetD2D8Async_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
uc: ::core::ffi::c_uchar,
Width: usize,
Height: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -9190,12 +9206,12 @@ pub fn write_cuMemsetD2D8Async_ptsz(
}
pub fn write_cuMemsetD2D16Async_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
us: ::core::ffi::c_ushort,
Width: usize,
Height: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -9250,12 +9266,12 @@ pub fn write_cuMemsetD2D16Async_ptsz(
}
pub fn write_cuMemsetD2D32Async_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
ui: ::core::ffi::c_uint,
Width: usize,
Height: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -9310,8 +9326,8 @@ pub fn write_cuMemsetD2D32Async_ptsz(
}
pub fn write_cuArrayCreate_v2(
writer: &mut (impl std::io::Write + ?Sized),
- pHandle: *mut cuda_types::CUarray,
- pAllocateArray: *const cuda_types::CUDA_ARRAY_DESCRIPTOR,
+ pHandle: *mut cuda_types::cuda::CUarray,
+ pAllocateArray: *const cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -9330,8 +9346,8 @@ pub fn write_cuArrayCreate_v2(
}
pub fn write_cuArrayGetDescriptor_v2(
writer: &mut (impl std::io::Write + ?Sized),
- pArrayDescriptor: *mut cuda_types::CUDA_ARRAY_DESCRIPTOR,
- hArray: cuda_types::CUarray,
+ pArrayDescriptor: *mut cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR,
+ hArray: cuda_types::cuda::CUarray,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -9355,8 +9371,8 @@ pub fn write_cuArrayGetDescriptor_v2(
}
pub fn write_cuArrayGetSparseProperties(
writer: &mut (impl std::io::Write + ?Sized),
- sparseProperties: *mut cuda_types::CUDA_ARRAY_SPARSE_PROPERTIES,
- array: cuda_types::CUarray,
+ sparseProperties: *mut cuda_types::cuda::CUDA_ARRAY_SPARSE_PROPERTIES,
+ array: cuda_types::cuda::CUarray,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -9380,8 +9396,8 @@ pub fn write_cuArrayGetSparseProperties(
}
pub fn write_cuMipmappedArrayGetSparseProperties(
writer: &mut (impl std::io::Write + ?Sized),
- sparseProperties: *mut cuda_types::CUDA_ARRAY_SPARSE_PROPERTIES,
- mipmap: cuda_types::CUmipmappedArray,
+ sparseProperties: *mut cuda_types::cuda::CUDA_ARRAY_SPARSE_PROPERTIES,
+ mipmap: cuda_types::cuda::CUmipmappedArray,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -9405,9 +9421,9 @@ pub fn write_cuMipmappedArrayGetSparseProperties(
}
pub fn write_cuArrayGetMemoryRequirements(
writer: &mut (impl std::io::Write + ?Sized),
- memoryRequirements: *mut cuda_types::CUDA_ARRAY_MEMORY_REQUIREMENTS,
- array: cuda_types::CUarray,
- device: cuda_types::CUdevice,
+ memoryRequirements: *mut cuda_types::cuda::CUDA_ARRAY_MEMORY_REQUIREMENTS,
+ array: cuda_types::cuda::CUarray,
+ device: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -9440,9 +9456,9 @@ pub fn write_cuArrayGetMemoryRequirements(
}
pub fn write_cuMipmappedArrayGetMemoryRequirements(
writer: &mut (impl std::io::Write + ?Sized),
- memoryRequirements: *mut cuda_types::CUDA_ARRAY_MEMORY_REQUIREMENTS,
- mipmap: cuda_types::CUmipmappedArray,
- device: cuda_types::CUdevice,
+ memoryRequirements: *mut cuda_types::cuda::CUDA_ARRAY_MEMORY_REQUIREMENTS,
+ mipmap: cuda_types::cuda::CUmipmappedArray,
+ device: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -9475,8 +9491,8 @@ pub fn write_cuMipmappedArrayGetMemoryRequirements(
}
pub fn write_cuArrayGetPlane(
writer: &mut (impl std::io::Write + ?Sized),
- pPlaneArray: *mut cuda_types::CUarray,
- hArray: cuda_types::CUarray,
+ pPlaneArray: *mut cuda_types::cuda::CUarray,
+ hArray: cuda_types::cuda::CUarray,
planeIdx: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -9495,7 +9511,7 @@ pub fn write_cuArrayGetPlane(
}
pub fn write_cuArrayDestroy(
writer: &mut (impl std::io::Write + ?Sized),
- hArray: cuda_types::CUarray,
+ hArray: cuda_types::cuda::CUarray,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -9505,8 +9521,8 @@ pub fn write_cuArrayDestroy(
}
pub fn write_cuArray3DCreate_v2(
writer: &mut (impl std::io::Write + ?Sized),
- pHandle: *mut cuda_types::CUarray,
- pAllocateArray: *const cuda_types::CUDA_ARRAY3D_DESCRIPTOR,
+ pHandle: *mut cuda_types::cuda::CUarray,
+ pAllocateArray: *const cuda_types::cuda::CUDA_ARRAY3D_DESCRIPTOR,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -9525,8 +9541,8 @@ pub fn write_cuArray3DCreate_v2(
}
pub fn write_cuArray3DGetDescriptor_v2(
writer: &mut (impl std::io::Write + ?Sized),
- pArrayDescriptor: *mut cuda_types::CUDA_ARRAY3D_DESCRIPTOR,
- hArray: cuda_types::CUarray,
+ pArrayDescriptor: *mut cuda_types::cuda::CUDA_ARRAY3D_DESCRIPTOR,
+ hArray: cuda_types::cuda::CUarray,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -9550,8 +9566,8 @@ pub fn write_cuArray3DGetDescriptor_v2(
}
pub fn write_cuMipmappedArrayCreate(
writer: &mut (impl std::io::Write + ?Sized),
- pHandle: *mut cuda_types::CUmipmappedArray,
- pMipmappedArrayDesc: *const cuda_types::CUDA_ARRAY3D_DESCRIPTOR,
+ pHandle: *mut cuda_types::cuda::CUmipmappedArray,
+ pMipmappedArrayDesc: *const cuda_types::cuda::CUDA_ARRAY3D_DESCRIPTOR,
numMipmapLevels: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -9585,8 +9601,8 @@ pub fn write_cuMipmappedArrayCreate(
}
pub fn write_cuMipmappedArrayGetLevel(
writer: &mut (impl std::io::Write + ?Sized),
- pLevelArray: *mut cuda_types::CUarray,
- hMipmappedArray: cuda_types::CUmipmappedArray,
+ pLevelArray: *mut cuda_types::cuda::CUarray,
+ hMipmappedArray: cuda_types::cuda::CUmipmappedArray,
level: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -9620,7 +9636,7 @@ pub fn write_cuMipmappedArrayGetLevel(
}
pub fn write_cuMipmappedArrayDestroy(
writer: &mut (impl std::io::Write + ?Sized),
- hMipmappedArray: cuda_types::CUmipmappedArray,
+ hMipmappedArray: cuda_types::cuda::CUmipmappedArray,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -9636,9 +9652,9 @@ pub fn write_cuMipmappedArrayDestroy(
pub fn write_cuMemGetHandleForAddressRange(
writer: &mut (impl std::io::Write + ?Sized),
handle: *mut ::core::ffi::c_void,
- dptr: cuda_types::CUdeviceptr,
+ dptr: cuda_types::cuda::CUdeviceptr,
size: usize,
- handleType: cuda_types::CUmemRangeHandleType,
+ handleType: cuda_types::cuda::CUmemRangeHandleType,
flags: ::core::ffi::c_ulonglong,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -9690,10 +9706,10 @@ pub fn write_cuMemGetHandleForAddressRange(
}
pub fn write_cuMemAddressReserve(
writer: &mut (impl std::io::Write + ?Sized),
- ptr: *mut cuda_types::CUdeviceptr,
+ ptr: *mut cuda_types::cuda::CUdeviceptr,
size: usize,
alignment: usize,
- addr: cuda_types::CUdeviceptr,
+ addr: cuda_types::cuda::CUdeviceptr,
flags: ::core::ffi::c_ulonglong,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -9725,7 +9741,7 @@ pub fn write_cuMemAddressReserve(
}
pub fn write_cuMemAddressFree(
writer: &mut (impl std::io::Write + ?Sized),
- ptr: cuda_types::CUdeviceptr,
+ ptr: cuda_types::cuda::CUdeviceptr,
size: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -9740,9 +9756,9 @@ pub fn write_cuMemAddressFree(
}
pub fn write_cuMemCreate(
writer: &mut (impl std::io::Write + ?Sized),
- handle: *mut cuda_types::CUmemGenericAllocationHandle,
+ handle: *mut cuda_types::cuda::CUmemGenericAllocationHandle,
size: usize,
- prop: *const cuda_types::CUmemAllocationProp,
+ prop: *const cuda_types::cuda::CUmemAllocationProp,
flags: ::core::ffi::c_ulonglong,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -9765,7 +9781,7 @@ pub fn write_cuMemCreate(
}
pub fn write_cuMemRelease(
writer: &mut (impl std::io::Write + ?Sized),
- handle: cuda_types::CUmemGenericAllocationHandle,
+ handle: cuda_types::cuda::CUmemGenericAllocationHandle,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -9775,10 +9791,10 @@ pub fn write_cuMemRelease(
}
pub fn write_cuMemMap(
writer: &mut (impl std::io::Write + ?Sized),
- ptr: cuda_types::CUdeviceptr,
+ ptr: cuda_types::cuda::CUdeviceptr,
size: usize,
offset: usize,
- handle: cuda_types::CUmemGenericAllocationHandle,
+ handle: cuda_types::cuda::CUmemGenericAllocationHandle,
flags: ::core::ffi::c_ulonglong,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -9805,9 +9821,9 @@ pub fn write_cuMemMap(
}
pub fn write_cuMemMapArrayAsync_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- mapInfoList: *mut cuda_types::CUarrayMapInfo,
+ mapInfoList: *mut cuda_types::cuda::CUarrayMapInfo,
count: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -9847,7 +9863,7 @@ pub fn write_cuMemMapArrayAsync_ptsz(
}
pub fn write_cuMemUnmap(
writer: &mut (impl std::io::Write + ?Sized),
- ptr: cuda_types::CUdeviceptr,
+ ptr: cuda_types::cuda::CUdeviceptr,
size: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -9862,9 +9878,9 @@ pub fn write_cuMemUnmap(
}
pub fn write_cuMemSetAccess(
writer: &mut (impl std::io::Write + ?Sized),
- ptr: cuda_types::CUdeviceptr,
+ ptr: cuda_types::cuda::CUdeviceptr,
size: usize,
- desc: *const cuda_types::CUmemAccessDesc,
+ desc: *const cuda_types::cuda::CUmemAccessDesc,
count: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -9888,8 +9904,8 @@ pub fn write_cuMemSetAccess(
pub fn write_cuMemGetAccess(
writer: &mut (impl std::io::Write + ?Sized),
flags: *mut ::core::ffi::c_ulonglong,
- location: *const cuda_types::CUmemLocation,
- ptr: cuda_types::CUdeviceptr,
+ location: *const cuda_types::cuda::CUmemLocation,
+ ptr: cuda_types::cuda::CUdeviceptr,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -9908,8 +9924,8 @@ pub fn write_cuMemGetAccess(
pub fn write_cuMemExportToShareableHandle(
writer: &mut (impl std::io::Write + ?Sized),
shareableHandle: *mut ::core::ffi::c_void,
- handle: cuda_types::CUmemGenericAllocationHandle,
- handleType: cuda_types::CUmemAllocationHandleType,
+ handle: cuda_types::cuda::CUmemGenericAllocationHandle,
+ handleType: cuda_types::cuda::CUmemAllocationHandleType,
flags: ::core::ffi::c_ulonglong,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -9952,9 +9968,9 @@ pub fn write_cuMemExportToShareableHandle(
}
pub fn write_cuMemImportFromShareableHandle(
writer: &mut (impl std::io::Write + ?Sized),
- handle: *mut cuda_types::CUmemGenericAllocationHandle,
+ handle: *mut cuda_types::cuda::CUmemGenericAllocationHandle,
osHandle: *mut ::core::ffi::c_void,
- shHandleType: cuda_types::CUmemAllocationHandleType,
+ shHandleType: cuda_types::cuda::CUmemAllocationHandleType,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -9988,8 +10004,8 @@ pub fn write_cuMemImportFromShareableHandle(
pub fn write_cuMemGetAllocationGranularity(
writer: &mut (impl std::io::Write + ?Sized),
granularity: *mut usize,
- prop: *const cuda_types::CUmemAllocationProp,
- option: cuda_types::CUmemAllocationGranularity_flags,
+ prop: *const cuda_types::cuda::CUmemAllocationProp,
+ option: cuda_types::cuda::CUmemAllocationGranularity_flags,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -10022,8 +10038,8 @@ pub fn write_cuMemGetAllocationGranularity(
}
pub fn write_cuMemGetAllocationPropertiesFromHandle(
writer: &mut (impl std::io::Write + ?Sized),
- prop: *mut cuda_types::CUmemAllocationProp,
- handle: cuda_types::CUmemGenericAllocationHandle,
+ prop: *mut cuda_types::cuda::CUmemAllocationProp,
+ handle: cuda_types::cuda::CUmemGenericAllocationHandle,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -10047,7 +10063,7 @@ pub fn write_cuMemGetAllocationPropertiesFromHandle(
}
pub fn write_cuMemRetainAllocationHandle(
writer: &mut (impl std::io::Write + ?Sized),
- handle: *mut cuda_types::CUmemGenericAllocationHandle,
+ handle: *mut cuda_types::cuda::CUmemGenericAllocationHandle,
addr: *mut ::core::ffi::c_void,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -10072,8 +10088,8 @@ pub fn write_cuMemRetainAllocationHandle(
}
pub fn write_cuMemFreeAsync_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: cuda_types::CUdeviceptr,
- hStream: cuda_types::CUstream,
+ dptr: cuda_types::cuda::CUdeviceptr,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -10087,9 +10103,9 @@ pub fn write_cuMemFreeAsync_ptsz(
}
pub fn write_cuMemAllocAsync_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytesize: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -10117,7 +10133,7 @@ pub fn write_cuMemAllocAsync_ptsz(
}
pub fn write_cuMemPoolTrimTo(
writer: &mut (impl std::io::Write + ?Sized),
- pool: cuda_types::CUmemoryPool,
+ pool: cuda_types::cuda::CUmemoryPool,
minBytesToKeep: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -10137,8 +10153,8 @@ pub fn write_cuMemPoolTrimTo(
}
pub fn write_cuMemPoolSetAttribute(
writer: &mut (impl std::io::Write + ?Sized),
- pool: cuda_types::CUmemoryPool,
- attr: cuda_types::CUmemPool_attribute,
+ pool: cuda_types::cuda::CUmemoryPool,
+ attr: cuda_types::cuda::CUmemPool_attribute,
value: *mut ::core::ffi::c_void,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -10157,8 +10173,8 @@ pub fn write_cuMemPoolSetAttribute(
}
pub fn write_cuMemPoolGetAttribute(
writer: &mut (impl std::io::Write + ?Sized),
- pool: cuda_types::CUmemoryPool,
- attr: cuda_types::CUmemPool_attribute,
+ pool: cuda_types::cuda::CUmemoryPool,
+ attr: cuda_types::cuda::CUmemPool_attribute,
value: *mut ::core::ffi::c_void,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -10177,8 +10193,8 @@ pub fn write_cuMemPoolGetAttribute(
}
pub fn write_cuMemPoolSetAccess(
writer: &mut (impl std::io::Write + ?Sized),
- pool: cuda_types::CUmemoryPool,
- map: *const cuda_types::CUmemAccessDesc,
+ pool: cuda_types::cuda::CUmemoryPool,
+ map: *const cuda_types::cuda::CUmemAccessDesc,
count: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -10197,9 +10213,9 @@ pub fn write_cuMemPoolSetAccess(
}
pub fn write_cuMemPoolGetAccess(
writer: &mut (impl std::io::Write + ?Sized),
- flags: *mut cuda_types::CUmemAccess_flags,
- memPool: cuda_types::CUmemoryPool,
- location: *mut cuda_types::CUmemLocation,
+ flags: *mut cuda_types::cuda::CUmemAccess_flags,
+ memPool: cuda_types::cuda::CUmemoryPool,
+ location: *mut cuda_types::cuda::CUmemLocation,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -10217,8 +10233,8 @@ pub fn write_cuMemPoolGetAccess(
}
pub fn write_cuMemPoolCreate(
writer: &mut (impl std::io::Write + ?Sized),
- pool: *mut cuda_types::CUmemoryPool,
- poolProps: *const cuda_types::CUmemPoolProps,
+ pool: *mut cuda_types::cuda::CUmemoryPool,
+ poolProps: *const cuda_types::cuda::CUmemPoolProps,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -10232,7 +10248,7 @@ pub fn write_cuMemPoolCreate(
}
pub fn write_cuMemPoolDestroy(
writer: &mut (impl std::io::Write + ?Sized),
- pool: cuda_types::CUmemoryPool,
+ pool: cuda_types::cuda::CUmemoryPool,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -10242,10 +10258,10 @@ pub fn write_cuMemPoolDestroy(
}
pub fn write_cuMemAllocFromPoolAsync_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytesize: usize,
- pool: cuda_types::CUmemoryPool,
- hStream: cuda_types::CUstream,
+ pool: cuda_types::cuda::CUmemoryPool,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -10288,8 +10304,8 @@ pub fn write_cuMemAllocFromPoolAsync_ptsz(
pub fn write_cuMemPoolExportToShareableHandle(
writer: &mut (impl std::io::Write + ?Sized),
handle_out: *mut ::core::ffi::c_void,
- pool: cuda_types::CUmemoryPool,
- handleType: cuda_types::CUmemAllocationHandleType,
+ pool: cuda_types::cuda::CUmemoryPool,
+ handleType: cuda_types::cuda::CUmemAllocationHandleType,
flags: ::core::ffi::c_ulonglong,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -10332,9 +10348,9 @@ pub fn write_cuMemPoolExportToShareableHandle(
}
pub fn write_cuMemPoolImportFromShareableHandle(
writer: &mut (impl std::io::Write + ?Sized),
- pool_out: *mut cuda_types::CUmemoryPool,
+ pool_out: *mut cuda_types::cuda::CUmemoryPool,
handle: *mut ::core::ffi::c_void,
- handleType: cuda_types::CUmemAllocationHandleType,
+ handleType: cuda_types::cuda::CUmemAllocationHandleType,
flags: ::core::ffi::c_ulonglong,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -10377,8 +10393,8 @@ pub fn write_cuMemPoolImportFromShareableHandle(
}
pub fn write_cuMemPoolExportPointer(
writer: &mut (impl std::io::Write + ?Sized),
- shareData_out: *mut cuda_types::CUmemPoolPtrExportData,
- ptr: cuda_types::CUdeviceptr,
+ shareData_out: *mut cuda_types::cuda::CUmemPoolPtrExportData,
+ ptr: cuda_types::cuda::CUdeviceptr,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -10397,9 +10413,9 @@ pub fn write_cuMemPoolExportPointer(
}
pub fn write_cuMemPoolImportPointer(
writer: &mut (impl std::io::Write + ?Sized),
- ptr_out: *mut cuda_types::CUdeviceptr,
- pool: cuda_types::CUmemoryPool,
- shareData: *mut cuda_types::CUmemPoolPtrExportData,
+ ptr_out: *mut cuda_types::cuda::CUdeviceptr,
+ pool: cuda_types::cuda::CUmemoryPool,
+ shareData: *mut cuda_types::cuda::CUmemPoolPtrExportData,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -10427,8 +10443,8 @@ pub fn write_cuMemPoolImportPointer(
}
pub fn write_cuMulticastCreate(
writer: &mut (impl std::io::Write + ?Sized),
- mcHandle: *mut cuda_types::CUmemGenericAllocationHandle,
- prop: *const cuda_types::CUmulticastObjectProp,
+ mcHandle: *mut cuda_types::cuda::CUmemGenericAllocationHandle,
+ prop: *const cuda_types::cuda::CUmulticastObjectProp,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -10442,8 +10458,8 @@ pub fn write_cuMulticastCreate(
}
pub fn write_cuMulticastAddDevice(
writer: &mut (impl std::io::Write + ?Sized),
- mcHandle: cuda_types::CUmemGenericAllocationHandle,
- dev: cuda_types::CUdevice,
+ mcHandle: cuda_types::cuda::CUmemGenericAllocationHandle,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -10462,9 +10478,9 @@ pub fn write_cuMulticastAddDevice(
}
pub fn write_cuMulticastBindMem(
writer: &mut (impl std::io::Write + ?Sized),
- mcHandle: cuda_types::CUmemGenericAllocationHandle,
+ mcHandle: cuda_types::cuda::CUmemGenericAllocationHandle,
mcOffset: usize,
- memHandle: cuda_types::CUmemGenericAllocationHandle,
+ memHandle: cuda_types::cuda::CUmemGenericAllocationHandle,
memOffset: usize,
size: usize,
flags: ::core::ffi::c_ulonglong,
@@ -10507,9 +10523,9 @@ pub fn write_cuMulticastBindMem(
}
pub fn write_cuMulticastBindAddr(
writer: &mut (impl std::io::Write + ?Sized),
- mcHandle: cuda_types::CUmemGenericAllocationHandle,
+ mcHandle: cuda_types::cuda::CUmemGenericAllocationHandle,
mcOffset: usize,
- memptr: cuda_types::CUdeviceptr,
+ memptr: cuda_types::cuda::CUdeviceptr,
size: usize,
flags: ::core::ffi::c_ulonglong,
) -> std::io::Result<()> {
@@ -10547,8 +10563,8 @@ pub fn write_cuMulticastBindAddr(
}
pub fn write_cuMulticastUnbind(
writer: &mut (impl std::io::Write + ?Sized),
- mcHandle: cuda_types::CUmemGenericAllocationHandle,
- dev: cuda_types::CUdevice,
+ mcHandle: cuda_types::cuda::CUmemGenericAllocationHandle,
+ dev: cuda_types::cuda::CUdevice,
mcOffset: usize,
size: usize,
) -> std::io::Result<()> {
@@ -10573,8 +10589,8 @@ pub fn write_cuMulticastUnbind(
pub fn write_cuMulticastGetGranularity(
writer: &mut (impl std::io::Write + ?Sized),
granularity: *mut usize,
- prop: *const cuda_types::CUmulticastObjectProp,
- option: cuda_types::CUmulticastGranularity_flags,
+ prop: *const cuda_types::cuda::CUmulticastObjectProp,
+ option: cuda_types::cuda::CUmulticastGranularity_flags,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -10608,8 +10624,8 @@ pub fn write_cuMulticastGetGranularity(
pub fn write_cuPointerGetAttribute(
writer: &mut (impl std::io::Write + ?Sized),
data: *mut ::core::ffi::c_void,
- attribute: cuda_types::CUpointer_attribute,
- ptr: cuda_types::CUdeviceptr,
+ attribute: cuda_types::cuda::CUpointer_attribute,
+ ptr: cuda_types::cuda::CUdeviceptr,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -10632,10 +10648,10 @@ pub fn write_cuPointerGetAttribute(
}
pub fn write_cuMemPrefetchAsync_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- dstDevice: cuda_types::CUdevice,
- hStream: cuda_types::CUstream,
+ dstDevice: cuda_types::cuda::CUdevice,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -10677,11 +10693,11 @@ pub fn write_cuMemPrefetchAsync_ptsz(
}
pub fn write_cuMemPrefetchAsync_v2_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- location: cuda_types::CUmemLocation,
+ location: cuda_types::cuda::CUmemLocation,
flags: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -10732,10 +10748,10 @@ pub fn write_cuMemPrefetchAsync_v2_ptsz(
}
pub fn write_cuMemAdvise(
writer: &mut (impl std::io::Write + ?Sized),
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- advice: cuda_types::CUmem_advise,
- device: cuda_types::CUdevice,
+ advice: cuda_types::cuda::CUmem_advise,
+ device: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -10757,10 +10773,10 @@ pub fn write_cuMemAdvise(
}
pub fn write_cuMemAdvise_v2(
writer: &mut (impl std::io::Write + ?Sized),
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- advice: cuda_types::CUmem_advise,
- location: cuda_types::CUmemLocation,
+ advice: cuda_types::cuda::CUmem_advise,
+ location: cuda_types::cuda::CUmemLocation,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -10784,8 +10800,8 @@ pub fn write_cuMemRangeGetAttribute(
writer: &mut (impl std::io::Write + ?Sized),
data: *mut ::core::ffi::c_void,
dataSize: usize,
- attribute: cuda_types::CUmem_range_attribute,
- devPtr: cuda_types::CUdeviceptr,
+ attribute: cuda_types::cuda::CUmem_range_attribute,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -10834,9 +10850,9 @@ pub fn write_cuMemRangeGetAttributes(
writer: &mut (impl std::io::Write + ?Sized),
data: *mut *mut ::core::ffi::c_void,
dataSizes: *mut usize,
- attributes: *mut cuda_types::CUmem_range_attribute,
+ attributes: *mut cuda_types::cuda::CUmem_range_attribute,
numAttributes: usize,
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -10898,8 +10914,8 @@ pub fn write_cuMemRangeGetAttributes(
pub fn write_cuPointerSetAttribute(
writer: &mut (impl std::io::Write + ?Sized),
value: *const ::core::ffi::c_void,
- attribute: cuda_types::CUpointer_attribute,
- ptr: cuda_types::CUdeviceptr,
+ attribute: cuda_types::cuda::CUpointer_attribute,
+ ptr: cuda_types::cuda::CUdeviceptr,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -10923,9 +10939,9 @@ pub fn write_cuPointerSetAttribute(
pub fn write_cuPointerGetAttributes(
writer: &mut (impl std::io::Write + ?Sized),
numAttributes: ::core::ffi::c_uint,
- attributes: *mut cuda_types::CUpointer_attribute,
+ attributes: *mut cuda_types::cuda::CUpointer_attribute,
data: *mut *mut ::core::ffi::c_void,
- ptr: cuda_types::CUdeviceptr,
+ ptr: cuda_types::cuda::CUdeviceptr,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -10957,7 +10973,7 @@ pub fn write_cuPointerGetAttributes(
}
pub fn write_cuStreamCreate(
writer: &mut (impl std::io::Write + ?Sized),
- phStream: *mut cuda_types::CUstream,
+ phStream: *mut cuda_types::cuda::CUstream,
Flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -10972,7 +10988,7 @@ pub fn write_cuStreamCreate(
}
pub fn write_cuStreamCreateWithPriority(
writer: &mut (impl std::io::Write + ?Sized),
- phStream: *mut cuda_types::CUstream,
+ phStream: *mut cuda_types::cuda::CUstream,
flags: ::core::ffi::c_uint,
priority: ::core::ffi::c_int,
) -> std::io::Result<()> {
@@ -11007,7 +11023,7 @@ pub fn write_cuStreamCreateWithPriority(
}
pub fn write_cuStreamGetPriority_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
priority: *mut ::core::ffi::c_int,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -11032,7 +11048,7 @@ pub fn write_cuStreamGetPriority_ptsz(
}
pub fn write_cuStreamGetFlags_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
flags: *mut ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -11052,7 +11068,7 @@ pub fn write_cuStreamGetFlags_ptsz(
}
pub fn write_cuStreamGetId_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
streamId: *mut ::core::ffi::c_ulonglong,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -11067,8 +11083,8 @@ pub fn write_cuStreamGetId_ptsz(
}
pub fn write_cuStreamGetCtx_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- pctx: *mut cuda_types::CUcontext,
+ hStream: cuda_types::cuda::CUstream,
+ pctx: *mut cuda_types::cuda::CUcontext,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11082,8 +11098,8 @@ pub fn write_cuStreamGetCtx_ptsz(
}
pub fn write_cuStreamWaitEvent_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- hEvent: cuda_types::CUevent,
+ hStream: cuda_types::cuda::CUstream,
+ hEvent: cuda_types::cuda::CUevent,
Flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -11117,8 +11133,8 @@ pub fn write_cuStreamWaitEvent_ptsz(
}
pub fn write_cuStreamAddCallback_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- callback: cuda_types::CUstreamCallback,
+ hStream: cuda_types::cuda::CUstream,
+ callback: cuda_types::cuda::CUstreamCallback,
userData: *mut ::core::ffi::c_void,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -11162,8 +11178,8 @@ pub fn write_cuStreamAddCallback_ptsz(
}
pub fn write_cuStreamBeginCapture_v2_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- mode: cuda_types::CUstreamCaptureMode,
+ hStream: cuda_types::cuda::CUstream,
+ mode: cuda_types::cuda::CUstreamCaptureMode,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11187,12 +11203,12 @@ pub fn write_cuStreamBeginCapture_v2_ptsz(
}
pub fn write_cuStreamBeginCaptureToGraph_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
- dependencyData: *const cuda_types::CUgraphEdgeData,
+ hStream: cuda_types::cuda::CUstream,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
+ dependencyData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
- mode: cuda_types::CUstreamCaptureMode,
+ mode: cuda_types::cuda::CUstreamCaptureMode,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11252,7 +11268,7 @@ pub fn write_cuStreamBeginCaptureToGraph_ptsz(
}
pub fn write_cuThreadExchangeStreamCaptureMode(
writer: &mut (impl std::io::Write + ?Sized),
- mode: *mut cuda_types::CUstreamCaptureMode,
+ mode: *mut cuda_types::cuda::CUstreamCaptureMode,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11267,8 +11283,8 @@ pub fn write_cuThreadExchangeStreamCaptureMode(
}
pub fn write_cuStreamEndCapture_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- phGraph: *mut cuda_types::CUgraph,
+ hStream: cuda_types::cuda::CUstream,
+ phGraph: *mut cuda_types::cuda::CUgraph,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11292,8 +11308,8 @@ pub fn write_cuStreamEndCapture_ptsz(
}
pub fn write_cuStreamIsCapturing_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- captureStatus: *mut cuda_types::CUstreamCaptureStatus,
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus: *mut cuda_types::cuda::CUstreamCaptureStatus,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11317,11 +11333,11 @@ pub fn write_cuStreamIsCapturing_ptsz(
}
pub fn write_cuStreamGetCaptureInfo_v2_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- captureStatus_out: *mut cuda_types::CUstreamCaptureStatus,
- id_out: *mut cuda_types::cuuint64_t,
- graph_out: *mut cuda_types::CUgraph,
- dependencies_out: *mut *const cuda_types::CUgraphNode,
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus_out: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ id_out: *mut cuda_types::cuda::cuuint64_t,
+ graph_out: *mut cuda_types::cuda::CUgraph,
+ dependencies_out: *mut *const cuda_types::cuda::CUgraphNode,
numDependencies_out: *mut usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -11382,12 +11398,12 @@ pub fn write_cuStreamGetCaptureInfo_v2_ptsz(
}
pub fn write_cuStreamGetCaptureInfo_v3_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- captureStatus_out: *mut cuda_types::CUstreamCaptureStatus,
- id_out: *mut cuda_types::cuuint64_t,
- graph_out: *mut cuda_types::CUgraph,
- dependencies_out: *mut *const cuda_types::CUgraphNode,
- edgeData_out: *mut *const cuda_types::CUgraphEdgeData,
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus_out: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ id_out: *mut cuda_types::cuda::cuuint64_t,
+ graph_out: *mut cuda_types::cuda::CUgraph,
+ dependencies_out: *mut *const cuda_types::cuda::CUgraphNode,
+ edgeData_out: *mut *const cuda_types::cuda::CUgraphEdgeData,
numDependencies_out: *mut usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -11457,8 +11473,8 @@ pub fn write_cuStreamGetCaptureInfo_v3_ptsz(
}
pub fn write_cuStreamUpdateCaptureDependencies_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- dependencies: *mut cuda_types::CUgraphNode,
+ hStream: cuda_types::cuda::CUstream,
+ dependencies: *mut cuda_types::cuda::CUgraphNode,
numDependencies: usize,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -11502,9 +11518,9 @@ pub fn write_cuStreamUpdateCaptureDependencies_ptsz(
}
pub fn write_cuStreamUpdateCaptureDependencies_v2_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- dependencies: *mut cuda_types::CUgraphNode,
- dependencyData: *const cuda_types::CUgraphEdgeData,
+ hStream: cuda_types::cuda::CUstream,
+ dependencies: *mut cuda_types::cuda::CUgraphNode,
+ dependencyData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -11557,8 +11573,8 @@ pub fn write_cuStreamUpdateCaptureDependencies_v2_ptsz(
}
pub fn write_cuStreamAttachMemAsync_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- dptr: cuda_types::CUdeviceptr,
+ hStream: cuda_types::cuda::CUstream,
+ dptr: cuda_types::cuda::CUdeviceptr,
length: usize,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -11602,7 +11618,7 @@ pub fn write_cuStreamAttachMemAsync_ptsz(
}
pub fn write_cuStreamQuery_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11612,7 +11628,7 @@ pub fn write_cuStreamQuery_ptsz(
}
pub fn write_cuStreamSynchronize_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11627,7 +11643,7 @@ pub fn write_cuStreamSynchronize_ptsz(
}
pub fn write_cuStreamDestroy_v2(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11637,8 +11653,8 @@ pub fn write_cuStreamDestroy_v2(
}
pub fn write_cuStreamCopyAttributes_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- dst: cuda_types::CUstream,
- src: cuda_types::CUstream,
+ dst: cuda_types::cuda::CUstream,
+ src: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11662,7 +11678,7 @@ pub fn write_cuStreamCopyAttributes_ptsz(
}
pub fn write_cuEventCreate(
writer: &mut (impl std::io::Write + ?Sized),
- phEvent: *mut cuda_types::CUevent,
+ phEvent: *mut cuda_types::cuda::CUevent,
Flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -11677,8 +11693,8 @@ pub fn write_cuEventCreate(
}
pub fn write_cuEventRecord_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hEvent: cuda_types::CUevent,
- hStream: cuda_types::CUstream,
+ hEvent: cuda_types::cuda::CUevent,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11692,8 +11708,8 @@ pub fn write_cuEventRecord_ptsz(
}
pub fn write_cuEventRecordWithFlags_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hEvent: cuda_types::CUevent,
- hStream: cuda_types::CUstream,
+ hEvent: cuda_types::cuda::CUevent,
+ hStream: cuda_types::cuda::CUstream,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -11727,7 +11743,7 @@ pub fn write_cuEventRecordWithFlags_ptsz(
}
pub fn write_cuEventQuery(
writer: &mut (impl std::io::Write + ?Sized),
- hEvent: cuda_types::CUevent,
+ hEvent: cuda_types::cuda::CUevent,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11737,7 +11753,7 @@ pub fn write_cuEventQuery(
}
pub fn write_cuEventSynchronize(
writer: &mut (impl std::io::Write + ?Sized),
- hEvent: cuda_types::CUevent,
+ hEvent: cuda_types::cuda::CUevent,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11747,7 +11763,7 @@ pub fn write_cuEventSynchronize(
}
pub fn write_cuEventDestroy_v2(
writer: &mut (impl std::io::Write + ?Sized),
- hEvent: cuda_types::CUevent,
+ hEvent: cuda_types::cuda::CUevent,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11758,8 +11774,8 @@ pub fn write_cuEventDestroy_v2(
pub fn write_cuEventElapsedTime(
writer: &mut (impl std::io::Write + ?Sized),
pMilliseconds: *mut f32,
- hStart: cuda_types::CUevent,
- hEnd: cuda_types::CUevent,
+ hStart: cuda_types::cuda::CUevent,
+ hEnd: cuda_types::cuda::CUevent,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11782,8 +11798,8 @@ pub fn write_cuEventElapsedTime(
}
pub fn write_cuImportExternalMemory(
writer: &mut (impl std::io::Write + ?Sized),
- extMem_out: *mut cuda_types::CUexternalMemory,
- memHandleDesc: *const cuda_types::CUDA_EXTERNAL_MEMORY_HANDLE_DESC,
+ extMem_out: *mut cuda_types::cuda::CUexternalMemory,
+ memHandleDesc: *const cuda_types::cuda::CUDA_EXTERNAL_MEMORY_HANDLE_DESC,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11807,9 +11823,9 @@ pub fn write_cuImportExternalMemory(
}
pub fn write_cuExternalMemoryGetMappedBuffer(
writer: &mut (impl std::io::Write + ?Sized),
- devPtr: *mut cuda_types::CUdeviceptr,
- extMem: cuda_types::CUexternalMemory,
- bufferDesc: *const cuda_types::CUDA_EXTERNAL_MEMORY_BUFFER_DESC,
+ devPtr: *mut cuda_types::cuda::CUdeviceptr,
+ extMem: cuda_types::cuda::CUexternalMemory,
+ bufferDesc: *const cuda_types::cuda::CUDA_EXTERNAL_MEMORY_BUFFER_DESC,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11842,9 +11858,9 @@ pub fn write_cuExternalMemoryGetMappedBuffer(
}
pub fn write_cuExternalMemoryGetMappedMipmappedArray(
writer: &mut (impl std::io::Write + ?Sized),
- mipmap: *mut cuda_types::CUmipmappedArray,
- extMem: cuda_types::CUexternalMemory,
- mipmapDesc: *const cuda_types::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC,
+ mipmap: *mut cuda_types::cuda::CUmipmappedArray,
+ extMem: cuda_types::cuda::CUexternalMemory,
+ mipmapDesc: *const cuda_types::cuda::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11877,7 +11893,7 @@ pub fn write_cuExternalMemoryGetMappedMipmappedArray(
}
pub fn write_cuDestroyExternalMemory(
writer: &mut (impl std::io::Write + ?Sized),
- extMem: cuda_types::CUexternalMemory,
+ extMem: cuda_types::cuda::CUexternalMemory,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11892,8 +11908,8 @@ pub fn write_cuDestroyExternalMemory(
}
pub fn write_cuImportExternalSemaphore(
writer: &mut (impl std::io::Write + ?Sized),
- extSem_out: *mut cuda_types::CUexternalSemaphore,
- semHandleDesc: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC,
+ extSem_out: *mut cuda_types::cuda::CUexternalSemaphore,
+ semHandleDesc: *const cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11917,10 +11933,10 @@ pub fn write_cuImportExternalSemaphore(
}
pub fn write_cuSignalExternalSemaphoresAsync_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- extSemArray: *const cuda_types::CUexternalSemaphore,
- paramsArray: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,
+ extSemArray: *const cuda_types::cuda::CUexternalSemaphore,
+ paramsArray: *const cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,
numExtSems: ::core::ffi::c_uint,
- stream: cuda_types::CUstream,
+ stream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -11962,10 +11978,10 @@ pub fn write_cuSignalExternalSemaphoresAsync_ptsz(
}
pub fn write_cuWaitExternalSemaphoresAsync_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- extSemArray: *const cuda_types::CUexternalSemaphore,
- paramsArray: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,
+ extSemArray: *const cuda_types::cuda::CUexternalSemaphore,
+ paramsArray: *const cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,
numExtSems: ::core::ffi::c_uint,
- stream: cuda_types::CUstream,
+ stream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -12007,7 +12023,7 @@ pub fn write_cuWaitExternalSemaphoresAsync_ptsz(
}
pub fn write_cuDestroyExternalSemaphore(
writer: &mut (impl std::io::Write + ?Sized),
- extSem: cuda_types::CUexternalSemaphore,
+ extSem: cuda_types::cuda::CUexternalSemaphore,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -12022,9 +12038,9 @@ pub fn write_cuDestroyExternalSemaphore(
}
pub fn write_cuStreamWaitValue32_v2_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -12067,9 +12083,9 @@ pub fn write_cuStreamWaitValue32_v2_ptsz(
}
pub fn write_cuStreamWaitValue64_v2_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -12112,9 +12128,9 @@ pub fn write_cuStreamWaitValue64_v2_ptsz(
}
pub fn write_cuStreamWriteValue32_v2_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -12157,9 +12173,9 @@ pub fn write_cuStreamWriteValue32_v2_ptsz(
}
pub fn write_cuStreamWriteValue64_v2_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -12202,9 +12218,9 @@ pub fn write_cuStreamWriteValue64_v2_ptsz(
}
pub fn write_cuStreamBatchMemOp_v2_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- stream: cuda_types::CUstream,
+ stream: cuda_types::cuda::CUstream,
count: ::core::ffi::c_uint,
- paramArray: *mut cuda_types::CUstreamBatchMemOpParams,
+ paramArray: *mut cuda_types::cuda::CUstreamBatchMemOpParams,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -12248,8 +12264,8 @@ pub fn write_cuStreamBatchMemOp_v2_ptsz(
pub fn write_cuFuncGetAttribute(
writer: &mut (impl std::io::Write + ?Sized),
pi: *mut ::core::ffi::c_int,
- attrib: cuda_types::CUfunction_attribute,
- hfunc: cuda_types::CUfunction,
+ attrib: cuda_types::cuda::CUfunction_attribute,
+ hfunc: cuda_types::cuda::CUfunction,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -12267,8 +12283,8 @@ pub fn write_cuFuncGetAttribute(
}
pub fn write_cuFuncSetAttribute(
writer: &mut (impl std::io::Write + ?Sized),
- hfunc: cuda_types::CUfunction,
- attrib: cuda_types::CUfunction_attribute,
+ hfunc: cuda_types::cuda::CUfunction,
+ attrib: cuda_types::cuda::CUfunction_attribute,
value: ::core::ffi::c_int,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -12287,8 +12303,8 @@ pub fn write_cuFuncSetAttribute(
}
pub fn write_cuFuncSetCacheConfig(
writer: &mut (impl std::io::Write + ?Sized),
- hfunc: cuda_types::CUfunction,
- config: cuda_types::CUfunc_cache,
+ hfunc: cuda_types::cuda::CUfunction,
+ config: cuda_types::cuda::CUfunc_cache,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -12302,8 +12318,8 @@ pub fn write_cuFuncSetCacheConfig(
}
pub fn write_cuFuncGetModule(
writer: &mut (impl std::io::Write + ?Sized),
- hmod: *mut cuda_types::CUmodule,
- hfunc: cuda_types::CUfunction,
+ hmod: *mut cuda_types::cuda::CUmodule,
+ hfunc: cuda_types::cuda::CUfunction,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -12318,7 +12334,7 @@ pub fn write_cuFuncGetModule(
pub fn write_cuFuncGetName(
writer: &mut (impl std::io::Write + ?Sized),
name: *mut *const ::core::ffi::c_char,
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -12332,7 +12348,7 @@ pub fn write_cuFuncGetName(
}
pub fn write_cuFuncGetParamInfo(
writer: &mut (impl std::io::Write + ?Sized),
- func: cuda_types::CUfunction,
+ func: cuda_types::cuda::CUfunction,
paramIndex: usize,
paramOffset: *mut usize,
paramSize: *mut usize,
@@ -12370,7 +12386,7 @@ pub fn write_cuFuncGetParamInfo(
)?;
writer.write_all(b")")
}
-impl crate::format::CudaDisplay for cuda_types::CUfunctionLoadingState_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUfunctionLoadingState_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -12378,14 +12394,14 @@ impl crate::format::CudaDisplay for cuda_types::CUfunctionLoadingState_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUfunctionLoadingState_enum::CU_FUNCTION_LOADING_STATE_UNLOADED => {
+ &cuda_types::cuda::CUfunctionLoadingState_enum::CU_FUNCTION_LOADING_STATE_UNLOADED => {
writer
.write_all(stringify!(CU_FUNCTION_LOADING_STATE_UNLOADED).as_bytes())
}
- &cuda_types::CUfunctionLoadingState_enum::CU_FUNCTION_LOADING_STATE_LOADED => {
+ &cuda_types::cuda::CUfunctionLoadingState_enum::CU_FUNCTION_LOADING_STATE_LOADED => {
writer.write_all(stringify!(CU_FUNCTION_LOADING_STATE_LOADED).as_bytes())
}
- &cuda_types::CUfunctionLoadingState_enum::CU_FUNCTION_LOADING_STATE_MAX => {
+ &cuda_types::cuda::CUfunctionLoadingState_enum::CU_FUNCTION_LOADING_STATE_MAX => {
writer.write_all(stringify!(CU_FUNCTION_LOADING_STATE_MAX).as_bytes())
}
_ => write!(writer, "{}", self.0),
@@ -12394,8 +12410,8 @@ impl crate::format::CudaDisplay for cuda_types::CUfunctionLoadingState_enum {
}
pub fn write_cuFuncIsLoaded(
writer: &mut (impl std::io::Write + ?Sized),
- state: *mut cuda_types::CUfunctionLoadingState,
- function: cuda_types::CUfunction,
+ state: *mut cuda_types::cuda::CUfunctionLoadingState,
+ function: cuda_types::cuda::CUfunction,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -12409,7 +12425,7 @@ pub fn write_cuFuncIsLoaded(
}
pub fn write_cuFuncLoad(
writer: &mut (impl std::io::Write + ?Sized),
- function: cuda_types::CUfunction,
+ function: cuda_types::cuda::CUfunction,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -12419,7 +12435,7 @@ pub fn write_cuFuncLoad(
}
pub fn write_cuLaunchKernel_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- f: cuda_types::CUfunction,
+ f: cuda_types::cuda::CUfunction,
gridDimX: ::core::ffi::c_uint,
gridDimY: ::core::ffi::c_uint,
gridDimZ: ::core::ffi::c_uint,
@@ -12427,7 +12443,7 @@ pub fn write_cuLaunchKernel_ptsz(
blockDimY: ::core::ffi::c_uint,
blockDimZ: ::core::ffi::c_uint,
sharedMemBytes: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
kernelParams: *mut *mut ::core::ffi::c_void,
extra: *mut *mut ::core::ffi::c_void,
) -> std::io::Result<()> {
@@ -12519,8 +12535,8 @@ pub fn write_cuLaunchKernel_ptsz(
}
pub fn write_cuLaunchKernelEx_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- config: *const cuda_types::CUlaunchConfig,
- f: cuda_types::CUfunction,
+ config: *const cuda_types::cuda::CUlaunchConfig,
+ f: cuda_types::cuda::CUfunction,
kernelParams: *mut *mut ::core::ffi::c_void,
extra: *mut *mut ::core::ffi::c_void,
) -> std::io::Result<()> {
@@ -12554,7 +12570,7 @@ pub fn write_cuLaunchKernelEx_ptsz(
}
pub fn write_cuLaunchCooperativeKernel_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- f: cuda_types::CUfunction,
+ f: cuda_types::cuda::CUfunction,
gridDimX: ::core::ffi::c_uint,
gridDimY: ::core::ffi::c_uint,
gridDimZ: ::core::ffi::c_uint,
@@ -12562,7 +12578,7 @@ pub fn write_cuLaunchCooperativeKernel_ptsz(
blockDimY: ::core::ffi::c_uint,
blockDimZ: ::core::ffi::c_uint,
sharedMemBytes: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
kernelParams: *mut *mut ::core::ffi::c_void,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -12659,7 +12675,7 @@ pub fn write_cuLaunchCooperativeKernel_ptsz(
}
pub fn write_cuLaunchCooperativeKernelMultiDevice(
writer: &mut (impl std::io::Write + ?Sized),
- launchParamsList: *mut cuda_types::CUDA_LAUNCH_PARAMS,
+ launchParamsList: *mut cuda_types::cuda::CUDA_LAUNCH_PARAMS,
numDevices: ::core::ffi::c_uint,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -12694,8 +12710,8 @@ pub fn write_cuLaunchCooperativeKernelMultiDevice(
}
pub fn write_cuLaunchHostFunc_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- fn_: cuda_types::CUhostFn,
+ hStream: cuda_types::cuda::CUstream,
+ fn_: cuda_types::cuda::CUhostFn,
userData: *mut ::core::ffi::c_void,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -12724,7 +12740,7 @@ pub fn write_cuLaunchHostFunc_ptsz(
}
pub fn write_cuFuncSetBlockShape(
writer: &mut (impl std::io::Write + ?Sized),
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
x: ::core::ffi::c_int,
y: ::core::ffi::c_int,
z: ::core::ffi::c_int,
@@ -12749,7 +12765,7 @@ pub fn write_cuFuncSetBlockShape(
}
pub fn write_cuFuncSetSharedSize(
writer: &mut (impl std::io::Write + ?Sized),
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
bytes: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -12764,7 +12780,7 @@ pub fn write_cuFuncSetSharedSize(
}
pub fn write_cuParamSetSize(
writer: &mut (impl std::io::Write + ?Sized),
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
numbytes: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -12779,7 +12795,7 @@ pub fn write_cuParamSetSize(
}
pub fn write_cuParamSeti(
writer: &mut (impl std::io::Write + ?Sized),
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
offset: ::core::ffi::c_int,
value: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -12799,7 +12815,7 @@ pub fn write_cuParamSeti(
}
pub fn write_cuParamSetf(
writer: &mut (impl std::io::Write + ?Sized),
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
offset: ::core::ffi::c_int,
value: f32,
) -> std::io::Result<()> {
@@ -12819,7 +12835,7 @@ pub fn write_cuParamSetf(
}
pub fn write_cuParamSetv(
writer: &mut (impl std::io::Write + ?Sized),
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
offset: ::core::ffi::c_int,
ptr: *mut ::core::ffi::c_void,
numbytes: ::core::ffi::c_uint,
@@ -12844,7 +12860,7 @@ pub fn write_cuParamSetv(
}
pub fn write_cuLaunch(
writer: &mut (impl std::io::Write + ?Sized),
- f: cuda_types::CUfunction,
+ f: cuda_types::cuda::CUfunction,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -12854,7 +12870,7 @@ pub fn write_cuLaunch(
}
pub fn write_cuLaunchGrid(
writer: &mut (impl std::io::Write + ?Sized),
- f: cuda_types::CUfunction,
+ f: cuda_types::cuda::CUfunction,
grid_width: ::core::ffi::c_int,
grid_height: ::core::ffi::c_int,
) -> std::io::Result<()> {
@@ -12874,10 +12890,10 @@ pub fn write_cuLaunchGrid(
}
pub fn write_cuLaunchGridAsync(
writer: &mut (impl std::io::Write + ?Sized),
- f: cuda_types::CUfunction,
+ f: cuda_types::cuda::CUfunction,
grid_width: ::core::ffi::c_int,
grid_height: ::core::ffi::c_int,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -12909,9 +12925,9 @@ pub fn write_cuLaunchGridAsync(
}
pub fn write_cuParamSetTexRef(
writer: &mut (impl std::io::Write + ?Sized),
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
texunit: ::core::ffi::c_int,
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -12929,8 +12945,8 @@ pub fn write_cuParamSetTexRef(
}
pub fn write_cuFuncSetSharedMemConfig(
writer: &mut (impl std::io::Write + ?Sized),
- hfunc: cuda_types::CUfunction,
- config: cuda_types::CUsharedconfig,
+ hfunc: cuda_types::cuda::CUfunction,
+ config: cuda_types::cuda::CUsharedconfig,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -12954,7 +12970,7 @@ pub fn write_cuFuncSetSharedMemConfig(
}
pub fn write_cuGraphCreate(
writer: &mut (impl std::io::Write + ?Sized),
- phGraph: *mut cuda_types::CUgraph,
+ phGraph: *mut cuda_types::cuda::CUgraph,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -12969,11 +12985,11 @@ pub fn write_cuGraphCreate(
}
pub fn write_cuGraphAddKernelNode_v2(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS,
+ nodeParams: *const cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13024,8 +13040,8 @@ pub fn write_cuGraphAddKernelNode_v2(
}
pub fn write_cuGraphKernelNodeGetParams_v2(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUDA_KERNEL_NODE_PARAMS,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13049,8 +13065,8 @@ pub fn write_cuGraphKernelNodeGetParams_v2(
}
pub fn write_cuGraphKernelNodeSetParams_v2(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13074,12 +13090,12 @@ pub fn write_cuGraphKernelNodeSetParams_v2(
}
pub fn write_cuGraphAddMemcpyNode(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- copyParams: *const cuda_types::CUDA_MEMCPY3D,
- ctx: cuda_types::CUcontext,
+ copyParams: *const cuda_types::cuda::CUDA_MEMCPY3D,
+ ctx: cuda_types::cuda::CUcontext,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13129,8 +13145,8 @@ pub fn write_cuGraphAddMemcpyNode(
}
pub fn write_cuGraphMemcpyNodeGetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUDA_MEMCPY3D,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUDA_MEMCPY3D,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13154,8 +13170,8 @@ pub fn write_cuGraphMemcpyNodeGetParams(
}
pub fn write_cuGraphMemcpyNodeSetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_MEMCPY3D,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_MEMCPY3D,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13179,12 +13195,12 @@ pub fn write_cuGraphMemcpyNodeSetParams(
}
pub fn write_cuGraphAddMemsetNode(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- memsetParams: *const cuda_types::CUDA_MEMSET_NODE_PARAMS,
- ctx: cuda_types::CUcontext,
+ memsetParams: *const cuda_types::cuda::CUDA_MEMSET_NODE_PARAMS,
+ ctx: cuda_types::cuda::CUcontext,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13234,8 +13250,8 @@ pub fn write_cuGraphAddMemsetNode(
}
pub fn write_cuGraphMemsetNodeGetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUDA_MEMSET_NODE_PARAMS,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUDA_MEMSET_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13259,8 +13275,8 @@ pub fn write_cuGraphMemsetNodeGetParams(
}
pub fn write_cuGraphMemsetNodeSetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_MEMSET_NODE_PARAMS,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_MEMSET_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13284,11 +13300,11 @@ pub fn write_cuGraphMemsetNodeSetParams(
}
pub fn write_cuGraphAddHostNode(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *const cuda_types::CUDA_HOST_NODE_PARAMS,
+ nodeParams: *const cuda_types::cuda::CUDA_HOST_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13334,8 +13350,8 @@ pub fn write_cuGraphAddHostNode(
}
pub fn write_cuGraphHostNodeGetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUDA_HOST_NODE_PARAMS,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUDA_HOST_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13359,8 +13375,8 @@ pub fn write_cuGraphHostNodeGetParams(
}
pub fn write_cuGraphHostNodeSetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_HOST_NODE_PARAMS,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_HOST_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13384,11 +13400,11 @@ pub fn write_cuGraphHostNodeSetParams(
}
pub fn write_cuGraphAddChildGraphNode(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- childGraph: cuda_types::CUgraph,
+ childGraph: cuda_types::cuda::CUgraph,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13439,8 +13455,8 @@ pub fn write_cuGraphAddChildGraphNode(
}
pub fn write_cuGraphChildGraphNodeGetGraph(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- phGraph: *mut cuda_types::CUgraph,
+ hNode: cuda_types::cuda::CUgraphNode,
+ phGraph: *mut cuda_types::cuda::CUgraph,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13464,9 +13480,9 @@ pub fn write_cuGraphChildGraphNodeGetGraph(
}
pub fn write_cuGraphAddEmptyNode(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -13504,11 +13520,11 @@ pub fn write_cuGraphAddEmptyNode(
}
pub fn write_cuGraphAddEventRecordNode(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- event: cuda_types::CUevent,
+ event: cuda_types::cuda::CUevent,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13559,8 +13575,8 @@ pub fn write_cuGraphAddEventRecordNode(
}
pub fn write_cuGraphEventRecordNodeGetEvent(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- event_out: *mut cuda_types::CUevent,
+ hNode: cuda_types::cuda::CUgraphNode,
+ event_out: *mut cuda_types::cuda::CUevent,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13584,8 +13600,8 @@ pub fn write_cuGraphEventRecordNodeGetEvent(
}
pub fn write_cuGraphEventRecordNodeSetEvent(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- event: cuda_types::CUevent,
+ hNode: cuda_types::cuda::CUgraphNode,
+ event: cuda_types::cuda::CUevent,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13609,11 +13625,11 @@ pub fn write_cuGraphEventRecordNodeSetEvent(
}
pub fn write_cuGraphAddEventWaitNode(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- event: cuda_types::CUevent,
+ event: cuda_types::cuda::CUevent,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13664,8 +13680,8 @@ pub fn write_cuGraphAddEventWaitNode(
}
pub fn write_cuGraphEventWaitNodeGetEvent(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- event_out: *mut cuda_types::CUevent,
+ hNode: cuda_types::cuda::CUgraphNode,
+ event_out: *mut cuda_types::cuda::CUevent,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13689,8 +13705,8 @@ pub fn write_cuGraphEventWaitNodeGetEvent(
}
pub fn write_cuGraphEventWaitNodeSetEvent(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- event: cuda_types::CUevent,
+ hNode: cuda_types::cuda::CUgraphNode,
+ event: cuda_types::cuda::CUevent,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13714,11 +13730,11 @@ pub fn write_cuGraphEventWaitNodeSetEvent(
}
pub fn write_cuGraphAddExternalSemaphoresSignalNode(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *const cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
+ nodeParams: *const cuda_types::cuda::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13769,8 +13785,8 @@ pub fn write_cuGraphAddExternalSemaphoresSignalNode(
}
pub fn write_cuGraphExternalSemaphoresSignalNodeGetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- params_out: *mut cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
+ hNode: cuda_types::cuda::CUgraphNode,
+ params_out: *mut cuda_types::cuda::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13794,8 +13810,8 @@ pub fn write_cuGraphExternalSemaphoresSignalNodeGetParams(
}
pub fn write_cuGraphExternalSemaphoresSignalNodeSetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13819,11 +13835,11 @@ pub fn write_cuGraphExternalSemaphoresSignalNodeSetParams(
}
pub fn write_cuGraphAddExternalSemaphoresWaitNode(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *const cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
+ nodeParams: *const cuda_types::cuda::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13874,8 +13890,8 @@ pub fn write_cuGraphAddExternalSemaphoresWaitNode(
}
pub fn write_cuGraphExternalSemaphoresWaitNodeGetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- params_out: *mut cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
+ hNode: cuda_types::cuda::CUgraphNode,
+ params_out: *mut cuda_types::cuda::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13899,8 +13915,8 @@ pub fn write_cuGraphExternalSemaphoresWaitNodeGetParams(
}
pub fn write_cuGraphExternalSemaphoresWaitNodeSetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13924,11 +13940,11 @@ pub fn write_cuGraphExternalSemaphoresWaitNodeSetParams(
}
pub fn write_cuGraphAddBatchMemOpNode(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *const cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS,
+ nodeParams: *const cuda_types::cuda::CUDA_BATCH_MEM_OP_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -13979,8 +13995,8 @@ pub fn write_cuGraphAddBatchMemOpNode(
}
pub fn write_cuGraphBatchMemOpNodeGetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- nodeParams_out: *mut cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams_out: *mut cuda_types::cuda::CUDA_BATCH_MEM_OP_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -14004,8 +14020,8 @@ pub fn write_cuGraphBatchMemOpNodeGetParams(
}
pub fn write_cuGraphBatchMemOpNodeSetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_BATCH_MEM_OP_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -14029,9 +14045,9 @@ pub fn write_cuGraphBatchMemOpNodeSetParams(
}
pub fn write_cuGraphExecBatchMemOpNodeSetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_BATCH_MEM_OP_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -14064,11 +14080,11 @@ pub fn write_cuGraphExecBatchMemOpNodeSetParams(
}
pub fn write_cuGraphAddMemAllocNode(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *mut cuda_types::CUDA_MEM_ALLOC_NODE_PARAMS,
+ nodeParams: *mut cuda_types::cuda::CUDA_MEM_ALLOC_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -14119,8 +14135,8 @@ pub fn write_cuGraphAddMemAllocNode(
}
pub fn write_cuGraphMemAllocNodeGetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- params_out: *mut cuda_types::CUDA_MEM_ALLOC_NODE_PARAMS,
+ hNode: cuda_types::cuda::CUgraphNode,
+ params_out: *mut cuda_types::cuda::CUDA_MEM_ALLOC_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -14144,11 +14160,11 @@ pub fn write_cuGraphMemAllocNodeGetParams(
}
pub fn write_cuGraphAddMemFreeNode(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- dptr: cuda_types::CUdeviceptr,
+ dptr: cuda_types::cuda::CUdeviceptr,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -14194,8 +14210,8 @@ pub fn write_cuGraphAddMemFreeNode(
}
pub fn write_cuGraphMemFreeNodeGetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- dptr_out: *mut cuda_types::CUdeviceptr,
+ hNode: cuda_types::cuda::CUgraphNode,
+ dptr_out: *mut cuda_types::cuda::CUdeviceptr,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -14219,7 +14235,7 @@ pub fn write_cuGraphMemFreeNodeGetParams(
}
pub fn write_cuDeviceGraphMemTrim(
writer: &mut (impl std::io::Write + ?Sized),
- device: cuda_types::CUdevice,
+ device: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -14229,8 +14245,8 @@ pub fn write_cuDeviceGraphMemTrim(
}
pub fn write_cuDeviceGetGraphMemAttribute(
writer: &mut (impl std::io::Write + ?Sized),
- device: cuda_types::CUdevice,
- attr: cuda_types::CUgraphMem_attribute,
+ device: cuda_types::cuda::CUdevice,
+ attr: cuda_types::cuda::CUgraphMem_attribute,
value: *mut ::core::ffi::c_void,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -14264,8 +14280,8 @@ pub fn write_cuDeviceGetGraphMemAttribute(
}
pub fn write_cuDeviceSetGraphMemAttribute(
writer: &mut (impl std::io::Write + ?Sized),
- device: cuda_types::CUdevice,
- attr: cuda_types::CUgraphMem_attribute,
+ device: cuda_types::cuda::CUdevice,
+ attr: cuda_types::cuda::CUgraphMem_attribute,
value: *mut ::core::ffi::c_void,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -14299,8 +14315,8 @@ pub fn write_cuDeviceSetGraphMemAttribute(
}
pub fn write_cuGraphClone(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphClone: *mut cuda_types::CUgraph,
- originalGraph: cuda_types::CUgraph,
+ phGraphClone: *mut cuda_types::cuda::CUgraph,
+ originalGraph: cuda_types::cuda::CUgraph,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -14314,9 +14330,9 @@ pub fn write_cuGraphClone(
}
pub fn write_cuGraphNodeFindInClone(
writer: &mut (impl std::io::Write + ?Sized),
- phNode: *mut cuda_types::CUgraphNode,
- hOriginalNode: cuda_types::CUgraphNode,
- hClonedGraph: cuda_types::CUgraph,
+ phNode: *mut cuda_types::cuda::CUgraphNode,
+ hOriginalNode: cuda_types::cuda::CUgraphNode,
+ hClonedGraph: cuda_types::cuda::CUgraph,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -14349,8 +14365,8 @@ pub fn write_cuGraphNodeFindInClone(
}
pub fn write_cuGraphNodeGetType(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- type_: *mut cuda_types::CUgraphNodeType,
+ hNode: cuda_types::cuda::CUgraphNode,
+ type_: *mut cuda_types::cuda::CUgraphNodeType,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -14364,8 +14380,8 @@ pub fn write_cuGraphNodeGetType(
}
pub fn write_cuGraphGetNodes(
writer: &mut (impl std::io::Write + ?Sized),
- hGraph: cuda_types::CUgraph,
- nodes: *mut cuda_types::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ nodes: *mut cuda_types::cuda::CUgraphNode,
numNodes: *mut usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -14384,8 +14400,8 @@ pub fn write_cuGraphGetNodes(
}
pub fn write_cuGraphGetRootNodes(
writer: &mut (impl std::io::Write + ?Sized),
- hGraph: cuda_types::CUgraph,
- rootNodes: *mut cuda_types::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ rootNodes: *mut cuda_types::cuda::CUgraphNode,
numRootNodes: *mut usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -14414,9 +14430,9 @@ pub fn write_cuGraphGetRootNodes(
}
pub fn write_cuGraphGetEdges(
writer: &mut (impl std::io::Write + ?Sized),
- hGraph: cuda_types::CUgraph,
- from: *mut cuda_types::CUgraphNode,
- to: *mut cuda_types::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ from: *mut cuda_types::cuda::CUgraphNode,
+ to: *mut cuda_types::cuda::CUgraphNode,
numEdges: *mut usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -14439,10 +14455,10 @@ pub fn write_cuGraphGetEdges(
}
pub fn write_cuGraphGetEdges_v2(
writer: &mut (impl std::io::Write + ?Sized),
- hGraph: cuda_types::CUgraph,
- from: *mut cuda_types::CUgraphNode,
- to: *mut cuda_types::CUgraphNode,
- edgeData: *mut cuda_types::CUgraphEdgeData,
+ hGraph: cuda_types::cuda::CUgraph,
+ from: *mut cuda_types::cuda::CUgraphNode,
+ to: *mut cuda_types::cuda::CUgraphNode,
+ edgeData: *mut cuda_types::cuda::CUgraphEdgeData,
numEdges: *mut usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -14469,8 +14485,8 @@ pub fn write_cuGraphGetEdges_v2(
}
pub fn write_cuGraphNodeGetDependencies(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- dependencies: *mut cuda_types::CUgraphNode,
+ hNode: cuda_types::cuda::CUgraphNode,
+ dependencies: *mut cuda_types::cuda::CUgraphNode,
numDependencies: *mut usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -14504,9 +14520,9 @@ pub fn write_cuGraphNodeGetDependencies(
}
pub fn write_cuGraphNodeGetDependencies_v2(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- dependencies: *mut cuda_types::CUgraphNode,
- edgeData: *mut cuda_types::CUgraphEdgeData,
+ hNode: cuda_types::cuda::CUgraphNode,
+ dependencies: *mut cuda_types::cuda::CUgraphNode,
+ edgeData: *mut cuda_types::cuda::CUgraphEdgeData,
numDependencies: *mut usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -14549,8 +14565,8 @@ pub fn write_cuGraphNodeGetDependencies_v2(
}
pub fn write_cuGraphNodeGetDependentNodes(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- dependentNodes: *mut cuda_types::CUgraphNode,
+ hNode: cuda_types::cuda::CUgraphNode,
+ dependentNodes: *mut cuda_types::cuda::CUgraphNode,
numDependentNodes: *mut usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -14584,9 +14600,9 @@ pub fn write_cuGraphNodeGetDependentNodes(
}
pub fn write_cuGraphNodeGetDependentNodes_v2(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- dependentNodes: *mut cuda_types::CUgraphNode,
- edgeData: *mut cuda_types::CUgraphEdgeData,
+ hNode: cuda_types::cuda::CUgraphNode,
+ dependentNodes: *mut cuda_types::cuda::CUgraphNode,
+ edgeData: *mut cuda_types::cuda::CUgraphEdgeData,
numDependentNodes: *mut usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -14629,9 +14645,9 @@ pub fn write_cuGraphNodeGetDependentNodes_v2(
}
pub fn write_cuGraphAddDependencies(
writer: &mut (impl std::io::Write + ?Sized),
- hGraph: cuda_types::CUgraph,
- from: *const cuda_types::CUgraphNode,
- to: *const cuda_types::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ from: *const cuda_types::cuda::CUgraphNode,
+ to: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -14664,10 +14680,10 @@ pub fn write_cuGraphAddDependencies(
}
pub fn write_cuGraphAddDependencies_v2(
writer: &mut (impl std::io::Write + ?Sized),
- hGraph: cuda_types::CUgraph,
- from: *const cuda_types::CUgraphNode,
- to: *const cuda_types::CUgraphNode,
- edgeData: *const cuda_types::CUgraphEdgeData,
+ hGraph: cuda_types::cuda::CUgraph,
+ from: *const cuda_types::cuda::CUgraphNode,
+ to: *const cuda_types::cuda::CUgraphNode,
+ edgeData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -14719,9 +14735,9 @@ pub fn write_cuGraphAddDependencies_v2(
}
pub fn write_cuGraphRemoveDependencies(
writer: &mut (impl std::io::Write + ?Sized),
- hGraph: cuda_types::CUgraph,
- from: *const cuda_types::CUgraphNode,
- to: *const cuda_types::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ from: *const cuda_types::cuda::CUgraphNode,
+ to: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -14764,10 +14780,10 @@ pub fn write_cuGraphRemoveDependencies(
}
pub fn write_cuGraphRemoveDependencies_v2(
writer: &mut (impl std::io::Write + ?Sized),
- hGraph: cuda_types::CUgraph,
- from: *const cuda_types::CUgraphNode,
- to: *const cuda_types::CUgraphNode,
- edgeData: *const cuda_types::CUgraphEdgeData,
+ hGraph: cuda_types::cuda::CUgraph,
+ from: *const cuda_types::cuda::CUgraphNode,
+ to: *const cuda_types::cuda::CUgraphNode,
+ edgeData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -14819,7 +14835,7 @@ pub fn write_cuGraphRemoveDependencies_v2(
}
pub fn write_cuGraphDestroyNode(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
+ hNode: cuda_types::cuda::CUgraphNode,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -14829,8 +14845,8 @@ pub fn write_cuGraphDestroyNode(
}
pub fn write_cuGraphInstantiateWithFlags(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphExec: *mut cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
+ phGraphExec: *mut cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
flags: ::core::ffi::c_ulonglong,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -14864,9 +14880,9 @@ pub fn write_cuGraphInstantiateWithFlags(
}
pub fn write_cuGraphInstantiateWithParams_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphExec: *mut cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
- instantiateParams: *mut cuda_types::CUDA_GRAPH_INSTANTIATE_PARAMS,
+ phGraphExec: *mut cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
+ instantiateParams: *mut cuda_types::cuda::CUDA_GRAPH_INSTANTIATE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -14899,8 +14915,8 @@ pub fn write_cuGraphInstantiateWithParams_ptsz(
}
pub fn write_cuGraphExecGetFlags(
writer: &mut (impl std::io::Write + ?Sized),
- hGraphExec: cuda_types::CUgraphExec,
- flags: *mut cuda_types::cuuint64_t,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ flags: *mut cuda_types::cuda::cuuint64_t,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -14919,9 +14935,9 @@ pub fn write_cuGraphExecGetFlags(
}
pub fn write_cuGraphExecKernelNodeSetParams_v2(
writer: &mut (impl std::io::Write + ?Sized),
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -14954,10 +14970,10 @@ pub fn write_cuGraphExecKernelNodeSetParams_v2(
}
pub fn write_cuGraphExecMemcpyNodeSetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- copyParams: *const cuda_types::CUDA_MEMCPY3D,
- ctx: cuda_types::CUcontext,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ copyParams: *const cuda_types::cuda::CUDA_MEMCPY3D,
+ ctx: cuda_types::cuda::CUcontext,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -14999,10 +15015,10 @@ pub fn write_cuGraphExecMemcpyNodeSetParams(
}
pub fn write_cuGraphExecMemsetNodeSetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- memsetParams: *const cuda_types::CUDA_MEMSET_NODE_PARAMS,
- ctx: cuda_types::CUcontext,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ memsetParams: *const cuda_types::cuda::CUDA_MEMSET_NODE_PARAMS,
+ ctx: cuda_types::cuda::CUcontext,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -15044,9 +15060,9 @@ pub fn write_cuGraphExecMemsetNodeSetParams(
}
pub fn write_cuGraphExecHostNodeSetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_HOST_NODE_PARAMS,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_HOST_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -15079,9 +15095,9 @@ pub fn write_cuGraphExecHostNodeSetParams(
}
pub fn write_cuGraphExecChildGraphNodeSetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- childGraph: cuda_types::CUgraph,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ childGraph: cuda_types::cuda::CUgraph,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -15114,9 +15130,9 @@ pub fn write_cuGraphExecChildGraphNodeSetParams(
}
pub fn write_cuGraphExecEventRecordNodeSetEvent(
writer: &mut (impl std::io::Write + ?Sized),
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- event: cuda_types::CUevent,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ event: cuda_types::cuda::CUevent,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -15149,9 +15165,9 @@ pub fn write_cuGraphExecEventRecordNodeSetEvent(
}
pub fn write_cuGraphExecEventWaitNodeSetEvent(
writer: &mut (impl std::io::Write + ?Sized),
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- event: cuda_types::CUevent,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ event: cuda_types::cuda::CUevent,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -15184,9 +15200,9 @@ pub fn write_cuGraphExecEventWaitNodeSetEvent(
}
pub fn write_cuGraphExecExternalSemaphoresSignalNodeSetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -15219,9 +15235,9 @@ pub fn write_cuGraphExecExternalSemaphoresSignalNodeSetParams(
}
pub fn write_cuGraphExecExternalSemaphoresWaitNodeSetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -15254,8 +15270,8 @@ pub fn write_cuGraphExecExternalSemaphoresWaitNodeSetParams(
}
pub fn write_cuGraphNodeSetEnabled(
writer: &mut (impl std::io::Write + ?Sized),
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
isEnabled: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -15284,8 +15300,8 @@ pub fn write_cuGraphNodeSetEnabled(
}
pub fn write_cuGraphNodeGetEnabled(
writer: &mut (impl std::io::Write + ?Sized),
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
isEnabled: *mut ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -15314,8 +15330,8 @@ pub fn write_cuGraphNodeGetEnabled(
}
pub fn write_cuGraphUpload_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hGraphExec: cuda_types::CUgraphExec,
- hStream: cuda_types::CUstream,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -15334,8 +15350,8 @@ pub fn write_cuGraphUpload_ptsz(
}
pub fn write_cuGraphLaunch_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hGraphExec: cuda_types::CUgraphExec,
- hStream: cuda_types::CUstream,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -15354,7 +15370,7 @@ pub fn write_cuGraphLaunch_ptsz(
}
pub fn write_cuGraphExecDestroy(
writer: &mut (impl std::io::Write + ?Sized),
- hGraphExec: cuda_types::CUgraphExec,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -15369,7 +15385,7 @@ pub fn write_cuGraphExecDestroy(
}
pub fn write_cuGraphDestroy(
writer: &mut (impl std::io::Write + ?Sized),
- hGraph: cuda_types::CUgraph,
+ hGraph: cuda_types::cuda::CUgraph,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -15379,9 +15395,9 @@ pub fn write_cuGraphDestroy(
}
pub fn write_cuGraphExecUpdate_v2(
writer: &mut (impl std::io::Write + ?Sized),
- hGraphExec: cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
- resultInfo: *mut cuda_types::CUgraphExecUpdateResultInfo,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
+ resultInfo: *mut cuda_types::cuda::CUgraphExecUpdateResultInfo,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -15409,8 +15425,8 @@ pub fn write_cuGraphExecUpdate_v2(
}
pub fn write_cuGraphKernelNodeCopyAttributes(
writer: &mut (impl std::io::Write + ?Sized),
- dst: cuda_types::CUgraphNode,
- src: cuda_types::CUgraphNode,
+ dst: cuda_types::cuda::CUgraphNode,
+ src: cuda_types::cuda::CUgraphNode,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -15434,7 +15450,7 @@ pub fn write_cuGraphKernelNodeCopyAttributes(
}
pub fn write_cuGraphDebugDotPrint(
writer: &mut (impl std::io::Write + ?Sized),
- hGraph: cuda_types::CUgraph,
+ hGraph: cuda_types::cuda::CUgraph,
path: *const ::core::ffi::c_char,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -15454,9 +15470,9 @@ pub fn write_cuGraphDebugDotPrint(
}
pub fn write_cuUserObjectCreate(
writer: &mut (impl std::io::Write + ?Sized),
- object_out: *mut cuda_types::CUuserObject,
+ object_out: *mut cuda_types::cuda::CUuserObject,
ptr: *mut ::core::ffi::c_void,
- destroy: cuda_types::CUhostFn,
+ destroy: cuda_types::cuda::CUhostFn,
initialRefcount: ::core::ffi::c_uint,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -15494,7 +15510,7 @@ pub fn write_cuUserObjectCreate(
}
pub fn write_cuUserObjectRetain(
writer: &mut (impl std::io::Write + ?Sized),
- object: cuda_types::CUuserObject,
+ object: cuda_types::cuda::CUuserObject,
count: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -15509,7 +15525,7 @@ pub fn write_cuUserObjectRetain(
}
pub fn write_cuUserObjectRelease(
writer: &mut (impl std::io::Write + ?Sized),
- object: cuda_types::CUuserObject,
+ object: cuda_types::cuda::CUuserObject,
count: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -15524,8 +15540,8 @@ pub fn write_cuUserObjectRelease(
}
pub fn write_cuGraphRetainUserObject(
writer: &mut (impl std::io::Write + ?Sized),
- graph: cuda_types::CUgraph,
- object: cuda_types::CUuserObject,
+ graph: cuda_types::cuda::CUgraph,
+ object: cuda_types::cuda::CUuserObject,
count: ::core::ffi::c_uint,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -15569,8 +15585,8 @@ pub fn write_cuGraphRetainUserObject(
}
pub fn write_cuGraphReleaseUserObject(
writer: &mut (impl std::io::Write + ?Sized),
- graph: cuda_types::CUgraph,
- object: cuda_types::CUuserObject,
+ graph: cuda_types::cuda::CUgraph,
+ object: cuda_types::cuda::CUuserObject,
count: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -15604,11 +15620,11 @@ pub fn write_cuGraphReleaseUserObject(
}
pub fn write_cuGraphAddNode(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *mut cuda_types::CUgraphNodeParams,
+ nodeParams: *mut cuda_types::cuda::CUgraphNodeParams,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -15639,12 +15655,12 @@ pub fn write_cuGraphAddNode(
}
pub fn write_cuGraphAddNode_v2(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
- dependencyData: *const cuda_types::CUgraphEdgeData,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
+ dependencyData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
- nodeParams: *mut cuda_types::CUgraphNodeParams,
+ nodeParams: *mut cuda_types::cuda::CUgraphNodeParams,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -15699,8 +15715,8 @@ pub fn write_cuGraphAddNode_v2(
}
pub fn write_cuGraphNodeSetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUgraphNodeParams,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUgraphNodeParams,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -15719,9 +15735,9 @@ pub fn write_cuGraphNodeSetParams(
}
pub fn write_cuGraphExecNodeSetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUgraphNodeParams,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUgraphNodeParams,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -15754,9 +15770,9 @@ pub fn write_cuGraphExecNodeSetParams(
}
pub fn write_cuGraphConditionalHandleCreate(
writer: &mut (impl std::io::Write + ?Sized),
- pHandle_out: *mut cuda_types::CUgraphConditionalHandle,
- hGraph: cuda_types::CUgraph,
- ctx: cuda_types::CUcontext,
+ pHandle_out: *mut cuda_types::cuda::CUgraphConditionalHandle,
+ hGraph: cuda_types::cuda::CUgraph,
+ ctx: cuda_types::cuda::CUcontext,
defaultLaunchValue: ::core::ffi::c_uint,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -15810,7 +15826,7 @@ pub fn write_cuGraphConditionalHandleCreate(
pub fn write_cuOccupancyMaxActiveBlocksPerMultiprocessor(
writer: &mut (impl std::io::Write + ?Sized),
numBlocks: *mut ::core::ffi::c_int,
- func: cuda_types::CUfunction,
+ func: cuda_types::cuda::CUfunction,
blockSize: ::core::ffi::c_int,
dynamicSMemSize: usize,
) -> std::io::Result<()> {
@@ -15855,7 +15871,7 @@ pub fn write_cuOccupancyMaxActiveBlocksPerMultiprocessor(
pub fn write_cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(
writer: &mut (impl std::io::Write + ?Sized),
numBlocks: *mut ::core::ffi::c_int,
- func: cuda_types::CUfunction,
+ func: cuda_types::cuda::CUfunction,
blockSize: ::core::ffi::c_int,
dynamicSMemSize: usize,
flags: ::core::ffi::c_uint,
@@ -15911,8 +15927,8 @@ pub fn write_cuOccupancyMaxPotentialBlockSize(
writer: &mut (impl std::io::Write + ?Sized),
minGridSize: *mut ::core::ffi::c_int,
blockSize: *mut ::core::ffi::c_int,
- func: cuda_types::CUfunction,
- blockSizeToDynamicSMemSize: cuda_types::CUoccupancyB2DSize,
+ func: cuda_types::cuda::CUfunction,
+ blockSizeToDynamicSMemSize: cuda_types::cuda::CUoccupancyB2DSize,
dynamicSMemSize: usize,
blockSizeLimit: ::core::ffi::c_int,
) -> std::io::Result<()> {
@@ -15976,8 +15992,8 @@ pub fn write_cuOccupancyMaxPotentialBlockSizeWithFlags(
writer: &mut (impl std::io::Write + ?Sized),
minGridSize: *mut ::core::ffi::c_int,
blockSize: *mut ::core::ffi::c_int,
- func: cuda_types::CUfunction,
- blockSizeToDynamicSMemSize: cuda_types::CUoccupancyB2DSize,
+ func: cuda_types::cuda::CUfunction,
+ blockSizeToDynamicSMemSize: cuda_types::cuda::CUoccupancyB2DSize,
dynamicSMemSize: usize,
blockSizeLimit: ::core::ffi::c_int,
flags: ::core::ffi::c_uint,
@@ -16050,7 +16066,7 @@ pub fn write_cuOccupancyMaxPotentialBlockSizeWithFlags(
pub fn write_cuOccupancyAvailableDynamicSMemPerBlock(
writer: &mut (impl std::io::Write + ?Sized),
dynamicSmemSize: *mut usize,
- func: cuda_types::CUfunction,
+ func: cuda_types::cuda::CUfunction,
numBlocks: ::core::ffi::c_int,
blockSize: ::core::ffi::c_int,
) -> std::io::Result<()> {
@@ -16095,8 +16111,8 @@ pub fn write_cuOccupancyAvailableDynamicSMemPerBlock(
pub fn write_cuOccupancyMaxPotentialClusterSize(
writer: &mut (impl std::io::Write + ?Sized),
clusterSize: *mut ::core::ffi::c_int,
- func: cuda_types::CUfunction,
- config: *const cuda_types::CUlaunchConfig,
+ func: cuda_types::cuda::CUfunction,
+ config: *const cuda_types::cuda::CUlaunchConfig,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16130,8 +16146,8 @@ pub fn write_cuOccupancyMaxPotentialClusterSize(
pub fn write_cuOccupancyMaxActiveClusters(
writer: &mut (impl std::io::Write + ?Sized),
numClusters: *mut ::core::ffi::c_int,
- func: cuda_types::CUfunction,
- config: *const cuda_types::CUlaunchConfig,
+ func: cuda_types::cuda::CUfunction,
+ config: *const cuda_types::cuda::CUlaunchConfig,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16164,8 +16180,8 @@ pub fn write_cuOccupancyMaxActiveClusters(
}
pub fn write_cuTexRefSetArray(
writer: &mut (impl std::io::Write + ?Sized),
- hTexRef: cuda_types::CUtexref,
- hArray: cuda_types::CUarray,
+ hTexRef: cuda_types::cuda::CUtexref,
+ hArray: cuda_types::cuda::CUarray,
Flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -16184,8 +16200,8 @@ pub fn write_cuTexRefSetArray(
}
pub fn write_cuTexRefSetMipmappedArray(
writer: &mut (impl std::io::Write + ?Sized),
- hTexRef: cuda_types::CUtexref,
- hMipmappedArray: cuda_types::CUmipmappedArray,
+ hTexRef: cuda_types::cuda::CUtexref,
+ hMipmappedArray: cuda_types::cuda::CUmipmappedArray,
Flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -16220,8 +16236,8 @@ pub fn write_cuTexRefSetMipmappedArray(
pub fn write_cuTexRefSetAddress_v2(
writer: &mut (impl std::io::Write + ?Sized),
ByteOffset: *mut usize,
- hTexRef: cuda_types::CUtexref,
- dptr: cuda_types::CUdeviceptr,
+ hTexRef: cuda_types::cuda::CUtexref,
+ dptr: cuda_types::cuda::CUdeviceptr,
bytes: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -16254,9 +16270,9 @@ pub fn write_cuTexRefSetAddress_v2(
}
pub fn write_cuTexRefSetAddress2D_v3(
writer: &mut (impl std::io::Write + ?Sized),
- hTexRef: cuda_types::CUtexref,
- desc: *const cuda_types::CUDA_ARRAY_DESCRIPTOR,
- dptr: cuda_types::CUdeviceptr,
+ hTexRef: cuda_types::cuda::CUtexref,
+ desc: *const cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR,
+ dptr: cuda_types::cuda::CUdeviceptr,
Pitch: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -16299,8 +16315,8 @@ pub fn write_cuTexRefSetAddress2D_v3(
}
pub fn write_cuTexRefSetFormat(
writer: &mut (impl std::io::Write + ?Sized),
- hTexRef: cuda_types::CUtexref,
- fmt: cuda_types::CUarray_format,
+ hTexRef: cuda_types::cuda::CUtexref,
+ fmt: cuda_types::cuda::CUarray_format,
NumPackedComponents: ::core::ffi::c_int,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -16324,9 +16340,9 @@ pub fn write_cuTexRefSetFormat(
}
pub fn write_cuTexRefSetAddressMode(
writer: &mut (impl std::io::Write + ?Sized),
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
dim: ::core::ffi::c_int,
- am: cuda_types::CUaddress_mode,
+ am: cuda_types::cuda::CUaddress_mode,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16349,8 +16365,8 @@ pub fn write_cuTexRefSetAddressMode(
}
pub fn write_cuTexRefSetFilterMode(
writer: &mut (impl std::io::Write + ?Sized),
- hTexRef: cuda_types::CUtexref,
- fm: cuda_types::CUfilter_mode,
+ hTexRef: cuda_types::cuda::CUtexref,
+ fm: cuda_types::cuda::CUfilter_mode,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16369,8 +16385,8 @@ pub fn write_cuTexRefSetFilterMode(
}
pub fn write_cuTexRefSetMipmapFilterMode(
writer: &mut (impl std::io::Write + ?Sized),
- hTexRef: cuda_types::CUtexref,
- fm: cuda_types::CUfilter_mode,
+ hTexRef: cuda_types::cuda::CUtexref,
+ fm: cuda_types::cuda::CUfilter_mode,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16394,7 +16410,7 @@ pub fn write_cuTexRefSetMipmapFilterMode(
}
pub fn write_cuTexRefSetMipmapLevelBias(
writer: &mut (impl std::io::Write + ?Sized),
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
bias: f32,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -16419,7 +16435,7 @@ pub fn write_cuTexRefSetMipmapLevelBias(
}
pub fn write_cuTexRefSetMipmapLevelClamp(
writer: &mut (impl std::io::Write + ?Sized),
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
minMipmapLevelClamp: f32,
maxMipmapLevelClamp: f32,
) -> std::io::Result<()> {
@@ -16454,7 +16470,7 @@ pub fn write_cuTexRefSetMipmapLevelClamp(
}
pub fn write_cuTexRefSetMaxAnisotropy(
writer: &mut (impl std::io::Write + ?Sized),
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
maxAniso: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -16479,7 +16495,7 @@ pub fn write_cuTexRefSetMaxAnisotropy(
}
pub fn write_cuTexRefSetBorderColor(
writer: &mut (impl std::io::Write + ?Sized),
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
pBorderColor: *mut f32,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -16504,7 +16520,7 @@ pub fn write_cuTexRefSetBorderColor(
}
pub fn write_cuTexRefSetFlags(
writer: &mut (impl std::io::Write + ?Sized),
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
Flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -16519,8 +16535,8 @@ pub fn write_cuTexRefSetFlags(
}
pub fn write_cuTexRefGetAddress_v2(
writer: &mut (impl std::io::Write + ?Sized),
- pdptr: *mut cuda_types::CUdeviceptr,
- hTexRef: cuda_types::CUtexref,
+ pdptr: *mut cuda_types::cuda::CUdeviceptr,
+ hTexRef: cuda_types::cuda::CUtexref,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16539,8 +16555,8 @@ pub fn write_cuTexRefGetAddress_v2(
}
pub fn write_cuTexRefGetArray(
writer: &mut (impl std::io::Write + ?Sized),
- phArray: *mut cuda_types::CUarray,
- hTexRef: cuda_types::CUtexref,
+ phArray: *mut cuda_types::cuda::CUarray,
+ hTexRef: cuda_types::cuda::CUtexref,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16554,8 +16570,8 @@ pub fn write_cuTexRefGetArray(
}
pub fn write_cuTexRefGetMipmappedArray(
writer: &mut (impl std::io::Write + ?Sized),
- phMipmappedArray: *mut cuda_types::CUmipmappedArray,
- hTexRef: cuda_types::CUtexref,
+ phMipmappedArray: *mut cuda_types::cuda::CUmipmappedArray,
+ hTexRef: cuda_types::cuda::CUtexref,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16579,8 +16595,8 @@ pub fn write_cuTexRefGetMipmappedArray(
}
pub fn write_cuTexRefGetAddressMode(
writer: &mut (impl std::io::Write + ?Sized),
- pam: *mut cuda_types::CUaddress_mode,
- hTexRef: cuda_types::CUtexref,
+ pam: *mut cuda_types::cuda::CUaddress_mode,
+ hTexRef: cuda_types::cuda::CUtexref,
dim: ::core::ffi::c_int,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -16604,8 +16620,8 @@ pub fn write_cuTexRefGetAddressMode(
}
pub fn write_cuTexRefGetFilterMode(
writer: &mut (impl std::io::Write + ?Sized),
- pfm: *mut cuda_types::CUfilter_mode,
- hTexRef: cuda_types::CUtexref,
+ pfm: *mut cuda_types::cuda::CUfilter_mode,
+ hTexRef: cuda_types::cuda::CUtexref,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16624,9 +16640,9 @@ pub fn write_cuTexRefGetFilterMode(
}
pub fn write_cuTexRefGetFormat(
writer: &mut (impl std::io::Write + ?Sized),
- pFormat: *mut cuda_types::CUarray_format,
+ pFormat: *mut cuda_types::cuda::CUarray_format,
pNumChannels: *mut ::core::ffi::c_int,
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16649,8 +16665,8 @@ pub fn write_cuTexRefGetFormat(
}
pub fn write_cuTexRefGetMipmapFilterMode(
writer: &mut (impl std::io::Write + ?Sized),
- pfm: *mut cuda_types::CUfilter_mode,
- hTexRef: cuda_types::CUtexref,
+ pfm: *mut cuda_types::cuda::CUfilter_mode,
+ hTexRef: cuda_types::cuda::CUtexref,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16675,7 +16691,7 @@ pub fn write_cuTexRefGetMipmapFilterMode(
pub fn write_cuTexRefGetMipmapLevelBias(
writer: &mut (impl std::io::Write + ?Sized),
pbias: *mut f32,
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16701,7 +16717,7 @@ pub fn write_cuTexRefGetMipmapLevelClamp(
writer: &mut (impl std::io::Write + ?Sized),
pminMipmapLevelClamp: *mut f32,
pmaxMipmapLevelClamp: *mut f32,
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16735,7 +16751,7 @@ pub fn write_cuTexRefGetMipmapLevelClamp(
pub fn write_cuTexRefGetMaxAnisotropy(
writer: &mut (impl std::io::Write + ?Sized),
pmaxAniso: *mut ::core::ffi::c_int,
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16760,7 +16776,7 @@ pub fn write_cuTexRefGetMaxAnisotropy(
pub fn write_cuTexRefGetBorderColor(
writer: &mut (impl std::io::Write + ?Sized),
pBorderColor: *mut f32,
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16785,7 +16801,7 @@ pub fn write_cuTexRefGetBorderColor(
pub fn write_cuTexRefGetFlags(
writer: &mut (impl std::io::Write + ?Sized),
pFlags: *mut ::core::ffi::c_uint,
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16799,7 +16815,7 @@ pub fn write_cuTexRefGetFlags(
}
pub fn write_cuTexRefCreate(
writer: &mut (impl std::io::Write + ?Sized),
- pTexRef: *mut cuda_types::CUtexref,
+ pTexRef: *mut cuda_types::cuda::CUtexref,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16809,7 +16825,7 @@ pub fn write_cuTexRefCreate(
}
pub fn write_cuTexRefDestroy(
writer: &mut (impl std::io::Write + ?Sized),
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16819,8 +16835,8 @@ pub fn write_cuTexRefDestroy(
}
pub fn write_cuSurfRefSetArray(
writer: &mut (impl std::io::Write + ?Sized),
- hSurfRef: cuda_types::CUsurfref,
- hArray: cuda_types::CUarray,
+ hSurfRef: cuda_types::cuda::CUsurfref,
+ hArray: cuda_types::cuda::CUarray,
Flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -16839,8 +16855,8 @@ pub fn write_cuSurfRefSetArray(
}
pub fn write_cuSurfRefGetArray(
writer: &mut (impl std::io::Write + ?Sized),
- phArray: *mut cuda_types::CUarray,
- hSurfRef: cuda_types::CUsurfref,
+ phArray: *mut cuda_types::cuda::CUarray,
+ hSurfRef: cuda_types::cuda::CUsurfref,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16854,10 +16870,10 @@ pub fn write_cuSurfRefGetArray(
}
pub fn write_cuTexObjectCreate(
writer: &mut (impl std::io::Write + ?Sized),
- pTexObject: *mut cuda_types::CUtexObject,
- pResDesc: *const cuda_types::CUDA_RESOURCE_DESC,
- pTexDesc: *const cuda_types::CUDA_TEXTURE_DESC,
- pResViewDesc: *const cuda_types::CUDA_RESOURCE_VIEW_DESC,
+ pTexObject: *mut cuda_types::cuda::CUtexObject,
+ pResDesc: *const cuda_types::cuda::CUDA_RESOURCE_DESC,
+ pTexDesc: *const cuda_types::cuda::CUDA_TEXTURE_DESC,
+ pResViewDesc: *const cuda_types::cuda::CUDA_RESOURCE_VIEW_DESC,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16889,7 +16905,7 @@ pub fn write_cuTexObjectCreate(
}
pub fn write_cuTexObjectDestroy(
writer: &mut (impl std::io::Write + ?Sized),
- texObject: cuda_types::CUtexObject,
+ texObject: cuda_types::cuda::CUtexObject,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16904,8 +16920,8 @@ pub fn write_cuTexObjectDestroy(
}
pub fn write_cuTexObjectGetResourceDesc(
writer: &mut (impl std::io::Write + ?Sized),
- pResDesc: *mut cuda_types::CUDA_RESOURCE_DESC,
- texObject: cuda_types::CUtexObject,
+ pResDesc: *mut cuda_types::cuda::CUDA_RESOURCE_DESC,
+ texObject: cuda_types::cuda::CUtexObject,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16929,8 +16945,8 @@ pub fn write_cuTexObjectGetResourceDesc(
}
pub fn write_cuTexObjectGetTextureDesc(
writer: &mut (impl std::io::Write + ?Sized),
- pTexDesc: *mut cuda_types::CUDA_TEXTURE_DESC,
- texObject: cuda_types::CUtexObject,
+ pTexDesc: *mut cuda_types::cuda::CUDA_TEXTURE_DESC,
+ texObject: cuda_types::cuda::CUtexObject,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16954,8 +16970,8 @@ pub fn write_cuTexObjectGetTextureDesc(
}
pub fn write_cuTexObjectGetResourceViewDesc(
writer: &mut (impl std::io::Write + ?Sized),
- pResViewDesc: *mut cuda_types::CUDA_RESOURCE_VIEW_DESC,
- texObject: cuda_types::CUtexObject,
+ pResViewDesc: *mut cuda_types::cuda::CUDA_RESOURCE_VIEW_DESC,
+ texObject: cuda_types::cuda::CUtexObject,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16979,8 +16995,8 @@ pub fn write_cuTexObjectGetResourceViewDesc(
}
pub fn write_cuSurfObjectCreate(
writer: &mut (impl std::io::Write + ?Sized),
- pSurfObject: *mut cuda_types::CUsurfObject,
- pResDesc: *const cuda_types::CUDA_RESOURCE_DESC,
+ pSurfObject: *mut cuda_types::cuda::CUsurfObject,
+ pResDesc: *const cuda_types::cuda::CUDA_RESOURCE_DESC,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -16999,7 +17015,7 @@ pub fn write_cuSurfObjectCreate(
}
pub fn write_cuSurfObjectDestroy(
writer: &mut (impl std::io::Write + ?Sized),
- surfObject: cuda_types::CUsurfObject,
+ surfObject: cuda_types::cuda::CUsurfObject,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -17014,8 +17030,8 @@ pub fn write_cuSurfObjectDestroy(
}
pub fn write_cuSurfObjectGetResourceDesc(
writer: &mut (impl std::io::Write + ?Sized),
- pResDesc: *mut cuda_types::CUDA_RESOURCE_DESC,
- surfObject: cuda_types::CUsurfObject,
+ pResDesc: *mut cuda_types::cuda::CUDA_RESOURCE_DESC,
+ surfObject: cuda_types::cuda::CUsurfObject,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -17039,18 +17055,18 @@ pub fn write_cuSurfObjectGetResourceDesc(
}
pub fn write_cuTensorMapEncodeTiled(
writer: &mut (impl std::io::Write + ?Sized),
- tensorMap: *mut cuda_types::CUtensorMap,
- tensorDataType: cuda_types::CUtensorMapDataType,
- tensorRank: cuda_types::cuuint32_t,
+ tensorMap: *mut cuda_types::cuda::CUtensorMap,
+ tensorDataType: cuda_types::cuda::CUtensorMapDataType,
+ tensorRank: cuda_types::cuda::cuuint32_t,
globalAddress: *mut ::core::ffi::c_void,
- globalDim: *const cuda_types::cuuint64_t,
- globalStrides: *const cuda_types::cuuint64_t,
- boxDim: *const cuda_types::cuuint32_t,
- elementStrides: *const cuda_types::cuuint32_t,
- interleave: cuda_types::CUtensorMapInterleave,
- swizzle: cuda_types::CUtensorMapSwizzle,
- l2Promotion: cuda_types::CUtensorMapL2promotion,
- oobFill: cuda_types::CUtensorMapFloatOOBfill,
+ globalDim: *const cuda_types::cuda::cuuint64_t,
+ globalStrides: *const cuda_types::cuda::cuuint64_t,
+ boxDim: *const cuda_types::cuda::cuuint32_t,
+ elementStrides: *const cuda_types::cuda::cuuint32_t,
+ interleave: cuda_types::cuda::CUtensorMapInterleave,
+ swizzle: cuda_types::cuda::CUtensorMapSwizzle,
+ l2Promotion: cuda_types::cuda::CUtensorMapL2promotion,
+ oobFill: cuda_types::cuda::CUtensorMapFloatOOBfill,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -17164,21 +17180,21 @@ pub fn write_cuTensorMapEncodeTiled(
}
pub fn write_cuTensorMapEncodeIm2col(
writer: &mut (impl std::io::Write + ?Sized),
- tensorMap: *mut cuda_types::CUtensorMap,
- tensorDataType: cuda_types::CUtensorMapDataType,
- tensorRank: cuda_types::cuuint32_t,
+ tensorMap: *mut cuda_types::cuda::CUtensorMap,
+ tensorDataType: cuda_types::cuda::CUtensorMapDataType,
+ tensorRank: cuda_types::cuda::cuuint32_t,
globalAddress: *mut ::core::ffi::c_void,
- globalDim: *const cuda_types::cuuint64_t,
- globalStrides: *const cuda_types::cuuint64_t,
+ globalDim: *const cuda_types::cuda::cuuint64_t,
+ globalStrides: *const cuda_types::cuda::cuuint64_t,
pixelBoxLowerCorner: *const ::core::ffi::c_int,
pixelBoxUpperCorner: *const ::core::ffi::c_int,
- channelsPerPixel: cuda_types::cuuint32_t,
- pixelsPerColumn: cuda_types::cuuint32_t,
- elementStrides: *const cuda_types::cuuint32_t,
- interleave: cuda_types::CUtensorMapInterleave,
- swizzle: cuda_types::CUtensorMapSwizzle,
- l2Promotion: cuda_types::CUtensorMapL2promotion,
- oobFill: cuda_types::CUtensorMapFloatOOBfill,
+ channelsPerPixel: cuda_types::cuda::cuuint32_t,
+ pixelsPerColumn: cuda_types::cuda::cuuint32_t,
+ elementStrides: *const cuda_types::cuda::cuuint32_t,
+ interleave: cuda_types::cuda::CUtensorMapInterleave,
+ swizzle: cuda_types::cuda::CUtensorMapSwizzle,
+ l2Promotion: cuda_types::cuda::CUtensorMapL2promotion,
+ oobFill: cuda_types::cuda::CUtensorMapFloatOOBfill,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -17319,7 +17335,7 @@ pub fn write_cuTensorMapEncodeIm2col(
}
pub fn write_cuTensorMapReplaceAddress(
writer: &mut (impl std::io::Write + ?Sized),
- tensorMap: *mut cuda_types::CUtensorMap,
+ tensorMap: *mut cuda_types::cuda::CUtensorMap,
globalAddress: *mut ::core::ffi::c_void,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -17345,8 +17361,8 @@ pub fn write_cuTensorMapReplaceAddress(
pub fn write_cuDeviceCanAccessPeer(
writer: &mut (impl std::io::Write + ?Sized),
canAccessPeer: *mut ::core::ffi::c_int,
- dev: cuda_types::CUdevice,
- peerDev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
+ peerDev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -17374,7 +17390,7 @@ pub fn write_cuDeviceCanAccessPeer(
}
pub fn write_cuCtxEnablePeerAccess(
writer: &mut (impl std::io::Write + ?Sized),
- peerContext: cuda_types::CUcontext,
+ peerContext: cuda_types::cuda::CUcontext,
Flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -17394,7 +17410,7 @@ pub fn write_cuCtxEnablePeerAccess(
}
pub fn write_cuCtxDisablePeerAccess(
writer: &mut (impl std::io::Write + ?Sized),
- peerContext: cuda_types::CUcontext,
+ peerContext: cuda_types::cuda::CUcontext,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -17410,9 +17426,9 @@ pub fn write_cuCtxDisablePeerAccess(
pub fn write_cuDeviceGetP2PAttribute(
writer: &mut (impl std::io::Write + ?Sized),
value: *mut ::core::ffi::c_int,
- attrib: cuda_types::CUdevice_P2PAttribute,
- srcDevice: cuda_types::CUdevice,
- dstDevice: cuda_types::CUdevice,
+ attrib: cuda_types::cuda::CUdevice_P2PAttribute,
+ srcDevice: cuda_types::cuda::CUdevice,
+ dstDevice: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -17454,7 +17470,7 @@ pub fn write_cuDeviceGetP2PAttribute(
}
pub fn write_cuGraphicsUnregisterResource(
writer: &mut (impl std::io::Write + ?Sized),
- resource: cuda_types::CUgraphicsResource,
+ resource: cuda_types::cuda::CUgraphicsResource,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -17469,8 +17485,8 @@ pub fn write_cuGraphicsUnregisterResource(
}
pub fn write_cuGraphicsSubResourceGetMappedArray(
writer: &mut (impl std::io::Write + ?Sized),
- pArray: *mut cuda_types::CUarray,
- resource: cuda_types::CUgraphicsResource,
+ pArray: *mut cuda_types::cuda::CUarray,
+ resource: cuda_types::cuda::CUgraphicsResource,
arrayIndex: ::core::ffi::c_uint,
mipLevel: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -17514,8 +17530,8 @@ pub fn write_cuGraphicsSubResourceGetMappedArray(
}
pub fn write_cuGraphicsResourceGetMappedMipmappedArray(
writer: &mut (impl std::io::Write + ?Sized),
- pMipmappedArray: *mut cuda_types::CUmipmappedArray,
- resource: cuda_types::CUgraphicsResource,
+ pMipmappedArray: *mut cuda_types::cuda::CUmipmappedArray,
+ resource: cuda_types::cuda::CUgraphicsResource,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -17539,9 +17555,9 @@ pub fn write_cuGraphicsResourceGetMappedMipmappedArray(
}
pub fn write_cuGraphicsResourceGetMappedPointer_v2(
writer: &mut (impl std::io::Write + ?Sized),
- pDevPtr: *mut cuda_types::CUdeviceptr,
+ pDevPtr: *mut cuda_types::cuda::CUdeviceptr,
pSize: *mut usize,
- resource: cuda_types::CUgraphicsResource,
+ resource: cuda_types::cuda::CUgraphicsResource,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -17574,7 +17590,7 @@ pub fn write_cuGraphicsResourceGetMappedPointer_v2(
}
pub fn write_cuGraphicsResourceSetMapFlags_v2(
writer: &mut (impl std::io::Write + ?Sized),
- resource: cuda_types::CUgraphicsResource,
+ resource: cuda_types::cuda::CUgraphicsResource,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -17600,8 +17616,8 @@ pub fn write_cuGraphicsResourceSetMapFlags_v2(
pub fn write_cuGraphicsMapResources_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
count: ::core::ffi::c_uint,
- resources: *mut cuda_types::CUgraphicsResource,
- hStream: cuda_types::CUstream,
+ resources: *mut cuda_types::cuda::CUgraphicsResource,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -17635,8 +17651,8 @@ pub fn write_cuGraphicsMapResources_ptsz(
pub fn write_cuGraphicsUnmapResources_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
count: ::core::ffi::c_uint,
- resources: *mut cuda_types::CUgraphicsResource,
- hStream: cuda_types::CUstream,
+ resources: *mut cuda_types::cuda::CUgraphicsResource,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -17672,8 +17688,8 @@ pub fn write_cuGetProcAddress_v2(
symbol: *const ::core::ffi::c_char,
pfn: *mut *mut ::core::ffi::c_void,
cudaVersion: ::core::ffi::c_int,
- flags: cuda_types::cuuint64_t,
- symbolStatus: *mut cuda_types::CUdriverProcAddressQueryResult,
+ flags: cuda_types::cuda::cuuint64_t,
+ symbolStatus: *mut cuda_types::cuda::CUdriverProcAddressQueryResult,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -17707,7 +17723,7 @@ pub fn write_cuGetProcAddress_v2(
)?;
writer.write_all(b")")
}
-impl crate::format::CudaDisplay for cuda_types::CUcoredumpSettings_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUcoredumpSettings_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -17715,25 +17731,25 @@ impl crate::format::CudaDisplay for cuda_types::CUcoredumpSettings_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUcoredumpSettings_enum::CU_COREDUMP_ENABLE_ON_EXCEPTION => {
+ &cuda_types::cuda::CUcoredumpSettings_enum::CU_COREDUMP_ENABLE_ON_EXCEPTION => {
writer.write_all(stringify!(CU_COREDUMP_ENABLE_ON_EXCEPTION).as_bytes())
}
- &cuda_types::CUcoredumpSettings_enum::CU_COREDUMP_TRIGGER_HOST => {
+ &cuda_types::cuda::CUcoredumpSettings_enum::CU_COREDUMP_TRIGGER_HOST => {
writer.write_all(stringify!(CU_COREDUMP_TRIGGER_HOST).as_bytes())
}
- &cuda_types::CUcoredumpSettings_enum::CU_COREDUMP_LIGHTWEIGHT => {
+ &cuda_types::cuda::CUcoredumpSettings_enum::CU_COREDUMP_LIGHTWEIGHT => {
writer.write_all(stringify!(CU_COREDUMP_LIGHTWEIGHT).as_bytes())
}
- &cuda_types::CUcoredumpSettings_enum::CU_COREDUMP_ENABLE_USER_TRIGGER => {
+ &cuda_types::cuda::CUcoredumpSettings_enum::CU_COREDUMP_ENABLE_USER_TRIGGER => {
writer.write_all(stringify!(CU_COREDUMP_ENABLE_USER_TRIGGER).as_bytes())
}
- &cuda_types::CUcoredumpSettings_enum::CU_COREDUMP_FILE => {
+ &cuda_types::cuda::CUcoredumpSettings_enum::CU_COREDUMP_FILE => {
writer.write_all(stringify!(CU_COREDUMP_FILE).as_bytes())
}
- &cuda_types::CUcoredumpSettings_enum::CU_COREDUMP_PIPE => {
+ &cuda_types::cuda::CUcoredumpSettings_enum::CU_COREDUMP_PIPE => {
writer.write_all(stringify!(CU_COREDUMP_PIPE).as_bytes())
}
- &cuda_types::CUcoredumpSettings_enum::CU_COREDUMP_MAX => {
+ &cuda_types::cuda::CUcoredumpSettings_enum::CU_COREDUMP_MAX => {
writer.write_all(stringify!(CU_COREDUMP_MAX).as_bytes())
}
_ => write!(writer, "{}", self.0),
@@ -17742,7 +17758,7 @@ impl crate::format::CudaDisplay for cuda_types::CUcoredumpSettings_enum {
}
pub fn write_cuCoredumpGetAttribute(
writer: &mut (impl std::io::Write + ?Sized),
- attrib: cuda_types::CUcoredumpSettings,
+ attrib: cuda_types::cuda::CUcoredumpSettings,
value: *mut ::core::ffi::c_void,
size: *mut usize,
) -> std::io::Result<()> {
@@ -17772,7 +17788,7 @@ pub fn write_cuCoredumpGetAttribute(
}
pub fn write_cuCoredumpGetAttributeGlobal(
writer: &mut (impl std::io::Write + ?Sized),
- attrib: cuda_types::CUcoredumpSettings,
+ attrib: cuda_types::cuda::CUcoredumpSettings,
value: *mut ::core::ffi::c_void,
size: *mut usize,
) -> std::io::Result<()> {
@@ -17807,7 +17823,7 @@ pub fn write_cuCoredumpGetAttributeGlobal(
}
pub fn write_cuCoredumpSetAttribute(
writer: &mut (impl std::io::Write + ?Sized),
- attrib: cuda_types::CUcoredumpSettings,
+ attrib: cuda_types::cuda::CUcoredumpSettings,
value: *mut ::core::ffi::c_void,
size: *mut usize,
) -> std::io::Result<()> {
@@ -17837,7 +17853,7 @@ pub fn write_cuCoredumpSetAttribute(
}
pub fn write_cuCoredumpSetAttributeGlobal(
writer: &mut (impl std::io::Write + ?Sized),
- attrib: cuda_types::CUcoredumpSettings,
+ attrib: cuda_types::cuda::CUcoredumpSettings,
value: *mut ::core::ffi::c_void,
size: *mut usize,
) -> std::io::Result<()> {
@@ -17873,7 +17889,7 @@ pub fn write_cuCoredumpSetAttributeGlobal(
pub fn write_cuGetExportTable(
writer: &mut (impl std::io::Write + ?Sized),
ppExportTable: *mut *const ::core::ffi::c_void,
- pExportTableId: *const cuda_types::CUuuid,
+ pExportTableId: *const cuda_types::cuda::CUuuid,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -17895,7 +17911,7 @@ pub fn write_cuGetExportTable(
)?;
writer.write_all(b")")
}
-impl crate::format::CudaDisplay for cuda_types::CUgreenCtx {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUgreenCtx {
fn write(
&self,
_fn_name: &'static str,
@@ -17905,7 +17921,7 @@ impl crate::format::CudaDisplay for cuda_types::CUgreenCtx {
write!(writer, "{:p}", *self)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUdevResourceDesc {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUdevResourceDesc {
fn write(
&self,
_fn_name: &'static str,
@@ -17915,7 +17931,7 @@ impl crate::format::CudaDisplay for cuda_types::CUdevResourceDesc {
write!(writer, "{:p}", *self)
}
}
-impl crate::format::CudaDisplay for cuda_types::CUgreenCtxCreate_flags {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUgreenCtxCreate_flags {
fn write(
&self,
_fn_name: &'static str,
@@ -17923,14 +17939,14 @@ impl crate::format::CudaDisplay for cuda_types::CUgreenCtxCreate_flags {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUgreenCtxCreate_flags::CU_GREEN_CTX_DEFAULT_STREAM => {
+ &cuda_types::cuda::CUgreenCtxCreate_flags::CU_GREEN_CTX_DEFAULT_STREAM => {
writer.write_all(stringify!(CU_GREEN_CTX_DEFAULT_STREAM).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUdevResourceType {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUdevResourceType {
fn write(
&self,
_fn_name: &'static str,
@@ -17938,20 +17954,20 @@ impl crate::format::CudaDisplay for cuda_types::CUdevResourceType {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUdevResourceType::CU_DEV_RESOURCE_TYPE_INVALID => {
+ &cuda_types::cuda::CUdevResourceType::CU_DEV_RESOURCE_TYPE_INVALID => {
writer.write_all(stringify!(CU_DEV_RESOURCE_TYPE_INVALID).as_bytes())
}
- &cuda_types::CUdevResourceType::CU_DEV_RESOURCE_TYPE_SM => {
+ &cuda_types::cuda::CUdevResourceType::CU_DEV_RESOURCE_TYPE_SM => {
writer.write_all(stringify!(CU_DEV_RESOURCE_TYPE_SM).as_bytes())
}
- &cuda_types::CUdevResourceType::CU_DEV_RESOURCE_TYPE_MAX => {
+ &cuda_types::cuda::CUdevResourceType::CU_DEV_RESOURCE_TYPE_MAX => {
writer.write_all(stringify!(CU_DEV_RESOURCE_TYPE_MAX).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUdevSmResource_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUdevSmResource_st {
fn write(
&self,
_fn_name: &'static str,
@@ -17965,9 +17981,9 @@ impl crate::format::CudaDisplay for cuda_types::CUdevSmResource_st {
}
pub fn write_cuGreenCtxCreate(
writer: &mut (impl std::io::Write + ?Sized),
- phCtx: *mut cuda_types::CUgreenCtx,
- desc: cuda_types::CUdevResourceDesc,
- dev: cuda_types::CUdevice,
+ phCtx: *mut cuda_types::cuda::CUgreenCtx,
+ desc: cuda_types::cuda::CUdevResourceDesc,
+ dev: cuda_types::cuda::CUdevice,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -17990,7 +18006,7 @@ pub fn write_cuGreenCtxCreate(
}
pub fn write_cuGreenCtxDestroy(
writer: &mut (impl std::io::Write + ?Sized),
- hCtx: cuda_types::CUgreenCtx,
+ hCtx: cuda_types::cuda::CUgreenCtx,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -18000,8 +18016,8 @@ pub fn write_cuGreenCtxDestroy(
}
pub fn write_cuCtxFromGreenCtx(
writer: &mut (impl std::io::Write + ?Sized),
- pContext: *mut cuda_types::CUcontext,
- hCtx: cuda_types::CUgreenCtx,
+ pContext: *mut cuda_types::cuda::CUcontext,
+ hCtx: cuda_types::cuda::CUgreenCtx,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -18015,9 +18031,9 @@ pub fn write_cuCtxFromGreenCtx(
}
pub fn write_cuDeviceGetDevResource(
writer: &mut (impl std::io::Write + ?Sized),
- device: cuda_types::CUdevice,
- resource: *mut cuda_types::CUdevResource,
- type_: cuda_types::CUdevResourceType,
+ device: cuda_types::cuda::CUdevice,
+ resource: *mut cuda_types::cuda::CUdevResource,
+ type_: cuda_types::cuda::CUdevResourceType,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -18050,9 +18066,9 @@ pub fn write_cuDeviceGetDevResource(
}
pub fn write_cuCtxGetDevResource(
writer: &mut (impl std::io::Write + ?Sized),
- hCtx: cuda_types::CUcontext,
- resource: *mut cuda_types::CUdevResource,
- type_: cuda_types::CUdevResourceType,
+ hCtx: cuda_types::cuda::CUcontext,
+ resource: *mut cuda_types::cuda::CUdevResource,
+ type_: cuda_types::cuda::CUdevResourceType,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -18075,9 +18091,9 @@ pub fn write_cuCtxGetDevResource(
}
pub fn write_cuGreenCtxGetDevResource(
writer: &mut (impl std::io::Write + ?Sized),
- hCtx: cuda_types::CUgreenCtx,
- resource: *mut cuda_types::CUdevResource,
- type_: cuda_types::CUdevResourceType,
+ hCtx: cuda_types::cuda::CUgreenCtx,
+ resource: *mut cuda_types::cuda::CUdevResource,
+ type_: cuda_types::cuda::CUdevResourceType,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -18110,10 +18126,10 @@ pub fn write_cuGreenCtxGetDevResource(
}
pub fn write_cuDevSmResourceSplitByCount(
writer: &mut (impl std::io::Write + ?Sized),
- result: *mut cuda_types::CUdevResource,
+ result: *mut cuda_types::cuda::CUdevResource,
nbGroups: *mut ::core::ffi::c_uint,
- input: *const cuda_types::CUdevResource,
- remaining: *mut cuda_types::CUdevResource,
+ input: *const cuda_types::cuda::CUdevResource,
+ remaining: *mut cuda_types::cuda::CUdevResource,
useFlags: ::core::ffi::c_uint,
minCount: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -18175,8 +18191,8 @@ pub fn write_cuDevSmResourceSplitByCount(
}
pub fn write_cuDevResourceGenerateDesc(
writer: &mut (impl std::io::Write + ?Sized),
- phDesc: *mut cuda_types::CUdevResourceDesc,
- resources: *mut cuda_types::CUdevResource,
+ phDesc: *mut cuda_types::cuda::CUdevResourceDesc,
+ resources: *mut cuda_types::cuda::CUdevResource,
nbResources: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -18210,8 +18226,8 @@ pub fn write_cuDevResourceGenerateDesc(
}
pub fn write_cuGreenCtxRecordEvent(
writer: &mut (impl std::io::Write + ?Sized),
- hCtx: cuda_types::CUgreenCtx,
- hEvent: cuda_types::CUevent,
+ hCtx: cuda_types::cuda::CUgreenCtx,
+ hEvent: cuda_types::cuda::CUevent,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -18230,8 +18246,8 @@ pub fn write_cuGreenCtxRecordEvent(
}
pub fn write_cuGreenCtxWaitEvent(
writer: &mut (impl std::io::Write + ?Sized),
- hCtx: cuda_types::CUgreenCtx,
- hEvent: cuda_types::CUevent,
+ hCtx: cuda_types::cuda::CUgreenCtx,
+ hEvent: cuda_types::cuda::CUevent,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -18245,8 +18261,8 @@ pub fn write_cuGreenCtxWaitEvent(
}
pub fn write_cuStreamGetGreenCtx(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- phCtx: *mut cuda_types::CUgreenCtx,
+ hStream: cuda_types::cuda::CUstream,
+ phCtx: *mut cuda_types::cuda::CUgreenCtx,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -18280,7 +18296,7 @@ pub fn write_cuMemHostRegister(
}
pub fn write_cuGraphicsResourceSetMapFlags(
writer: &mut (impl std::io::Write + ?Sized),
- resource: cuda_types::CUgraphicsResource,
+ resource: cuda_types::cuda::CUgraphicsResource,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -18306,9 +18322,9 @@ pub fn write_cuGraphicsResourceSetMapFlags(
pub fn write_cuLinkCreate(
writer: &mut (impl std::io::Write + ?Sized),
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
- stateOut: *mut cuda_types::CUlinkState,
+ stateOut: *mut cuda_types::cuda::CUlinkState,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -18330,13 +18346,13 @@ pub fn write_cuLinkCreate(
}
pub fn write_cuLinkAddData(
writer: &mut (impl std::io::Write + ?Sized),
- state: cuda_types::CUlinkState,
- type_: cuda_types::CUjitInputType,
+ state: cuda_types::cuda::CUlinkState,
+ type_: cuda_types::cuda::CUjitInputType,
data: *mut ::core::ffi::c_void,
size: usize,
name: *const ::core::ffi::c_char,
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -18375,11 +18391,11 @@ pub fn write_cuLinkAddData(
}
pub fn write_cuLinkAddFile(
writer: &mut (impl std::io::Write + ?Sized),
- state: cuda_types::CUlinkState,
- type_: cuda_types::CUjitInputType,
+ state: cuda_types::cuda::CUlinkState,
+ type_: cuda_types::cuda::CUjitInputType,
path: *const ::core::ffi::c_char,
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -18410,9 +18426,9 @@ pub fn write_cuLinkAddFile(
}
pub fn write_cuTexRefSetAddress2D_v2(
writer: &mut (impl std::io::Write + ?Sized),
- hTexRef: cuda_types::CUtexref,
- desc: *const cuda_types::CUDA_ARRAY_DESCRIPTOR,
- dptr: cuda_types::CUdeviceptr,
+ hTexRef: cuda_types::cuda::CUtexref,
+ desc: *const cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR,
+ dptr: cuda_types::cuda::CUdeviceptr,
Pitch: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -18453,7 +18469,7 @@ pub fn write_cuTexRefSetAddress2D_v2(
)?;
writer.write_all(b")")
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_MEMCPY2D_v1_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_MEMCPY2D_v1_st {
fn write(
&self,
_fn_name: &'static str,
@@ -18495,7 +18511,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_MEMCPY2D_v1_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_MEMCPY3D_v1_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_MEMCPY3D_v1_st {
fn write(
&self,
_fn_name: &'static str,
@@ -18551,7 +18567,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_MEMCPY3D_v1_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_ARRAY_DESCRIPTOR_v1_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR_v1_st {
fn write(
&self,
_fn_name: &'static str,
@@ -18569,7 +18585,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_ARRAY_DESCRIPTOR_v1_st {
writer.write_all(b" }")
}
}
-impl crate::format::CudaDisplay for cuda_types::CUDA_ARRAY3D_DESCRIPTOR_v1_st {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUDA_ARRAY3D_DESCRIPTOR_v1_st {
fn write(
&self,
_fn_name: &'static str,
@@ -18594,7 +18610,7 @@ impl crate::format::CudaDisplay for cuda_types::CUDA_ARRAY3D_DESCRIPTOR_v1_st {
pub fn write_cuDeviceTotalMem(
writer: &mut (impl std::io::Write + ?Sized),
bytes: *mut ::core::ffi::c_uint,
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -18608,9 +18624,9 @@ pub fn write_cuDeviceTotalMem(
}
pub fn write_cuCtxCreate(
writer: &mut (impl std::io::Write + ?Sized),
- pctx: *mut cuda_types::CUcontext,
+ pctx: *mut cuda_types::cuda::CUcontext,
flags: ::core::ffi::c_uint,
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -18628,9 +18644,9 @@ pub fn write_cuCtxCreate(
}
pub fn write_cuModuleGetGlobal(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: *mut cuda_types::CUdeviceptr_v1,
+ dptr: *mut cuda_types::cuda::CUdeviceptr_v1,
bytes: *mut ::core::ffi::c_uint,
- hmod: cuda_types::CUmodule,
+ hmod: cuda_types::cuda::CUmodule,
name: *const ::core::ffi::c_char,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -18668,7 +18684,7 @@ pub fn write_cuMemGetInfo(
}
pub fn write_cuMemAlloc(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: *mut cuda_types::CUdeviceptr_v1,
+ dptr: *mut cuda_types::cuda::CUdeviceptr_v1,
bytesize: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -18683,7 +18699,7 @@ pub fn write_cuMemAlloc(
}
pub fn write_cuMemAllocPitch(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: *mut cuda_types::CUdeviceptr_v1,
+ dptr: *mut cuda_types::cuda::CUdeviceptr_v1,
pPitch: *mut ::core::ffi::c_uint,
WidthInBytes: ::core::ffi::c_uint,
Height: ::core::ffi::c_uint,
@@ -18723,7 +18739,7 @@ pub fn write_cuMemAllocPitch(
}
pub fn write_cuMemFree(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: cuda_types::CUdeviceptr_v1,
+ dptr: cuda_types::cuda::CUdeviceptr_v1,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -18733,9 +18749,9 @@ pub fn write_cuMemFree(
}
pub fn write_cuMemGetAddressRange(
writer: &mut (impl std::io::Write + ?Sized),
- pbase: *mut cuda_types::CUdeviceptr_v1,
+ pbase: *mut cuda_types::cuda::CUdeviceptr_v1,
psize: *mut ::core::ffi::c_uint,
- dptr: cuda_types::CUdeviceptr_v1,
+ dptr: cuda_types::cuda::CUdeviceptr_v1,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -18768,7 +18784,7 @@ pub fn write_cuMemAllocHost(
}
pub fn write_cuMemHostGetDevicePointer(
writer: &mut (impl std::io::Write + ?Sized),
- pdptr: *mut cuda_types::CUdeviceptr_v1,
+ pdptr: *mut cuda_types::cuda::CUdeviceptr_v1,
p: *mut ::core::ffi::c_void,
Flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -18798,7 +18814,7 @@ pub fn write_cuMemHostGetDevicePointer(
}
pub fn write_cuMemcpyHtoD(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
srcHost: *const ::core::ffi::c_void,
ByteCount: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -18819,7 +18835,7 @@ pub fn write_cuMemcpyHtoD(
pub fn write_cuMemcpyDtoH(
writer: &mut (impl std::io::Write + ?Sized),
dstHost: *mut ::core::ffi::c_void,
- srcDevice: cuda_types::CUdeviceptr_v1,
+ srcDevice: cuda_types::cuda::CUdeviceptr_v1,
ByteCount: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -18838,8 +18854,8 @@ pub fn write_cuMemcpyDtoH(
}
pub fn write_cuMemcpyDtoD(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr_v1,
- srcDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
+ srcDevice: cuda_types::cuda::CUdeviceptr_v1,
ByteCount: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -18858,9 +18874,9 @@ pub fn write_cuMemcpyDtoD(
}
pub fn write_cuMemcpyDtoA(
writer: &mut (impl std::io::Write + ?Sized),
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: ::core::ffi::c_uint,
- srcDevice: cuda_types::CUdeviceptr_v1,
+ srcDevice: cuda_types::cuda::CUdeviceptr_v1,
ByteCount: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -18883,8 +18899,8 @@ pub fn write_cuMemcpyDtoA(
}
pub fn write_cuMemcpyAtoD(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr_v1,
- srcArray: cuda_types::CUarray,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: ::core::ffi::c_uint,
ByteCount: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -18908,7 +18924,7 @@ pub fn write_cuMemcpyAtoD(
}
pub fn write_cuMemcpyHtoA(
writer: &mut (impl std::io::Write + ?Sized),
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: ::core::ffi::c_uint,
srcHost: *const ::core::ffi::c_void,
ByteCount: ::core::ffi::c_uint,
@@ -18934,7 +18950,7 @@ pub fn write_cuMemcpyHtoA(
pub fn write_cuMemcpyAtoH(
writer: &mut (impl std::io::Write + ?Sized),
dstHost: *mut ::core::ffi::c_void,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: ::core::ffi::c_uint,
ByteCount: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -18958,9 +18974,9 @@ pub fn write_cuMemcpyAtoH(
}
pub fn write_cuMemcpyAtoA(
writer: &mut (impl std::io::Write + ?Sized),
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: ::core::ffi::c_uint,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: ::core::ffi::c_uint,
ByteCount: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -18988,11 +19004,11 @@ pub fn write_cuMemcpyAtoA(
}
pub fn write_cuMemcpyHtoAAsync(
writer: &mut (impl std::io::Write + ?Sized),
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: ::core::ffi::c_uint,
srcHost: *const ::core::ffi::c_void,
ByteCount: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19019,10 +19035,10 @@ pub fn write_cuMemcpyHtoAAsync(
pub fn write_cuMemcpyAtoHAsync(
writer: &mut (impl std::io::Write + ?Sized),
dstHost: *mut ::core::ffi::c_void,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: ::core::ffi::c_uint,
ByteCount: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19048,7 +19064,7 @@ pub fn write_cuMemcpyAtoHAsync(
}
pub fn write_cuMemcpy2D(
writer: &mut (impl std::io::Write + ?Sized),
- pCopy: *const cuda_types::CUDA_MEMCPY2D_v1,
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D_v1,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19058,7 +19074,7 @@ pub fn write_cuMemcpy2D(
}
pub fn write_cuMemcpy2DUnaligned(
writer: &mut (impl std::io::Write + ?Sized),
- pCopy: *const cuda_types::CUDA_MEMCPY2D_v1,
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D_v1,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19068,7 +19084,7 @@ pub fn write_cuMemcpy2DUnaligned(
}
pub fn write_cuMemcpy3D(
writer: &mut (impl std::io::Write + ?Sized),
- pCopy: *const cuda_types::CUDA_MEMCPY3D_v1,
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D_v1,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19078,10 +19094,10 @@ pub fn write_cuMemcpy3D(
}
pub fn write_cuMemcpyHtoDAsync(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
srcHost: *const ::core::ffi::c_void,
ByteCount: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19104,9 +19120,9 @@ pub fn write_cuMemcpyHtoDAsync(
pub fn write_cuMemcpyDtoHAsync(
writer: &mut (impl std::io::Write + ?Sized),
dstHost: *mut ::core::ffi::c_void,
- srcDevice: cuda_types::CUdeviceptr_v1,
+ srcDevice: cuda_types::cuda::CUdeviceptr_v1,
ByteCount: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19128,10 +19144,10 @@ pub fn write_cuMemcpyDtoHAsync(
}
pub fn write_cuMemcpyDtoDAsync(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr_v1,
- srcDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
+ srcDevice: cuda_types::cuda::CUdeviceptr_v1,
ByteCount: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19153,8 +19169,8 @@ pub fn write_cuMemcpyDtoDAsync(
}
pub fn write_cuMemcpy2DAsync(
writer: &mut (impl std::io::Write + ?Sized),
- pCopy: *const cuda_types::CUDA_MEMCPY2D_v1,
- hStream: cuda_types::CUstream,
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D_v1,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19168,8 +19184,8 @@ pub fn write_cuMemcpy2DAsync(
}
pub fn write_cuMemcpy3DAsync(
writer: &mut (impl std::io::Write + ?Sized),
- pCopy: *const cuda_types::CUDA_MEMCPY3D_v1,
- hStream: cuda_types::CUstream,
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D_v1,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19183,7 +19199,7 @@ pub fn write_cuMemcpy3DAsync(
}
pub fn write_cuMemsetD8(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
uc: ::core::ffi::c_uchar,
N: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -19203,7 +19219,7 @@ pub fn write_cuMemsetD8(
}
pub fn write_cuMemsetD16(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
us: ::core::ffi::c_ushort,
N: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -19223,7 +19239,7 @@ pub fn write_cuMemsetD16(
}
pub fn write_cuMemsetD32(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
ui: ::core::ffi::c_uint,
N: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -19243,7 +19259,7 @@ pub fn write_cuMemsetD32(
}
pub fn write_cuMemsetD2D8(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
dstPitch: ::core::ffi::c_uint,
uc: ::core::ffi::c_uchar,
Width: ::core::ffi::c_uint,
@@ -19273,7 +19289,7 @@ pub fn write_cuMemsetD2D8(
}
pub fn write_cuMemsetD2D16(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
dstPitch: ::core::ffi::c_uint,
us: ::core::ffi::c_ushort,
Width: ::core::ffi::c_uint,
@@ -19303,7 +19319,7 @@ pub fn write_cuMemsetD2D16(
}
pub fn write_cuMemsetD2D32(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
dstPitch: ::core::ffi::c_uint,
ui: ::core::ffi::c_uint,
Width: ::core::ffi::c_uint,
@@ -19333,8 +19349,8 @@ pub fn write_cuMemsetD2D32(
}
pub fn write_cuArrayCreate(
writer: &mut (impl std::io::Write + ?Sized),
- pHandle: *mut cuda_types::CUarray,
- pAllocateArray: *const cuda_types::CUDA_ARRAY_DESCRIPTOR_v1,
+ pHandle: *mut cuda_types::cuda::CUarray,
+ pAllocateArray: *const cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR_v1,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19353,8 +19369,8 @@ pub fn write_cuArrayCreate(
}
pub fn write_cuArrayGetDescriptor(
writer: &mut (impl std::io::Write + ?Sized),
- pArrayDescriptor: *mut cuda_types::CUDA_ARRAY_DESCRIPTOR_v1,
- hArray: cuda_types::CUarray,
+ pArrayDescriptor: *mut cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR_v1,
+ hArray: cuda_types::cuda::CUarray,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19373,8 +19389,8 @@ pub fn write_cuArrayGetDescriptor(
}
pub fn write_cuArray3DCreate(
writer: &mut (impl std::io::Write + ?Sized),
- pHandle: *mut cuda_types::CUarray,
- pAllocateArray: *const cuda_types::CUDA_ARRAY3D_DESCRIPTOR_v1,
+ pHandle: *mut cuda_types::cuda::CUarray,
+ pAllocateArray: *const cuda_types::cuda::CUDA_ARRAY3D_DESCRIPTOR_v1,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19393,8 +19409,8 @@ pub fn write_cuArray3DCreate(
}
pub fn write_cuArray3DGetDescriptor(
writer: &mut (impl std::io::Write + ?Sized),
- pArrayDescriptor: *mut cuda_types::CUDA_ARRAY3D_DESCRIPTOR_v1,
- hArray: cuda_types::CUarray,
+ pArrayDescriptor: *mut cuda_types::cuda::CUDA_ARRAY3D_DESCRIPTOR_v1,
+ hArray: cuda_types::cuda::CUarray,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19419,8 +19435,8 @@ pub fn write_cuArray3DGetDescriptor(
pub fn write_cuTexRefSetAddress(
writer: &mut (impl std::io::Write + ?Sized),
ByteOffset: *mut ::core::ffi::c_uint,
- hTexRef: cuda_types::CUtexref,
- dptr: cuda_types::CUdeviceptr_v1,
+ hTexRef: cuda_types::cuda::CUtexref,
+ dptr: cuda_types::cuda::CUdeviceptr_v1,
bytes: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -19448,9 +19464,9 @@ pub fn write_cuTexRefSetAddress(
}
pub fn write_cuTexRefSetAddress2D(
writer: &mut (impl std::io::Write + ?Sized),
- hTexRef: cuda_types::CUtexref,
- desc: *const cuda_types::CUDA_ARRAY_DESCRIPTOR_v1,
- dptr: cuda_types::CUdeviceptr_v1,
+ hTexRef: cuda_types::cuda::CUtexref,
+ desc: *const cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR_v1,
+ dptr: cuda_types::cuda::CUdeviceptr_v1,
Pitch: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -19478,8 +19494,8 @@ pub fn write_cuTexRefSetAddress2D(
}
pub fn write_cuTexRefGetAddress(
writer: &mut (impl std::io::Write + ?Sized),
- pdptr: *mut cuda_types::CUdeviceptr_v1,
- hTexRef: cuda_types::CUtexref,
+ pdptr: *mut cuda_types::cuda::CUdeviceptr_v1,
+ hTexRef: cuda_types::cuda::CUtexref,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19493,9 +19509,9 @@ pub fn write_cuTexRefGetAddress(
}
pub fn write_cuGraphicsResourceGetMappedPointer(
writer: &mut (impl std::io::Write + ?Sized),
- pDevPtr: *mut cuda_types::CUdeviceptr_v1,
+ pDevPtr: *mut cuda_types::cuda::CUdeviceptr_v1,
pSize: *mut ::core::ffi::c_uint,
- resource: cuda_types::CUgraphicsResource,
+ resource: cuda_types::cuda::CUgraphicsResource,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19528,7 +19544,7 @@ pub fn write_cuGraphicsResourceGetMappedPointer(
}
pub fn write_cuCtxDestroy(
writer: &mut (impl std::io::Write + ?Sized),
- ctx: cuda_types::CUcontext,
+ ctx: cuda_types::cuda::CUcontext,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19538,7 +19554,7 @@ pub fn write_cuCtxDestroy(
}
pub fn write_cuCtxPopCurrent(
writer: &mut (impl std::io::Write + ?Sized),
- pctx: *mut cuda_types::CUcontext,
+ pctx: *mut cuda_types::cuda::CUcontext,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19548,7 +19564,7 @@ pub fn write_cuCtxPopCurrent(
}
pub fn write_cuCtxPushCurrent(
writer: &mut (impl std::io::Write + ?Sized),
- ctx: cuda_types::CUcontext,
+ ctx: cuda_types::cuda::CUcontext,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19558,7 +19574,7 @@ pub fn write_cuCtxPushCurrent(
}
pub fn write_cuStreamDestroy(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19568,7 +19584,7 @@ pub fn write_cuStreamDestroy(
}
pub fn write_cuEventDestroy(
writer: &mut (impl std::io::Write + ?Sized),
- hEvent: cuda_types::CUevent,
+ hEvent: cuda_types::cuda::CUevent,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19578,7 +19594,7 @@ pub fn write_cuEventDestroy(
}
pub fn write_cuDevicePrimaryCtxRelease(
writer: &mut (impl std::io::Write + ?Sized),
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19593,7 +19609,7 @@ pub fn write_cuDevicePrimaryCtxRelease(
}
pub fn write_cuDevicePrimaryCtxReset(
writer: &mut (impl std::io::Write + ?Sized),
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19603,7 +19619,7 @@ pub fn write_cuDevicePrimaryCtxReset(
}
pub fn write_cuDevicePrimaryCtxSetFlags(
writer: &mut (impl std::io::Write + ?Sized),
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -19628,7 +19644,7 @@ pub fn write_cuDevicePrimaryCtxSetFlags(
}
pub fn write_cuMemcpyHtoD_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
) -> std::io::Result<()> {
@@ -19649,7 +19665,7 @@ pub fn write_cuMemcpyHtoD_v2(
pub fn write_cuMemcpyDtoH_v2(
writer: &mut (impl std::io::Write + ?Sized),
dstHost: *mut ::core::ffi::c_void,
- srcDevice: cuda_types::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -19668,8 +19684,8 @@ pub fn write_cuMemcpyDtoH_v2(
}
pub fn write_cuMemcpyDtoD_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
- srcDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -19688,9 +19704,9 @@ pub fn write_cuMemcpyDtoD_v2(
}
pub fn write_cuMemcpyDtoA_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
- srcDevice: cuda_types::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -19713,8 +19729,8 @@ pub fn write_cuMemcpyDtoA_v2(
}
pub fn write_cuMemcpyAtoD_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
- srcArray: cuda_types::CUarray,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
) -> std::io::Result<()> {
@@ -19738,7 +19754,7 @@ pub fn write_cuMemcpyAtoD_v2(
}
pub fn write_cuMemcpyHtoA_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
@@ -19764,7 +19780,7 @@ pub fn write_cuMemcpyHtoA_v2(
pub fn write_cuMemcpyAtoH_v2(
writer: &mut (impl std::io::Write + ?Sized),
dstHost: *mut ::core::ffi::c_void,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
) -> std::io::Result<()> {
@@ -19788,9 +19804,9 @@ pub fn write_cuMemcpyAtoH_v2(
}
pub fn write_cuMemcpyAtoA_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
) -> std::io::Result<()> {
@@ -19818,11 +19834,11 @@ pub fn write_cuMemcpyAtoA_v2(
}
pub fn write_cuMemcpyHtoAAsync_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19874,10 +19890,10 @@ pub fn write_cuMemcpyHtoAAsync_v2(
pub fn write_cuMemcpyAtoHAsync_v2(
writer: &mut (impl std::io::Write + ?Sized),
dstHost: *mut ::core::ffi::c_void,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19928,7 +19944,7 @@ pub fn write_cuMemcpyAtoHAsync_v2(
}
pub fn write_cuMemcpy2D_v2(
writer: &mut (impl std::io::Write + ?Sized),
- pCopy: *const cuda_types::CUDA_MEMCPY2D,
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19938,7 +19954,7 @@ pub fn write_cuMemcpy2D_v2(
}
pub fn write_cuMemcpy2DUnaligned_v2(
writer: &mut (impl std::io::Write + ?Sized),
- pCopy: *const cuda_types::CUDA_MEMCPY2D,
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19953,7 +19969,7 @@ pub fn write_cuMemcpy2DUnaligned_v2(
}
pub fn write_cuMemcpy3D_v2(
writer: &mut (impl std::io::Write + ?Sized),
- pCopy: *const cuda_types::CUDA_MEMCPY3D,
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -19963,10 +19979,10 @@ pub fn write_cuMemcpy3D_v2(
}
pub fn write_cuMemcpyHtoDAsync_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -20009,9 +20025,9 @@ pub fn write_cuMemcpyHtoDAsync_v2(
pub fn write_cuMemcpyDtoHAsync_v2(
writer: &mut (impl std::io::Write + ?Sized),
dstHost: *mut ::core::ffi::c_void,
- srcDevice: cuda_types::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -20053,10 +20069,10 @@ pub fn write_cuMemcpyDtoHAsync_v2(
}
pub fn write_cuMemcpyDtoDAsync_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
- srcDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -20098,8 +20114,8 @@ pub fn write_cuMemcpyDtoDAsync_v2(
}
pub fn write_cuMemcpy2DAsync_v2(
writer: &mut (impl std::io::Write + ?Sized),
- pCopy: *const cuda_types::CUDA_MEMCPY2D,
- hStream: cuda_types::CUstream,
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -20113,8 +20129,8 @@ pub fn write_cuMemcpy2DAsync_v2(
}
pub fn write_cuMemcpy3DAsync_v2(
writer: &mut (impl std::io::Write + ?Sized),
- pCopy: *const cuda_types::CUDA_MEMCPY3D,
- hStream: cuda_types::CUstream,
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -20128,7 +20144,7 @@ pub fn write_cuMemcpy3DAsync_v2(
}
pub fn write_cuMemsetD8_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
uc: ::core::ffi::c_uchar,
N: usize,
) -> std::io::Result<()> {
@@ -20148,7 +20164,7 @@ pub fn write_cuMemsetD8_v2(
}
pub fn write_cuMemsetD16_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
us: ::core::ffi::c_ushort,
N: usize,
) -> std::io::Result<()> {
@@ -20168,7 +20184,7 @@ pub fn write_cuMemsetD16_v2(
}
pub fn write_cuMemsetD32_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
ui: ::core::ffi::c_uint,
N: usize,
) -> std::io::Result<()> {
@@ -20188,7 +20204,7 @@ pub fn write_cuMemsetD32_v2(
}
pub fn write_cuMemsetD2D8_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
uc: ::core::ffi::c_uchar,
Width: usize,
@@ -20218,7 +20234,7 @@ pub fn write_cuMemsetD2D8_v2(
}
pub fn write_cuMemsetD2D16_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
us: ::core::ffi::c_ushort,
Width: usize,
@@ -20248,7 +20264,7 @@ pub fn write_cuMemsetD2D16_v2(
}
pub fn write_cuMemsetD2D32_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
ui: ::core::ffi::c_uint,
Width: usize,
@@ -20278,8 +20294,8 @@ pub fn write_cuMemsetD2D32_v2(
}
pub fn write_cuMemcpy(
writer: &mut (impl std::io::Write + ?Sized),
- dst: cuda_types::CUdeviceptr,
- src: cuda_types::CUdeviceptr,
+ dst: cuda_types::cuda::CUdeviceptr,
+ src: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -20298,10 +20314,10 @@ pub fn write_cuMemcpy(
}
pub fn write_cuMemcpyAsync(
writer: &mut (impl std::io::Write + ?Sized),
- dst: cuda_types::CUdeviceptr,
- src: cuda_types::CUdeviceptr,
+ dst: cuda_types::cuda::CUdeviceptr,
+ src: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -20323,10 +20339,10 @@ pub fn write_cuMemcpyAsync(
}
pub fn write_cuMemcpyPeer(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
- dstContext: cuda_types::CUcontext,
- srcDevice: cuda_types::CUdeviceptr,
- srcContext: cuda_types::CUcontext,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ dstContext: cuda_types::cuda::CUcontext,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
+ srcContext: cuda_types::cuda::CUcontext,
ByteCount: usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -20353,12 +20369,12 @@ pub fn write_cuMemcpyPeer(
}
pub fn write_cuMemcpyPeerAsync(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
- dstContext: cuda_types::CUcontext,
- srcDevice: cuda_types::CUdeviceptr,
- srcContext: cuda_types::CUcontext,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ dstContext: cuda_types::cuda::CUcontext,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
+ srcContext: cuda_types::cuda::CUcontext,
ByteCount: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -20398,7 +20414,7 @@ pub fn write_cuMemcpyPeerAsync(
}
pub fn write_cuMemcpy3DPeer(
writer: &mut (impl std::io::Write + ?Sized),
- pCopy: *const cuda_types::CUDA_MEMCPY3D_PEER,
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D_PEER,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -20408,8 +20424,8 @@ pub fn write_cuMemcpy3DPeer(
}
pub fn write_cuMemcpy3DPeerAsync(
writer: &mut (impl std::io::Write + ?Sized),
- pCopy: *const cuda_types::CUDA_MEMCPY3D_PEER,
- hStream: cuda_types::CUstream,
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D_PEER,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -20423,10 +20439,10 @@ pub fn write_cuMemcpy3DPeerAsync(
}
pub fn write_cuMemsetD8Async(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
uc: ::core::ffi::c_uchar,
N: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -20448,10 +20464,10 @@ pub fn write_cuMemsetD8Async(
}
pub fn write_cuMemsetD16Async(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
us: ::core::ffi::c_ushort,
N: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -20473,10 +20489,10 @@ pub fn write_cuMemsetD16Async(
}
pub fn write_cuMemsetD32Async(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
ui: ::core::ffi::c_uint,
N: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -20498,12 +20514,12 @@ pub fn write_cuMemsetD32Async(
}
pub fn write_cuMemsetD2D8Async(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
uc: ::core::ffi::c_uchar,
Width: usize,
Height: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -20533,12 +20549,12 @@ pub fn write_cuMemsetD2D8Async(
}
pub fn write_cuMemsetD2D16Async(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
us: ::core::ffi::c_ushort,
Width: usize,
Height: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -20573,12 +20589,12 @@ pub fn write_cuMemsetD2D16Async(
}
pub fn write_cuMemsetD2D32Async(
writer: &mut (impl std::io::Write + ?Sized),
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
ui: ::core::ffi::c_uint,
Width: usize,
Height: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -20613,7 +20629,7 @@ pub fn write_cuMemsetD2D32Async(
}
pub fn write_cuStreamGetPriority(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
priority: *mut ::core::ffi::c_int,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -20633,7 +20649,7 @@ pub fn write_cuStreamGetPriority(
}
pub fn write_cuStreamGetId(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
streamId: *mut ::core::ffi::c_ulonglong,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -20648,7 +20664,7 @@ pub fn write_cuStreamGetId(
}
pub fn write_cuStreamGetFlags(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
flags: *mut ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -20663,8 +20679,8 @@ pub fn write_cuStreamGetFlags(
}
pub fn write_cuStreamGetCtx(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- pctx: *mut cuda_types::CUcontext,
+ hStream: cuda_types::cuda::CUstream,
+ pctx: *mut cuda_types::cuda::CUcontext,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -20678,8 +20694,8 @@ pub fn write_cuStreamGetCtx(
}
pub fn write_cuStreamWaitEvent(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- hEvent: cuda_types::CUevent,
+ hStream: cuda_types::cuda::CUstream,
+ hEvent: cuda_types::cuda::CUevent,
Flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -20698,8 +20714,8 @@ pub fn write_cuStreamWaitEvent(
}
pub fn write_cuStreamAddCallback(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- callback: cuda_types::CUstreamCallback,
+ hStream: cuda_types::cuda::CUstream,
+ callback: cuda_types::cuda::CUstreamCallback,
userData: *mut ::core::ffi::c_void,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -20733,8 +20749,8 @@ pub fn write_cuStreamAddCallback(
}
pub fn write_cuStreamAttachMemAsync(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- dptr: cuda_types::CUdeviceptr,
+ hStream: cuda_types::cuda::CUstream,
+ dptr: cuda_types::cuda::CUdeviceptr,
length: usize,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -20773,7 +20789,7 @@ pub fn write_cuStreamAttachMemAsync(
}
pub fn write_cuStreamQuery(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -20783,7 +20799,7 @@ pub fn write_cuStreamQuery(
}
pub fn write_cuStreamSynchronize(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -20793,8 +20809,8 @@ pub fn write_cuStreamSynchronize(
}
pub fn write_cuEventRecord(
writer: &mut (impl std::io::Write + ?Sized),
- hEvent: cuda_types::CUevent,
- hStream: cuda_types::CUstream,
+ hEvent: cuda_types::cuda::CUevent,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -20808,8 +20824,8 @@ pub fn write_cuEventRecord(
}
pub fn write_cuEventRecordWithFlags(
writer: &mut (impl std::io::Write + ?Sized),
- hEvent: cuda_types::CUevent,
- hStream: cuda_types::CUstream,
+ hEvent: cuda_types::cuda::CUevent,
+ hStream: cuda_types::cuda::CUstream,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -20843,7 +20859,7 @@ pub fn write_cuEventRecordWithFlags(
}
pub fn write_cuLaunchKernel(
writer: &mut (impl std::io::Write + ?Sized),
- f: cuda_types::CUfunction,
+ f: cuda_types::cuda::CUfunction,
gridDimX: ::core::ffi::c_uint,
gridDimY: ::core::ffi::c_uint,
gridDimZ: ::core::ffi::c_uint,
@@ -20851,7 +20867,7 @@ pub fn write_cuLaunchKernel(
blockDimY: ::core::ffi::c_uint,
blockDimZ: ::core::ffi::c_uint,
sharedMemBytes: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
kernelParams: *mut *mut ::core::ffi::c_void,
extra: *mut *mut ::core::ffi::c_void,
) -> std::io::Result<()> {
@@ -20908,8 +20924,8 @@ pub fn write_cuLaunchKernel(
}
pub fn write_cuLaunchKernelEx(
writer: &mut (impl std::io::Write + ?Sized),
- config: *const cuda_types::CUlaunchConfig,
- f: cuda_types::CUfunction,
+ config: *const cuda_types::cuda::CUlaunchConfig,
+ f: cuda_types::cuda::CUfunction,
kernelParams: *mut *mut ::core::ffi::c_void,
extra: *mut *mut ::core::ffi::c_void,
) -> std::io::Result<()> {
@@ -20938,8 +20954,8 @@ pub fn write_cuLaunchKernelEx(
}
pub fn write_cuLaunchHostFunc(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- fn_: cuda_types::CUhostFn,
+ hStream: cuda_types::cuda::CUstream,
+ fn_: cuda_types::cuda::CUhostFn,
userData: *mut ::core::ffi::c_void,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -20959,8 +20975,8 @@ pub fn write_cuLaunchHostFunc(
pub fn write_cuGraphicsMapResources(
writer: &mut (impl std::io::Write + ?Sized),
count: ::core::ffi::c_uint,
- resources: *mut cuda_types::CUgraphicsResource,
- hStream: cuda_types::CUstream,
+ resources: *mut cuda_types::cuda::CUgraphicsResource,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -20994,8 +21010,8 @@ pub fn write_cuGraphicsMapResources(
pub fn write_cuGraphicsUnmapResources(
writer: &mut (impl std::io::Write + ?Sized),
count: ::core::ffi::c_uint,
- resources: *mut cuda_types::CUgraphicsResource,
- hStream: cuda_types::CUstream,
+ resources: *mut cuda_types::cuda::CUgraphicsResource,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -21028,9 +21044,9 @@ pub fn write_cuGraphicsUnmapResources(
}
pub fn write_cuStreamWriteValue32(
writer: &mut (impl std::io::Write + ?Sized),
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -21053,9 +21069,9 @@ pub fn write_cuStreamWriteValue32(
}
pub fn write_cuStreamWaitValue32(
writer: &mut (impl std::io::Write + ?Sized),
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -21078,9 +21094,9 @@ pub fn write_cuStreamWaitValue32(
}
pub fn write_cuStreamWriteValue64(
writer: &mut (impl std::io::Write + ?Sized),
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -21103,9 +21119,9 @@ pub fn write_cuStreamWriteValue64(
}
pub fn write_cuStreamWaitValue64(
writer: &mut (impl std::io::Write + ?Sized),
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -21128,9 +21144,9 @@ pub fn write_cuStreamWaitValue64(
}
pub fn write_cuStreamBatchMemOp(
writer: &mut (impl std::io::Write + ?Sized),
- stream: cuda_types::CUstream,
+ stream: cuda_types::cuda::CUstream,
count: ::core::ffi::c_uint,
- paramArray: *mut cuda_types::CUstreamBatchMemOpParams,
+ paramArray: *mut cuda_types::cuda::CUstreamBatchMemOpParams,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -21165,9 +21181,9 @@ pub fn write_cuStreamBatchMemOp(
}
pub fn write_cuStreamWriteValue32_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -21210,9 +21226,9 @@ pub fn write_cuStreamWriteValue32_ptsz(
}
pub fn write_cuStreamWaitValue32_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -21255,9 +21271,9 @@ pub fn write_cuStreamWaitValue32_ptsz(
}
pub fn write_cuStreamWriteValue64_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -21300,9 +21316,9 @@ pub fn write_cuStreamWriteValue64_ptsz(
}
pub fn write_cuStreamWaitValue64_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -21345,9 +21361,9 @@ pub fn write_cuStreamWaitValue64_ptsz(
}
pub fn write_cuStreamBatchMemOp_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- stream: cuda_types::CUstream,
+ stream: cuda_types::cuda::CUstream,
count: ::core::ffi::c_uint,
- paramArray: *mut cuda_types::CUstreamBatchMemOpParams,
+ paramArray: *mut cuda_types::cuda::CUstreamBatchMemOpParams,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -21397,9 +21413,9 @@ pub fn write_cuStreamBatchMemOp_ptsz(
}
pub fn write_cuStreamWriteValue32_v2(
writer: &mut (impl std::io::Write + ?Sized),
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -21442,9 +21458,9 @@ pub fn write_cuStreamWriteValue32_v2(
}
pub fn write_cuStreamWaitValue32_v2(
writer: &mut (impl std::io::Write + ?Sized),
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -21482,9 +21498,9 @@ pub fn write_cuStreamWaitValue32_v2(
}
pub fn write_cuStreamWriteValue64_v2(
writer: &mut (impl std::io::Write + ?Sized),
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -21527,9 +21543,9 @@ pub fn write_cuStreamWriteValue64_v2(
}
pub fn write_cuStreamWaitValue64_v2(
writer: &mut (impl std::io::Write + ?Sized),
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -21567,9 +21583,9 @@ pub fn write_cuStreamWaitValue64_v2(
}
pub fn write_cuStreamBatchMemOp_v2(
writer: &mut (impl std::io::Write + ?Sized),
- stream: cuda_types::CUstream,
+ stream: cuda_types::cuda::CUstream,
count: ::core::ffi::c_uint,
- paramArray: *mut cuda_types::CUstreamBatchMemOpParams,
+ paramArray: *mut cuda_types::cuda::CUstreamBatchMemOpParams,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -21609,10 +21625,10 @@ pub fn write_cuStreamBatchMemOp_v2(
}
pub fn write_cuMemPrefetchAsync(
writer: &mut (impl std::io::Write + ?Sized),
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- dstDevice: cuda_types::CUdevice,
- hStream: cuda_types::CUstream,
+ dstDevice: cuda_types::cuda::CUdevice,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -21639,11 +21655,11 @@ pub fn write_cuMemPrefetchAsync(
}
pub fn write_cuMemPrefetchAsync_v2(
writer: &mut (impl std::io::Write + ?Sized),
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- location: cuda_types::CUmemLocation,
+ location: cuda_types::cuda::CUmemLocation,
flags: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -21684,7 +21700,7 @@ pub fn write_cuMemPrefetchAsync_v2(
}
pub fn write_cuLaunchCooperativeKernel(
writer: &mut (impl std::io::Write + ?Sized),
- f: cuda_types::CUfunction,
+ f: cuda_types::cuda::CUfunction,
gridDimX: ::core::ffi::c_uint,
gridDimY: ::core::ffi::c_uint,
gridDimZ: ::core::ffi::c_uint,
@@ -21692,7 +21708,7 @@ pub fn write_cuLaunchCooperativeKernel(
blockDimY: ::core::ffi::c_uint,
blockDimZ: ::core::ffi::c_uint,
sharedMemBytes: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
kernelParams: *mut *mut ::core::ffi::c_void,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -21784,10 +21800,10 @@ pub fn write_cuLaunchCooperativeKernel(
}
pub fn write_cuSignalExternalSemaphoresAsync(
writer: &mut (impl std::io::Write + ?Sized),
- extSemArray: *const cuda_types::CUexternalSemaphore,
- paramsArray: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,
+ extSemArray: *const cuda_types::cuda::CUexternalSemaphore,
+ paramsArray: *const cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,
numExtSems: ::core::ffi::c_uint,
- stream: cuda_types::CUstream,
+ stream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -21829,10 +21845,10 @@ pub fn write_cuSignalExternalSemaphoresAsync(
}
pub fn write_cuWaitExternalSemaphoresAsync(
writer: &mut (impl std::io::Write + ?Sized),
- extSemArray: *const cuda_types::CUexternalSemaphore,
- paramsArray: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,
+ extSemArray: *const cuda_types::cuda::CUexternalSemaphore,
+ paramsArray: *const cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,
numExtSems: ::core::ffi::c_uint,
- stream: cuda_types::CUstream,
+ stream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -21874,7 +21890,7 @@ pub fn write_cuWaitExternalSemaphoresAsync(
}
pub fn write_cuStreamBeginCapture(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -21889,7 +21905,7 @@ pub fn write_cuStreamBeginCapture(
}
pub fn write_cuStreamBeginCapture_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -21904,8 +21920,8 @@ pub fn write_cuStreamBeginCapture_ptsz(
}
pub fn write_cuStreamBeginCapture_v2(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- mode: cuda_types::CUstreamCaptureMode,
+ hStream: cuda_types::cuda::CUstream,
+ mode: cuda_types::cuda::CUstreamCaptureMode,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -21929,12 +21945,12 @@ pub fn write_cuStreamBeginCapture_v2(
}
pub fn write_cuStreamBeginCaptureToGraph(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
- dependencyData: *const cuda_types::CUgraphEdgeData,
+ hStream: cuda_types::cuda::CUstream,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
+ dependencyData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
- mode: cuda_types::CUstreamCaptureMode,
+ mode: cuda_types::cuda::CUstreamCaptureMode,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -21994,8 +22010,8 @@ pub fn write_cuStreamBeginCaptureToGraph(
}
pub fn write_cuStreamEndCapture(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- phGraph: *mut cuda_types::CUgraph,
+ hStream: cuda_types::cuda::CUstream,
+ phGraph: *mut cuda_types::cuda::CUgraph,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -22009,8 +22025,8 @@ pub fn write_cuStreamEndCapture(
}
pub fn write_cuStreamIsCapturing(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- captureStatus: *mut cuda_types::CUstreamCaptureStatus,
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus: *mut cuda_types::cuda::CUstreamCaptureStatus,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -22029,9 +22045,9 @@ pub fn write_cuStreamIsCapturing(
}
pub fn write_cuStreamGetCaptureInfo(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- captureStatus_out: *mut cuda_types::CUstreamCaptureStatus,
- id_out: *mut cuda_types::cuuint64_t,
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus_out: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ id_out: *mut cuda_types::cuda::cuuint64_t,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -22064,9 +22080,9 @@ pub fn write_cuStreamGetCaptureInfo(
}
pub fn write_cuStreamGetCaptureInfo_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- captureStatus_out: *mut cuda_types::CUstreamCaptureStatus,
- id_out: *mut cuda_types::cuuint64_t,
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus_out: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ id_out: *mut cuda_types::cuda::cuuint64_t,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -22099,11 +22115,11 @@ pub fn write_cuStreamGetCaptureInfo_ptsz(
}
pub fn write_cuStreamGetCaptureInfo_v2(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- captureStatus_out: *mut cuda_types::CUstreamCaptureStatus,
- id_out: *mut cuda_types::cuuint64_t,
- graph_out: *mut cuda_types::CUgraph,
- dependencies_out: *mut *const cuda_types::CUgraphNode,
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus_out: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ id_out: *mut cuda_types::cuda::cuuint64_t,
+ graph_out: *mut cuda_types::cuda::CUgraph,
+ dependencies_out: *mut *const cuda_types::cuda::CUgraphNode,
numDependencies_out: *mut usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -22164,12 +22180,12 @@ pub fn write_cuStreamGetCaptureInfo_v2(
}
pub fn write_cuStreamGetCaptureInfo_v3(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- captureStatus_out: *mut cuda_types::CUstreamCaptureStatus,
- id_out: *mut cuda_types::cuuint64_t,
- graph_out: *mut cuda_types::CUgraph,
- dependencies_out: *mut *const cuda_types::CUgraphNode,
- edgeData_out: *mut *const cuda_types::CUgraphEdgeData,
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus_out: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ id_out: *mut cuda_types::cuda::cuuint64_t,
+ graph_out: *mut cuda_types::cuda::CUgraph,
+ dependencies_out: *mut *const cuda_types::cuda::CUgraphNode,
+ edgeData_out: *mut *const cuda_types::cuda::CUgraphEdgeData,
numDependencies_out: *mut usize,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -22239,11 +22255,11 @@ pub fn write_cuStreamGetCaptureInfo_v3(
}
pub fn write_cuGraphAddKernelNode(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS_v1,
+ nodeParams: *const cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS_v1,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -22289,8 +22305,8 @@ pub fn write_cuGraphAddKernelNode(
}
pub fn write_cuGraphKernelNodeGetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUDA_KERNEL_NODE_PARAMS_v1,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS_v1,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -22314,8 +22330,8 @@ pub fn write_cuGraphKernelNodeGetParams(
}
pub fn write_cuGraphKernelNodeSetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS_v1,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS_v1,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -22339,9 +22355,9 @@ pub fn write_cuGraphKernelNodeSetParams(
}
pub fn write_cuGraphExecKernelNodeSetParams(
writer: &mut (impl std::io::Write + ?Sized),
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS_v1,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS_v1,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -22374,9 +22390,9 @@ pub fn write_cuGraphExecKernelNodeSetParams(
}
pub fn write_cuGraphInstantiateWithParams(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphExec: *mut cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
- instantiateParams: *mut cuda_types::CUDA_GRAPH_INSTANTIATE_PARAMS,
+ phGraphExec: *mut cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
+ instantiateParams: *mut cuda_types::cuda::CUDA_GRAPH_INSTANTIATE_PARAMS,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -22409,10 +22425,10 @@ pub fn write_cuGraphInstantiateWithParams(
}
pub fn write_cuGraphExecUpdate(
writer: &mut (impl std::io::Write + ?Sized),
- hGraphExec: cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
- hErrorNode_out: *mut cuda_types::CUgraphNode,
- updateResult_out: *mut cuda_types::CUgraphExecUpdateResult,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
+ hErrorNode_out: *mut cuda_types::cuda::CUgraphNode,
+ updateResult_out: *mut cuda_types::cuda::CUgraphExecUpdateResult,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -22449,8 +22465,8 @@ pub fn write_cuGraphExecUpdate(
}
pub fn write_cuGraphUpload(
writer: &mut (impl std::io::Write + ?Sized),
- hGraph: cuda_types::CUgraphExec,
- hStream: cuda_types::CUstream,
+ hGraph: cuda_types::cuda::CUgraphExec,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -22464,8 +22480,8 @@ pub fn write_cuGraphUpload(
}
pub fn write_cuGraphLaunch(
writer: &mut (impl std::io::Write + ?Sized),
- hGraph: cuda_types::CUgraphExec,
- hStream: cuda_types::CUstream,
+ hGraph: cuda_types::cuda::CUgraphExec,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -22479,8 +22495,8 @@ pub fn write_cuGraphLaunch(
}
pub fn write_cuStreamCopyAttributes(
writer: &mut (impl std::io::Write + ?Sized),
- dstStream: cuda_types::CUstream,
- srcStream: cuda_types::CUstream,
+ dstStream: cuda_types::cuda::CUstream,
+ srcStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -22504,8 +22520,8 @@ pub fn write_cuStreamCopyAttributes(
}
pub fn write_cuIpcOpenMemHandle(
writer: &mut (impl std::io::Write + ?Sized),
- pdptr: *mut cuda_types::CUdeviceptr,
- handle: cuda_types::CUipcMemHandle,
+ pdptr: *mut cuda_types::cuda::CUdeviceptr,
+ handle: cuda_types::cuda::CUipcMemHandle,
Flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -22524,9 +22540,9 @@ pub fn write_cuIpcOpenMemHandle(
}
pub fn write_cuGraphInstantiate(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphExec: *mut cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
- phErrorNode: *mut cuda_types::CUgraphNode,
+ phGraphExec: *mut cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
+ phErrorNode: *mut cuda_types::cuda::CUgraphNode,
logBuffer: *mut ::core::ffi::c_char,
bufferSize: usize,
) -> std::io::Result<()> {
@@ -22574,9 +22590,9 @@ pub fn write_cuGraphInstantiate(
}
pub fn write_cuGraphInstantiate_v2(
writer: &mut (impl std::io::Write + ?Sized),
- phGraphExec: *mut cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
- phErrorNode: *mut cuda_types::CUgraphNode,
+ phGraphExec: *mut cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
+ phErrorNode: *mut cuda_types::cuda::CUgraphNode,
logBuffer: *mut ::core::ffi::c_char,
bufferSize: usize,
) -> std::io::Result<()> {
@@ -22629,9 +22645,9 @@ pub fn write_cuGraphInstantiate_v2(
}
pub fn write_cuMemMapArrayAsync(
writer: &mut (impl std::io::Write + ?Sized),
- mapInfoList: *mut cuda_types::CUarrayMapInfo,
+ mapInfoList: *mut cuda_types::cuda::CUarrayMapInfo,
count: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -22661,8 +22677,8 @@ pub fn write_cuMemMapArrayAsync(
}
pub fn write_cuMemFreeAsync(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: cuda_types::CUdeviceptr,
- hStream: cuda_types::CUstream,
+ dptr: cuda_types::cuda::CUdeviceptr,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -22676,9 +22692,9 @@ pub fn write_cuMemFreeAsync(
}
pub fn write_cuMemAllocAsync(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytesize: usize,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -22696,10 +22712,10 @@ pub fn write_cuMemAllocAsync(
}
pub fn write_cuMemAllocFromPoolAsync(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytesize: usize,
- pool: cuda_types::CUmemoryPool,
- hStream: cuda_types::CUstream,
+ pool: cuda_types::cuda::CUmemoryPool,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -22741,8 +22757,8 @@ pub fn write_cuMemAllocFromPoolAsync(
}
pub fn write_cuStreamUpdateCaptureDependencies(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- dependencies: *mut cuda_types::CUgraphNode,
+ hStream: cuda_types::cuda::CUstream,
+ dependencies: *mut cuda_types::cuda::CUgraphNode,
numDependencies: usize,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -22786,9 +22802,9 @@ pub fn write_cuStreamUpdateCaptureDependencies(
}
pub fn write_cuStreamUpdateCaptureDependencies_v2(
writer: &mut (impl std::io::Write + ?Sized),
- hStream: cuda_types::CUstream,
- dependencies: *mut cuda_types::CUgraphNode,
- dependencyData: *const cuda_types::CUgraphEdgeData,
+ hStream: cuda_types::cuda::CUstream,
+ dependencies: *mut cuda_types::cuda::CUgraphNode,
+ dependencyData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -22844,7 +22860,7 @@ pub fn write_cuGetProcAddress(
symbol: *const ::core::ffi::c_char,
pfn: *mut *mut ::core::ffi::c_void,
cudaVersion: ::core::ffi::c_int,
- flags: cuda_types::cuuint64_t,
+ flags: cuda_types::cuda::cuuint64_t,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -22869,7 +22885,7 @@ pub fn write_cuGetProcAddress(
crate::format::CudaDisplay::write(&flags, "cuGetProcAddress", arg_idx, writer)?;
writer.write_all(b")")
}
-impl crate::format::CudaDisplay for cuda_types::CUoutput_mode_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUoutput_mode_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -22877,10 +22893,10 @@ impl crate::format::CudaDisplay for cuda_types::CUoutput_mode_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUoutput_mode_enum::CU_OUT_KEY_VALUE_PAIR => {
+ &cuda_types::cuda::CUoutput_mode_enum::CU_OUT_KEY_VALUE_PAIR => {
writer.write_all(stringify!(CU_OUT_KEY_VALUE_PAIR).as_bytes())
}
- &cuda_types::CUoutput_mode_enum::CU_OUT_CSV => {
+ &cuda_types::cuda::CUoutput_mode_enum::CU_OUT_CSV => {
writer.write_all(stringify!(CU_OUT_CSV).as_bytes())
}
_ => write!(writer, "{}", self.0),
@@ -22891,7 +22907,7 @@ pub fn write_cuProfilerInitialize(
writer: &mut (impl std::io::Write + ?Sized),
configFile: *const ::core::ffi::c_char,
outputFile: *const ::core::ffi::c_char,
- outputMode: cuda_types::CUoutput_mode,
+ outputMode: cuda_types::cuda::CUoutput_mode,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -22934,8 +22950,8 @@ pub fn write_cuProfilerStop(
}
pub fn write_cuGraphicsGLRegisterBuffer(
writer: &mut (impl std::io::Write + ?Sized),
- pCudaResource: *mut cuda_types::CUgraphicsResource,
- buffer: cuda_types::GLuint,
+ pCudaResource: *mut cuda_types::cuda::CUgraphicsResource,
+ buffer: cuda_types::cuda::GLuint,
Flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -22969,9 +22985,9 @@ pub fn write_cuGraphicsGLRegisterBuffer(
}
pub fn write_cuGraphicsGLRegisterImage(
writer: &mut (impl std::io::Write + ?Sized),
- pCudaResource: *mut cuda_types::CUgraphicsResource,
- image: cuda_types::GLuint,
- target: cuda_types::GLenum,
+ pCudaResource: *mut cuda_types::cuda::CUgraphicsResource,
+ image: cuda_types::cuda::GLuint,
+ target: cuda_types::cuda::GLenum,
Flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -23012,7 +23028,7 @@ pub fn write_cuGraphicsGLRegisterImage(
)?;
writer.write_all(b")")
}
-impl crate::format::CudaDisplay for cuda_types::CUGLDeviceList_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUGLDeviceList_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -23020,20 +23036,20 @@ impl crate::format::CudaDisplay for cuda_types::CUGLDeviceList_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUGLDeviceList_enum::CU_GL_DEVICE_LIST_ALL => {
+ &cuda_types::cuda::CUGLDeviceList_enum::CU_GL_DEVICE_LIST_ALL => {
writer.write_all(stringify!(CU_GL_DEVICE_LIST_ALL).as_bytes())
}
- &cuda_types::CUGLDeviceList_enum::CU_GL_DEVICE_LIST_CURRENT_FRAME => {
+ &cuda_types::cuda::CUGLDeviceList_enum::CU_GL_DEVICE_LIST_CURRENT_FRAME => {
writer.write_all(stringify!(CU_GL_DEVICE_LIST_CURRENT_FRAME).as_bytes())
}
- &cuda_types::CUGLDeviceList_enum::CU_GL_DEVICE_LIST_NEXT_FRAME => {
+ &cuda_types::cuda::CUGLDeviceList_enum::CU_GL_DEVICE_LIST_NEXT_FRAME => {
writer.write_all(stringify!(CU_GL_DEVICE_LIST_NEXT_FRAME).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUGLmap_flags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUGLmap_flags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -23041,14 +23057,14 @@ impl crate::format::CudaDisplay for cuda_types::CUGLmap_flags_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUGLmap_flags_enum::CU_GL_MAP_RESOURCE_FLAGS_NONE => {
+ &cuda_types::cuda::CUGLmap_flags_enum::CU_GL_MAP_RESOURCE_FLAGS_NONE => {
writer.write_all(stringify!(CU_GL_MAP_RESOURCE_FLAGS_NONE).as_bytes())
}
- &cuda_types::CUGLmap_flags_enum::CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY => {
+ &cuda_types::cuda::CUGLmap_flags_enum::CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY => {
writer
.write_all(stringify!(CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY).as_bytes())
}
- &cuda_types::CUGLmap_flags_enum::CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD => {
+ &cuda_types::cuda::CUGLmap_flags_enum::CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD => {
writer
.write_all(
stringify!(CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD).as_bytes(),
@@ -23060,9 +23076,9 @@ impl crate::format::CudaDisplay for cuda_types::CUGLmap_flags_enum {
}
pub fn write_cuGLCtxCreate_v2(
writer: &mut (impl std::io::Write + ?Sized),
- pCtx: *mut cuda_types::CUcontext,
+ pCtx: *mut cuda_types::cuda::CUcontext,
Flags: ::core::ffi::c_uint,
- device: cuda_types::CUdevice,
+ device: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -23085,7 +23101,7 @@ pub fn write_cuGLInit(
}
pub fn write_cuGLRegisterBufferObject(
writer: &mut (impl std::io::Write + ?Sized),
- buffer: cuda_types::GLuint,
+ buffer: cuda_types::cuda::GLuint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -23100,9 +23116,9 @@ pub fn write_cuGLRegisterBufferObject(
}
pub fn write_cuGLMapBufferObject_v2_ptds(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
size: *mut usize,
- buffer: cuda_types::GLuint,
+ buffer: cuda_types::cuda::GLuint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -23135,7 +23151,7 @@ pub fn write_cuGLMapBufferObject_v2_ptds(
}
pub fn write_cuGLUnmapBufferObject(
writer: &mut (impl std::io::Write + ?Sized),
- buffer: cuda_types::GLuint,
+ buffer: cuda_types::cuda::GLuint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -23150,7 +23166,7 @@ pub fn write_cuGLUnmapBufferObject(
}
pub fn write_cuGLUnregisterBufferObject(
writer: &mut (impl std::io::Write + ?Sized),
- buffer: cuda_types::GLuint,
+ buffer: cuda_types::cuda::GLuint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -23165,7 +23181,7 @@ pub fn write_cuGLUnregisterBufferObject(
}
pub fn write_cuGLSetBufferObjectMapFlags(
writer: &mut (impl std::io::Write + ?Sized),
- buffer: cuda_types::GLuint,
+ buffer: cuda_types::cuda::GLuint,
Flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -23190,10 +23206,10 @@ pub fn write_cuGLSetBufferObjectMapFlags(
}
pub fn write_cuGLMapBufferObjectAsync_v2_ptsz(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
size: *mut usize,
- buffer: cuda_types::GLuint,
- hStream: cuda_types::CUstream,
+ buffer: cuda_types::cuda::GLuint,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -23235,8 +23251,8 @@ pub fn write_cuGLMapBufferObjectAsync_v2_ptsz(
}
pub fn write_cuGLUnmapBufferObjectAsync(
writer: &mut (impl std::io::Write + ?Sized),
- buffer: cuda_types::GLuint,
- hStream: cuda_types::CUstream,
+ buffer: cuda_types::cuda::GLuint,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -23260,9 +23276,9 @@ pub fn write_cuGLUnmapBufferObjectAsync(
}
pub fn write_cuGLMapBufferObject_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
size: *mut usize,
- buffer: cuda_types::GLuint,
+ buffer: cuda_types::cuda::GLuint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -23285,10 +23301,10 @@ pub fn write_cuGLMapBufferObject_v2(
}
pub fn write_cuGLMapBufferObjectAsync_v2(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
size: *mut usize,
- buffer: cuda_types::GLuint,
- hStream: cuda_types::CUstream,
+ buffer: cuda_types::cuda::GLuint,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -23330,9 +23346,9 @@ pub fn write_cuGLMapBufferObjectAsync_v2(
}
pub fn write_cuGLCtxCreate(
writer: &mut (impl std::io::Write + ?Sized),
- pCtx: *mut cuda_types::CUcontext,
+ pCtx: *mut cuda_types::cuda::CUcontext,
Flags: ::core::ffi::c_uint,
- device: cuda_types::CUdevice,
+ device: cuda_types::cuda::CUdevice,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -23350,9 +23366,9 @@ pub fn write_cuGLCtxCreate(
}
pub fn write_cuGLMapBufferObject(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: *mut cuda_types::CUdeviceptr_v1,
+ dptr: *mut cuda_types::cuda::CUdeviceptr_v1,
size: *mut ::core::ffi::c_uint,
- buffer: cuda_types::GLuint,
+ buffer: cuda_types::cuda::GLuint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -23370,10 +23386,10 @@ pub fn write_cuGLMapBufferObject(
}
pub fn write_cuGLMapBufferObjectAsync(
writer: &mut (impl std::io::Write + ?Sized),
- dptr: *mut cuda_types::CUdeviceptr_v1,
+ dptr: *mut cuda_types::cuda::CUdeviceptr_v1,
size: *mut ::core::ffi::c_uint,
- buffer: cuda_types::GLuint,
- hStream: cuda_types::CUstream,
+ buffer: cuda_types::cuda::GLuint,
+ hStream: cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -23413,7 +23429,7 @@ pub fn write_cuGLMapBufferObjectAsync(
)?;
writer.write_all(b")")
}
-impl crate::format::CudaDisplay for cuda_types::CUeglFrameType_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUeglFrameType_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -23421,17 +23437,17 @@ impl crate::format::CudaDisplay for cuda_types::CUeglFrameType_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUeglFrameType_enum::CU_EGL_FRAME_TYPE_ARRAY => {
+ &cuda_types::cuda::CUeglFrameType_enum::CU_EGL_FRAME_TYPE_ARRAY => {
writer.write_all(stringify!(CU_EGL_FRAME_TYPE_ARRAY).as_bytes())
}
- &cuda_types::CUeglFrameType_enum::CU_EGL_FRAME_TYPE_PITCH => {
+ &cuda_types::cuda::CUeglFrameType_enum::CU_EGL_FRAME_TYPE_PITCH => {
writer.write_all(stringify!(CU_EGL_FRAME_TYPE_PITCH).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUeglResourceLocationFlags_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUeglResourceLocationFlags_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -23439,17 +23455,17 @@ impl crate::format::CudaDisplay for cuda_types::CUeglResourceLocationFlags_enum
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUeglResourceLocationFlags_enum::CU_EGL_RESOURCE_LOCATION_SYSMEM => {
+ &cuda_types::cuda::CUeglResourceLocationFlags_enum::CU_EGL_RESOURCE_LOCATION_SYSMEM => {
writer.write_all(stringify!(CU_EGL_RESOURCE_LOCATION_SYSMEM).as_bytes())
}
- &cuda_types::CUeglResourceLocationFlags_enum::CU_EGL_RESOURCE_LOCATION_VIDMEM => {
+ &cuda_types::cuda::CUeglResourceLocationFlags_enum::CU_EGL_RESOURCE_LOCATION_VIDMEM => {
writer.write_all(stringify!(CU_EGL_RESOURCE_LOCATION_VIDMEM).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUeglColorFormat_enum {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUeglColorFormat_enum {
fn write(
&self,
_fn_name: &'static str,
@@ -23457,512 +23473,512 @@ impl crate::format::CudaDisplay for cuda_types::CUeglColorFormat_enum {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_PLANAR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_PLANAR => {
writer
.write_all(stringify!(CU_EGL_COLOR_FORMAT_YUV420_PLANAR).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV422_PLANAR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV422_PLANAR => {
writer
.write_all(stringify!(CU_EGL_COLOR_FORMAT_YUV422_PLANAR).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_RGB => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_RGB => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_RGB).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BGR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BGR => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BGR).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_ARGB => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_ARGB => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_ARGB).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_RGBA => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_RGBA => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_RGBA).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_L => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_L => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_L).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_R => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_R => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_R).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV444_PLANAR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV444_PLANAR => {
writer
.write_all(stringify!(CU_EGL_COLOR_FORMAT_YUV444_PLANAR).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUYV_422 => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUYV_422 => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_YUYV_422).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_UYVY_422 => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_UYVY_422 => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_UYVY_422).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_ABGR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_ABGR => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_ABGR).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BGRA => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BGRA => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BGRA).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_A => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_A => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_A).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_RG => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_RG => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_RG).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_AYUV => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_AYUV => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_AYUV).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR)
.as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR)
.as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR)
.as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR)
.as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_VYUY_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_VYUY_ER => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_VYUY_ER).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_UYVY_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_UYVY_ER => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_UYVY_ER).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUYV_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUYV_ER => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_YUYV_ER).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVYU_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVYU_ER => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_YVYU_ER).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV_ER => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_YUV_ER).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUVA_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUVA_ER => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_YUVA_ER).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_AYUV_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_AYUV_ER => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_AYUV_ER).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_RGGB => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_RGGB => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_RGGB).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_BGGR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_BGGR => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_BGGR).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_GRBG => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_GRBG => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_GRBG).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_GBRG => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_GBRG => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_GBRG).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER10_RGGB => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER10_RGGB => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER10_RGGB).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER10_BGGR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER10_BGGR => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER10_BGGR).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER10_GRBG => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER10_GRBG => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER10_GRBG).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER10_GBRG => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER10_GBRG => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER10_GBRG).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_RGGB => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_RGGB => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER12_RGGB).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_BGGR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_BGGR => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER12_BGGR).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_GRBG => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_GRBG => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER12_GRBG).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_GBRG => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_GBRG => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER12_GBRG).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER14_RGGB => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER14_RGGB => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER14_RGGB).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER14_BGGR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER14_BGGR => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER14_BGGR).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER14_GRBG => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER14_GRBG => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER14_GRBG).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER14_GBRG => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER14_GBRG => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER14_GBRG).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER20_RGGB => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER20_RGGB => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER20_RGGB).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER20_BGGR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER20_BGGR => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER20_BGGR).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER20_GRBG => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER20_GRBG => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER20_GRBG).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER20_GBRG => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER20_GBRG => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER20_GBRG).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU444_PLANAR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU444_PLANAR => {
writer
.write_all(stringify!(CU_EGL_COLOR_FORMAT_YVU444_PLANAR).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU422_PLANAR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU422_PLANAR => {
writer
.write_all(stringify!(CU_EGL_COLOR_FORMAT_YVU422_PLANAR).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_PLANAR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_PLANAR => {
writer
.write_all(stringify!(CU_EGL_COLOR_FORMAT_YVU420_PLANAR).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB => {
writer
.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR => {
writer
.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG => {
writer
.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG => {
writer
.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_BCCR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_BCCR => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_BCCR).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_RCCB => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_RCCB => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_RCCB).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_CRBC => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_CRBC => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_CRBC).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_CBRC => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_CBRC => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_CBRC).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER10_CCCC => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER10_CCCC => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER10_CCCC).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_BCCR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_BCCR => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER12_BCCR).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_RCCB => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_RCCB => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER12_RCCB).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_CRBC => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_CRBC => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER12_CRBC).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_CBRC => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_CBRC => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER12_CBRC).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_CCCC => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_CCCC => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER12_CCCC).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_Y).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020 => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020 => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020 => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020 => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020 => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020 => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020 => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020 => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709 => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709 => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709 => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709 => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709 => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709 => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709 => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709 => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709).as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709)
.as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020)
.as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020)
.as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR)
.as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709 => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709 => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709)
.as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y_ER => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_Y_ER).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y_709_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y_709_ER => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_Y_709_ER).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10_ER => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_Y10_ER).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10_709_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10_709_ER => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_Y10_709_ER).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12_ER => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_Y12_ER).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12_709_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12_709_ER => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_Y12_709_ER).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUVA => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUVA => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_YUVA).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_YUV).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVYU => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVYU => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_YVYU).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_VYUY => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_VYUY => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_VYUY).as_bytes())
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER)
.as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER)
.as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER)
.as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER)
.as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER)
.as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER)
.as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER)
.as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER => {
writer
.write_all(
stringify!(CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER)
.as_bytes(),
)
}
- &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_MAX => {
+ &cuda_types::cuda::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_MAX => {
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_MAX).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::CUeglStreamConnection {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUeglStreamConnection {
fn write(
&self,
_fn_name: &'static str,
@@ -23974,8 +23990,8 @@ impl crate::format::CudaDisplay for cuda_types::CUeglStreamConnection {
}
pub fn write_cuGraphicsEGLRegisterImage(
writer: &mut (impl std::io::Write + ?Sized),
- pCudaResource: *mut cuda_types::CUgraphicsResource,
- image: cuda_types::EGLImageKHR,
+ pCudaResource: *mut cuda_types::cuda::CUgraphicsResource,
+ image: cuda_types::cuda::EGLImageKHR,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -24009,8 +24025,8 @@ pub fn write_cuGraphicsEGLRegisterImage(
}
pub fn write_cuEGLStreamConsumerConnect(
writer: &mut (impl std::io::Write + ?Sized),
- conn: *mut cuda_types::CUeglStreamConnection,
- stream: cuda_types::EGLStreamKHR,
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ stream: cuda_types::cuda::EGLStreamKHR,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -24034,8 +24050,8 @@ pub fn write_cuEGLStreamConsumerConnect(
}
pub fn write_cuEGLStreamConsumerConnectWithFlags(
writer: &mut (impl std::io::Write + ?Sized),
- conn: *mut cuda_types::CUeglStreamConnection,
- stream: cuda_types::EGLStreamKHR,
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ stream: cuda_types::cuda::EGLStreamKHR,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -24069,7 +24085,7 @@ pub fn write_cuEGLStreamConsumerConnectWithFlags(
}
pub fn write_cuEGLStreamConsumerDisconnect(
writer: &mut (impl std::io::Write + ?Sized),
- conn: *mut cuda_types::CUeglStreamConnection,
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -24084,9 +24100,9 @@ pub fn write_cuEGLStreamConsumerDisconnect(
}
pub fn write_cuEGLStreamConsumerAcquireFrame(
writer: &mut (impl std::io::Write + ?Sized),
- conn: *mut cuda_types::CUeglStreamConnection,
- pCudaResource: *mut cuda_types::CUgraphicsResource,
- pStream: *mut cuda_types::CUstream,
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ pCudaResource: *mut cuda_types::cuda::CUgraphicsResource,
+ pStream: *mut cuda_types::cuda::CUstream,
timeout: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -24129,9 +24145,9 @@ pub fn write_cuEGLStreamConsumerAcquireFrame(
}
pub fn write_cuEGLStreamConsumerReleaseFrame(
writer: &mut (impl std::io::Write + ?Sized),
- conn: *mut cuda_types::CUeglStreamConnection,
- pCudaResource: cuda_types::CUgraphicsResource,
- pStream: *mut cuda_types::CUstream,
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ pCudaResource: cuda_types::cuda::CUgraphicsResource,
+ pStream: *mut cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -24164,10 +24180,10 @@ pub fn write_cuEGLStreamConsumerReleaseFrame(
}
pub fn write_cuEGLStreamProducerConnect(
writer: &mut (impl std::io::Write + ?Sized),
- conn: *mut cuda_types::CUeglStreamConnection,
- stream: cuda_types::EGLStreamKHR,
- width: cuda_types::EGLint,
- height: cuda_types::EGLint,
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ stream: cuda_types::cuda::EGLStreamKHR,
+ width: cuda_types::cuda::EGLint,
+ height: cuda_types::cuda::EGLint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -24209,7 +24225,7 @@ pub fn write_cuEGLStreamProducerConnect(
}
pub fn write_cuEGLStreamProducerDisconnect(
writer: &mut (impl std::io::Write + ?Sized),
- conn: *mut cuda_types::CUeglStreamConnection,
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -24224,9 +24240,9 @@ pub fn write_cuEGLStreamProducerDisconnect(
}
pub fn write_cuEGLStreamProducerPresentFrame(
writer: &mut (impl std::io::Write + ?Sized),
- conn: *mut cuda_types::CUeglStreamConnection,
- eglframe: cuda_types::CUeglFrame,
- pStream: *mut cuda_types::CUstream,
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ eglframe: cuda_types::cuda::CUeglFrame,
+ pStream: *mut cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -24259,9 +24275,9 @@ pub fn write_cuEGLStreamProducerPresentFrame(
}
pub fn write_cuEGLStreamProducerReturnFrame(
writer: &mut (impl std::io::Write + ?Sized),
- conn: *mut cuda_types::CUeglStreamConnection,
- eglframe: *mut cuda_types::CUeglFrame,
- pStream: *mut cuda_types::CUstream,
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ eglframe: *mut cuda_types::cuda::CUeglFrame,
+ pStream: *mut cuda_types::cuda::CUstream,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -24294,8 +24310,8 @@ pub fn write_cuEGLStreamProducerReturnFrame(
}
pub fn write_cuGraphicsResourceGetMappedEglFrame(
writer: &mut (impl std::io::Write + ?Sized),
- eglFrame: *mut cuda_types::CUeglFrame,
- resource: cuda_types::CUgraphicsResource,
+ eglFrame: *mut cuda_types::cuda::CUeglFrame,
+ resource: cuda_types::cuda::CUgraphicsResource,
index: ::core::ffi::c_uint,
mipLevel: ::core::ffi::c_uint,
) -> std::io::Result<()> {
@@ -24339,8 +24355,8 @@ pub fn write_cuGraphicsResourceGetMappedEglFrame(
}
pub fn write_cuEventCreateFromEGLSync(
writer: &mut (impl std::io::Write + ?Sized),
- phEvent: *mut cuda_types::CUevent,
- eglSync: cuda_types::EGLSyncKHR,
+ phEvent: *mut cuda_types::cuda::CUevent,
+ eglSync: cuda_types::cuda::EGLSyncKHR,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -24372,7 +24388,7 @@ pub fn write_cuEventCreateFromEGLSync(
)?;
writer.write_all(b")")
}
-impl crate::format::CudaDisplay for cuda_types::VdpStatus {
+impl crate::format::CudaDisplay for cuda_types::cuda::VdpStatus {
fn write(
&self,
_fn_name: &'static str,
@@ -24380,112 +24396,112 @@ impl crate::format::CudaDisplay for cuda_types::VdpStatus {
writer: &mut (impl std::io::Write + ?Sized),
) -> std::io::Result<()> {
match self {
- &cuda_types::VdpStatus::VDP_STATUS_OK => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_OK => {
writer.write_all(stringify!(VDP_STATUS_OK).as_bytes())
}
- &cuda_types::VdpStatus::VDP_STATUS_NO_IMPLEMENTATION => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_NO_IMPLEMENTATION => {
writer.write_all(stringify!(VDP_STATUS_NO_IMPLEMENTATION).as_bytes())
}
- &cuda_types::VdpStatus::VDP_STATUS_DISPLAY_PREEMPTED => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_DISPLAY_PREEMPTED => {
writer.write_all(stringify!(VDP_STATUS_DISPLAY_PREEMPTED).as_bytes())
}
- &cuda_types::VdpStatus::VDP_STATUS_INVALID_HANDLE => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_INVALID_HANDLE => {
writer.write_all(stringify!(VDP_STATUS_INVALID_HANDLE).as_bytes())
}
- &cuda_types::VdpStatus::VDP_STATUS_INVALID_POINTER => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_INVALID_POINTER => {
writer.write_all(stringify!(VDP_STATUS_INVALID_POINTER).as_bytes())
}
- &cuda_types::VdpStatus::VDP_STATUS_INVALID_CHROMA_TYPE => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_INVALID_CHROMA_TYPE => {
writer.write_all(stringify!(VDP_STATUS_INVALID_CHROMA_TYPE).as_bytes())
}
- &cuda_types::VdpStatus::VDP_STATUS_INVALID_Y_CB_CR_FORMAT => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_INVALID_Y_CB_CR_FORMAT => {
writer
.write_all(stringify!(VDP_STATUS_INVALID_Y_CB_CR_FORMAT).as_bytes())
}
- &cuda_types::VdpStatus::VDP_STATUS_INVALID_RGBA_FORMAT => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_INVALID_RGBA_FORMAT => {
writer.write_all(stringify!(VDP_STATUS_INVALID_RGBA_FORMAT).as_bytes())
}
- &cuda_types::VdpStatus::VDP_STATUS_INVALID_INDEXED_FORMAT => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_INVALID_INDEXED_FORMAT => {
writer
.write_all(stringify!(VDP_STATUS_INVALID_INDEXED_FORMAT).as_bytes())
}
- &cuda_types::VdpStatus::VDP_STATUS_INVALID_COLOR_STANDARD => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_INVALID_COLOR_STANDARD => {
writer
.write_all(stringify!(VDP_STATUS_INVALID_COLOR_STANDARD).as_bytes())
}
- &cuda_types::VdpStatus::VDP_STATUS_INVALID_COLOR_TABLE_FORMAT => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_INVALID_COLOR_TABLE_FORMAT => {
writer
.write_all(
stringify!(VDP_STATUS_INVALID_COLOR_TABLE_FORMAT).as_bytes(),
)
}
- &cuda_types::VdpStatus::VDP_STATUS_INVALID_BLEND_FACTOR => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_INVALID_BLEND_FACTOR => {
writer.write_all(stringify!(VDP_STATUS_INVALID_BLEND_FACTOR).as_bytes())
}
- &cuda_types::VdpStatus::VDP_STATUS_INVALID_BLEND_EQUATION => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_INVALID_BLEND_EQUATION => {
writer
.write_all(stringify!(VDP_STATUS_INVALID_BLEND_EQUATION).as_bytes())
}
- &cuda_types::VdpStatus::VDP_STATUS_INVALID_FLAG => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_INVALID_FLAG => {
writer.write_all(stringify!(VDP_STATUS_INVALID_FLAG).as_bytes())
}
- &cuda_types::VdpStatus::VDP_STATUS_INVALID_DECODER_PROFILE => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_INVALID_DECODER_PROFILE => {
writer
.write_all(stringify!(VDP_STATUS_INVALID_DECODER_PROFILE).as_bytes())
}
- &cuda_types::VdpStatus::VDP_STATUS_INVALID_VIDEO_MIXER_FEATURE => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_INVALID_VIDEO_MIXER_FEATURE => {
writer
.write_all(
stringify!(VDP_STATUS_INVALID_VIDEO_MIXER_FEATURE).as_bytes(),
)
}
- &cuda_types::VdpStatus::VDP_STATUS_INVALID_VIDEO_MIXER_PARAMETER => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_INVALID_VIDEO_MIXER_PARAMETER => {
writer
.write_all(
stringify!(VDP_STATUS_INVALID_VIDEO_MIXER_PARAMETER).as_bytes(),
)
}
- &cuda_types::VdpStatus::VDP_STATUS_INVALID_VIDEO_MIXER_ATTRIBUTE => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_INVALID_VIDEO_MIXER_ATTRIBUTE => {
writer
.write_all(
stringify!(VDP_STATUS_INVALID_VIDEO_MIXER_ATTRIBUTE).as_bytes(),
)
}
- &cuda_types::VdpStatus::VDP_STATUS_INVALID_VIDEO_MIXER_PICTURE_STRUCTURE => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_INVALID_VIDEO_MIXER_PICTURE_STRUCTURE => {
writer
.write_all(
stringify!(VDP_STATUS_INVALID_VIDEO_MIXER_PICTURE_STRUCTURE)
.as_bytes(),
)
}
- &cuda_types::VdpStatus::VDP_STATUS_INVALID_FUNC_ID => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_INVALID_FUNC_ID => {
writer.write_all(stringify!(VDP_STATUS_INVALID_FUNC_ID).as_bytes())
}
- &cuda_types::VdpStatus::VDP_STATUS_INVALID_SIZE => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_INVALID_SIZE => {
writer.write_all(stringify!(VDP_STATUS_INVALID_SIZE).as_bytes())
}
- &cuda_types::VdpStatus::VDP_STATUS_INVALID_VALUE => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_INVALID_VALUE => {
writer.write_all(stringify!(VDP_STATUS_INVALID_VALUE).as_bytes())
}
- &cuda_types::VdpStatus::VDP_STATUS_INVALID_STRUCT_VERSION => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_INVALID_STRUCT_VERSION => {
writer
.write_all(stringify!(VDP_STATUS_INVALID_STRUCT_VERSION).as_bytes())
}
- &cuda_types::VdpStatus::VDP_STATUS_RESOURCES => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_RESOURCES => {
writer.write_all(stringify!(VDP_STATUS_RESOURCES).as_bytes())
}
- &cuda_types::VdpStatus::VDP_STATUS_HANDLE_DEVICE_MISMATCH => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_HANDLE_DEVICE_MISMATCH => {
writer
.write_all(stringify!(VDP_STATUS_HANDLE_DEVICE_MISMATCH).as_bytes())
}
- &cuda_types::VdpStatus::VDP_STATUS_ERROR => {
+ &cuda_types::cuda::VdpStatus::VDP_STATUS_ERROR => {
writer.write_all(stringify!(VDP_STATUS_ERROR).as_bytes())
}
_ => write!(writer, "{}", self.0),
}
}
}
-impl crate::format::CudaDisplay for cuda_types::VdpGetProcAddress {
+impl crate::format::CudaDisplay for cuda_types::cuda::VdpGetProcAddress {
fn write(
&self,
_fn_name: &'static str,
@@ -24497,7 +24513,7 @@ impl crate::format::CudaDisplay for cuda_types::VdpGetProcAddress {
"{:p}",
unsafe {
std::mem::transmute::<
- cuda_types::VdpGetProcAddress,
+ cuda_types::cuda::VdpGetProcAddress,
*mut ::std::ffi::c_void,
>(*self)
},
@@ -24506,9 +24522,9 @@ impl crate::format::CudaDisplay for cuda_types::VdpGetProcAddress {
}
pub fn write_cuVDPAUGetDevice(
writer: &mut (impl std::io::Write + ?Sized),
- pDevice: *mut cuda_types::CUdevice,
- vdpDevice: cuda_types::VdpDevice,
- vdpGetProcAddress: cuda_types::VdpGetProcAddress,
+ pDevice: *mut cuda_types::cuda::CUdevice,
+ vdpDevice: cuda_types::cuda::VdpDevice,
+ vdpGetProcAddress: cuda_types::cuda::VdpGetProcAddress,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -24531,11 +24547,11 @@ pub fn write_cuVDPAUGetDevice(
}
pub fn write_cuVDPAUCtxCreate_v2(
writer: &mut (impl std::io::Write + ?Sized),
- pCtx: *mut cuda_types::CUcontext,
+ pCtx: *mut cuda_types::cuda::CUcontext,
flags: ::core::ffi::c_uint,
- device: cuda_types::CUdevice,
- vdpDevice: cuda_types::VdpDevice,
- vdpGetProcAddress: cuda_types::VdpGetProcAddress,
+ device: cuda_types::cuda::CUdevice,
+ vdpDevice: cuda_types::cuda::VdpDevice,
+ vdpGetProcAddress: cuda_types::cuda::VdpGetProcAddress,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -24571,8 +24587,8 @@ pub fn write_cuVDPAUCtxCreate_v2(
}
pub fn write_cuGraphicsVDPAURegisterVideoSurface(
writer: &mut (impl std::io::Write + ?Sized),
- pCudaResource: *mut cuda_types::CUgraphicsResource,
- vdpSurface: cuda_types::VdpVideoSurface,
+ pCudaResource: *mut cuda_types::cuda::CUgraphicsResource,
+ vdpSurface: cuda_types::cuda::VdpVideoSurface,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -24606,8 +24622,8 @@ pub fn write_cuGraphicsVDPAURegisterVideoSurface(
}
pub fn write_cuGraphicsVDPAURegisterOutputSurface(
writer: &mut (impl std::io::Write + ?Sized),
- pCudaResource: *mut cuda_types::CUgraphicsResource,
- vdpSurface: cuda_types::VdpOutputSurface,
+ pCudaResource: *mut cuda_types::cuda::CUgraphicsResource,
+ vdpSurface: cuda_types::cuda::VdpOutputSurface,
flags: ::core::ffi::c_uint,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
@@ -24641,11 +24657,11 @@ pub fn write_cuGraphicsVDPAURegisterOutputSurface(
}
pub fn write_cuVDPAUCtxCreate(
writer: &mut (impl std::io::Write + ?Sized),
- pCtx: *mut cuda_types::CUcontext,
+ pCtx: *mut cuda_types::cuda::CUcontext,
flags: ::core::ffi::c_uint,
- device: cuda_types::CUdevice,
- vdpDevice: cuda_types::VdpDevice,
- vdpGetProcAddress: cuda_types::VdpGetProcAddress,
+ device: cuda_types::cuda::CUdevice,
+ vdpDevice: cuda_types::cuda::VdpDevice,
+ vdpGetProcAddress: cuda_types::cuda::VdpGetProcAddress,
) -> std::io::Result<()> {
let mut arg_idx = 0usize;
writer.write_all(b"(")?;
@@ -24674,7 +24690,7 @@ pub fn write_cuVDPAUCtxCreate(
)?;
writer.write_all(b")")
}
-impl crate::format::CudaDisplay for cuda_types::CUresult {
+impl crate::format::CudaDisplay for cuda_types::cuda::CUresult {
fn write(
&self,
_fn_name: &'static str,
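The bulk of the format_generated.rs hunks above is mechanical: every `cuda_types::Foo` path becomes `cuda_types::cuda::Foo`, because the generated types now live in per-API submodules of the cuda_types crate (the NVML shims further down in this diff import `cuda_types::nvml::*`). A minimal sketch of what the reorganized crate root presumably looks like; the actual contents of cuda_types/src/lib.rs are not part of this excerpt:

    // cuda_types/src/lib.rs (illustrative sketch, not the file from this commit)
    pub mod cuda; // CUDA driver API types: CUresult, CUdevice, CUeglFrame, ...
    pub mod nvml; // NVML types: nvmlReturn_t, nvmlDevice_t, ...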
diff --git a/zluda_dump/src/lib.rs b/zluda_dump/src/lib.rs
index c2dff42..4834690 100644
--- a/zluda_dump/src/lib.rs
+++ b/zluda_dump/src/lib.rs
@@ -1,4 +1,4 @@
-use cuda_types::*;
+use cuda_types::cuda::*;
use paste::paste;
use side_by_side::CudaDynamicFns;
use std::io;
@@ -9,7 +9,7 @@ extern crate lazy_static;
extern crate cuda_types;
macro_rules! extern_redirect {
- ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:path;)*) => {
+ ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:ty;)*) => {
$(
#[no_mangle]
#[allow(improper_ctypes_definitions)]
@@ -30,7 +30,7 @@ macro_rules! extern_redirect {
}
macro_rules! extern_redirect_with_post {
- ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:path;)*) => {
+ ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:ty;)*) => {
$(
#[no_mangle]
#[allow(improper_ctypes_definitions)]
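The only substantive change to the two macros in zluda_dump/src/lib.rs (and to emit_cuda_fn_table! in side_by_side.rs below) is the return-type fragment specifier: `$ret_type:path` becomes `$ret_type:ty`. A `path` matcher only accepts plain type paths, while `ty` accepts any type, so the macros can now also match declarations whose return type is, for example, a raw pointer (the regenerated NVML list used later in this diff contains nvmlErrorString, which returns `*const c_char`). A self-contained illustration of the difference, with made-up macro names:

    // `:path` only matches plain type paths; `:ty` matches any type.
    macro_rules! takes_path {
        ($t:path) => { stringify!($t) };
    }
    macro_rules! takes_ty {
        ($t:ty) => { stringify!($t) };
    }

    fn main() {
        let a = takes_path!(cuda_types::cuda::CUresult); // fine: a plain path
        // takes_path!(*mut core::ffi::c_void);          // would not compile
        let b = takes_ty!(*mut core::ffi::c_void);       // fine: any type matches `ty`
        println!("{a} / {b}");
    }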
diff --git a/zluda_dump/src/log.rs b/zluda_dump/src/log.rs
index 384bf63..484364c 100644
--- a/zluda_dump/src/log.rs
+++ b/zluda_dump/src/log.rs
@@ -1,8 +1,5 @@
use crate::format;
-use cuda_types::CUmodule;
-use cuda_types::CUuuid;
-
-use super::CUresult;
+use cuda_types::cuda::*;
use super::Settings;
use std::error::Error;
use std::ffi::c_void;
diff --git a/zluda_dump/src/os_unix.rs b/zluda_dump/src/os_unix.rs
index a4f29b2..e3d9d79 100644
--- a/zluda_dump/src/os_unix.rs
+++ b/zluda_dump/src/os_unix.rs
@@ -1,4 +1,4 @@
-use cuda_types::CUuuid;
+use cuda_types::cuda::CUuuid;
use std::ffi::{c_void, CStr, CString};
use std::mem;
diff --git a/zluda_dump/src/side_by_side.rs b/zluda_dump/src/side_by_side.rs
index 11789ca..678f026 100644
--- a/zluda_dump/src/side_by_side.rs
+++ b/zluda_dump/src/side_by_side.rs
@@ -56,7 +56,7 @@ impl CudaDynamicFns {
}
macro_rules! emit_cuda_fn_table {
- ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:path;)*) => {
+ ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:ty;)*) => {
#[derive(Default)]
#[allow(improper_ctypes)]
#[allow(improper_ctypes_definitions)]
diff --git a/zluda_dump/src/trace.rs b/zluda_dump/src/trace.rs
index 9002aca..064b51a 100644
--- a/zluda_dump/src/trace.rs
+++ b/zluda_dump/src/trace.rs
@@ -1,5 +1,5 @@
use crate::{dark_api, log, Settings};
-use cuda_types::CUmodule;
+use cuda_types::cuda::*;
use std::{
collections::HashMap,
ffi::{c_void, CStr, CString},
diff --git a/zluda_inject/Cargo.toml b/zluda_inject/Cargo.toml
index 65113a4..20e2e2d 100644
--- a/zluda_inject/Cargo.toml
+++ b/zluda_inject/Cargo.toml
@@ -9,7 +9,7 @@ name = "zluda_with"
path = "src/main.rs"
[target.'cfg(windows)'.dependencies]
-winapi = { version = "0.3", features = ["jobapi2", "processthreadsapi", "synchapi", "winbase", "std"] }
+winapi = { version = "0.3", features = ["jobapi2", "processthreadsapi", "synchapi", "winbase", "std", "processenv"] }
tempfile = "3"
argh = "0.1"
detours-sys = { path = "../detours-sys" }
diff --git a/zluda_inject/build.rs b/zluda_inject/build.rs
index ccce573..c79d2d2 100644
--- a/zluda_inject/build.rs
+++ b/zluda_inject/build.rs
@@ -7,6 +7,9 @@ use std::{
};
fn main() -> Result<(), VarError> {
+ if std::env::var_os("CARGO_CFG_WINDOWS").is_none() {
+ return Ok(());
+ }
println!("cargo:rerun-if-changed=build.rs");
if env::var("PROFILE")? != "debug" {
return Ok(());
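The new guard at the top of zluda_inject/build.rs turns the build script into a no-op whenever the compilation target is not Windows; Cargo exposes the target configuration to build scripts through CARGO_CFG_* environment variables, so the check keys off the target rather than the host. A standalone sketch of the same pattern (illustrative only, not the repository's build.rs):

    // build.rs sketch: skip Windows-only work based on the *target* configuration.
    use std::env;

    fn main() {
        // Cargo sets CARGO_CFG_WINDOWS for build scripts only when the target
        // family is windows, so cross-compilation is handled correctly.
        if env::var_os("CARGO_CFG_WINDOWS").is_none() {
            return;
        }
        println!("cargo:rerun-if-changed=build.rs");
        // ... Windows-only steps (compiling the injection test helpers, etc.) ...
    }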
diff --git a/zluda_inject/tests/helpers/do_cuinit_early.rs b/zluda_inject/tests/helpers/do_cuinit_early.rs
index 9743f4a..7d10855 100644
--- a/zluda_inject/tests/helpers/do_cuinit_early.rs
+++ b/zluda_inject/tests/helpers/do_cuinit_early.rs
@@ -1,6 +1,6 @@
#![crate_type = "bin"]
-#[link(name = "do_cuinit")]
+#[link(name = "do_cuinit", kind = "raw-dylib")]
extern "system" {
fn do_cuinit(flags: u32) -> u32;
}
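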
diff --git a/zluda_ml/Cargo.toml b/zluda_ml/Cargo.toml
index d2e2378..71a0331 100644
--- a/zluda_ml/Cargo.toml
+++ b/zluda_ml/Cargo.toml
@@ -7,3 +7,7 @@ edition = "2021"
[lib]
name = "nvml"
crate-type = ["cdylib"]
+
+[dependencies]
+cuda_base = { path = "../cuda_base" }
+cuda_types = { path = "../cuda_types" }
diff --git a/zluda_ml/README b/zluda_ml/README
deleted file mode 100644
index 60a59ad..0000000
--- a/zluda_ml/README
+++ /dev/null
@@ -1,3 +0,0 @@
-bindgen "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.0\include\nvml.h" --whitelist-function="^nvml.*" --size_t-is-usize --default-enum-style=newtype --no-layout-tests --no-doc-comments --no-derive-debug -o src/nvml.rs
-sed -i -e 's/extern "C" {//g' -e 's/-> nvmlReturn_t;/-> nvmlReturn_t { crate::r#impl::unimplemented()/g' -e 's/pub fn /#[no_mangle] pub extern "C" fn /g' src/nvml.rs
-rustfmt src/nvml.rs
\ No newline at end of file
diff --git a/zluda_ml/src/impl.rs b/zluda_ml/src/impl.rs
index 35f3778..818d711 100644
--- a/zluda_ml/src/impl.rs
+++ b/zluda_ml/src/impl.rs
@@ -1,4 +1,5 @@
-use crate::nvml::nvmlReturn_t;
+use cuda_types::nvml::*;
+use std::{ffi::CStr, ptr};
#[cfg(debug_assertions)]
pub(crate) fn unimplemented() -> nvmlReturn_t {
@@ -9,3 +10,35 @@ pub(crate) fn unimplemented() -> nvmlReturn_t {
pub(crate) fn unimplemented() -> nvmlReturn_t {
nvmlReturn_t::NVML_ERROR_NOT_SUPPORTED
}
+
+#[allow(non_snake_case)]
+pub(crate) fn nvmlErrorString(
+ _result: cuda_types::nvml::nvmlReturn_t,
+) -> *const ::core::ffi::c_char {
+ c"".as_ptr()
+}
+
+#[allow(non_snake_case)]
+pub(crate) fn nvmlInit_v2() -> cuda_types::nvml::nvmlReturn_t {
+ nvmlReturn_t::SUCCESS
+}
+
+const VERSION: &'static CStr = c"550.77";
+
+#[allow(non_snake_case)]
+pub(crate) fn nvmlSystemGetDriverVersion(
+ result: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+) -> cuda_types::nvml::nvmlReturn_t {
+ if result == ptr::null_mut() {
+ return nvmlReturn_t::ERROR_INVALID_ARGUMENT;
+ }
+ let version = VERSION.to_bytes_with_nul();
+ let copy_length = usize::min(length as usize, version.len());
+ let slice = unsafe { std::slice::from_raw_parts_mut(result.cast(), copy_length) };
+ slice.copy_from_slice(&version[..copy_length]);
+ if let Some(null) = slice.last_mut() {
+ *null = 0;
+ }
+ nvmlReturn_t::SUCCESS
+}
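The nvmlSystemGetDriverVersion shim above copies at most `length` bytes of the hard-coded "550.77" string into the caller's buffer and then overwrites the last copied byte with NUL, so undersized buffers still receive a valid, if truncated, C string. A small safe approximation of that copy logic with a usage check (a sketch for illustration, not code from the crate):

    use std::ffi::CStr;

    // Mirrors the shim's copy-and-terminate behaviour for a caller-provided buffer.
    fn copy_version(version: &CStr, out: &mut [u8]) {
        let src = version.to_bytes_with_nul();
        let n = usize::min(out.len(), src.len());
        out[..n].copy_from_slice(&src[..n]);
        if let Some(last) = out[..n].last_mut() {
            *last = 0; // guarantee NUL termination even when truncated
        }
    }

    fn main() {
        let mut small = [0u8; 4];
        copy_version(c"550.77", &mut small);
        // The truncated result is still a valid C string: "550".
        assert_eq!(CStr::from_bytes_until_nul(&small).unwrap().to_bytes(), b"550");
        println!("truncated copy ok");
    }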
diff --git a/zluda_ml/src/lib.rs b/zluda_ml/src/lib.rs
index bb62334..690ed67 100644
--- a/zluda_ml/src/lib.rs
+++ b/zluda_ml/src/lib.rs
@@ -1,3 +1,34 @@
-pub mod r#impl;
-#[allow(warnings)]
-mod nvml;
\ No newline at end of file
+mod r#impl;
+
+macro_rules! unimplemented_fn {
+ ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:ty;)*) => {
+ $(
+ #[no_mangle]
+ #[allow(improper_ctypes_definitions)]
+ pub extern $abi fn $fn_name ( $( $arg_id : $arg_type),* ) -> $ret_type {
+ r#impl::unimplemented()
+ }
+ )*
+ };
+}
+
+macro_rules! implemented_fn {
+ ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:ty;)*) => {
+ $(
+ #[no_mangle]
+ #[allow(improper_ctypes_definitions)]
+ pub extern $abi fn $fn_name ( $( $arg_id : $arg_type),* ) -> $ret_type {
+ r#impl::$fn_name($($arg_id),*)
+ }
+ )*
+ };
+}
+
+cuda_base::nvml_function_declarations!(
+ unimplemented_fn,
+ implemented_fn <= [
+ nvmlErrorString,
+ nvmlInit_v2,
+ nvmlSystemGetDriverVersion
+ ]
+);
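The two macro_rules! wrappers and the cuda_base::nvml_function_declarations! invocation above replace the old hand-maintained nvml.rs deleted below: the generated macro presumably feeds every NVML entry point to `unimplemented_fn`, except for the names listed after `<=`, which are routed to `implemented_fn` and forwarded into the r#impl module. Roughly what the expansion yields for one routed function, assuming a "C" ABI in the generated declaration list (illustrative, inside the zluda_ml crate):

    // Expansion of nvmlInit_v2 under `implemented_fn` (illustrative):
    #[no_mangle]
    #[allow(improper_ctypes_definitions)]
    pub extern "C" fn nvmlInit_v2() -> cuda_types::nvml::nvmlReturn_t {
        r#impl::nvmlInit_v2()
    }

    // Every entry point not named in the `<=` list expands under
    // `unimplemented_fn` to an identical stub whose body is
    // `r#impl::unimplemented()`.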
diff --git a/zluda_ml/src/nvml.rs b/zluda_ml/src/nvml.rs
deleted file mode 100644
index 0358bc6..0000000
--- a/zluda_ml/src/nvml.rs
+++ /dev/null
@@ -1,3171 +0,0 @@
-/* automatically generated by rust-bindgen 0.57.0 */
-
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlDevice_st {
- _unused: [u8; 0],
-}
-pub type nvmlDevice_t = *mut nvmlDevice_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlPciInfo_st {
- pub busIdLegacy: [::std::os::raw::c_char; 16usize],
- pub domain: ::std::os::raw::c_uint,
- pub bus: ::std::os::raw::c_uint,
- pub device: ::std::os::raw::c_uint,
- pub pciDeviceId: ::std::os::raw::c_uint,
- pub pciSubSystemId: ::std::os::raw::c_uint,
- pub busId: [::std::os::raw::c_char; 32usize],
-}
-pub type nvmlPciInfo_t = nvmlPciInfo_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlEccErrorCounts_st {
- pub l1Cache: ::std::os::raw::c_ulonglong,
- pub l2Cache: ::std::os::raw::c_ulonglong,
- pub deviceMemory: ::std::os::raw::c_ulonglong,
- pub registerFile: ::std::os::raw::c_ulonglong,
-}
-pub type nvmlEccErrorCounts_t = nvmlEccErrorCounts_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlUtilization_st {
- pub gpu: ::std::os::raw::c_uint,
- pub memory: ::std::os::raw::c_uint,
-}
-pub type nvmlUtilization_t = nvmlUtilization_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlMemory_st {
- pub total: ::std::os::raw::c_ulonglong,
- pub free: ::std::os::raw::c_ulonglong,
- pub used: ::std::os::raw::c_ulonglong,
-}
-pub type nvmlMemory_t = nvmlMemory_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlBAR1Memory_st {
- pub bar1Total: ::std::os::raw::c_ulonglong,
- pub bar1Free: ::std::os::raw::c_ulonglong,
- pub bar1Used: ::std::os::raw::c_ulonglong,
-}
-pub type nvmlBAR1Memory_t = nvmlBAR1Memory_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlProcessInfo_st {
- pub pid: ::std::os::raw::c_uint,
- pub usedGpuMemory: ::std::os::raw::c_ulonglong,
-}
-pub type nvmlProcessInfo_t = nvmlProcessInfo_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlDeviceAttributes_st {
- pub multiprocessorCount: ::std::os::raw::c_uint,
- pub sharedCopyEngineCount: ::std::os::raw::c_uint,
- pub sharedDecoderCount: ::std::os::raw::c_uint,
- pub sharedEncoderCount: ::std::os::raw::c_uint,
- pub sharedJpegCount: ::std::os::raw::c_uint,
- pub sharedOfaCount: ::std::os::raw::c_uint,
-}
-pub type nvmlDeviceAttributes_t = nvmlDeviceAttributes_st;
-impl nvmlBridgeChipType_enum {
- pub const NVML_BRIDGE_CHIP_PLX: nvmlBridgeChipType_enum = nvmlBridgeChipType_enum(0);
-}
-impl nvmlBridgeChipType_enum {
- pub const NVML_BRIDGE_CHIP_BRO4: nvmlBridgeChipType_enum = nvmlBridgeChipType_enum(1);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlBridgeChipType_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlBridgeChipType_enum as nvmlBridgeChipType_t;
-impl nvmlNvLinkUtilizationCountUnits_enum {
- pub const NVML_NVLINK_COUNTER_UNIT_CYCLES: nvmlNvLinkUtilizationCountUnits_enum =
- nvmlNvLinkUtilizationCountUnits_enum(0);
-}
-impl nvmlNvLinkUtilizationCountUnits_enum {
- pub const NVML_NVLINK_COUNTER_UNIT_PACKETS: nvmlNvLinkUtilizationCountUnits_enum =
- nvmlNvLinkUtilizationCountUnits_enum(1);
-}
-impl nvmlNvLinkUtilizationCountUnits_enum {
- pub const NVML_NVLINK_COUNTER_UNIT_BYTES: nvmlNvLinkUtilizationCountUnits_enum =
- nvmlNvLinkUtilizationCountUnits_enum(2);
-}
-impl nvmlNvLinkUtilizationCountUnits_enum {
- pub const NVML_NVLINK_COUNTER_UNIT_RESERVED: nvmlNvLinkUtilizationCountUnits_enum =
- nvmlNvLinkUtilizationCountUnits_enum(3);
-}
-impl nvmlNvLinkUtilizationCountUnits_enum {
- pub const NVML_NVLINK_COUNTER_UNIT_COUNT: nvmlNvLinkUtilizationCountUnits_enum =
- nvmlNvLinkUtilizationCountUnits_enum(4);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlNvLinkUtilizationCountUnits_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlNvLinkUtilizationCountUnits_enum as nvmlNvLinkUtilizationCountUnits_t;
-impl nvmlNvLinkUtilizationCountPktTypes_enum {
- pub const NVML_NVLINK_COUNTER_PKTFILTER_NOP: nvmlNvLinkUtilizationCountPktTypes_enum =
- nvmlNvLinkUtilizationCountPktTypes_enum(1);
-}
-impl nvmlNvLinkUtilizationCountPktTypes_enum {
- pub const NVML_NVLINK_COUNTER_PKTFILTER_READ: nvmlNvLinkUtilizationCountPktTypes_enum =
- nvmlNvLinkUtilizationCountPktTypes_enum(2);
-}
-impl nvmlNvLinkUtilizationCountPktTypes_enum {
- pub const NVML_NVLINK_COUNTER_PKTFILTER_WRITE: nvmlNvLinkUtilizationCountPktTypes_enum =
- nvmlNvLinkUtilizationCountPktTypes_enum(4);
-}
-impl nvmlNvLinkUtilizationCountPktTypes_enum {
- pub const NVML_NVLINK_COUNTER_PKTFILTER_RATOM: nvmlNvLinkUtilizationCountPktTypes_enum =
- nvmlNvLinkUtilizationCountPktTypes_enum(8);
-}
-impl nvmlNvLinkUtilizationCountPktTypes_enum {
- pub const NVML_NVLINK_COUNTER_PKTFILTER_NRATOM: nvmlNvLinkUtilizationCountPktTypes_enum =
- nvmlNvLinkUtilizationCountPktTypes_enum(16);
-}
-impl nvmlNvLinkUtilizationCountPktTypes_enum {
- pub const NVML_NVLINK_COUNTER_PKTFILTER_FLUSH: nvmlNvLinkUtilizationCountPktTypes_enum =
- nvmlNvLinkUtilizationCountPktTypes_enum(32);
-}
-impl nvmlNvLinkUtilizationCountPktTypes_enum {
- pub const NVML_NVLINK_COUNTER_PKTFILTER_RESPDATA: nvmlNvLinkUtilizationCountPktTypes_enum =
- nvmlNvLinkUtilizationCountPktTypes_enum(64);
-}
-impl nvmlNvLinkUtilizationCountPktTypes_enum {
- pub const NVML_NVLINK_COUNTER_PKTFILTER_RESPNODATA: nvmlNvLinkUtilizationCountPktTypes_enum =
- nvmlNvLinkUtilizationCountPktTypes_enum(128);
-}
-impl nvmlNvLinkUtilizationCountPktTypes_enum {
- pub const NVML_NVLINK_COUNTER_PKTFILTER_ALL: nvmlNvLinkUtilizationCountPktTypes_enum =
- nvmlNvLinkUtilizationCountPktTypes_enum(255);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlNvLinkUtilizationCountPktTypes_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlNvLinkUtilizationCountPktTypes_enum as nvmlNvLinkUtilizationCountPktTypes_t;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlNvLinkUtilizationControl_st {
- pub units: nvmlNvLinkUtilizationCountUnits_t,
- pub pktfilter: nvmlNvLinkUtilizationCountPktTypes_t,
-}
-pub type nvmlNvLinkUtilizationControl_t = nvmlNvLinkUtilizationControl_st;
-impl nvmlNvLinkCapability_enum {
- pub const NVML_NVLINK_CAP_P2P_SUPPORTED: nvmlNvLinkCapability_enum =
- nvmlNvLinkCapability_enum(0);
-}
-impl nvmlNvLinkCapability_enum {
- pub const NVML_NVLINK_CAP_SYSMEM_ACCESS: nvmlNvLinkCapability_enum =
- nvmlNvLinkCapability_enum(1);
-}
-impl nvmlNvLinkCapability_enum {
- pub const NVML_NVLINK_CAP_P2P_ATOMICS: nvmlNvLinkCapability_enum = nvmlNvLinkCapability_enum(2);
-}
-impl nvmlNvLinkCapability_enum {
- pub const NVML_NVLINK_CAP_SYSMEM_ATOMICS: nvmlNvLinkCapability_enum =
- nvmlNvLinkCapability_enum(3);
-}
-impl nvmlNvLinkCapability_enum {
- pub const NVML_NVLINK_CAP_SLI_BRIDGE: nvmlNvLinkCapability_enum = nvmlNvLinkCapability_enum(4);
-}
-impl nvmlNvLinkCapability_enum {
- pub const NVML_NVLINK_CAP_VALID: nvmlNvLinkCapability_enum = nvmlNvLinkCapability_enum(5);
-}
-impl nvmlNvLinkCapability_enum {
- pub const NVML_NVLINK_CAP_COUNT: nvmlNvLinkCapability_enum = nvmlNvLinkCapability_enum(6);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlNvLinkCapability_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlNvLinkCapability_enum as nvmlNvLinkCapability_t;
-impl nvmlNvLinkErrorCounter_enum {
- pub const NVML_NVLINK_ERROR_DL_REPLAY: nvmlNvLinkErrorCounter_enum =
- nvmlNvLinkErrorCounter_enum(0);
-}
-impl nvmlNvLinkErrorCounter_enum {
- pub const NVML_NVLINK_ERROR_DL_RECOVERY: nvmlNvLinkErrorCounter_enum =
- nvmlNvLinkErrorCounter_enum(1);
-}
-impl nvmlNvLinkErrorCounter_enum {
- pub const NVML_NVLINK_ERROR_DL_CRC_FLIT: nvmlNvLinkErrorCounter_enum =
- nvmlNvLinkErrorCounter_enum(2);
-}
-impl nvmlNvLinkErrorCounter_enum {
- pub const NVML_NVLINK_ERROR_DL_CRC_DATA: nvmlNvLinkErrorCounter_enum =
- nvmlNvLinkErrorCounter_enum(3);
-}
-impl nvmlNvLinkErrorCounter_enum {
- pub const NVML_NVLINK_ERROR_COUNT: nvmlNvLinkErrorCounter_enum = nvmlNvLinkErrorCounter_enum(4);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlNvLinkErrorCounter_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlNvLinkErrorCounter_enum as nvmlNvLinkErrorCounter_t;
-impl nvmlGpuLevel_enum {
- pub const NVML_TOPOLOGY_INTERNAL: nvmlGpuLevel_enum = nvmlGpuLevel_enum(0);
-}
-impl nvmlGpuLevel_enum {
- pub const NVML_TOPOLOGY_SINGLE: nvmlGpuLevel_enum = nvmlGpuLevel_enum(10);
-}
-impl nvmlGpuLevel_enum {
- pub const NVML_TOPOLOGY_MULTIPLE: nvmlGpuLevel_enum = nvmlGpuLevel_enum(20);
-}
-impl nvmlGpuLevel_enum {
- pub const NVML_TOPOLOGY_HOSTBRIDGE: nvmlGpuLevel_enum = nvmlGpuLevel_enum(30);
-}
-impl nvmlGpuLevel_enum {
- pub const NVML_TOPOLOGY_NODE: nvmlGpuLevel_enum = nvmlGpuLevel_enum(40);
-}
-impl nvmlGpuLevel_enum {
- pub const NVML_TOPOLOGY_SYSTEM: nvmlGpuLevel_enum = nvmlGpuLevel_enum(50);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlGpuLevel_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlGpuLevel_enum as nvmlGpuTopologyLevel_t;
-impl nvmlGpuP2PStatus_enum {
- pub const NVML_P2P_STATUS_OK: nvmlGpuP2PStatus_enum = nvmlGpuP2PStatus_enum(0);
-}
-impl nvmlGpuP2PStatus_enum {
- pub const NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED: nvmlGpuP2PStatus_enum =
- nvmlGpuP2PStatus_enum(1);
-}
-impl nvmlGpuP2PStatus_enum {
- pub const NVML_P2P_STATUS_GPU_NOT_SUPPORTED: nvmlGpuP2PStatus_enum = nvmlGpuP2PStatus_enum(2);
-}
-impl nvmlGpuP2PStatus_enum {
- pub const NVML_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED: nvmlGpuP2PStatus_enum =
- nvmlGpuP2PStatus_enum(3);
-}
-impl nvmlGpuP2PStatus_enum {
- pub const NVML_P2P_STATUS_DISABLED_BY_REGKEY: nvmlGpuP2PStatus_enum = nvmlGpuP2PStatus_enum(4);
-}
-impl nvmlGpuP2PStatus_enum {
- pub const NVML_P2P_STATUS_NOT_SUPPORTED: nvmlGpuP2PStatus_enum = nvmlGpuP2PStatus_enum(5);
-}
-impl nvmlGpuP2PStatus_enum {
- pub const NVML_P2P_STATUS_UNKNOWN: nvmlGpuP2PStatus_enum = nvmlGpuP2PStatus_enum(6);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlGpuP2PStatus_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlGpuP2PStatus_enum as nvmlGpuP2PStatus_t;
-impl nvmlGpuP2PCapsIndex_enum {
- pub const NVML_P2P_CAPS_INDEX_READ: nvmlGpuP2PCapsIndex_enum = nvmlGpuP2PCapsIndex_enum(0);
-}
-impl nvmlGpuP2PCapsIndex_enum {
- pub const NVML_P2P_CAPS_INDEX_WRITE: nvmlGpuP2PCapsIndex_enum = nvmlGpuP2PCapsIndex_enum(1);
-}
-impl nvmlGpuP2PCapsIndex_enum {
- pub const NVML_P2P_CAPS_INDEX_NVLINK: nvmlGpuP2PCapsIndex_enum = nvmlGpuP2PCapsIndex_enum(2);
-}
-impl nvmlGpuP2PCapsIndex_enum {
- pub const NVML_P2P_CAPS_INDEX_ATOMICS: nvmlGpuP2PCapsIndex_enum = nvmlGpuP2PCapsIndex_enum(3);
-}
-impl nvmlGpuP2PCapsIndex_enum {
- pub const NVML_P2P_CAPS_INDEX_PROP: nvmlGpuP2PCapsIndex_enum = nvmlGpuP2PCapsIndex_enum(4);
-}
-impl nvmlGpuP2PCapsIndex_enum {
- pub const NVML_P2P_CAPS_INDEX_UNKNOWN: nvmlGpuP2PCapsIndex_enum = nvmlGpuP2PCapsIndex_enum(5);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlGpuP2PCapsIndex_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlGpuP2PCapsIndex_enum as nvmlGpuP2PCapsIndex_t;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlBridgeChipInfo_st {
- pub type_: nvmlBridgeChipType_t,
- pub fwVersion: ::std::os::raw::c_uint,
-}
-pub type nvmlBridgeChipInfo_t = nvmlBridgeChipInfo_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlBridgeChipHierarchy_st {
- pub bridgeCount: ::std::os::raw::c_uchar,
- pub bridgeChipInfo: [nvmlBridgeChipInfo_t; 128usize],
-}
-pub type nvmlBridgeChipHierarchy_t = nvmlBridgeChipHierarchy_st;
-impl nvmlSamplingType_enum {
- pub const NVML_TOTAL_POWER_SAMPLES: nvmlSamplingType_enum = nvmlSamplingType_enum(0);
-}
-impl nvmlSamplingType_enum {
- pub const NVML_GPU_UTILIZATION_SAMPLES: nvmlSamplingType_enum = nvmlSamplingType_enum(1);
-}
-impl nvmlSamplingType_enum {
- pub const NVML_MEMORY_UTILIZATION_SAMPLES: nvmlSamplingType_enum = nvmlSamplingType_enum(2);
-}
-impl nvmlSamplingType_enum {
- pub const NVML_ENC_UTILIZATION_SAMPLES: nvmlSamplingType_enum = nvmlSamplingType_enum(3);
-}
-impl nvmlSamplingType_enum {
- pub const NVML_DEC_UTILIZATION_SAMPLES: nvmlSamplingType_enum = nvmlSamplingType_enum(4);
-}
-impl nvmlSamplingType_enum {
- pub const NVML_PROCESSOR_CLK_SAMPLES: nvmlSamplingType_enum = nvmlSamplingType_enum(5);
-}
-impl nvmlSamplingType_enum {
- pub const NVML_MEMORY_CLK_SAMPLES: nvmlSamplingType_enum = nvmlSamplingType_enum(6);
-}
-impl nvmlSamplingType_enum {
- pub const NVML_SAMPLINGTYPE_COUNT: nvmlSamplingType_enum = nvmlSamplingType_enum(7);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlSamplingType_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlSamplingType_enum as nvmlSamplingType_t;
-impl nvmlPcieUtilCounter_enum {
- pub const NVML_PCIE_UTIL_TX_BYTES: nvmlPcieUtilCounter_enum = nvmlPcieUtilCounter_enum(0);
-}
-impl nvmlPcieUtilCounter_enum {
- pub const NVML_PCIE_UTIL_RX_BYTES: nvmlPcieUtilCounter_enum = nvmlPcieUtilCounter_enum(1);
-}
-impl nvmlPcieUtilCounter_enum {
- pub const NVML_PCIE_UTIL_COUNT: nvmlPcieUtilCounter_enum = nvmlPcieUtilCounter_enum(2);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlPcieUtilCounter_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlPcieUtilCounter_enum as nvmlPcieUtilCounter_t;
-impl nvmlValueType_enum {
- pub const NVML_VALUE_TYPE_DOUBLE: nvmlValueType_enum = nvmlValueType_enum(0);
-}
-impl nvmlValueType_enum {
- pub const NVML_VALUE_TYPE_UNSIGNED_INT: nvmlValueType_enum = nvmlValueType_enum(1);
-}
-impl nvmlValueType_enum {
- pub const NVML_VALUE_TYPE_UNSIGNED_LONG: nvmlValueType_enum = nvmlValueType_enum(2);
-}
-impl nvmlValueType_enum {
- pub const NVML_VALUE_TYPE_UNSIGNED_LONG_LONG: nvmlValueType_enum = nvmlValueType_enum(3);
-}
-impl nvmlValueType_enum {
- pub const NVML_VALUE_TYPE_SIGNED_LONG_LONG: nvmlValueType_enum = nvmlValueType_enum(4);
-}
-impl nvmlValueType_enum {
- pub const NVML_VALUE_TYPE_COUNT: nvmlValueType_enum = nvmlValueType_enum(5);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlValueType_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlValueType_enum as nvmlValueType_t;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub union nvmlValue_st {
- pub dVal: f64,
- pub uiVal: ::std::os::raw::c_uint,
- pub ulVal: ::std::os::raw::c_ulong,
- pub ullVal: ::std::os::raw::c_ulonglong,
- pub sllVal: ::std::os::raw::c_longlong,
- _bindgen_union_align: u64,
-}
-pub type nvmlValue_t = nvmlValue_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlSample_st {
- pub timeStamp: ::std::os::raw::c_ulonglong,
- pub sampleValue: nvmlValue_t,
-}
-pub type nvmlSample_t = nvmlSample_st;
-impl nvmlPerfPolicyType_enum {
- pub const NVML_PERF_POLICY_POWER: nvmlPerfPolicyType_enum = nvmlPerfPolicyType_enum(0);
-}
-impl nvmlPerfPolicyType_enum {
- pub const NVML_PERF_POLICY_THERMAL: nvmlPerfPolicyType_enum = nvmlPerfPolicyType_enum(1);
-}
-impl nvmlPerfPolicyType_enum {
- pub const NVML_PERF_POLICY_SYNC_BOOST: nvmlPerfPolicyType_enum = nvmlPerfPolicyType_enum(2);
-}
-impl nvmlPerfPolicyType_enum {
- pub const NVML_PERF_POLICY_BOARD_LIMIT: nvmlPerfPolicyType_enum = nvmlPerfPolicyType_enum(3);
-}
-impl nvmlPerfPolicyType_enum {
- pub const NVML_PERF_POLICY_LOW_UTILIZATION: nvmlPerfPolicyType_enum =
- nvmlPerfPolicyType_enum(4);
-}
-impl nvmlPerfPolicyType_enum {
- pub const NVML_PERF_POLICY_RELIABILITY: nvmlPerfPolicyType_enum = nvmlPerfPolicyType_enum(5);
-}
-impl nvmlPerfPolicyType_enum {
- pub const NVML_PERF_POLICY_TOTAL_APP_CLOCKS: nvmlPerfPolicyType_enum =
- nvmlPerfPolicyType_enum(10);
-}
-impl nvmlPerfPolicyType_enum {
- pub const NVML_PERF_POLICY_TOTAL_BASE_CLOCKS: nvmlPerfPolicyType_enum =
- nvmlPerfPolicyType_enum(11);
-}
-impl nvmlPerfPolicyType_enum {
- pub const NVML_PERF_POLICY_COUNT: nvmlPerfPolicyType_enum = nvmlPerfPolicyType_enum(12);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlPerfPolicyType_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlPerfPolicyType_enum as nvmlPerfPolicyType_t;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlViolationTime_st {
- pub referenceTime: ::std::os::raw::c_ulonglong,
- pub violationTime: ::std::os::raw::c_ulonglong,
-}
-pub type nvmlViolationTime_t = nvmlViolationTime_st;
-impl nvmlEnableState_enum {
- pub const NVML_FEATURE_DISABLED: nvmlEnableState_enum = nvmlEnableState_enum(0);
-}
-impl nvmlEnableState_enum {
- pub const NVML_FEATURE_ENABLED: nvmlEnableState_enum = nvmlEnableState_enum(1);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlEnableState_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlEnableState_enum as nvmlEnableState_t;
-impl nvmlBrandType_enum {
- pub const NVML_BRAND_UNKNOWN: nvmlBrandType_enum = nvmlBrandType_enum(0);
-}
-impl nvmlBrandType_enum {
- pub const NVML_BRAND_QUADRO: nvmlBrandType_enum = nvmlBrandType_enum(1);
-}
-impl nvmlBrandType_enum {
- pub const NVML_BRAND_TESLA: nvmlBrandType_enum = nvmlBrandType_enum(2);
-}
-impl nvmlBrandType_enum {
- pub const NVML_BRAND_NVS: nvmlBrandType_enum = nvmlBrandType_enum(3);
-}
-impl nvmlBrandType_enum {
- pub const NVML_BRAND_GRID: nvmlBrandType_enum = nvmlBrandType_enum(4);
-}
-impl nvmlBrandType_enum {
- pub const NVML_BRAND_GEFORCE: nvmlBrandType_enum = nvmlBrandType_enum(5);
-}
-impl nvmlBrandType_enum {
- pub const NVML_BRAND_TITAN: nvmlBrandType_enum = nvmlBrandType_enum(6);
-}
-impl nvmlBrandType_enum {
- pub const NVML_BRAND_COUNT: nvmlBrandType_enum = nvmlBrandType_enum(7);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlBrandType_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlBrandType_enum as nvmlBrandType_t;
-impl nvmlTemperatureThresholds_enum {
- pub const NVML_TEMPERATURE_THRESHOLD_SHUTDOWN: nvmlTemperatureThresholds_enum =
- nvmlTemperatureThresholds_enum(0);
-}
-impl nvmlTemperatureThresholds_enum {
- pub const NVML_TEMPERATURE_THRESHOLD_SLOWDOWN: nvmlTemperatureThresholds_enum =
- nvmlTemperatureThresholds_enum(1);
-}
-impl nvmlTemperatureThresholds_enum {
- pub const NVML_TEMPERATURE_THRESHOLD_MEM_MAX: nvmlTemperatureThresholds_enum =
- nvmlTemperatureThresholds_enum(2);
-}
-impl nvmlTemperatureThresholds_enum {
- pub const NVML_TEMPERATURE_THRESHOLD_GPU_MAX: nvmlTemperatureThresholds_enum =
- nvmlTemperatureThresholds_enum(3);
-}
-impl nvmlTemperatureThresholds_enum {
- pub const NVML_TEMPERATURE_THRESHOLD_COUNT: nvmlTemperatureThresholds_enum =
- nvmlTemperatureThresholds_enum(4);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlTemperatureThresholds_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlTemperatureThresholds_enum as nvmlTemperatureThresholds_t;
-impl nvmlTemperatureSensors_enum {
- pub const NVML_TEMPERATURE_GPU: nvmlTemperatureSensors_enum = nvmlTemperatureSensors_enum(0);
-}
-impl nvmlTemperatureSensors_enum {
- pub const NVML_TEMPERATURE_COUNT: nvmlTemperatureSensors_enum = nvmlTemperatureSensors_enum(1);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlTemperatureSensors_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlTemperatureSensors_enum as nvmlTemperatureSensors_t;
-impl nvmlComputeMode_enum {
- pub const NVML_COMPUTEMODE_DEFAULT: nvmlComputeMode_enum = nvmlComputeMode_enum(0);
-}
-impl nvmlComputeMode_enum {
- pub const NVML_COMPUTEMODE_EXCLUSIVE_THREAD: nvmlComputeMode_enum = nvmlComputeMode_enum(1);
-}
-impl nvmlComputeMode_enum {
- pub const NVML_COMPUTEMODE_PROHIBITED: nvmlComputeMode_enum = nvmlComputeMode_enum(2);
-}
-impl nvmlComputeMode_enum {
- pub const NVML_COMPUTEMODE_EXCLUSIVE_PROCESS: nvmlComputeMode_enum = nvmlComputeMode_enum(3);
-}
-impl nvmlComputeMode_enum {
- pub const NVML_COMPUTEMODE_COUNT: nvmlComputeMode_enum = nvmlComputeMode_enum(4);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlComputeMode_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlComputeMode_enum as nvmlComputeMode_t;
-impl nvmlMemoryErrorType_enum {
- pub const NVML_MEMORY_ERROR_TYPE_CORRECTED: nvmlMemoryErrorType_enum =
- nvmlMemoryErrorType_enum(0);
-}
-impl nvmlMemoryErrorType_enum {
- pub const NVML_MEMORY_ERROR_TYPE_UNCORRECTED: nvmlMemoryErrorType_enum =
- nvmlMemoryErrorType_enum(1);
-}
-impl nvmlMemoryErrorType_enum {
- pub const NVML_MEMORY_ERROR_TYPE_COUNT: nvmlMemoryErrorType_enum = nvmlMemoryErrorType_enum(2);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlMemoryErrorType_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlMemoryErrorType_enum as nvmlMemoryErrorType_t;
-impl nvmlEccCounterType_enum {
- pub const NVML_VOLATILE_ECC: nvmlEccCounterType_enum = nvmlEccCounterType_enum(0);
-}
-impl nvmlEccCounterType_enum {
- pub const NVML_AGGREGATE_ECC: nvmlEccCounterType_enum = nvmlEccCounterType_enum(1);
-}
-impl nvmlEccCounterType_enum {
- pub const NVML_ECC_COUNTER_TYPE_COUNT: nvmlEccCounterType_enum = nvmlEccCounterType_enum(2);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlEccCounterType_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlEccCounterType_enum as nvmlEccCounterType_t;
-impl nvmlClockType_enum {
- pub const NVML_CLOCK_GRAPHICS: nvmlClockType_enum = nvmlClockType_enum(0);
-}
-impl nvmlClockType_enum {
- pub const NVML_CLOCK_SM: nvmlClockType_enum = nvmlClockType_enum(1);
-}
-impl nvmlClockType_enum {
- pub const NVML_CLOCK_MEM: nvmlClockType_enum = nvmlClockType_enum(2);
-}
-impl nvmlClockType_enum {
- pub const NVML_CLOCK_VIDEO: nvmlClockType_enum = nvmlClockType_enum(3);
-}
-impl nvmlClockType_enum {
- pub const NVML_CLOCK_COUNT: nvmlClockType_enum = nvmlClockType_enum(4);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlClockType_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlClockType_enum as nvmlClockType_t;
-impl nvmlClockId_enum {
- pub const NVML_CLOCK_ID_CURRENT: nvmlClockId_enum = nvmlClockId_enum(0);
-}
-impl nvmlClockId_enum {
- pub const NVML_CLOCK_ID_APP_CLOCK_TARGET: nvmlClockId_enum = nvmlClockId_enum(1);
-}
-impl nvmlClockId_enum {
- pub const NVML_CLOCK_ID_APP_CLOCK_DEFAULT: nvmlClockId_enum = nvmlClockId_enum(2);
-}
-impl nvmlClockId_enum {
- pub const NVML_CLOCK_ID_CUSTOMER_BOOST_MAX: nvmlClockId_enum = nvmlClockId_enum(3);
-}
-impl nvmlClockId_enum {
- pub const NVML_CLOCK_ID_COUNT: nvmlClockId_enum = nvmlClockId_enum(4);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlClockId_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlClockId_enum as nvmlClockId_t;
-impl nvmlDriverModel_enum {
- pub const NVML_DRIVER_WDDM: nvmlDriverModel_enum = nvmlDriverModel_enum(0);
-}
-impl nvmlDriverModel_enum {
- pub const NVML_DRIVER_WDM: nvmlDriverModel_enum = nvmlDriverModel_enum(1);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlDriverModel_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlDriverModel_enum as nvmlDriverModel_t;
-impl nvmlPStates_enum {
- pub const NVML_PSTATE_0: nvmlPStates_enum = nvmlPStates_enum(0);
-}
-impl nvmlPStates_enum {
- pub const NVML_PSTATE_1: nvmlPStates_enum = nvmlPStates_enum(1);
-}
-impl nvmlPStates_enum {
- pub const NVML_PSTATE_2: nvmlPStates_enum = nvmlPStates_enum(2);
-}
-impl nvmlPStates_enum {
- pub const NVML_PSTATE_3: nvmlPStates_enum = nvmlPStates_enum(3);
-}
-impl nvmlPStates_enum {
- pub const NVML_PSTATE_4: nvmlPStates_enum = nvmlPStates_enum(4);
-}
-impl nvmlPStates_enum {
- pub const NVML_PSTATE_5: nvmlPStates_enum = nvmlPStates_enum(5);
-}
-impl nvmlPStates_enum {
- pub const NVML_PSTATE_6: nvmlPStates_enum = nvmlPStates_enum(6);
-}
-impl nvmlPStates_enum {
- pub const NVML_PSTATE_7: nvmlPStates_enum = nvmlPStates_enum(7);
-}
-impl nvmlPStates_enum {
- pub const NVML_PSTATE_8: nvmlPStates_enum = nvmlPStates_enum(8);
-}
-impl nvmlPStates_enum {
- pub const NVML_PSTATE_9: nvmlPStates_enum = nvmlPStates_enum(9);
-}
-impl nvmlPStates_enum {
- pub const NVML_PSTATE_10: nvmlPStates_enum = nvmlPStates_enum(10);
-}
-impl nvmlPStates_enum {
- pub const NVML_PSTATE_11: nvmlPStates_enum = nvmlPStates_enum(11);
-}
-impl nvmlPStates_enum {
- pub const NVML_PSTATE_12: nvmlPStates_enum = nvmlPStates_enum(12);
-}
-impl nvmlPStates_enum {
- pub const NVML_PSTATE_13: nvmlPStates_enum = nvmlPStates_enum(13);
-}
-impl nvmlPStates_enum {
- pub const NVML_PSTATE_14: nvmlPStates_enum = nvmlPStates_enum(14);
-}
-impl nvmlPStates_enum {
- pub const NVML_PSTATE_15: nvmlPStates_enum = nvmlPStates_enum(15);
-}
-impl nvmlPStates_enum {
- pub const NVML_PSTATE_UNKNOWN: nvmlPStates_enum = nvmlPStates_enum(32);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlPStates_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlPStates_enum as nvmlPstates_t;
-impl nvmlGom_enum {
- pub const NVML_GOM_ALL_ON: nvmlGom_enum = nvmlGom_enum(0);
-}
-impl nvmlGom_enum {
- pub const NVML_GOM_COMPUTE: nvmlGom_enum = nvmlGom_enum(1);
-}
-impl nvmlGom_enum {
- pub const NVML_GOM_LOW_DP: nvmlGom_enum = nvmlGom_enum(2);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlGom_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlGom_enum as nvmlGpuOperationMode_t;
-impl nvmlInforomObject_enum {
- pub const NVML_INFOROM_OEM: nvmlInforomObject_enum = nvmlInforomObject_enum(0);
-}
-impl nvmlInforomObject_enum {
- pub const NVML_INFOROM_ECC: nvmlInforomObject_enum = nvmlInforomObject_enum(1);
-}
-impl nvmlInforomObject_enum {
- pub const NVML_INFOROM_POWER: nvmlInforomObject_enum = nvmlInforomObject_enum(2);
-}
-impl nvmlInforomObject_enum {
- pub const NVML_INFOROM_COUNT: nvmlInforomObject_enum = nvmlInforomObject_enum(3);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlInforomObject_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlInforomObject_enum as nvmlInforomObject_t;
-impl nvmlReturn_enum {
- pub const NVML_SUCCESS: nvmlReturn_enum = nvmlReturn_enum(0);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_UNINITIALIZED: nvmlReturn_enum = nvmlReturn_enum(1);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_INVALID_ARGUMENT: nvmlReturn_enum = nvmlReturn_enum(2);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_NOT_SUPPORTED: nvmlReturn_enum = nvmlReturn_enum(3);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_NO_PERMISSION: nvmlReturn_enum = nvmlReturn_enum(4);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_ALREADY_INITIALIZED: nvmlReturn_enum = nvmlReturn_enum(5);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_NOT_FOUND: nvmlReturn_enum = nvmlReturn_enum(6);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_INSUFFICIENT_SIZE: nvmlReturn_enum = nvmlReturn_enum(7);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_INSUFFICIENT_POWER: nvmlReturn_enum = nvmlReturn_enum(8);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_DRIVER_NOT_LOADED: nvmlReturn_enum = nvmlReturn_enum(9);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_TIMEOUT: nvmlReturn_enum = nvmlReturn_enum(10);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_IRQ_ISSUE: nvmlReturn_enum = nvmlReturn_enum(11);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_LIBRARY_NOT_FOUND: nvmlReturn_enum = nvmlReturn_enum(12);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_FUNCTION_NOT_FOUND: nvmlReturn_enum = nvmlReturn_enum(13);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_CORRUPTED_INFOROM: nvmlReturn_enum = nvmlReturn_enum(14);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_GPU_IS_LOST: nvmlReturn_enum = nvmlReturn_enum(15);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_RESET_REQUIRED: nvmlReturn_enum = nvmlReturn_enum(16);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_OPERATING_SYSTEM: nvmlReturn_enum = nvmlReturn_enum(17);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_LIB_RM_VERSION_MISMATCH: nvmlReturn_enum = nvmlReturn_enum(18);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_IN_USE: nvmlReturn_enum = nvmlReturn_enum(19);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_MEMORY: nvmlReturn_enum = nvmlReturn_enum(20);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_NO_DATA: nvmlReturn_enum = nvmlReturn_enum(21);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_VGPU_ECC_NOT_SUPPORTED: nvmlReturn_enum = nvmlReturn_enum(22);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_INSUFFICIENT_RESOURCES: nvmlReturn_enum = nvmlReturn_enum(23);
-}
-impl nvmlReturn_enum {
- pub const NVML_ERROR_UNKNOWN: nvmlReturn_enum = nvmlReturn_enum(999);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlReturn_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlReturn_enum as nvmlReturn_t;
-impl nvmlMemoryLocation_enum {
- pub const NVML_MEMORY_LOCATION_L1_CACHE: nvmlMemoryLocation_enum = nvmlMemoryLocation_enum(0);
-}
-impl nvmlMemoryLocation_enum {
- pub const NVML_MEMORY_LOCATION_L2_CACHE: nvmlMemoryLocation_enum = nvmlMemoryLocation_enum(1);
-}
-impl nvmlMemoryLocation_enum {
- pub const NVML_MEMORY_LOCATION_DRAM: nvmlMemoryLocation_enum = nvmlMemoryLocation_enum(2);
-}
-impl nvmlMemoryLocation_enum {
- pub const NVML_MEMORY_LOCATION_DEVICE_MEMORY: nvmlMemoryLocation_enum =
- nvmlMemoryLocation_enum(2);
-}
-impl nvmlMemoryLocation_enum {
- pub const NVML_MEMORY_LOCATION_REGISTER_FILE: nvmlMemoryLocation_enum =
- nvmlMemoryLocation_enum(3);
-}
-impl nvmlMemoryLocation_enum {
- pub const NVML_MEMORY_LOCATION_TEXTURE_MEMORY: nvmlMemoryLocation_enum =
- nvmlMemoryLocation_enum(4);
-}
-impl nvmlMemoryLocation_enum {
- pub const NVML_MEMORY_LOCATION_TEXTURE_SHM: nvmlMemoryLocation_enum =
- nvmlMemoryLocation_enum(5);
-}
-impl nvmlMemoryLocation_enum {
- pub const NVML_MEMORY_LOCATION_CBU: nvmlMemoryLocation_enum = nvmlMemoryLocation_enum(6);
-}
-impl nvmlMemoryLocation_enum {
- pub const NVML_MEMORY_LOCATION_SRAM: nvmlMemoryLocation_enum = nvmlMemoryLocation_enum(7);
-}
-impl nvmlMemoryLocation_enum {
- pub const NVML_MEMORY_LOCATION_COUNT: nvmlMemoryLocation_enum = nvmlMemoryLocation_enum(8);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlMemoryLocation_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlMemoryLocation_enum as nvmlMemoryLocation_t;
-impl nvmlPageRetirementCause_enum {
- pub const NVML_PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS:
- nvmlPageRetirementCause_enum = nvmlPageRetirementCause_enum(0);
-}
-impl nvmlPageRetirementCause_enum {
- pub const NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR: nvmlPageRetirementCause_enum =
- nvmlPageRetirementCause_enum(1);
-}
-impl nvmlPageRetirementCause_enum {
- pub const NVML_PAGE_RETIREMENT_CAUSE_COUNT: nvmlPageRetirementCause_enum =
- nvmlPageRetirementCause_enum(2);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlPageRetirementCause_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlPageRetirementCause_enum as nvmlPageRetirementCause_t;
-impl nvmlRestrictedAPI_enum {
- pub const NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS: nvmlRestrictedAPI_enum =
- nvmlRestrictedAPI_enum(0);
-}
-impl nvmlRestrictedAPI_enum {
- pub const NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS: nvmlRestrictedAPI_enum =
- nvmlRestrictedAPI_enum(1);
-}
-impl nvmlRestrictedAPI_enum {
- pub const NVML_RESTRICTED_API_COUNT: nvmlRestrictedAPI_enum = nvmlRestrictedAPI_enum(2);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlRestrictedAPI_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlRestrictedAPI_enum as nvmlRestrictedAPI_t;
-impl nvmlGpuVirtualizationMode {
- pub const NVML_GPU_VIRTUALIZATION_MODE_NONE: nvmlGpuVirtualizationMode =
- nvmlGpuVirtualizationMode(0);
-}
-impl nvmlGpuVirtualizationMode {
- pub const NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH: nvmlGpuVirtualizationMode =
- nvmlGpuVirtualizationMode(1);
-}
-impl nvmlGpuVirtualizationMode {
- pub const NVML_GPU_VIRTUALIZATION_MODE_VGPU: nvmlGpuVirtualizationMode =
- nvmlGpuVirtualizationMode(2);
-}
-impl nvmlGpuVirtualizationMode {
- pub const NVML_GPU_VIRTUALIZATION_MODE_HOST_VGPU: nvmlGpuVirtualizationMode =
- nvmlGpuVirtualizationMode(3);
-}
-impl nvmlGpuVirtualizationMode {
- pub const NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA: nvmlGpuVirtualizationMode =
- nvmlGpuVirtualizationMode(4);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlGpuVirtualizationMode(pub ::std::os::raw::c_uint);
-pub use self::nvmlGpuVirtualizationMode as nvmlGpuVirtualizationMode_t;
-impl nvmlHostVgpuMode_enum {
- pub const NVML_HOST_VGPU_MODE_NON_SRIOV: nvmlHostVgpuMode_enum = nvmlHostVgpuMode_enum(0);
-}
-impl nvmlHostVgpuMode_enum {
- pub const NVML_HOST_VGPU_MODE_SRIOV: nvmlHostVgpuMode_enum = nvmlHostVgpuMode_enum(1);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlHostVgpuMode_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlHostVgpuMode_enum as nvmlHostVgpuMode_t;
-impl nvmlVgpuVmIdType {
- pub const NVML_VGPU_VM_ID_DOMAIN_ID: nvmlVgpuVmIdType = nvmlVgpuVmIdType(0);
-}
-impl nvmlVgpuVmIdType {
- pub const NVML_VGPU_VM_ID_UUID: nvmlVgpuVmIdType = nvmlVgpuVmIdType(1);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlVgpuVmIdType(pub ::std::os::raw::c_uint);
-pub use self::nvmlVgpuVmIdType as nvmlVgpuVmIdType_t;
-impl nvmlVgpuGuestInfoState_enum {
- pub const NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED: nvmlVgpuGuestInfoState_enum =
- nvmlVgpuGuestInfoState_enum(0);
-}
-impl nvmlVgpuGuestInfoState_enum {
- pub const NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED: nvmlVgpuGuestInfoState_enum =
- nvmlVgpuGuestInfoState_enum(1);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlVgpuGuestInfoState_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlVgpuGuestInfoState_enum as nvmlVgpuGuestInfoState_t;
-impl nvmlGridLicenseFeatureCode_t {
- pub const NVML_GRID_LICENSE_FEATURE_CODE_VGPU: nvmlGridLicenseFeatureCode_t =
- nvmlGridLicenseFeatureCode_t(1);
-}
-impl nvmlGridLicenseFeatureCode_t {
- pub const NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION: nvmlGridLicenseFeatureCode_t =
- nvmlGridLicenseFeatureCode_t(2);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlGridLicenseFeatureCode_t(pub ::std::os::raw::c_uint);
-pub type nvmlVgpuTypeId_t = ::std::os::raw::c_uint;
-pub type nvmlVgpuInstance_t = ::std::os::raw::c_uint;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlVgpuInstanceUtilizationSample_st {
- pub vgpuInstance: nvmlVgpuInstance_t,
- pub timeStamp: ::std::os::raw::c_ulonglong,
- pub smUtil: nvmlValue_t,
- pub memUtil: nvmlValue_t,
- pub encUtil: nvmlValue_t,
- pub decUtil: nvmlValue_t,
-}
-pub type nvmlVgpuInstanceUtilizationSample_t = nvmlVgpuInstanceUtilizationSample_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlVgpuProcessUtilizationSample_st {
- pub vgpuInstance: nvmlVgpuInstance_t,
- pub pid: ::std::os::raw::c_uint,
- pub processName: [::std::os::raw::c_char; 64usize],
- pub timeStamp: ::std::os::raw::c_ulonglong,
- pub smUtil: ::std::os::raw::c_uint,
- pub memUtil: ::std::os::raw::c_uint,
- pub encUtil: ::std::os::raw::c_uint,
- pub decUtil: ::std::os::raw::c_uint,
-}
-pub type nvmlVgpuProcessUtilizationSample_t = nvmlVgpuProcessUtilizationSample_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlProcessUtilizationSample_st {
- pub pid: ::std::os::raw::c_uint,
- pub timeStamp: ::std::os::raw::c_ulonglong,
- pub smUtil: ::std::os::raw::c_uint,
- pub memUtil: ::std::os::raw::c_uint,
- pub encUtil: ::std::os::raw::c_uint,
- pub decUtil: ::std::os::raw::c_uint,
-}
-pub type nvmlProcessUtilizationSample_t = nvmlProcessUtilizationSample_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlGridLicensableFeature_st {
- pub featureCode: nvmlGridLicenseFeatureCode_t,
- pub featureState: ::std::os::raw::c_uint,
- pub licenseInfo: [::std::os::raw::c_char; 128usize],
- pub productName: [::std::os::raw::c_char; 128usize],
- pub featureEnabled: ::std::os::raw::c_uint,
-}
-pub type nvmlGridLicensableFeature_t = nvmlGridLicensableFeature_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlGridLicensableFeatures_st {
- pub isGridLicenseSupported: ::std::os::raw::c_int,
- pub licensableFeaturesCount: ::std::os::raw::c_uint,
- pub gridLicensableFeatures: [nvmlGridLicensableFeature_t; 3usize],
-}
-pub type nvmlGridLicensableFeatures_t = nvmlGridLicensableFeatures_st;
-pub type nvmlDeviceArchitecture_t = ::std::os::raw::c_uint;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlFieldValue_st {
- pub fieldId: ::std::os::raw::c_uint,
- pub scopeId: ::std::os::raw::c_uint,
- pub timestamp: ::std::os::raw::c_longlong,
- pub latencyUsec: ::std::os::raw::c_longlong,
- pub valueType: nvmlValueType_t,
- pub nvmlReturn: nvmlReturn_t,
- pub value: nvmlValue_t,
-}
-pub type nvmlFieldValue_t = nvmlFieldValue_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlUnit_st {
- _unused: [u8; 0],
-}
-pub type nvmlUnit_t = *mut nvmlUnit_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlHwbcEntry_st {
- pub hwbcId: ::std::os::raw::c_uint,
- pub firmwareVersion: [::std::os::raw::c_char; 32usize],
-}
-pub type nvmlHwbcEntry_t = nvmlHwbcEntry_st;
-impl nvmlFanState_enum {
- pub const NVML_FAN_NORMAL: nvmlFanState_enum = nvmlFanState_enum(0);
-}
-impl nvmlFanState_enum {
- pub const NVML_FAN_FAILED: nvmlFanState_enum = nvmlFanState_enum(1);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlFanState_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlFanState_enum as nvmlFanState_t;
-impl nvmlLedColor_enum {
- pub const NVML_LED_COLOR_GREEN: nvmlLedColor_enum = nvmlLedColor_enum(0);
-}
-impl nvmlLedColor_enum {
- pub const NVML_LED_COLOR_AMBER: nvmlLedColor_enum = nvmlLedColor_enum(1);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlLedColor_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlLedColor_enum as nvmlLedColor_t;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlLedState_st {
- pub cause: [::std::os::raw::c_char; 256usize],
- pub color: nvmlLedColor_t,
-}
-pub type nvmlLedState_t = nvmlLedState_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlUnitInfo_st {
- pub name: [::std::os::raw::c_char; 96usize],
- pub id: [::std::os::raw::c_char; 96usize],
- pub serial: [::std::os::raw::c_char; 96usize],
- pub firmwareVersion: [::std::os::raw::c_char; 96usize],
-}
-pub type nvmlUnitInfo_t = nvmlUnitInfo_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlPSUInfo_st {
- pub state: [::std::os::raw::c_char; 256usize],
- pub current: ::std::os::raw::c_uint,
- pub voltage: ::std::os::raw::c_uint,
- pub power: ::std::os::raw::c_uint,
-}
-pub type nvmlPSUInfo_t = nvmlPSUInfo_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlUnitFanInfo_st {
- pub speed: ::std::os::raw::c_uint,
- pub state: nvmlFanState_t,
-}
-pub type nvmlUnitFanInfo_t = nvmlUnitFanInfo_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlUnitFanSpeeds_st {
- pub fans: [nvmlUnitFanInfo_t; 24usize],
- pub count: ::std::os::raw::c_uint,
-}
-pub type nvmlUnitFanSpeeds_t = nvmlUnitFanSpeeds_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlEventSet_st {
- _unused: [u8; 0],
-}
-pub type nvmlEventSet_t = *mut nvmlEventSet_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlEventData_st {
- pub device: nvmlDevice_t,
- pub eventType: ::std::os::raw::c_ulonglong,
- pub eventData: ::std::os::raw::c_ulonglong,
- pub gpuInstanceId: ::std::os::raw::c_uint,
- pub computeInstanceId: ::std::os::raw::c_uint,
-}
-pub type nvmlEventData_t = nvmlEventData_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlAccountingStats_st {
- pub gpuUtilization: ::std::os::raw::c_uint,
- pub memoryUtilization: ::std::os::raw::c_uint,
- pub maxMemoryUsage: ::std::os::raw::c_ulonglong,
- pub time: ::std::os::raw::c_ulonglong,
- pub startTime: ::std::os::raw::c_ulonglong,
- pub isRunning: ::std::os::raw::c_uint,
- pub reserved: [::std::os::raw::c_uint; 5usize],
-}
-pub type nvmlAccountingStats_t = nvmlAccountingStats_st;
-impl nvmlEncoderQueryType_enum {
- pub const NVML_ENCODER_QUERY_H264: nvmlEncoderQueryType_enum = nvmlEncoderQueryType_enum(0);
-}
-impl nvmlEncoderQueryType_enum {
- pub const NVML_ENCODER_QUERY_HEVC: nvmlEncoderQueryType_enum = nvmlEncoderQueryType_enum(1);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlEncoderQueryType_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlEncoderQueryType_enum as nvmlEncoderType_t;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlEncoderSessionInfo_st {
- pub sessionId: ::std::os::raw::c_uint,
- pub pid: ::std::os::raw::c_uint,
- pub vgpuInstance: nvmlVgpuInstance_t,
- pub codecType: nvmlEncoderType_t,
- pub hResolution: ::std::os::raw::c_uint,
- pub vResolution: ::std::os::raw::c_uint,
- pub averageFps: ::std::os::raw::c_uint,
- pub averageLatency: ::std::os::raw::c_uint,
-}
-pub type nvmlEncoderSessionInfo_t = nvmlEncoderSessionInfo_st;
-impl nvmlFBCSessionType_enum {
- pub const NVML_FBC_SESSION_TYPE_UNKNOWN: nvmlFBCSessionType_enum = nvmlFBCSessionType_enum(0);
-}
-impl nvmlFBCSessionType_enum {
- pub const NVML_FBC_SESSION_TYPE_TOSYS: nvmlFBCSessionType_enum = nvmlFBCSessionType_enum(1);
-}
-impl nvmlFBCSessionType_enum {
- pub const NVML_FBC_SESSION_TYPE_CUDA: nvmlFBCSessionType_enum = nvmlFBCSessionType_enum(2);
-}
-impl nvmlFBCSessionType_enum {
- pub const NVML_FBC_SESSION_TYPE_VID: nvmlFBCSessionType_enum = nvmlFBCSessionType_enum(3);
-}
-impl nvmlFBCSessionType_enum {
- pub const NVML_FBC_SESSION_TYPE_HWENC: nvmlFBCSessionType_enum = nvmlFBCSessionType_enum(4);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlFBCSessionType_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlFBCSessionType_enum as nvmlFBCSessionType_t;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlFBCStats_st {
- pub sessionsCount: ::std::os::raw::c_uint,
- pub averageFPS: ::std::os::raw::c_uint,
- pub averageLatency: ::std::os::raw::c_uint,
-}
-pub type nvmlFBCStats_t = nvmlFBCStats_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlFBCSessionInfo_st {
- pub sessionId: ::std::os::raw::c_uint,
- pub pid: ::std::os::raw::c_uint,
- pub vgpuInstance: nvmlVgpuInstance_t,
- pub displayOrdinal: ::std::os::raw::c_uint,
- pub sessionType: nvmlFBCSessionType_t,
- pub sessionFlags: ::std::os::raw::c_uint,
- pub hMaxResolution: ::std::os::raw::c_uint,
- pub vMaxResolution: ::std::os::raw::c_uint,
- pub hResolution: ::std::os::raw::c_uint,
- pub vResolution: ::std::os::raw::c_uint,
- pub averageFPS: ::std::os::raw::c_uint,
- pub averageLatency: ::std::os::raw::c_uint,
-}
-pub type nvmlFBCSessionInfo_t = nvmlFBCSessionInfo_st;
-impl nvmlDetachGpuState_enum {
- pub const NVML_DETACH_GPU_KEEP: nvmlDetachGpuState_enum = nvmlDetachGpuState_enum(0);
-}
-impl nvmlDetachGpuState_enum {
- pub const NVML_DETACH_GPU_REMOVE: nvmlDetachGpuState_enum = nvmlDetachGpuState_enum(1);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlDetachGpuState_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlDetachGpuState_enum as nvmlDetachGpuState_t;
-impl nvmlPcieLinkState_enum {
- pub const NVML_PCIE_LINK_KEEP: nvmlPcieLinkState_enum = nvmlPcieLinkState_enum(0);
-}
-impl nvmlPcieLinkState_enum {
- pub const NVML_PCIE_LINK_SHUT_DOWN: nvmlPcieLinkState_enum = nvmlPcieLinkState_enum(1);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlPcieLinkState_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlPcieLinkState_enum as nvmlPcieLinkState_t;
-
-#[no_mangle]
-pub extern "C" fn nvmlInit_v2() -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlInit() -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlInitWithFlags(flags: ::std::os::raw::c_uint) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlShutdown() -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlErrorString(result: nvmlReturn_t) -> *const ::std::os::raw::c_char {
- c"".as_ptr()
-}
-
-#[no_mangle]
-pub unsafe extern "C" fn nvmlSystemGetDriverVersion(
- version: *mut ::std::os::raw::c_char,
- length: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlSystemGetNVMLVersion(
- version: *mut ::std::os::raw::c_char,
- length: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlSystemGetCudaDriverVersion(
- cudaDriverVersion: *mut ::std::os::raw::c_int,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlSystemGetCudaDriverVersion_v2(
- cudaDriverVersion: *mut ::std::os::raw::c_int,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlSystemGetProcessName(
- pid: ::std::os::raw::c_uint,
- name: *mut ::std::os::raw::c_char,
- length: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlUnitGetCount(unitCount: *mut ::std::os::raw::c_uint) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlUnitGetHandleByIndex(
- index: ::std::os::raw::c_uint,
- unit: *mut nvmlUnit_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlUnitGetUnitInfo(unit: nvmlUnit_t, info: *mut nvmlUnitInfo_t) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlUnitGetLedState(
- unit: nvmlUnit_t,
- state: *mut nvmlLedState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlUnitGetPsuInfo(unit: nvmlUnit_t, psu: *mut nvmlPSUInfo_t) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlUnitGetTemperature(
- unit: nvmlUnit_t,
- type_: ::std::os::raw::c_uint,
- temp: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlUnitGetFanSpeedInfo(
- unit: nvmlUnit_t,
- fanSpeeds: *mut nvmlUnitFanSpeeds_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlUnitGetDevices(
- unit: nvmlUnit_t,
- deviceCount: *mut ::std::os::raw::c_uint,
- devices: *mut nvmlDevice_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlSystemGetHicVersion(
- hwbcCount: *mut ::std::os::raw::c_uint,
- hwbcEntries: *mut nvmlHwbcEntry_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetCount_v2(deviceCount: *mut ::std::os::raw::c_uint) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetAttributes(
- device: nvmlDevice_t,
- attributes: *mut nvmlDeviceAttributes_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetHandleByIndex_v2(
- index: ::std::os::raw::c_uint,
- device: *mut nvmlDevice_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetHandleBySerial(
- serial: *const ::std::os::raw::c_char,
- device: *mut nvmlDevice_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetHandleByUUID(
- uuid: *const ::std::os::raw::c_char,
- device: *mut nvmlDevice_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetHandleByPciBusId_v2(
- pciBusId: *const ::std::os::raw::c_char,
- device: *mut nvmlDevice_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetName(
- device: nvmlDevice_t,
- name: *mut ::std::os::raw::c_char,
- length: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetBrand(
- device: nvmlDevice_t,
- type_: *mut nvmlBrandType_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetIndex(
- device: nvmlDevice_t,
- index: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetSerial(
- device: nvmlDevice_t,
- serial: *mut ::std::os::raw::c_char,
- length: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-pub type nvmlAffinityScope_t = ::std::os::raw::c_uint;
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetMemoryAffinity(
- device: nvmlDevice_t,
- nodeSetSize: ::std::os::raw::c_uint,
- nodeSet: *mut ::std::os::raw::c_ulong,
- scope: nvmlAffinityScope_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetCpuAffinityWithinScope(
- device: nvmlDevice_t,
- cpuSetSize: ::std::os::raw::c_uint,
- cpuSet: *mut ::std::os::raw::c_ulong,
- scope: nvmlAffinityScope_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetCpuAffinity(
- device: nvmlDevice_t,
- cpuSetSize: ::std::os::raw::c_uint,
- cpuSet: *mut ::std::os::raw::c_ulong,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceSetCpuAffinity(device: nvmlDevice_t) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceClearCpuAffinity(device: nvmlDevice_t) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetTopologyCommonAncestor(
- device1: nvmlDevice_t,
- device2: nvmlDevice_t,
- pathInfo: *mut nvmlGpuTopologyLevel_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetTopologyNearestGpus(
- device: nvmlDevice_t,
- level: nvmlGpuTopologyLevel_t,
- count: *mut ::std::os::raw::c_uint,
- deviceArray: *mut nvmlDevice_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlSystemGetTopologyGpuSet(
- cpuNumber: ::std::os::raw::c_uint,
- count: *mut ::std::os::raw::c_uint,
- deviceArray: *mut nvmlDevice_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetP2PStatus(
- device1: nvmlDevice_t,
- device2: nvmlDevice_t,
- p2pIndex: nvmlGpuP2PCapsIndex_t,
- p2pStatus: *mut nvmlGpuP2PStatus_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetUUID(
- device: nvmlDevice_t,
- uuid: *mut ::std::os::raw::c_char,
- length: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuInstanceGetMdevUUID(
- vgpuInstance: nvmlVgpuInstance_t,
- mdevUuid: *mut ::std::os::raw::c_char,
- size: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetMinorNumber(
- device: nvmlDevice_t,
- minorNumber: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetBoardPartNumber(
- device: nvmlDevice_t,
- partNumber: *mut ::std::os::raw::c_char,
- length: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetInforomVersion(
- device: nvmlDevice_t,
- object: nvmlInforomObject_t,
- version: *mut ::std::os::raw::c_char,
- length: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetInforomImageVersion(
- device: nvmlDevice_t,
- version: *mut ::std::os::raw::c_char,
- length: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetInforomConfigurationChecksum(
- device: nvmlDevice_t,
- checksum: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceValidateInforom(device: nvmlDevice_t) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetDisplayMode(
- device: nvmlDevice_t,
- display: *mut nvmlEnableState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetDisplayActive(
- device: nvmlDevice_t,
- isActive: *mut nvmlEnableState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetPersistenceMode(
- device: nvmlDevice_t,
- mode: *mut nvmlEnableState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetPciInfo_v3(
- device: nvmlDevice_t,
- pci: *mut nvmlPciInfo_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetMaxPcieLinkGeneration(
- device: nvmlDevice_t,
- maxLinkGen: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetMaxPcieLinkWidth(
- device: nvmlDevice_t,
- maxLinkWidth: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetCurrPcieLinkGeneration(
- device: nvmlDevice_t,
- currLinkGen: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetCurrPcieLinkWidth(
- device: nvmlDevice_t,
- currLinkWidth: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetPcieThroughput(
- device: nvmlDevice_t,
- counter: nvmlPcieUtilCounter_t,
- value: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetPcieReplayCounter(
- device: nvmlDevice_t,
- value: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetClockInfo(
- device: nvmlDevice_t,
- type_: nvmlClockType_t,
- clock: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetMaxClockInfo(
- device: nvmlDevice_t,
- type_: nvmlClockType_t,
- clock: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetApplicationsClock(
- device: nvmlDevice_t,
- clockType: nvmlClockType_t,
- clockMHz: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetDefaultApplicationsClock(
- device: nvmlDevice_t,
- clockType: nvmlClockType_t,
- clockMHz: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceResetApplicationsClocks(device: nvmlDevice_t) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetClock(
- device: nvmlDevice_t,
- clockType: nvmlClockType_t,
- clockId: nvmlClockId_t,
- clockMHz: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetMaxCustomerBoostClock(
- device: nvmlDevice_t,
- clockType: nvmlClockType_t,
- clockMHz: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetSupportedMemoryClocks(
- device: nvmlDevice_t,
- count: *mut ::std::os::raw::c_uint,
- clocksMHz: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetSupportedGraphicsClocks(
- device: nvmlDevice_t,
- memoryClockMHz: ::std::os::raw::c_uint,
- count: *mut ::std::os::raw::c_uint,
- clocksMHz: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetAutoBoostedClocksEnabled(
- device: nvmlDevice_t,
- isEnabled: *mut nvmlEnableState_t,
- defaultIsEnabled: *mut nvmlEnableState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceSetAutoBoostedClocksEnabled(
- device: nvmlDevice_t,
- enabled: nvmlEnableState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceSetDefaultAutoBoostedClocksEnabled(
- device: nvmlDevice_t,
- enabled: nvmlEnableState_t,
- flags: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetFanSpeed(
- device: nvmlDevice_t,
- speed: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetFanSpeed_v2(
- device: nvmlDevice_t,
- fan: ::std::os::raw::c_uint,
- speed: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetTemperature(
- device: nvmlDevice_t,
- sensorType: nvmlTemperatureSensors_t,
- temp: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetTemperatureThreshold(
- device: nvmlDevice_t,
- thresholdType: nvmlTemperatureThresholds_t,
- temp: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetPerformanceState(
- device: nvmlDevice_t,
- pState: *mut nvmlPstates_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetCurrentClocksThrottleReasons(
- device: nvmlDevice_t,
- clocksThrottleReasons: *mut ::std::os::raw::c_ulonglong,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetSupportedClocksThrottleReasons(
- device: nvmlDevice_t,
- supportedClocksThrottleReasons: *mut ::std::os::raw::c_ulonglong,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetPowerState(
- device: nvmlDevice_t,
- pState: *mut nvmlPstates_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetPowerManagementMode(
- device: nvmlDevice_t,
- mode: *mut nvmlEnableState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetPowerManagementLimit(
- device: nvmlDevice_t,
- limit: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetPowerManagementLimitConstraints(
- device: nvmlDevice_t,
- minLimit: *mut ::std::os::raw::c_uint,
- maxLimit: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetPowerManagementDefaultLimit(
- device: nvmlDevice_t,
- defaultLimit: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetPowerUsage(
- device: nvmlDevice_t,
- power: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetTotalEnergyConsumption(
- device: nvmlDevice_t,
- energy: *mut ::std::os::raw::c_ulonglong,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetEnforcedPowerLimit(
- device: nvmlDevice_t,
- limit: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetGpuOperationMode(
- device: nvmlDevice_t,
- current: *mut nvmlGpuOperationMode_t,
- pending: *mut nvmlGpuOperationMode_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetMemoryInfo(
- device: nvmlDevice_t,
- memory: *mut nvmlMemory_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetComputeMode(
- device: nvmlDevice_t,
- mode: *mut nvmlComputeMode_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetCudaComputeCapability(
- device: nvmlDevice_t,
- major: *mut ::std::os::raw::c_int,
- minor: *mut ::std::os::raw::c_int,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetEccMode(
- device: nvmlDevice_t,
- current: *mut nvmlEnableState_t,
- pending: *mut nvmlEnableState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetBoardId(
- device: nvmlDevice_t,
- boardId: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetMultiGpuBoard(
- device: nvmlDevice_t,
- multiGpuBool: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetTotalEccErrors(
- device: nvmlDevice_t,
- errorType: nvmlMemoryErrorType_t,
- counterType: nvmlEccCounterType_t,
- eccCounts: *mut ::std::os::raw::c_ulonglong,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetDetailedEccErrors(
- device: nvmlDevice_t,
- errorType: nvmlMemoryErrorType_t,
- counterType: nvmlEccCounterType_t,
- eccCounts: *mut nvmlEccErrorCounts_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetMemoryErrorCounter(
- device: nvmlDevice_t,
- errorType: nvmlMemoryErrorType_t,
- counterType: nvmlEccCounterType_t,
- locationType: nvmlMemoryLocation_t,
- count: *mut ::std::os::raw::c_ulonglong,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetUtilizationRates(
- device: nvmlDevice_t,
- utilization: *mut nvmlUtilization_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetEncoderUtilization(
- device: nvmlDevice_t,
- utilization: *mut ::std::os::raw::c_uint,
- samplingPeriodUs: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetEncoderCapacity(
- device: nvmlDevice_t,
- encoderQueryType: nvmlEncoderType_t,
- encoderCapacity: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetEncoderStats(
- device: nvmlDevice_t,
- sessionCount: *mut ::std::os::raw::c_uint,
- averageFps: *mut ::std::os::raw::c_uint,
- averageLatency: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetEncoderSessions(
- device: nvmlDevice_t,
- sessionCount: *mut ::std::os::raw::c_uint,
- sessionInfos: *mut nvmlEncoderSessionInfo_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetDecoderUtilization(
- device: nvmlDevice_t,
- utilization: *mut ::std::os::raw::c_uint,
- samplingPeriodUs: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetFBCStats(
- device: nvmlDevice_t,
- fbcStats: *mut nvmlFBCStats_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetFBCSessions(
- device: nvmlDevice_t,
- sessionCount: *mut ::std::os::raw::c_uint,
- sessionInfo: *mut nvmlFBCSessionInfo_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetDriverModel(
- device: nvmlDevice_t,
- current: *mut nvmlDriverModel_t,
- pending: *mut nvmlDriverModel_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetVbiosVersion(
- device: nvmlDevice_t,
- version: *mut ::std::os::raw::c_char,
- length: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetBridgeChipInfo(
- device: nvmlDevice_t,
- bridgeHierarchy: *mut nvmlBridgeChipHierarchy_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetComputeRunningProcesses(
- device: nvmlDevice_t,
- infoCount: *mut ::std::os::raw::c_uint,
- infos: *mut nvmlProcessInfo_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetGraphicsRunningProcesses(
- device: nvmlDevice_t,
- infoCount: *mut ::std::os::raw::c_uint,
- infos: *mut nvmlProcessInfo_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceOnSameBoard(
- device1: nvmlDevice_t,
- device2: nvmlDevice_t,
- onSameBoard: *mut ::std::os::raw::c_int,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetAPIRestriction(
- device: nvmlDevice_t,
- apiType: nvmlRestrictedAPI_t,
- isRestricted: *mut nvmlEnableState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetSamples(
- device: nvmlDevice_t,
- type_: nvmlSamplingType_t,
- lastSeenTimeStamp: ::std::os::raw::c_ulonglong,
- sampleValType: *mut nvmlValueType_t,
- sampleCount: *mut ::std::os::raw::c_uint,
- samples: *mut nvmlSample_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetBAR1MemoryInfo(
- device: nvmlDevice_t,
- bar1Memory: *mut nvmlBAR1Memory_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetViolationStatus(
- device: nvmlDevice_t,
- perfPolicyType: nvmlPerfPolicyType_t,
- violTime: *mut nvmlViolationTime_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetAccountingMode(
- device: nvmlDevice_t,
- mode: *mut nvmlEnableState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetAccountingStats(
- device: nvmlDevice_t,
- pid: ::std::os::raw::c_uint,
- stats: *mut nvmlAccountingStats_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetAccountingPids(
- device: nvmlDevice_t,
- count: *mut ::std::os::raw::c_uint,
- pids: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetAccountingBufferSize(
- device: nvmlDevice_t,
- bufferSize: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetRetiredPages(
- device: nvmlDevice_t,
- cause: nvmlPageRetirementCause_t,
- pageCount: *mut ::std::os::raw::c_uint,
- addresses: *mut ::std::os::raw::c_ulonglong,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetRetiredPages_v2(
- device: nvmlDevice_t,
- cause: nvmlPageRetirementCause_t,
- pageCount: *mut ::std::os::raw::c_uint,
- addresses: *mut ::std::os::raw::c_ulonglong,
- timestamps: *mut ::std::os::raw::c_ulonglong,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetRetiredPagesPendingStatus(
- device: nvmlDevice_t,
- isPending: *mut nvmlEnableState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetRemappedRows(
- device: nvmlDevice_t,
- corrRows: *mut ::std::os::raw::c_uint,
- uncRows: *mut ::std::os::raw::c_uint,
- isPending: *mut ::std::os::raw::c_uint,
- failureOccurred: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetArchitecture(
- device: nvmlDevice_t,
- arch: *mut nvmlDeviceArchitecture_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlUnitSetLedState(unit: nvmlUnit_t, color: nvmlLedColor_t) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceSetPersistenceMode(
- device: nvmlDevice_t,
- mode: nvmlEnableState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceSetComputeMode(
- device: nvmlDevice_t,
- mode: nvmlComputeMode_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceSetEccMode(
- device: nvmlDevice_t,
- ecc: nvmlEnableState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceClearEccErrorCounts(
- device: nvmlDevice_t,
- counterType: nvmlEccCounterType_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceSetDriverModel(
- device: nvmlDevice_t,
- driverModel: nvmlDriverModel_t,
- flags: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceSetGpuLockedClocks(
- device: nvmlDevice_t,
- minGpuClockMHz: ::std::os::raw::c_uint,
- maxGpuClockMHz: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceResetGpuLockedClocks(device: nvmlDevice_t) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceSetApplicationsClocks(
- device: nvmlDevice_t,
- memClockMHz: ::std::os::raw::c_uint,
- graphicsClockMHz: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceSetPowerManagementLimit(
- device: nvmlDevice_t,
- limit: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceSetGpuOperationMode(
- device: nvmlDevice_t,
- mode: nvmlGpuOperationMode_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceSetAPIRestriction(
- device: nvmlDevice_t,
- apiType: nvmlRestrictedAPI_t,
- isRestricted: nvmlEnableState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceSetAccountingMode(
- device: nvmlDevice_t,
- mode: nvmlEnableState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceClearAccountingPids(device: nvmlDevice_t) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetNvLinkState(
- device: nvmlDevice_t,
- link: ::std::os::raw::c_uint,
- isActive: *mut nvmlEnableState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetNvLinkVersion(
- device: nvmlDevice_t,
- link: ::std::os::raw::c_uint,
- version: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetNvLinkCapability(
- device: nvmlDevice_t,
- link: ::std::os::raw::c_uint,
- capability: nvmlNvLinkCapability_t,
- capResult: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetNvLinkRemotePciInfo_v2(
- device: nvmlDevice_t,
- link: ::std::os::raw::c_uint,
- pci: *mut nvmlPciInfo_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetNvLinkErrorCounter(
- device: nvmlDevice_t,
- link: ::std::os::raw::c_uint,
- counter: nvmlNvLinkErrorCounter_t,
- counterValue: *mut ::std::os::raw::c_ulonglong,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceResetNvLinkErrorCounters(
- device: nvmlDevice_t,
- link: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceSetNvLinkUtilizationControl(
- device: nvmlDevice_t,
- link: ::std::os::raw::c_uint,
- counter: ::std::os::raw::c_uint,
- control: *mut nvmlNvLinkUtilizationControl_t,
- reset: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetNvLinkUtilizationControl(
- device: nvmlDevice_t,
- link: ::std::os::raw::c_uint,
- counter: ::std::os::raw::c_uint,
- control: *mut nvmlNvLinkUtilizationControl_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetNvLinkUtilizationCounter(
- device: nvmlDevice_t,
- link: ::std::os::raw::c_uint,
- counter: ::std::os::raw::c_uint,
- rxcounter: *mut ::std::os::raw::c_ulonglong,
- txcounter: *mut ::std::os::raw::c_ulonglong,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceFreezeNvLinkUtilizationCounter(
- device: nvmlDevice_t,
- link: ::std::os::raw::c_uint,
- counter: ::std::os::raw::c_uint,
- freeze: nvmlEnableState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceResetNvLinkUtilizationCounter(
- device: nvmlDevice_t,
- link: ::std::os::raw::c_uint,
- counter: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlEventSetCreate(set: *mut nvmlEventSet_t) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceRegisterEvents(
- device: nvmlDevice_t,
- eventTypes: ::std::os::raw::c_ulonglong,
- set: nvmlEventSet_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetSupportedEventTypes(
- device: nvmlDevice_t,
- eventTypes: *mut ::std::os::raw::c_ulonglong,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlEventSetWait_v2(
- set: nvmlEventSet_t,
- data: *mut nvmlEventData_t,
- timeoutms: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlEventSetFree(set: nvmlEventSet_t) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceModifyDrainState(
- pciInfo: *mut nvmlPciInfo_t,
- newState: nvmlEnableState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceQueryDrainState(
- pciInfo: *mut nvmlPciInfo_t,
- currentState: *mut nvmlEnableState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceRemoveGpu_v2(
- pciInfo: *mut nvmlPciInfo_t,
- gpuState: nvmlDetachGpuState_t,
- linkState: nvmlPcieLinkState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceDiscoverGpus(pciInfo: *mut nvmlPciInfo_t) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetFieldValues(
- device: nvmlDevice_t,
- valuesCount: ::std::os::raw::c_int,
- values: *mut nvmlFieldValue_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetVirtualizationMode(
- device: nvmlDevice_t,
- pVirtualMode: *mut nvmlGpuVirtualizationMode_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetHostVgpuMode(
- device: nvmlDevice_t,
- pHostVgpuMode: *mut nvmlHostVgpuMode_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceSetVirtualizationMode(
- device: nvmlDevice_t,
- virtualMode: nvmlGpuVirtualizationMode_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetGridLicensableFeatures_v3(
- device: nvmlDevice_t,
- pGridLicensableFeatures: *mut nvmlGridLicensableFeatures_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetProcessUtilization(
- device: nvmlDevice_t,
- utilization: *mut nvmlProcessUtilizationSample_t,
- processSamplesCount: *mut ::std::os::raw::c_uint,
- lastSeenTimeStamp: ::std::os::raw::c_ulonglong,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetSupportedVgpus(
- device: nvmlDevice_t,
- vgpuCount: *mut ::std::os::raw::c_uint,
- vgpuTypeIds: *mut nvmlVgpuTypeId_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetCreatableVgpus(
- device: nvmlDevice_t,
- vgpuCount: *mut ::std::os::raw::c_uint,
- vgpuTypeIds: *mut nvmlVgpuTypeId_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuTypeGetClass(
- vgpuTypeId: nvmlVgpuTypeId_t,
- vgpuTypeClass: *mut ::std::os::raw::c_char,
- size: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuTypeGetName(
- vgpuTypeId: nvmlVgpuTypeId_t,
- vgpuTypeName: *mut ::std::os::raw::c_char,
- size: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuTypeGetDeviceID(
- vgpuTypeId: nvmlVgpuTypeId_t,
- deviceID: *mut ::std::os::raw::c_ulonglong,
- subsystemID: *mut ::std::os::raw::c_ulonglong,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuTypeGetFramebufferSize(
- vgpuTypeId: nvmlVgpuTypeId_t,
- fbSize: *mut ::std::os::raw::c_ulonglong,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuTypeGetNumDisplayHeads(
- vgpuTypeId: nvmlVgpuTypeId_t,
- numDisplayHeads: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuTypeGetResolution(
- vgpuTypeId: nvmlVgpuTypeId_t,
- displayIndex: ::std::os::raw::c_uint,
- xdim: *mut ::std::os::raw::c_uint,
- ydim: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuTypeGetLicense(
- vgpuTypeId: nvmlVgpuTypeId_t,
- vgpuTypeLicenseString: *mut ::std::os::raw::c_char,
- size: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuTypeGetFrameRateLimit(
- vgpuTypeId: nvmlVgpuTypeId_t,
- frameRateLimit: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuTypeGetMaxInstances(
- device: nvmlDevice_t,
- vgpuTypeId: nvmlVgpuTypeId_t,
- vgpuInstanceCount: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuTypeGetMaxInstancesPerVm(
- vgpuTypeId: nvmlVgpuTypeId_t,
- vgpuInstanceCountPerVm: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetActiveVgpus(
- device: nvmlDevice_t,
- vgpuCount: *mut ::std::os::raw::c_uint,
- vgpuInstances: *mut nvmlVgpuInstance_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuInstanceGetVmID(
- vgpuInstance: nvmlVgpuInstance_t,
- vmId: *mut ::std::os::raw::c_char,
- size: ::std::os::raw::c_uint,
- vmIdType: *mut nvmlVgpuVmIdType_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuInstanceGetUUID(
- vgpuInstance: nvmlVgpuInstance_t,
- uuid: *mut ::std::os::raw::c_char,
- size: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuInstanceGetVmDriverVersion(
- vgpuInstance: nvmlVgpuInstance_t,
- version: *mut ::std::os::raw::c_char,
- length: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuInstanceGetFbUsage(
- vgpuInstance: nvmlVgpuInstance_t,
- fbUsage: *mut ::std::os::raw::c_ulonglong,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuInstanceGetLicenseStatus(
- vgpuInstance: nvmlVgpuInstance_t,
- licensed: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuInstanceGetType(
- vgpuInstance: nvmlVgpuInstance_t,
- vgpuTypeId: *mut nvmlVgpuTypeId_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuInstanceGetFrameRateLimit(
- vgpuInstance: nvmlVgpuInstance_t,
- frameRateLimit: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuInstanceGetEccMode(
- vgpuInstance: nvmlVgpuInstance_t,
- eccMode: *mut nvmlEnableState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuInstanceGetEncoderCapacity(
- vgpuInstance: nvmlVgpuInstance_t,
- encoderCapacity: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuInstanceSetEncoderCapacity(
- vgpuInstance: nvmlVgpuInstance_t,
- encoderCapacity: ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuInstanceGetEncoderStats(
- vgpuInstance: nvmlVgpuInstance_t,
- sessionCount: *mut ::std::os::raw::c_uint,
- averageFps: *mut ::std::os::raw::c_uint,
- averageLatency: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuInstanceGetEncoderSessions(
- vgpuInstance: nvmlVgpuInstance_t,
- sessionCount: *mut ::std::os::raw::c_uint,
- sessionInfo: *mut nvmlEncoderSessionInfo_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuInstanceGetFBCStats(
- vgpuInstance: nvmlVgpuInstance_t,
- fbcStats: *mut nvmlFBCStats_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuInstanceGetFBCSessions(
- vgpuInstance: nvmlVgpuInstance_t,
- sessionCount: *mut ::std::os::raw::c_uint,
- sessionInfo: *mut nvmlFBCSessionInfo_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlVgpuVersion_st {
- pub minVersion: ::std::os::raw::c_uint,
- pub maxVersion: ::std::os::raw::c_uint,
-}
-pub type nvmlVgpuVersion_t = nvmlVgpuVersion_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlVgpuMetadata_st {
- pub version: ::std::os::raw::c_uint,
- pub revision: ::std::os::raw::c_uint,
- pub guestInfoState: nvmlVgpuGuestInfoState_t,
- pub guestDriverVersion: [::std::os::raw::c_char; 80usize],
- pub hostDriverVersion: [::std::os::raw::c_char; 80usize],
- pub reserved: [::std::os::raw::c_uint; 6usize],
- pub vgpuVirtualizationCaps: ::std::os::raw::c_uint,
- pub guestVgpuVersion: ::std::os::raw::c_uint,
- pub opaqueDataSize: ::std::os::raw::c_uint,
- pub opaqueData: [::std::os::raw::c_char; 4usize],
-}
-pub type nvmlVgpuMetadata_t = nvmlVgpuMetadata_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlVgpuPgpuMetadata_st {
- pub version: ::std::os::raw::c_uint,
- pub revision: ::std::os::raw::c_uint,
- pub hostDriverVersion: [::std::os::raw::c_char; 80usize],
- pub pgpuVirtualizationCaps: ::std::os::raw::c_uint,
- pub reserved: [::std::os::raw::c_uint; 5usize],
- pub hostSupportedVgpuRange: nvmlVgpuVersion_t,
- pub opaqueDataSize: ::std::os::raw::c_uint,
- pub opaqueData: [::std::os::raw::c_char; 4usize],
-}
-pub type nvmlVgpuPgpuMetadata_t = nvmlVgpuPgpuMetadata_st;
-impl nvmlVgpuVmCompatibility_enum {
- pub const NVML_VGPU_VM_COMPATIBILITY_NONE: nvmlVgpuVmCompatibility_enum =
- nvmlVgpuVmCompatibility_enum(0);
-}
-impl nvmlVgpuVmCompatibility_enum {
- pub const NVML_VGPU_VM_COMPATIBILITY_COLD: nvmlVgpuVmCompatibility_enum =
- nvmlVgpuVmCompatibility_enum(1);
-}
-impl nvmlVgpuVmCompatibility_enum {
- pub const NVML_VGPU_VM_COMPATIBILITY_HIBERNATE: nvmlVgpuVmCompatibility_enum =
- nvmlVgpuVmCompatibility_enum(2);
-}
-impl nvmlVgpuVmCompatibility_enum {
- pub const NVML_VGPU_VM_COMPATIBILITY_SLEEP: nvmlVgpuVmCompatibility_enum =
- nvmlVgpuVmCompatibility_enum(4);
-}
-impl nvmlVgpuVmCompatibility_enum {
- pub const NVML_VGPU_VM_COMPATIBILITY_LIVE: nvmlVgpuVmCompatibility_enum =
- nvmlVgpuVmCompatibility_enum(8);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlVgpuVmCompatibility_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlVgpuVmCompatibility_enum as nvmlVgpuVmCompatibility_t;
-impl nvmlVgpuPgpuCompatibilityLimitCode_enum {
- pub const NVML_VGPU_COMPATIBILITY_LIMIT_NONE: nvmlVgpuPgpuCompatibilityLimitCode_enum =
- nvmlVgpuPgpuCompatibilityLimitCode_enum(0);
-}
-impl nvmlVgpuPgpuCompatibilityLimitCode_enum {
- pub const NVML_VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER: nvmlVgpuPgpuCompatibilityLimitCode_enum =
- nvmlVgpuPgpuCompatibilityLimitCode_enum(1);
-}
-impl nvmlVgpuPgpuCompatibilityLimitCode_enum {
- pub const NVML_VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER: nvmlVgpuPgpuCompatibilityLimitCode_enum =
- nvmlVgpuPgpuCompatibilityLimitCode_enum(2);
-}
-impl nvmlVgpuPgpuCompatibilityLimitCode_enum {
- pub const NVML_VGPU_COMPATIBILITY_LIMIT_GPU: nvmlVgpuPgpuCompatibilityLimitCode_enum =
- nvmlVgpuPgpuCompatibilityLimitCode_enum(4);
-}
-impl nvmlVgpuPgpuCompatibilityLimitCode_enum {
- pub const NVML_VGPU_COMPATIBILITY_LIMIT_OTHER: nvmlVgpuPgpuCompatibilityLimitCode_enum =
- nvmlVgpuPgpuCompatibilityLimitCode_enum(2147483648);
-}
-#[repr(transparent)]
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct nvmlVgpuPgpuCompatibilityLimitCode_enum(pub ::std::os::raw::c_uint);
-pub use self::nvmlVgpuPgpuCompatibilityLimitCode_enum as nvmlVgpuPgpuCompatibilityLimitCode_t;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlVgpuPgpuCompatibility_st {
- pub vgpuVmCompatibility: nvmlVgpuVmCompatibility_t,
- pub compatibilityLimitCode: nvmlVgpuPgpuCompatibilityLimitCode_t,
-}
-pub type nvmlVgpuPgpuCompatibility_t = nvmlVgpuPgpuCompatibility_st;
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuInstanceGetMetadata(
- vgpuInstance: nvmlVgpuInstance_t,
- vgpuMetadata: *mut nvmlVgpuMetadata_t,
- bufferSize: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetVgpuMetadata(
- device: nvmlDevice_t,
- pgpuMetadata: *mut nvmlVgpuPgpuMetadata_t,
- bufferSize: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlGetVgpuCompatibility(
- vgpuMetadata: *mut nvmlVgpuMetadata_t,
- pgpuMetadata: *mut nvmlVgpuPgpuMetadata_t,
- compatibilityInfo: *mut nvmlVgpuPgpuCompatibility_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetPgpuMetadataString(
- device: nvmlDevice_t,
- pgpuMetadata: *mut ::std::os::raw::c_char,
- bufferSize: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlGetVgpuVersion(
- supported: *mut nvmlVgpuVersion_t,
- current: *mut nvmlVgpuVersion_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlSetVgpuVersion(vgpuVersion: *mut nvmlVgpuVersion_t) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetVgpuUtilization(
- device: nvmlDevice_t,
- lastSeenTimeStamp: ::std::os::raw::c_ulonglong,
- sampleValType: *mut nvmlValueType_t,
- vgpuInstanceSamplesCount: *mut ::std::os::raw::c_uint,
- utilizationSamples: *mut nvmlVgpuInstanceUtilizationSample_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetVgpuProcessUtilization(
- device: nvmlDevice_t,
- lastSeenTimeStamp: ::std::os::raw::c_ulonglong,
- vgpuProcessSamplesCount: *mut ::std::os::raw::c_uint,
- utilizationSamples: *mut nvmlVgpuProcessUtilizationSample_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuInstanceGetAccountingMode(
- vgpuInstance: nvmlVgpuInstance_t,
- mode: *mut nvmlEnableState_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuInstanceGetAccountingPids(
- vgpuInstance: nvmlVgpuInstance_t,
- count: *mut ::std::os::raw::c_uint,
- pids: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuInstanceGetAccountingStats(
- vgpuInstance: nvmlVgpuInstance_t,
- pid: ::std::os::raw::c_uint,
- stats: *mut nvmlAccountingStats_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlVgpuInstanceClearAccountingPids(
- vgpuInstance: nvmlVgpuInstance_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlBlacklistDeviceInfo_st {
- pub pciInfo: nvmlPciInfo_t,
- pub uuid: [::std::os::raw::c_char; 80usize],
-}
-pub type nvmlBlacklistDeviceInfo_t = nvmlBlacklistDeviceInfo_st;
-
-#[no_mangle]
-pub extern "C" fn nvmlGetBlacklistDeviceCount(
- deviceCount: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlGetBlacklistDeviceInfoByIndex(
- index: ::std::os::raw::c_uint,
- info: *mut nvmlBlacklistDeviceInfo_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlGpuInstancePlacement_st {
- pub start: ::std::os::raw::c_uint,
- pub size: ::std::os::raw::c_uint,
-}
-pub type nvmlGpuInstancePlacement_t = nvmlGpuInstancePlacement_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlGpuInstanceProfileInfo_st {
- pub id: ::std::os::raw::c_uint,
- pub isP2pSupported: ::std::os::raw::c_uint,
- pub sliceCount: ::std::os::raw::c_uint,
- pub instanceCount: ::std::os::raw::c_uint,
- pub multiprocessorCount: ::std::os::raw::c_uint,
- pub copyEngineCount: ::std::os::raw::c_uint,
- pub decoderCount: ::std::os::raw::c_uint,
- pub encoderCount: ::std::os::raw::c_uint,
- pub jpegCount: ::std::os::raw::c_uint,
- pub ofaCount: ::std::os::raw::c_uint,
- pub memorySizeMB: ::std::os::raw::c_ulonglong,
-}
-pub type nvmlGpuInstanceProfileInfo_t = nvmlGpuInstanceProfileInfo_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlGpuInstanceInfo_st {
- pub device: nvmlDevice_t,
- pub id: ::std::os::raw::c_uint,
- pub profileId: ::std::os::raw::c_uint,
- pub placement: nvmlGpuInstancePlacement_t,
-}
-pub type nvmlGpuInstanceInfo_t = nvmlGpuInstanceInfo_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlGpuInstance_st {
- _unused: [u8; 0],
-}
-pub type nvmlGpuInstance_t = *mut nvmlGpuInstance_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlComputeInstanceProfileInfo_st {
- pub id: ::std::os::raw::c_uint,
- pub sliceCount: ::std::os::raw::c_uint,
- pub instanceCount: ::std::os::raw::c_uint,
- pub multiprocessorCount: ::std::os::raw::c_uint,
- pub sharedCopyEngineCount: ::std::os::raw::c_uint,
- pub sharedDecoderCount: ::std::os::raw::c_uint,
- pub sharedEncoderCount: ::std::os::raw::c_uint,
- pub sharedJpegCount: ::std::os::raw::c_uint,
- pub sharedOfaCount: ::std::os::raw::c_uint,
-}
-pub type nvmlComputeInstanceProfileInfo_t = nvmlComputeInstanceProfileInfo_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlComputeInstanceInfo_st {
- pub device: nvmlDevice_t,
- pub gpuInstance: nvmlGpuInstance_t,
- pub id: ::std::os::raw::c_uint,
- pub profileId: ::std::os::raw::c_uint,
-}
-pub type nvmlComputeInstanceInfo_t = nvmlComputeInstanceInfo_st;
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct nvmlComputeInstance_st {
- _unused: [u8; 0],
-}
-pub type nvmlComputeInstance_t = *mut nvmlComputeInstance_st;
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceSetMigMode(
- device: nvmlDevice_t,
- mode: ::std::os::raw::c_uint,
- activationStatus: *mut nvmlReturn_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetMigMode(
- device: nvmlDevice_t,
- currentMode: *mut ::std::os::raw::c_uint,
- pendingMode: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetGpuInstanceProfileInfo(
- device: nvmlDevice_t,
- profile: ::std::os::raw::c_uint,
- info: *mut nvmlGpuInstanceProfileInfo_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetGpuInstancePossiblePlacements(
- device: nvmlDevice_t,
- profileId: ::std::os::raw::c_uint,
- placements: *mut nvmlGpuInstancePlacement_t,
- count: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetGpuInstanceRemainingCapacity(
- device: nvmlDevice_t,
- profileId: ::std::os::raw::c_uint,
- count: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceCreateGpuInstance(
- device: nvmlDevice_t,
- profileId: ::std::os::raw::c_uint,
- gpuInstance: *mut nvmlGpuInstance_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlGpuInstanceDestroy(gpuInstance: nvmlGpuInstance_t) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetGpuInstances(
- device: nvmlDevice_t,
- profileId: ::std::os::raw::c_uint,
- gpuInstances: *mut nvmlGpuInstance_t,
- count: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetGpuInstanceById(
- device: nvmlDevice_t,
- id: ::std::os::raw::c_uint,
- gpuInstance: *mut nvmlGpuInstance_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlGpuInstanceGetInfo(
- gpuInstance: nvmlGpuInstance_t,
- info: *mut nvmlGpuInstanceInfo_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlGpuInstanceGetComputeInstanceProfileInfo(
- gpuInstance: nvmlGpuInstance_t,
- profile: ::std::os::raw::c_uint,
- engProfile: ::std::os::raw::c_uint,
- info: *mut nvmlComputeInstanceProfileInfo_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlGpuInstanceGetComputeInstanceRemainingCapacity(
- gpuInstance: nvmlGpuInstance_t,
- profileId: ::std::os::raw::c_uint,
- count: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlGpuInstanceCreateComputeInstance(
- gpuInstance: nvmlGpuInstance_t,
- profileId: ::std::os::raw::c_uint,
- computeInstance: *mut nvmlComputeInstance_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlComputeInstanceDestroy(
- computeInstance: nvmlComputeInstance_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlGpuInstanceGetComputeInstances(
- gpuInstance: nvmlGpuInstance_t,
- profileId: ::std::os::raw::c_uint,
- computeInstances: *mut nvmlComputeInstance_t,
- count: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlGpuInstanceGetComputeInstanceById(
- gpuInstance: nvmlGpuInstance_t,
- id: ::std::os::raw::c_uint,
- computeInstance: *mut nvmlComputeInstance_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlComputeInstanceGetInfo(
- computeInstance: nvmlComputeInstance_t,
- info: *mut nvmlComputeInstanceInfo_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceIsMigDeviceHandle(
- device: nvmlDevice_t,
- isMigDevice: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetGpuInstanceId(
- device: nvmlDevice_t,
- id: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetComputeInstanceId(
- device: nvmlDevice_t,
- id: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetMaxMigDeviceCount(
- device: nvmlDevice_t,
- count: *mut ::std::os::raw::c_uint,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetMigDeviceHandleByIndex(
- device: nvmlDevice_t,
- index: ::std::os::raw::c_uint,
- migDevice: *mut nvmlDevice_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}
-
-#[no_mangle]
-pub extern "C" fn nvmlDeviceGetDeviceHandleFromMigDeviceHandle(
- migDevice: nvmlDevice_t,
- device: *mut nvmlDevice_t,
-) -> nvmlReturn_t {
- crate::r#impl::unimplemented()
-}