Diffstat (limited to 'cuda_base')
-rw-r--r--  cuda_base/Cargo.toml  |    2
-rw-r--r--  cuda_base/src/cuda.rs | 3671
-rw-r--r--  cuda_base/src/lib.rs  |   13
-rw-r--r--  cuda_base/src/nvml.rs | 7857
4 files changed, 9747 insertions, 1796 deletions
diff --git a/cuda_base/Cargo.toml b/cuda_base/Cargo.toml
index 9c9d531..b2bbdaa 100644
--- a/cuda_base/Cargo.toml
+++ b/cuda_base/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
[dependencies]
quote = "1.0"
-syn = { version = "2.0", features = ["full", "visit-mut"] }
+syn = { version = "2.0", features = ["full", "visit-mut", "extra-traits"] }
proc-macro2 = "1.0"
rustc-hash = "1.1.0"
diff --git a/cuda_base/src/cuda.rs b/cuda_base/src/cuda.rs
index 2cc5a56..37aadf1 100644
--- a/cuda_base/src/cuda.rs
+++ b/cuda_base/src/cuda.rs
@@ -20,9 +20,9 @@ extern "system" {
::CUresult,
::cudaGetErrorString*/
fn cuGetErrorString(
- error: cuda_types::CUresult,
+ error: cuda_types::cuda::CUresult,
pStr: *mut *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the string representation of an error code enum name
Sets \p *pStr to the address of a NULL-terminated string representation
@@ -41,9 +41,9 @@ extern "system" {
::CUresult,
::cudaGetErrorName*/
fn cuGetErrorName(
- error: cuda_types::CUresult,
+ error: cuda_types::cuda::CUresult,
pStr: *mut *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Initialize the CUDA driver API
Initializes the driver API and must be called before any other function from
the driver API in the current process. Currently, the \p Flags parameter must be 0. If ::cuInit()
@@ -59,7 +59,7 @@ extern "system" {
::CUDA_ERROR_SYSTEM_DRIVER_MISMATCH,
::CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE
\notefnerr*/
- fn cuInit(Flags: ::core::ffi::c_uint) -> cuda_types::CUresult;
+ fn cuInit(Flags: ::core::ffi::c_uint) -> cuda_types::cuda::CUresult;
/** \brief Returns the latest CUDA version supported by driver
Returns in \p *driverVersion the version of CUDA supported by
@@ -82,7 +82,7 @@ extern "system" {
::cudaRuntimeGetVersion*/
fn cuDriverGetVersion(
driverVersion: *mut ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a handle to a compute device
Returns in \p *device a device handle given an ordinal in the range <b>[0,
@@ -109,9 +109,9 @@ extern "system" {
::cuDeviceTotalMem,
::cuDeviceGetExecAffinitySupport*/
fn cuDeviceGet(
- device: *mut cuda_types::CUdevice,
+ device: *mut cuda_types::cuda::CUdevice,
ordinal: ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the number of compute-capable devices
Returns in \p *count the number of devices with compute capability greater
@@ -137,7 +137,7 @@ extern "system" {
::cuDeviceTotalMem,
::cuDeviceGetExecAffinitySupport,
::cudaGetDeviceCount*/
- fn cuDeviceGetCount(count: *mut ::core::ffi::c_int) -> cuda_types::CUresult;
+ fn cuDeviceGetCount(count: *mut ::core::ffi::c_int) -> cuda_types::cuda::CUresult;
/** \brief Returns an identifier string for the device
Returns an ASCII string identifying the device \p dev in the NULL-terminated
@@ -169,8 +169,8 @@ extern "system" {
fn cuDeviceGetName(
name: *mut ::core::ffi::c_char,
len: ::core::ffi::c_int,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Return an UUID for the device
Note there is a later version of this API, ::cuDeviceGetUuid_v2. It will
@@ -201,9 +201,9 @@ extern "system" {
::cuDeviceGetExecAffinitySupport,
::cudaGetDeviceProperties*/
fn cuDeviceGetUuid(
- uuid: *mut cuda_types::CUuuid,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ uuid: *mut cuda_types::cuda::CUuuid,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Return an UUID for the device (11.4+)
Returns 16-octets identifying the device \p dev in the structure
@@ -230,9 +230,9 @@ extern "system" {
::cuDeviceTotalMem,
::cudaGetDeviceProperties*/
fn cuDeviceGetUuid_v2(
- uuid: *mut cuda_types::CUuuid,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ uuid: *mut cuda_types::cuda::CUuuid,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Return an LUID and device node mask for the device
Return identifying information (\p luid and \p deviceNodeMask) to allow
@@ -261,8 +261,8 @@ extern "system" {
fn cuDeviceGetLuid(
luid: *mut ::core::ffi::c_char,
deviceNodeMask: *mut ::core::ffi::c_uint,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the total amount of memory on the device
Returns in \p *bytes the total amount of memory available on the device
@@ -290,8 +290,8 @@ extern "system" {
::cudaMemGetInfo*/
fn cuDeviceTotalMem_v2(
bytes: *mut usize,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the maximum number of elements allocatable in a 1D linear texture for a given texture element size.
Returns in \p maxWidthInElements the maximum number of texture elements allocatable in a 1D linear texture
@@ -321,10 +321,10 @@ extern "system" {
::cuDeviceTotalMem*/
fn cuDeviceGetTexture1DLinearMaxWidth(
maxWidthInElements: *mut usize,
- format: cuda_types::CUarray_format,
+ format: cuda_types::cuda::CUarray_format,
numChannels: ::core::ffi::c_uint,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns information about the device
Returns in \p *pi the integer value of the attribute \p attrib on device
@@ -546,9 +546,9 @@ extern "system" {
::cudaGetDeviceProperties*/
fn cuDeviceGetAttribute(
pi: *mut ::core::ffi::c_int,
- attrib: cuda_types::CUdevice_attribute,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ attrib: cuda_types::cuda::CUdevice_attribute,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Return NvSciSync attributes that this device can support.
Returns in \p nvSciSyncAttrList, the properties of NvSciSync that
@@ -610,9 +610,9 @@ extern "system" {
::cuWaitExternalSemaphoresAsync*/
fn cuDeviceGetNvSciSyncAttributes(
nvSciSyncAttrList: *mut ::core::ffi::c_void,
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
flags: ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the current memory pool of a device
The memory pool must be local to the specified device.
@@ -628,9 +628,9 @@ extern "system" {
\sa ::cuDeviceGetDefaultMemPool, ::cuDeviceGetMemPool, ::cuMemPoolCreate, ::cuMemPoolDestroy, ::cuMemAllocFromPoolAsync*/
fn cuDeviceSetMemPool(
- dev: cuda_types::CUdevice,
- pool: cuda_types::CUmemoryPool,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ pool: cuda_types::cuda::CUmemoryPool,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the current mempool for a device
Returns the last pool provided to ::cuDeviceSetMemPool for this device
@@ -644,9 +644,9 @@ extern "system" {
\sa ::cuDeviceGetDefaultMemPool, ::cuMemPoolCreate, ::cuDeviceSetMemPool*/
fn cuDeviceGetMemPool(
- pool: *mut cuda_types::CUmemoryPool,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ pool: *mut cuda_types::cuda::CUmemoryPool,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the default mempool of a device
The default mempool of a device contains device memory from that device.
@@ -662,9 +662,9 @@ extern "system" {
\sa ::cuMemAllocAsync, ::cuMemPoolTrimTo, ::cuMemPoolGetAttribute, ::cuMemPoolSetAttribute, cuMemPoolSetAccess, ::cuDeviceGetMemPool, ::cuMemPoolCreate*/
fn cuDeviceGetDefaultMemPool(
- pool_out: *mut cuda_types::CUmemoryPool,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ pool_out: *mut cuda_types::cuda::CUmemoryPool,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns information about the execution affinity support of the device.
Returns in \p *pi whether execution affinity type \p type is supported by device \p dev.
@@ -694,9 +694,9 @@ extern "system" {
::cuDeviceTotalMem*/
fn cuDeviceGetExecAffinitySupport(
pi: *mut ::core::ffi::c_int,
- type_: cuda_types::CUexecAffinityType,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ type_: cuda_types::cuda::CUexecAffinityType,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Blocks until remote writes are visible to the specified scope
Blocks until GPUDirect RDMA writes to the target context via mappings
@@ -725,9 +725,9 @@ extern "system" {
\notefnerr
*/
fn cuFlushGPUDirectRDMAWrites(
- target: cuda_types::CUflushGPUDirectRDMAWritesTarget,
- scope: cuda_types::CUflushGPUDirectRDMAWritesScope,
- ) -> cuda_types::CUresult;
+ target: cuda_types::cuda::CUflushGPUDirectRDMAWritesTarget,
+ scope: cuda_types::cuda::CUflushGPUDirectRDMAWritesScope,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns properties for a selected device
\deprecated
@@ -789,9 +789,9 @@ int textureAlign
::cuDeviceGet,
::cuDeviceTotalMem*/
fn cuDeviceGetProperties(
- prop: *mut cuda_types::CUdevprop,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ prop: *mut cuda_types::cuda::CUdevprop,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the compute capability of the device
\deprecated
@@ -825,8 +825,8 @@ int textureAlign
fn cuDeviceComputeCapability(
major: *mut ::core::ffi::c_int,
minor: *mut ::core::ffi::c_int,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Retain the primary context on the GPU
Retains the primary context on the device.
@@ -874,9 +874,9 @@ int textureAlign
::cuCtxSetLimit,
::cuCtxSynchronize*/
fn cuDevicePrimaryCtxRetain(
- pctx: *mut cuda_types::CUcontext,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ pctx: *mut cuda_types::cuda::CUcontext,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Release the primary context on the GPU
Releases the primary context interop on the device.
@@ -914,7 +914,9 @@ int textureAlign
::cuCtxSetCacheConfig,
::cuCtxSetLimit,
::cuCtxSynchronize*/
- fn cuDevicePrimaryCtxRelease_v2(dev: cuda_types::CUdevice) -> cuda_types::CUresult;
+ fn cuDevicePrimaryCtxRelease_v2(
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Set flags for the primary context
Sets the flags for the primary context on the device overwriting perviously
@@ -1008,9 +1010,9 @@ int textureAlign
::cuCtxSetFlags,
::cudaSetDeviceFlags*/
fn cuDevicePrimaryCtxSetFlags_v2(
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Get the state of the primary context
Returns in \p *flags the flags for the primary context of \p dev, and in
@@ -1035,10 +1037,10 @@ int textureAlign
::cuCtxSetFlags,
::cudaGetDeviceFlags*/
fn cuDevicePrimaryCtxGetState(
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
flags: *mut ::core::ffi::c_uint,
active: *mut ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroy all allocations and reset all state on the primary context
Explicitly destroys and cleans up all resources associated with the current
@@ -1075,7 +1077,9 @@ int textureAlign
::cuCtxSetLimit,
::cuCtxSynchronize,
::cudaDeviceReset*/
- fn cuDevicePrimaryCtxReset_v2(dev: cuda_types::CUdevice) -> cuda_types::CUresult;
+ fn cuDevicePrimaryCtxReset_v2(
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a CUDA context
\note In most cases it is recommended to use ::cuDevicePrimaryCtxRetain.
@@ -1201,10 +1205,10 @@ int textureAlign
::cuCoredumpSetAttribute,
::cuCtxSynchronize*/
fn cuCtxCreate_v2(
- pctx: *mut cuda_types::CUcontext,
+ pctx: *mut cuda_types::cuda::CUcontext,
flags: ::core::ffi::c_uint,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a CUDA context with execution affinity
Creates a new CUDA context with execution affinity and associates it with
@@ -1338,12 +1342,12 @@ int textureAlign
::cuCoredumpSetAttribute,
::CUexecAffinityParam*/
fn cuCtxCreate_v3(
- pctx: *mut cuda_types::CUcontext,
- paramsArray: *mut cuda_types::CUexecAffinityParam,
+ pctx: *mut cuda_types::cuda::CUcontext,
+ paramsArray: *mut cuda_types::cuda::CUexecAffinityParam,
numParams: ::core::ffi::c_int,
flags: ::core::ffi::c_uint,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroy a CUDA context
Destroys the CUDA context specified by \p ctx. The context \p ctx will be
@@ -1385,7 +1389,7 @@ int textureAlign
::cuCtxSetCacheConfig,
::cuCtxSetLimit,
::cuCtxSynchronize*/
- fn cuCtxDestroy_v2(ctx: cuda_types::CUcontext) -> cuda_types::CUresult;
+ fn cuCtxDestroy_v2(ctx: cuda_types::cuda::CUcontext) -> cuda_types::cuda::CUresult;
/** \brief Pushes a context on the current CPU thread
Pushes the given context \p ctx onto the CPU thread's stack of current
@@ -1416,7 +1420,9 @@ int textureAlign
::cuCtxSetCacheConfig,
::cuCtxSetLimit,
::cuCtxSynchronize*/
- fn cuCtxPushCurrent_v2(ctx: cuda_types::CUcontext) -> cuda_types::CUresult;
+ fn cuCtxPushCurrent_v2(
+ ctx: cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Pops the current CUDA context from the current CPU thread.
Pops the current CUDA context from the CPU thread and passes back the
@@ -1447,7 +1453,9 @@ int textureAlign
::cuCtxSetCacheConfig,
::cuCtxSetLimit,
::cuCtxSynchronize*/
- fn cuCtxPopCurrent_v2(pctx: *mut cuda_types::CUcontext) -> cuda_types::CUresult;
+ fn cuCtxPopCurrent_v2(
+ pctx: *mut cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Binds the specified CUDA context to the calling CPU thread
Binds the specified CUDA context to the calling CPU thread.
@@ -1474,7 +1482,7 @@ int textureAlign
::cuCtxCreate,
::cuCtxDestroy,
::cudaSetDevice*/
- fn cuCtxSetCurrent(ctx: cuda_types::CUcontext) -> cuda_types::CUresult;
+ fn cuCtxSetCurrent(ctx: cuda_types::cuda::CUcontext) -> cuda_types::cuda::CUresult;
/** \brief Returns the CUDA context bound to the calling CPU thread.
Returns in \p *pctx the CUDA context bound to the calling CPU thread.
@@ -1494,7 +1502,9 @@ int textureAlign
::cuCtxCreate,
::cuCtxDestroy,
::cudaGetDevice*/
- fn cuCtxGetCurrent(pctx: *mut cuda_types::CUcontext) -> cuda_types::CUresult;
+ fn cuCtxGetCurrent(
+ pctx: *mut cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the device ID for the current context
Returns in \p *device the ordinal of the current context's device.
@@ -1521,7 +1531,9 @@ int textureAlign
::cuCtxSetLimit,
::cuCtxSynchronize,
::cudaGetDevice*/
- fn cuCtxGetDevice(device: *mut cuda_types::CUdevice) -> cuda_types::CUresult;
+ fn cuCtxGetDevice(
+ device: *mut cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the flags for the current context
Returns in \p *flags the flags of the current context. See ::cuCtxCreate
@@ -1547,7 +1559,7 @@ int textureAlign
::cuCtxGetStreamPriorityRange,
::cuCtxSetFlags,
::cudaGetDeviceFlags*/
- fn cuCtxGetFlags(flags: *mut ::core::ffi::c_uint) -> cuda_types::CUresult;
+ fn cuCtxGetFlags(flags: *mut ::core::ffi::c_uint) -> cuda_types::cuda::CUresult;
/** \brief Sets the flags for the current context
Sets the flags for the current context overwriting previously set ones. See
@@ -1574,7 +1586,7 @@ int textureAlign
::cuCtxGetFlags,
::cudaGetDeviceFlags,
::cuDevicePrimaryCtxSetFlags,*/
- fn cuCtxSetFlags(flags: ::core::ffi::c_uint) -> cuda_types::CUresult;
+ fn cuCtxSetFlags(flags: ::core::ffi::c_uint) -> cuda_types::cuda::CUresult;
/** \brief Returns the unique Id associated with the context supplied
Returns in \p ctxId the unique Id which is associated with a given context.
@@ -1603,9 +1615,9 @@ int textureAlign
::cuCtxGetLimit,
::cuCtxPushCurrent*/
fn cuCtxGetId(
- ctx: cuda_types::CUcontext,
+ ctx: cuda_types::cuda::CUcontext,
ctxId: *mut ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Block for a context's tasks to complete
Blocks until the device has completed all preceding requested tasks.
@@ -1632,7 +1644,7 @@ int textureAlign
::cuCtxSetCacheConfig,
::cuCtxSetLimit,
::cudaDeviceSynchronize*/
- fn cuCtxSynchronize() -> cuda_types::CUresult;
+ fn cuCtxSynchronize() -> cuda_types::cuda::CUresult;
/** \brief Set resource limits
Setting \p limit to \p value is a request by the application to update
@@ -1728,7 +1740,10 @@ int textureAlign
::cuCtxSetCacheConfig,
::cuCtxSynchronize,
::cudaDeviceSetLimit*/
- fn cuCtxSetLimit(limit: cuda_types::CUlimit, value: usize) -> cuda_types::CUresult;
+ fn cuCtxSetLimit(
+ limit: cuda_types::cuda::CUlimit,
+ value: usize,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns resource limits
Returns in \p *pvalue the current size of \p limit. The supported
@@ -1769,8 +1784,8 @@ int textureAlign
::cudaDeviceGetLimit*/
fn cuCtxGetLimit(
pvalue: *mut usize,
- limit: cuda_types::CUlimit,
- ) -> cuda_types::CUresult;
+ limit: cuda_types::cuda::CUlimit,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the preferred cache configuration for the current context.
On devices where the L1 cache and shared memory use the same hardware
@@ -1812,8 +1827,8 @@ int textureAlign
::cuFuncSetCacheConfig,
::cudaDeviceGetCacheConfig*/
fn cuCtxGetCacheConfig(
- pconfig: *mut cuda_types::CUfunc_cache,
- ) -> cuda_types::CUresult;
+ pconfig: *mut cuda_types::cuda::CUfunc_cache,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the preferred cache configuration for the current context.
On devices where the L1 cache and shared memory use the same hardware
@@ -1862,7 +1877,9 @@ int textureAlign
::cuFuncSetCacheConfig,
::cudaDeviceSetCacheConfig,
::cuKernelSetCacheConfig*/
- fn cuCtxSetCacheConfig(config: cuda_types::CUfunc_cache) -> cuda_types::CUresult;
+ fn cuCtxSetCacheConfig(
+ config: cuda_types::cuda::CUfunc_cache,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the context's API version.
Returns a version number in \p version corresponding to the capabilities of
@@ -1898,9 +1915,9 @@ int textureAlign
::cuCtxSetLimit,
::cuCtxSynchronize*/
fn cuCtxGetApiVersion(
- ctx: cuda_types::CUcontext,
+ ctx: cuda_types::cuda::CUcontext,
version: *mut ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns numerical values that correspond to the least and
greatest stream priorities.
@@ -1940,7 +1957,7 @@ int textureAlign
fn cuCtxGetStreamPriorityRange(
leastPriority: *mut ::core::ffi::c_int,
greatestPriority: *mut ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Resets all persisting lines in cache to normal status.
::cuCtxResetPersistingL2Cache Resets all persisting lines in cache to normal
@@ -1953,7 +1970,7 @@ int textureAlign
\sa
::CUaccessPolicyWindow*/
- fn cuCtxResetPersistingL2Cache() -> cuda_types::CUresult;
+ fn cuCtxResetPersistingL2Cache() -> cuda_types::cuda::CUresult;
/** \brief Returns the execution affinity setting for the current context.
Returns in \p *pExecAffinity the current value of \p type. The supported
@@ -1975,9 +1992,9 @@ int textureAlign
\sa
::CUexecAffinityParam*/
fn cuCtxGetExecAffinity(
- pExecAffinity: *mut cuda_types::CUexecAffinityParam,
- type_: cuda_types::CUexecAffinityType,
- ) -> cuda_types::CUresult;
+ pExecAffinity: *mut cuda_types::cuda::CUexecAffinityParam,
+ type_: cuda_types::cuda::CUexecAffinityType,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Increment a context's usage-count
\deprecated
@@ -2016,9 +2033,9 @@ int textureAlign
::cuCtxSetLimit,
::cuCtxSynchronize*/
fn cuCtxAttach(
- pctx: *mut cuda_types::CUcontext,
+ pctx: *mut cuda_types::cuda::CUcontext,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Decrement a context's usage-count
\deprecated
@@ -2051,7 +2068,7 @@ int textureAlign
::cuCtxSetCacheConfig,
::cuCtxSetLimit,
::cuCtxSynchronize*/
- fn cuCtxDetach(ctx: cuda_types::CUcontext) -> cuda_types::CUresult;
+ fn cuCtxDetach(ctx: cuda_types::cuda::CUcontext) -> cuda_types::cuda::CUresult;
/** \brief Returns the current shared memory configuration for the current context.
\deprecated
@@ -2093,8 +2110,8 @@ int textureAlign
::cuFuncSetCacheConfig,
::cudaDeviceGetSharedMemConfig*/
fn cuCtxGetSharedMemConfig(
- pConfig: *mut cuda_types::CUsharedconfig,
- ) -> cuda_types::CUresult;
+ pConfig: *mut cuda_types::cuda::CUsharedconfig,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the shared memory configuration for the current context.
\deprecated
@@ -2147,8 +2164,8 @@ int textureAlign
::cuFuncSetCacheConfig,
::cudaDeviceSetSharedMemConfig*/
fn cuCtxSetSharedMemConfig(
- config: cuda_types::CUsharedconfig,
- ) -> cuda_types::CUresult;
+ config: cuda_types::cuda::CUsharedconfig,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Loads a compute module
Takes a filename \p fname and loads the corresponding module \p module into
@@ -2187,9 +2204,9 @@ int textureAlign
::cuModuleLoadFatBinary,
::cuModuleUnload*/
fn cuModuleLoad(
- module: *mut cuda_types::CUmodule,
+ module: *mut cuda_types::cuda::CUmodule,
fname: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Load a module's data
Takes a pointer \p image and loads the corresponding module \p module into
@@ -2223,9 +2240,9 @@ int textureAlign
::cuModuleLoadFatBinary,
::cuModuleUnload*/
fn cuModuleLoadData(
- module: *mut cuda_types::CUmodule,
+ module: *mut cuda_types::cuda::CUmodule,
image: *const ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Load a module's data with options
Takes a pointer \p image and loads the corresponding module \p module into
@@ -2262,12 +2279,12 @@ int textureAlign
::cuModuleLoadFatBinary,
::cuModuleUnload*/
fn cuModuleLoadDataEx(
- module: *mut cuda_types::CUmodule,
+ module: *mut cuda_types::cuda::CUmodule,
image: *const ::core::ffi::c_void,
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Load a module's data
Takes a pointer \p fatCubin and loads the corresponding module \p module
@@ -2308,9 +2325,9 @@ int textureAlign
::cuModuleLoadDataEx,
::cuModuleUnload*/
fn cuModuleLoadFatBinary(
- module: *mut cuda_types::CUmodule,
+ module: *mut cuda_types::cuda::CUmodule,
fatCubin: *const ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unloads a module
Unloads a module \p hmod from the current context. Attempting to unload
@@ -2336,7 +2353,7 @@ int textureAlign
::cuModuleLoadData,
::cuModuleLoadDataEx,
::cuModuleLoadFatBinary*/
- fn cuModuleUnload(hmod: cuda_types::CUmodule) -> cuda_types::CUresult;
+ fn cuModuleUnload(hmod: cuda_types::cuda::CUmodule) -> cuda_types::cuda::CUresult;
/** \brief Query lazy loading mode
Returns lazy loading mode
@@ -2352,8 +2369,8 @@ int textureAlign
\sa
::cuModuleLoad,*/
fn cuModuleGetLoadingMode(
- mode: *mut cuda_types::CUmoduleLoadingMode,
- ) -> cuda_types::CUresult;
+ mode: *mut cuda_types::cuda::CUmoduleLoadingMode,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a function handle
Returns in \p *hfunc the handle of the function of name \p name located in
@@ -2381,10 +2398,10 @@ int textureAlign
::cuModuleLoadFatBinary,
::cuModuleUnload*/
fn cuModuleGetFunction(
- hfunc: *mut cuda_types::CUfunction,
- hmod: cuda_types::CUmodule,
+ hfunc: *mut cuda_types::cuda::CUfunction,
+ hmod: cuda_types::cuda::CUmodule,
name: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the number of functions within a module
Returns in \p count the number of functions in \p mod.
@@ -2398,8 +2415,8 @@ int textureAlign
::CUDA_ERROR_INVALID_VALUE*/
fn cuModuleGetFunctionCount(
count: *mut ::core::ffi::c_uint,
- mod_: cuda_types::CUmodule,
- ) -> cuda_types::CUresult;
+ mod_: cuda_types::cuda::CUmodule,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the function handles within a module.
Returns in \p functions a maximum number of \p numFunctions function handles within \p mod. When
@@ -2424,10 +2441,10 @@ int textureAlign
::cuFuncIsLoaded,
::cuFuncLoad*/
fn cuModuleEnumerateFunctions(
- functions: *mut cuda_types::CUfunction,
+ functions: *mut cuda_types::cuda::CUfunction,
numFunctions: ::core::ffi::c_uint,
- mod_: cuda_types::CUmodule,
- ) -> cuda_types::CUresult;
+ mod_: cuda_types::cuda::CUmodule,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a global pointer from a module
Returns in \p *dptr and \p *bytes the base pointer and size of the
@@ -2460,11 +2477,11 @@ int textureAlign
::cudaGetSymbolAddress,
::cudaGetSymbolSize*/
fn cuModuleGetGlobal_v2(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytes: *mut usize,
- hmod: cuda_types::CUmodule,
+ hmod: cuda_types::cuda::CUmodule,
name: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a pending JIT linker invocation.
If the call is successful, the caller owns the returned CUlinkState, which
@@ -2505,10 +2522,10 @@ int textureAlign
::cuLinkDestroy*/
fn cuLinkCreate_v2(
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
- stateOut: *mut cuda_types::CUlinkState,
- ) -> cuda_types::CUresult;
+ stateOut: *mut cuda_types::cuda::CUlinkState,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Add an input to a pending linker invocation
Ownership of \p data is retained by the caller. No reference is retained to any
@@ -2545,15 +2562,15 @@ int textureAlign
::cuLinkComplete,
::cuLinkDestroy*/
fn cuLinkAddData_v2(
- state: cuda_types::CUlinkState,
- type_: cuda_types::CUjitInputType,
+ state: cuda_types::cuda::CUlinkState,
+ type_: cuda_types::cuda::CUjitInputType,
data: *mut ::core::ffi::c_void,
size: usize,
name: *const ::core::ffi::c_char,
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Add a file input to a pending linker invocation
No reference is retained to any inputs after this call returns.
@@ -2591,13 +2608,13 @@ int textureAlign
::cuLinkComplete,
::cuLinkDestroy*/
fn cuLinkAddFile_v2(
- state: cuda_types::CUlinkState,
- type_: cuda_types::CUjitInputType,
+ state: cuda_types::cuda::CUlinkState,
+ type_: cuda_types::cuda::CUjitInputType,
path: *const ::core::ffi::c_char,
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Complete a pending linker invocation
Completes the pending linker action and returns the cubin image for the linked
@@ -2620,10 +2637,10 @@ int textureAlign
::cuLinkDestroy,
::cuModuleLoadData*/
fn cuLinkComplete(
- state: cuda_types::CUlinkState,
+ state: cuda_types::cuda::CUlinkState,
cubinOut: *mut *mut ::core::ffi::c_void,
sizeOut: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys state for a JIT linker invocation.
\param state State object for the linker invocation
@@ -2633,7 +2650,7 @@ int textureAlign
::CUDA_ERROR_INVALID_HANDLE
\sa ::cuLinkCreate*/
- fn cuLinkDestroy(state: cuda_types::CUlinkState) -> cuda_types::CUresult;
+ fn cuLinkDestroy(state: cuda_types::cuda::CUlinkState) -> cuda_types::cuda::CUresult;
/** \brief Returns a handle to a texture reference
\deprecated
@@ -2667,10 +2684,10 @@ int textureAlign
::cuModuleLoadFatBinary,
::cuModuleUnload*/
fn cuModuleGetTexRef(
- pTexRef: *mut cuda_types::CUtexref,
- hmod: cuda_types::CUmodule,
+ pTexRef: *mut cuda_types::cuda::CUtexref,
+ hmod: cuda_types::cuda::CUmodule,
name: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a handle to a surface reference
\deprecated
@@ -2702,10 +2719,10 @@ int textureAlign
::cuModuleLoadFatBinary,
::cuModuleUnload*/
fn cuModuleGetSurfRef(
- pSurfRef: *mut cuda_types::CUsurfref,
- hmod: cuda_types::CUmodule,
+ pSurfRef: *mut cuda_types::cuda::CUsurfref,
+ hmod: cuda_types::cuda::CUmodule,
name: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Load a library with specified code and options
Takes a pointer \p code and loads the corresponding library \p library based on
@@ -2758,15 +2775,15 @@ int textureAlign
::cuModuleLoadData,
::cuModuleLoadDataEx*/
fn cuLibraryLoadData(
- library: *mut cuda_types::CUlibrary,
+ library: *mut cuda_types::cuda::CUlibrary,
code: *const ::core::ffi::c_void,
- jitOptions: *mut cuda_types::CUjit_option,
+ jitOptions: *mut cuda_types::cuda::CUjit_option,
jitOptionsValues: *mut *mut ::core::ffi::c_void,
numJitOptions: ::core::ffi::c_uint,
- libraryOptions: *mut cuda_types::CUlibraryOption,
+ libraryOptions: *mut cuda_types::cuda::CUlibraryOption,
libraryOptionValues: *mut *mut ::core::ffi::c_void,
numLibraryOptions: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Load a library with specified file and options
Takes a pointer \p code and loads the corresponding library \p library based on
@@ -2819,15 +2836,15 @@ int textureAlign
::cuModuleLoadData,
::cuModuleLoadDataEx*/
fn cuLibraryLoadFromFile(
- library: *mut cuda_types::CUlibrary,
+ library: *mut cuda_types::cuda::CUlibrary,
fileName: *const ::core::ffi::c_char,
- jitOptions: *mut cuda_types::CUjit_option,
+ jitOptions: *mut cuda_types::cuda::CUjit_option,
jitOptionsValues: *mut *mut ::core::ffi::c_void,
numJitOptions: ::core::ffi::c_uint,
- libraryOptions: *mut cuda_types::CUlibraryOption,
+ libraryOptions: *mut cuda_types::cuda::CUlibraryOption,
libraryOptionValues: *mut *mut ::core::ffi::c_void,
numLibraryOptions: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unloads a library
Unloads the library specified with \p library
@@ -2843,7 +2860,9 @@ int textureAlign
\sa ::cuLibraryLoadData,
::cuLibraryLoadFromFile,
::cuModuleUnload*/
- fn cuLibraryUnload(library: cuda_types::CUlibrary) -> cuda_types::CUresult;
+ fn cuLibraryUnload(
+ library: cuda_types::cuda::CUlibrary,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a kernel handle
Returns in \p pKernel the handle of the kernel with name \p name located in library \p library.
@@ -2868,10 +2887,10 @@ int textureAlign
::cuLibraryGetModule,
::cuModuleGetFunction*/
fn cuLibraryGetKernel(
- pKernel: *mut cuda_types::CUkernel,
- library: cuda_types::CUlibrary,
+ pKernel: *mut cuda_types::cuda::CUkernel,
+ library: cuda_types::cuda::CUlibrary,
name: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the number of kernels within a library
Returns in \p count the number of kernels in \p lib.
@@ -2885,8 +2904,8 @@ int textureAlign
::CUDA_ERROR_INVALID_VALUE*/
fn cuLibraryGetKernelCount(
count: *mut ::core::ffi::c_uint,
- lib: cuda_types::CUlibrary,
- ) -> cuda_types::CUresult;
+ lib: cuda_types::cuda::CUlibrary,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Retrieve the kernel handles within a library.
Returns in \p kernels a maximum number of \p numKernels kernel handles within \p lib.
@@ -2903,10 +2922,10 @@ int textureAlign
\sa ::cuLibraryGetKernelCount*/
fn cuLibraryEnumerateKernels(
- kernels: *mut cuda_types::CUkernel,
+ kernels: *mut cuda_types::cuda::CUkernel,
numKernels: ::core::ffi::c_uint,
- lib: cuda_types::CUlibrary,
- ) -> cuda_types::CUresult;
+ lib: cuda_types::cuda::CUlibrary,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a module handle
Returns in \p pMod the module handle associated with the current context located in
@@ -2930,9 +2949,9 @@ int textureAlign
::cuLibraryUnload,
::cuModuleGetFunction*/
fn cuLibraryGetModule(
- pMod: *mut cuda_types::CUmodule,
- library: cuda_types::CUlibrary,
- ) -> cuda_types::CUresult;
+ pMod: *mut cuda_types::cuda::CUmodule,
+ library: cuda_types::cuda::CUlibrary,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a function handle
Returns in \p pFunc the handle of the function for the requested kernel \p kernel and
@@ -2958,9 +2977,9 @@ int textureAlign
::cuLibraryGetModule,
::cuModuleGetFunction*/
fn cuKernelGetFunction(
- pFunc: *mut cuda_types::CUfunction,
- kernel: cuda_types::CUkernel,
- ) -> cuda_types::CUresult;
+ pFunc: *mut cuda_types::cuda::CUfunction,
+ kernel: cuda_types::cuda::CUkernel,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a global device pointer
Returns in \p *dptr and \p *bytes the base pointer and size of the global with
@@ -2990,11 +3009,11 @@ int textureAlign
::cuLibraryGetModule,
cuModuleGetGlobal*/
fn cuLibraryGetGlobal(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytes: *mut usize,
- library: cuda_types::CUlibrary,
+ library: cuda_types::cuda::CUlibrary,
name: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a pointer to managed memory
Returns in \p *dptr and \p *bytes the base pointer and size of the managed memory with
@@ -3024,11 +3043,11 @@ int textureAlign
::cuLibraryLoadFromFile,
::cuLibraryUnload*/
fn cuLibraryGetManaged(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytes: *mut usize,
- library: cuda_types::CUlibrary,
+ library: cuda_types::cuda::CUlibrary,
name: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a pointer to a unified function
Returns in \p *fptr the function pointer to a unified function denoted by \p symbol.
@@ -3053,9 +3072,9 @@ int textureAlign
::cuLibraryUnload*/
fn cuLibraryGetUnifiedFunction(
fptr: *mut *mut ::core::ffi::c_void,
- library: cuda_types::CUlibrary,
+ library: cuda_types::cuda::CUlibrary,
symbol: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns information about a kernel
Returns in \p *pi the integer value of the attribute \p attrib for the kernel
@@ -3142,10 +3161,10 @@ int textureAlign
::cuFuncGetAttribute*/
fn cuKernelGetAttribute(
pi: *mut ::core::ffi::c_int,
- attrib: cuda_types::CUfunction_attribute,
- kernel: cuda_types::CUkernel,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ attrib: cuda_types::cuda::CUfunction_attribute,
+ kernel: cuda_types::cuda::CUkernel,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets information about a kernel
This call sets the value of a specified attribute \p attrib on the kernel \p kernel
@@ -3221,11 +3240,11 @@ int textureAlign
::cuModuleGetFunction,
::cuFuncSetAttribute*/
fn cuKernelSetAttribute(
- attrib: cuda_types::CUfunction_attribute,
+ attrib: cuda_types::cuda::CUfunction_attribute,
val: ::core::ffi::c_int,
- kernel: cuda_types::CUkernel,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ kernel: cuda_types::cuda::CUkernel,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the preferred cache configuration for a device kernel.
On devices where the L1 cache and shared memory use the same hardware
@@ -3282,10 +3301,10 @@ int textureAlign
::cuCtxSetCacheConfig,
::cuLaunchKernel*/
fn cuKernelSetCacheConfig(
- kernel: cuda_types::CUkernel,
- config: cuda_types::CUfunc_cache,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ kernel: cuda_types::cuda::CUkernel,
+ config: cuda_types::cuda::CUfunc_cache,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the function name for a ::CUkernel handle
Returns in \p **name the function name associated with the kernel handle \p hfunc .
@@ -3305,8 +3324,8 @@ int textureAlign
*/
fn cuKernelGetName(
name: *mut *const ::core::ffi::c_char,
- hfunc: cuda_types::CUkernel,
- ) -> cuda_types::CUresult;
+ hfunc: cuda_types::cuda::CUkernel,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the offset and size of a kernel parameter in the device-side parameter layout
Queries the kernel parameter at \p paramIndex into \p kernel's list of parameters, and returns
@@ -3328,11 +3347,11 @@ int textureAlign
\sa ::cuFuncGetParamInfo*/
fn cuKernelGetParamInfo(
- kernel: cuda_types::CUkernel,
+ kernel: cuda_types::cuda::CUkernel,
paramIndex: usize,
paramOffset: *mut usize,
paramSize: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets free and total memory
Returns in \p *total the total amount of memory available to the the current context.
@@ -3372,7 +3391,10 @@ int textureAlign
::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaMemGetInfo*/
- fn cuMemGetInfo_v2(free: *mut usize, total: *mut usize) -> cuda_types::CUresult;
+ fn cuMemGetInfo_v2(
+ free: *mut usize,
+ total: *mut usize,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allocates device memory
Allocates \p bytesize bytes of linear memory on the device and returns in
@@ -3404,9 +3426,9 @@ int textureAlign
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaMalloc*/
fn cuMemAlloc_v2(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytesize: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allocates pitched device memory
Allocates at least \p WidthInBytes * \p Height bytes of linear memory on
@@ -3466,12 +3488,12 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaMallocPitch*/
fn cuMemAllocPitch_v2(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
pPitch: *mut usize,
WidthInBytes: usize,
Height: usize,
ElementSizeBytes: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Frees device memory
Frees the memory space pointed to by \p dptr, which must have been returned
@@ -3504,7 +3526,7 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaFree*/
- fn cuMemFree_v2(dptr: cuda_types::CUdeviceptr) -> cuda_types::CUresult;
+ fn cuMemFree_v2(dptr: cuda_types::cuda::CUdeviceptr) -> cuda_types::cuda::CUresult;
/** \brief Get information on memory allocations
Returns the base address in \p *pbase and size in \p *psize of the
@@ -3536,10 +3558,10 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32*/
fn cuMemGetAddressRange_v2(
- pbase: *mut cuda_types::CUdeviceptr,
+ pbase: *mut cuda_types::cuda::CUdeviceptr,
psize: *mut usize,
- dptr: cuda_types::CUdeviceptr,
- ) -> cuda_types::CUresult;
+ dptr: cuda_types::cuda::CUdeviceptr,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allocates page-locked host memory
Allocates \p bytesize bytes of host memory that is page-locked and
@@ -3590,7 +3612,7 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
fn cuMemAllocHost_v2(
pp: *mut *mut ::core::ffi::c_void,
bytesize: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Frees page-locked host memory
Frees the memory space pointed to by \p p, which must have been returned by
@@ -3617,7 +3639,7 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaFreeHost*/
- fn cuMemFreeHost(p: *mut ::core::ffi::c_void) -> cuda_types::CUresult;
+ fn cuMemFreeHost(p: *mut ::core::ffi::c_void) -> cuda_types::cuda::CUresult;
/** \brief Allocates page-locked host memory
Allocates \p bytesize bytes of host memory that is page-locked and accessible
@@ -3701,7 +3723,7 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
pp: *mut *mut ::core::ffi::c_void,
bytesize: usize,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Passes back device pointer of mapped pinned memory
Passes back the device pointer \p pdptr corresponding to the mapped, pinned
@@ -3752,10 +3774,10 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaHostGetDevicePointer*/
fn cuMemHostGetDevicePointer_v2(
- pdptr: *mut cuda_types::CUdeviceptr,
+ pdptr: *mut cuda_types::cuda::CUdeviceptr,
p: *mut ::core::ffi::c_void,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Passes back flags that were used for a pinned allocation
Passes back the flags \p pFlags that were specified when allocating
@@ -3782,7 +3804,7 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
fn cuMemHostGetFlags(
pFlags: *mut ::core::ffi::c_uint,
p: *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allocates memory that will be automatically managed by the Unified Memory system
Allocates \p bytesize bytes of managed memory on the device and returns in
@@ -3890,10 +3912,10 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuDeviceGetAttribute, ::cuStreamAttachMemAsync,
::cudaMallocManaged*/
fn cuMemAllocManaged(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytesize: usize,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Registers a callback function to receive async notifications
Registers \p callbackFunc to receive async notifications.
@@ -3928,11 +3950,11 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
\sa
::cuDeviceUnregisterAsyncNotification*/
fn cuDeviceRegisterAsyncNotification(
- device: cuda_types::CUdevice,
- callbackFunc: cuda_types::CUasyncCallback,
+ device: cuda_types::cuda::CUdevice,
+ callbackFunc: cuda_types::cuda::CUasyncCallback,
userData: *mut ::core::ffi::c_void,
- callback: *mut cuda_types::CUasyncCallbackHandle,
- ) -> cuda_types::CUresult;
+ callback: *mut cuda_types::cuda::CUasyncCallbackHandle,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unregisters an async notification callback
Unregisters \p callback so that the corresponding callback function will stop receiving
@@ -3953,9 +3975,9 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
\sa
::cuDeviceRegisterAsyncNotification*/
fn cuDeviceUnregisterAsyncNotification(
- device: cuda_types::CUdevice,
- callback: cuda_types::CUasyncCallbackHandle,
- ) -> cuda_types::CUresult;
+ device: cuda_types::cuda::CUdevice,
+ callback: cuda_types::cuda::CUasyncCallbackHandle,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a handle to a compute device
Returns in \p *device a device handle given a PCI bus ID string.
@@ -3982,9 +4004,9 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuDeviceGetPCIBusId,
::cudaDeviceGetByPCIBusId*/
fn cuDeviceGetByPCIBusId(
- dev: *mut cuda_types::CUdevice,
+ dev: *mut cuda_types::cuda::CUdevice,
pciBusId: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a PCI Bus Id string for the device
Returns an ASCII string identifying the device \p dev in the NULL-terminated
@@ -4016,8 +4038,8 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
fn cuDeviceGetPCIBusId(
pciBusId: *mut ::core::ffi::c_char,
len: ::core::ffi::c_int,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets an interprocess handle for a previously allocated event
Takes as input a previously allocated event. This event must have been
@@ -4062,9 +4084,9 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuIpcCloseMemHandle,
::cudaIpcGetEventHandle*/
fn cuIpcGetEventHandle(
- pHandle: *mut cuda_types::CUipcEventHandle,
- event: cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ pHandle: *mut cuda_types::cuda::CUipcEventHandle,
+ event: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Opens an interprocess event handle for use in the current process
Opens an interprocess event handle exported from another process with
@@ -4104,9 +4126,9 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuIpcCloseMemHandle,
::cudaIpcOpenEventHandle*/
fn cuIpcOpenEventHandle(
- phEvent: *mut cuda_types::CUevent,
- handle: cuda_types::CUipcEventHandle,
- ) -> cuda_types::CUresult;
+ phEvent: *mut cuda_types::cuda::CUevent,
+ handle: cuda_types::cuda::CUipcEventHandle,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets an interprocess memory handle for an existing device memory
allocation
@@ -4147,9 +4169,9 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuIpcCloseMemHandle,
::cudaIpcGetMemHandle*/
fn cuIpcGetMemHandle(
- pHandle: *mut cuda_types::CUipcMemHandle,
- dptr: cuda_types::CUdeviceptr,
- ) -> cuda_types::CUresult;
+ pHandle: *mut cuda_types::cuda::CUipcMemHandle,
+ dptr: cuda_types::cuda::CUdeviceptr,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Opens an interprocess memory handle exported from another process
and returns a device pointer usable in the local process.
@@ -4207,10 +4229,10 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuDeviceCanAccessPeer,
::cudaIpcOpenMemHandle*/
fn cuIpcOpenMemHandle_v2(
- pdptr: *mut cuda_types::CUdeviceptr,
- handle: cuda_types::CUipcMemHandle,
+ pdptr: *mut cuda_types::cuda::CUdeviceptr,
+ handle: cuda_types::cuda::CUipcMemHandle,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Attempts to close memory mapped with ::cuIpcOpenMemHandle
Decrements the reference count of the memory returned by ::cuIpcOpenMemHandle by 1.
@@ -4243,7 +4265,9 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuIpcGetMemHandle,
::cuIpcOpenMemHandle,
::cudaIpcCloseMemHandle*/
- fn cuIpcCloseMemHandle(dptr: cuda_types::CUdeviceptr) -> cuda_types::CUresult;
+ fn cuIpcCloseMemHandle(
+ dptr: cuda_types::cuda::CUdeviceptr,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Registers an existing host memory range for use by CUDA
Page-locks the memory range specified by \p p and \p bytesize and maps it
@@ -4335,7 +4359,7 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
p: *mut ::core::ffi::c_void,
bytesize: usize,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unregisters a memory range that was registered with cuMemHostRegister.
Unmaps the memory range whose base address is specified by \p p, and makes
@@ -4358,7 +4382,7 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
\sa
::cuMemHostRegister,
::cudaHostUnregister*/
- fn cuMemHostUnregister(p: *mut ::core::ffi::c_void) -> cuda_types::CUresult;
+ fn cuMemHostUnregister(p: *mut ::core::ffi::c_void) -> cuda_types::cuda::CUresult;
/** \brief Copies memory
Copies data between two pointers.
@@ -4396,10 +4420,10 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cudaMemcpyToSymbol,
::cudaMemcpyFromSymbol*/
fn cuMemcpy_ptds(
- dst: cuda_types::CUdeviceptr,
- src: cuda_types::CUdeviceptr,
+ dst: cuda_types::cuda::CUdeviceptr,
+ src: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies device memory between two contexts
Copies from device memory in one context to device memory in another
@@ -4427,12 +4451,12 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuMemcpy3DPeerAsync,
::cudaMemcpyPeer*/
fn cuMemcpyPeer_ptds(
- dstDevice: cuda_types::CUdeviceptr,
- dstContext: cuda_types::CUcontext,
- srcDevice: cuda_types::CUdeviceptr,
- srcContext: cuda_types::CUcontext,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ dstContext: cuda_types::cuda::CUcontext,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
+ srcContext: cuda_types::cuda::CUcontext,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Host to Device
Copies from host memory to device memory. \p dstDevice and \p srcHost are
@@ -4466,10 +4490,10 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cudaMemcpy,
::cudaMemcpyToSymbol*/
fn cuMemcpyHtoD_v2_ptds(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Device to Host
Copies from device to host memory. \p dstHost and \p srcDevice specify the
@@ -4504,9 +4528,9 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cudaMemcpyFromSymbol*/
fn cuMemcpyDtoH_v2_ptds(
dstHost: *mut ::core::ffi::c_void,
- srcDevice: cuda_types::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Device to Device
Copies from device memory to device memory. \p dstDevice and \p srcDevice
@@ -4540,10 +4564,10 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cudaMemcpyToSymbol,
::cudaMemcpyFromSymbol*/
fn cuMemcpyDtoD_v2_ptds(
- dstDevice: cuda_types::CUdeviceptr,
- srcDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Device to Array
Copies from device memory to a 1D CUDA array. \p dstArray and \p dstOffset
@@ -4577,11 +4601,11 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaMemcpyToArray*/
fn cuMemcpyDtoA_v2_ptds(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
- srcDevice: cuda_types::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Array to Device
Copies from one 1D CUDA array to device memory. \p dstDevice specifies the
@@ -4617,11 +4641,11 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaMemcpyFromArray*/
fn cuMemcpyAtoD_v2_ptds(
- dstDevice: cuda_types::CUdeviceptr,
- srcArray: cuda_types::CUarray,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Host to Array
Copies from host memory to a 1D CUDA array. \p dstArray and \p dstOffset
@@ -4656,11 +4680,11 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaMemcpyToArray*/
fn cuMemcpyHtoA_v2_ptds(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Array to Host
Copies from one 1D CUDA array to host memory. \p dstHost specifies the base
@@ -4696,10 +4720,10 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cudaMemcpyFromArray*/
fn cuMemcpyAtoH_v2_ptds(
dstHost: *mut ::core::ffi::c_void,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Array to Array
Copies from one 1D CUDA array to another. \p dstArray and \p srcArray
@@ -4737,12 +4761,12 @@ T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaMemcpyArrayToArray*/
fn cuMemcpyAtoA_v2_ptds(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory for 2D arrays
Perform a 2D memory copy according to the parameters specified in \p pCopy.
@@ -4904,8 +4928,8 @@ CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes;
::cudaMemcpy2DToArray,
::cudaMemcpy2DFromArray*/
fn cuMemcpy2D_v2_ptds(
- pCopy: *const cuda_types::CUDA_MEMCPY2D,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory for 2D arrays
Perform a 2D memory copy according to the parameters specified in \p pCopy.
@@ -5065,8 +5089,8 @@ CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes;
::cudaMemcpy2DToArray,
::cudaMemcpy2DFromArray*/
fn cuMemcpy2DUnaligned_v2_ptds(
- pCopy: *const cuda_types::CUDA_MEMCPY2D,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory for 3D arrays
Perform a 3D memory copy according to the parameters specified in
@@ -5233,8 +5257,8 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaMemcpy3D*/
fn cuMemcpy3D_v2_ptds(
- pCopy: *const cuda_types::CUDA_MEMCPY3D,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory between contexts
Perform a 3D memory copy according to the parameters specified in
@@ -5256,8 +5280,8 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemcpy3DPeerAsync,
::cudaMemcpy3DPeer*/
fn cuMemcpy3DPeer_ptds(
- pCopy: *const cuda_types::CUDA_MEMCPY3D_PEER,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D_PEER,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory asynchronously
Copies data between two pointers.
@@ -5300,11 +5324,11 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cudaMemcpyToSymbolAsync,
::cudaMemcpyFromSymbolAsync*/
fn cuMemcpyAsync_ptsz(
- dst: cuda_types::CUdeviceptr,
- src: cuda_types::CUdeviceptr,
+ dst: cuda_types::cuda::CUdeviceptr,
+ src: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies device memory between two contexts asynchronously.
Copies from device memory in one context to device memory in another
@@ -5335,13 +5359,13 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemcpy3DPeerAsync,
::cudaMemcpyPeerAsync*/
fn cuMemcpyPeerAsync_ptsz(
- dstDevice: cuda_types::CUdeviceptr,
- dstContext: cuda_types::CUcontext,
- srcDevice: cuda_types::CUdeviceptr,
- srcContext: cuda_types::CUcontext,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ dstContext: cuda_types::cuda::CUcontext,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
+ srcContext: cuda_types::cuda::CUcontext,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Host to Device
Copies from host memory to device memory. \p dstDevice and \p srcHost are
@@ -5380,11 +5404,11 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cudaMemcpyAsync,
::cudaMemcpyToSymbolAsync*/
fn cuMemcpyHtoDAsync_v2_ptsz(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Device to Host
Copies from device to host memory. \p dstHost and \p srcDevice specify the
@@ -5424,10 +5448,10 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cudaMemcpyFromSymbolAsync*/
fn cuMemcpyDtoHAsync_v2_ptsz(
dstHost: *mut ::core::ffi::c_void,
- srcDevice: cuda_types::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
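The `_ptsz` suffix marks the per-thread-default-stream variants of these async copy entry points. As a rough illustration of how the HtoD and DtoH declarations above combine into a host-to-device round trip, here is a hypothetical Rust caller written as if these externs were linked directly (in the real crate this file is consumed by a proc macro; error handling is elided):

unsafe fn roundtrip(
    dptr: cuda_types::cuda::CUdeviceptr, // device buffer of at least LEN bytes
    hstream: cuda_types::cuda::CUstream,
) {
    const LEN: usize = 4096;
    let src = vec![0xABu8; LEN];
    let mut dst = vec![0u8; LEN];
    // Stage host data into device memory on the stream...
    let _ = cuMemcpyHtoDAsync_v2_ptsz(dptr, src.as_ptr().cast(), LEN, hstream);
    // ...and read it back; both transfers are ordered by `hstream`.
    let _ = cuMemcpyDtoHAsync_v2_ptsz(dst.as_mut_ptr().cast(), dptr, LEN, hstream);
    // The host buffers must stay alive until the stream has drained.
    let _ = cuStreamSynchronize_ptsz(hstream);
    assert_eq!(src, dst);
}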
/** \brief Copies memory from Device to Device
Copies from device memory to device memory. \p dstDevice and \p srcDevice
@@ -5466,11 +5490,11 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cudaMemcpyToSymbolAsync,
::cudaMemcpyFromSymbolAsync*/
fn cuMemcpyDtoDAsync_v2_ptsz(
- dstDevice: cuda_types::CUdeviceptr,
- srcDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Host to Array
Copies from host memory to a 1D CUDA array. \p dstArray and \p dstOffset
@@ -5510,12 +5534,12 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemcpyToArrayAsync*/
fn cuMemcpyHtoAAsync_v2_ptsz(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory from Array to Host
Copies from one 1D CUDA array to host memory. \p dstHost specifies the base
@@ -5556,11 +5580,11 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cudaMemcpyFromArrayAsync*/
fn cuMemcpyAtoHAsync_v2_ptsz(
dstHost: *mut ::core::ffi::c_void,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory for 2D arrays
Perform a 2D memory copy according to the parameters specified in \p pCopy.
@@ -5727,9 +5751,9 @@ CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes;
::cudaMemcpy2DToArrayAsync,
::cudaMemcpy2DFromArrayAsync*/
fn cuMemcpy2DAsync_v2_ptsz(
- pCopy: *const cuda_types::CUDA_MEMCPY2D,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory for 3D arrays
Perform a 3D memory copy according to the parameters specified in
@@ -5901,9 +5925,9 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemcpy3DAsync*/
fn cuMemcpy3DAsync_v2_ptsz(
- pCopy: *const cuda_types::CUDA_MEMCPY3D,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies memory between contexts asynchronously.
Perform a 3D memory copy according to the parameters specified in
@@ -5927,9 +5951,9 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemcpy3DPeerAsync,
::cudaMemcpy3DPeerAsync*/
fn cuMemcpy3DPeerAsync_ptsz(
- pCopy: *const cuda_types::CUDA_MEMCPY3D_PEER,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D_PEER,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Initializes device memory
Sets the memory range of \p N 8-bit values to the specified value
@@ -5962,10 +5986,10 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemset*/
fn cuMemsetD8_v2_ptds(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
uc: ::core::ffi::c_uchar,
N: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Initializes device memory
Sets the memory range of \p N 16-bit values to the specified value
@@ -5998,10 +6022,10 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemset*/
fn cuMemsetD16_v2_ptds(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
us: ::core::ffi::c_ushort,
N: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Initializes device memory
Sets the memory range of \p N 32-bit values to the specified value
@@ -6034,10 +6058,10 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32Async,
::cudaMemset*/
fn cuMemsetD32_v2_ptds(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
ui: ::core::ffi::c_uint,
N: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Initializes device memory
Sets the 2D memory range of \p Width 8-bit values to the specified value
@@ -6075,12 +6099,12 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemset2D*/
fn cuMemsetD2D8_v2_ptds(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
uc: ::core::ffi::c_uchar,
Width: usize,
Height: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Initializes device memory
Sets the 2D memory range of \p Width 16-bit values to the specified value
@@ -6119,12 +6143,12 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemset2D*/
fn cuMemsetD2D16_v2_ptds(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
us: ::core::ffi::c_ushort,
Width: usize,
Height: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Initializes device memory
Sets the 2D memory range of \p Width 32-bit values to the specified value
@@ -6163,12 +6187,12 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemset2D*/
fn cuMemsetD2D32_v2_ptds(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
ui: ::core::ffi::c_uint,
Width: usize,
Height: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets device memory
Sets the memory range of \p N 8-bit values to the specified value
@@ -6203,11 +6227,11 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemsetAsync*/
fn cuMemsetD8Async_ptsz(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
uc: ::core::ffi::c_uchar,
N: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets device memory
Sets the memory range of \p N 16-bit values to the specified value
@@ -6242,11 +6266,11 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemsetAsync*/
fn cuMemsetD16Async_ptsz(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
us: ::core::ffi::c_ushort,
N: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets device memory
Sets the memory range of \p N 32-bit values to the specified value
@@ -6280,11 +6304,11 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, ::cuMemsetD32,
::cudaMemsetAsync*/
fn cuMemsetD32Async_ptsz(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
ui: ::core::ffi::c_uint,
N: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets device memory
Sets the 2D memory range of \p Width 8-bit values to the specified value
@@ -6324,13 +6348,13 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemset2DAsync*/
fn cuMemsetD2D8Async_ptsz(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
uc: ::core::ffi::c_uchar,
Width: usize,
Height: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets device memory
Sets the 2D memory range of \p Width 16-bit values to the specified value
@@ -6371,13 +6395,13 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemset2DAsync*/
fn cuMemsetD2D16Async_ptsz(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
us: ::core::ffi::c_ushort,
Width: usize,
Height: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets device memory
Sets the 2D memory range of \p Width 32-bit values to the specified value
@@ -6418,13 +6442,13 @@ CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
::cuMemsetD32, ::cuMemsetD32Async,
::cudaMemset2DAsync*/
fn cuMemsetD2D32Async_ptsz(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
ui: ::core::ffi::c_uint,
Width: usize,
Height: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
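The D2D* setters operate on pitched allocations: \p dstPitch is the byte stride between rows, while \p Width counts elements of the given width. A hypothetical sketch pairing the 32-bit async variant above with ::cuMemAllocPitch_v2 and ::cuMemFree_v2 (declared elsewhere in this file); a null stream stands in for the per-thread default stream, and error handling is elided:

unsafe fn clear_pitched_u32(width: usize, height: usize) {
    let mut dptr: cuda_types::cuda::CUdeviceptr = core::mem::zeroed();
    let mut pitch: usize = 0;
    // Pitched allocation: the driver may pad each row, so keep `pitch`.
    let _ = cuMemAllocPitch_v2(&mut dptr, &mut pitch, width * 4, height, 4);
    // Width is given in 32-bit elements, the pitch in bytes.
    let stream: cuda_types::cuda::CUstream = core::mem::zeroed(); // per-thread default stream
    let _ = cuMemsetD2D32Async_ptsz(dptr, pitch, 0xDEAD_BEEF, width, height, stream);
    let _ = cuStreamSynchronize_ptsz(stream);
    let _ = cuMemFree_v2(dptr);
}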
/** \brief Creates a 1D or 2D CUDA array
Creates a CUDA array according to the ::CUDA_ARRAY_DESCRIPTOR structure
@@ -6526,9 +6550,9 @@ desc.Height = height;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaMallocArray*/
fn cuArrayCreate_v2(
- pHandle: *mut cuda_types::CUarray,
- pAllocateArray: *const cuda_types::CUDA_ARRAY_DESCRIPTOR,
- ) -> cuda_types::CUresult;
+ pHandle: *mut cuda_types::cuda::CUarray,
+ pAllocateArray: *const cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Get a 1D or 2D CUDA array descriptor
Returns in \p *pArrayDescriptor a descriptor containing information on the
@@ -6560,9 +6584,9 @@ desc.Height = height;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaArrayGetInfo*/
fn cuArrayGetDescriptor_v2(
- pArrayDescriptor: *mut cuda_types::CUDA_ARRAY_DESCRIPTOR,
- hArray: cuda_types::CUarray,
- ) -> cuda_types::CUresult;
+ pArrayDescriptor: *mut cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR,
+ hArray: cuda_types::cuda::CUarray,
+ ) -> cuda_types::cuda::CUresult;
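Since ::CUDA_ARRAY_DESCRIPTOR is a plain output struct here, querying an existing array reduces to a single call. A hypothetical helper returning the dimensions (field names follow the C struct; error handling elided):

unsafe fn array_extent(h_array: cuda_types::cuda::CUarray) -> (usize, usize) {
    let mut desc: cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR = core::mem::zeroed();
    let _ = cuArrayGetDescriptor_v2(&mut desc, h_array);
    // Width/Height are reported in elements, matching the creation descriptor.
    (desc.Width, desc.Height)
}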
/** \brief Returns the layout properties of a sparse CUDA array
Returns the layout properties of a sparse CUDA array in \p sparseProperties
@@ -6584,9 +6608,9 @@ desc.Height = height;
\param[in] array - CUDA array to get the sparse properties of
\sa ::cuMipmappedArrayGetSparseProperties, ::cuMemMapArrayAsync*/
fn cuArrayGetSparseProperties(
- sparseProperties: *mut cuda_types::CUDA_ARRAY_SPARSE_PROPERTIES,
- array: cuda_types::CUarray,
- ) -> cuda_types::CUresult;
+ sparseProperties: *mut cuda_types::cuda::CUDA_ARRAY_SPARSE_PROPERTIES,
+ array: cuda_types::cuda::CUarray,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the layout properties of a sparse CUDA mipmapped array
Returns the sparse array layout properties in \p sparseProperties
@@ -6609,9 +6633,9 @@ desc.Height = height;
\param[in] mipmap - CUDA mipmapped array to get the sparse properties of
\sa ::cuArrayGetSparseProperties, ::cuMemMapArrayAsync*/
fn cuMipmappedArrayGetSparseProperties(
- sparseProperties: *mut cuda_types::CUDA_ARRAY_SPARSE_PROPERTIES,
- mipmap: cuda_types::CUmipmappedArray,
- ) -> cuda_types::CUresult;
+ sparseProperties: *mut cuda_types::cuda::CUDA_ARRAY_SPARSE_PROPERTIES,
+ mipmap: cuda_types::cuda::CUmipmappedArray,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the memory requirements of a CUDA array
Returns the memory requirements of a CUDA array in \p memoryRequirements
@@ -6632,10 +6656,10 @@ desc.Height = height;
\param[in] device - Device to get the memory requirements for
\sa ::cuMipmappedArrayGetMemoryRequirements, ::cuMemMapArrayAsync*/
fn cuArrayGetMemoryRequirements(
- memoryRequirements: *mut cuda_types::CUDA_ARRAY_MEMORY_REQUIREMENTS,
- array: cuda_types::CUarray,
- device: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ memoryRequirements: *mut cuda_types::cuda::CUDA_ARRAY_MEMORY_REQUIREMENTS,
+ array: cuda_types::cuda::CUarray,
+ device: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the memory requirements of a CUDA mipmapped array
Returns the memory requirements of a CUDA mipmapped array in \p memoryRequirements
@@ -6657,10 +6681,10 @@ desc.Height = height;
\param[in] device - Device to get the memory requirements for
\sa ::cuArrayGetMemoryRequirements, ::cuMemMapArrayAsync*/
fn cuMipmappedArrayGetMemoryRequirements(
- memoryRequirements: *mut cuda_types::CUDA_ARRAY_MEMORY_REQUIREMENTS,
- mipmap: cuda_types::CUmipmappedArray,
- device: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ memoryRequirements: *mut cuda_types::cuda::CUDA_ARRAY_MEMORY_REQUIREMENTS,
+ mipmap: cuda_types::cuda::CUmipmappedArray,
+ device: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets a CUDA array plane from a CUDA array
Returns in \p pPlaneArray a CUDA array that represents a single format plane
@@ -6691,10 +6715,10 @@ desc.Height = height;
::cuArrayCreate,
::cudaArrayGetPlane*/
fn cuArrayGetPlane(
- pPlaneArray: *mut cuda_types::CUarray,
- hArray: cuda_types::CUarray,
+ pPlaneArray: *mut cuda_types::cuda::CUarray,
+ hArray: cuda_types::cuda::CUarray,
planeIdx: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys a CUDA array
Destroys the CUDA array \p hArray.
@@ -6722,7 +6746,7 @@ desc.Height = height;
::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaFreeArray*/
- fn cuArrayDestroy(hArray: cuda_types::CUarray) -> cuda_types::CUresult;
+ fn cuArrayDestroy(hArray: cuda_types::cuda::CUarray) -> cuda_types::cuda::CUresult;
/** \brief Creates a 3D CUDA array
Creates a CUDA array according to the ::CUDA_ARRAY3D_DESCRIPTOR structure
@@ -6900,9 +6924,9 @@ desc.Depth = depth;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaMalloc3DArray*/
fn cuArray3DCreate_v2(
- pHandle: *mut cuda_types::CUarray,
- pAllocateArray: *const cuda_types::CUDA_ARRAY3D_DESCRIPTOR,
- ) -> cuda_types::CUresult;
+ pHandle: *mut cuda_types::cuda::CUarray,
+ pAllocateArray: *const cuda_types::cuda::CUDA_ARRAY3D_DESCRIPTOR,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Get a 3D CUDA array descriptor
Returns in \p *pArrayDescriptor a descriptor containing information on the
@@ -6938,9 +6962,9 @@ desc.Depth = depth;
::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
::cudaArrayGetInfo*/
fn cuArray3DGetDescriptor_v2(
- pArrayDescriptor: *mut cuda_types::CUDA_ARRAY3D_DESCRIPTOR,
- hArray: cuda_types::CUarray,
- ) -> cuda_types::CUresult;
+ pArrayDescriptor: *mut cuda_types::cuda::CUDA_ARRAY3D_DESCRIPTOR,
+ hArray: cuda_types::cuda::CUarray,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a CUDA mipmapped array
Creates a CUDA mipmapped array according to the ::CUDA_ARRAY3D_DESCRIPTOR structure
@@ -7080,10 +7104,10 @@ CU_AD_FORMAT_FLOAT = 0x20
::cuArrayCreate,
::cudaMallocMipmappedArray*/
fn cuMipmappedArrayCreate(
- pHandle: *mut cuda_types::CUmipmappedArray,
- pMipmappedArrayDesc: *const cuda_types::CUDA_ARRAY3D_DESCRIPTOR,
+ pHandle: *mut cuda_types::cuda::CUmipmappedArray,
+ pMipmappedArrayDesc: *const cuda_types::cuda::CUDA_ARRAY3D_DESCRIPTOR,
numMipmapLevels: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets a mipmap level of a CUDA mipmapped array
Returns in \p *pLevelArray a CUDA array that represents a single mipmap level
@@ -7111,10 +7135,10 @@ CU_AD_FORMAT_FLOAT = 0x20
::cuArrayCreate,
::cudaGetMipmappedArrayLevel*/
fn cuMipmappedArrayGetLevel(
- pLevelArray: *mut cuda_types::CUarray,
- hMipmappedArray: cuda_types::CUmipmappedArray,
+ pLevelArray: *mut cuda_types::cuda::CUarray,
+ hMipmappedArray: cuda_types::cuda::CUmipmappedArray,
level: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys a CUDA mipmapped array
Destroys the CUDA mipmapped array \p hMipmappedArray.
@@ -7137,8 +7161,8 @@ CU_AD_FORMAT_FLOAT = 0x20
::cuArrayCreate,
::cudaFreeMipmappedArray*/
fn cuMipmappedArrayDestroy(
- hMipmappedArray: cuda_types::CUmipmappedArray,
- ) -> cuda_types::CUresult;
+ hMipmappedArray: cuda_types::cuda::CUmipmappedArray,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Retrieve handle for an address range
Get a handle of the specified type to an address range. The address range
@@ -7172,11 +7196,11 @@ CU_AD_FORMAT_FLOAT = 0x20
CUDA_ERROR_NOT_SUPPORTED*/
fn cuMemGetHandleForAddressRange(
handle: *mut ::core::ffi::c_void,
- dptr: cuda_types::CUdeviceptr,
+ dptr: cuda_types::cuda::CUdeviceptr,
size: usize,
- handleType: cuda_types::CUmemRangeHandleType,
+ handleType: cuda_types::cuda::CUmemRangeHandleType,
flags: ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allocate an address range reservation.
Reserves a virtual address range based on the given parameters, giving
@@ -7201,12 +7225,12 @@ CU_AD_FORMAT_FLOAT = 0x20
\sa ::cuMemAddressFree*/
fn cuMemAddressReserve(
- ptr: *mut cuda_types::CUdeviceptr,
+ ptr: *mut cuda_types::cuda::CUdeviceptr,
size: usize,
alignment: usize,
- addr: cuda_types::CUdeviceptr,
+ addr: cuda_types::cuda::CUdeviceptr,
flags: ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Free an address range reservation.
Frees a virtual address range reserved by cuMemAddressReserve. The size
@@ -7225,9 +7249,9 @@ CU_AD_FORMAT_FLOAT = 0x20
\sa ::cuMemAddressReserve*/
fn cuMemAddressFree(
- ptr: cuda_types::CUdeviceptr,
+ ptr: cuda_types::cuda::CUdeviceptr,
size: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;

/** \brief Create a CUDA memory handle representing a memory allocation of a given size described by the given properties
This creates a memory allocation on the target device specified through the
@@ -7280,11 +7304,11 @@ CU_AD_FORMAT_FLOAT = 0x20
\sa ::cuMemRelease, ::cuMemExportToShareableHandle, ::cuMemImportFromShareableHandle*/
fn cuMemCreate(
- handle: *mut cuda_types::CUmemGenericAllocationHandle,
+ handle: *mut cuda_types::cuda::CUmemGenericAllocationHandle,
size: usize,
- prop: *const cuda_types::CUmemAllocationProp,
+ prop: *const cuda_types::cuda::CUmemAllocationProp,
flags: ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Release a memory handle representing a memory allocation which was previously allocated through cuMemCreate.
Frees the memory that was allocated on a device through cuMemCreate.
@@ -7309,8 +7333,8 @@ CU_AD_FORMAT_FLOAT = 0x20
\sa ::cuMemCreate*/
fn cuMemRelease(
- handle: cuda_types::CUmemGenericAllocationHandle,
- ) -> cuda_types::CUresult;
+ handle: cuda_types::cuda::CUmemGenericAllocationHandle,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Maps an allocation handle to a reserved virtual address range.
Maps bytes of memory represented by \p handle starting from byte \p offset to
@@ -7358,12 +7382,12 @@ CU_AD_FORMAT_FLOAT = 0x20
\sa ::cuMemUnmap, ::cuMemSetAccess, ::cuMemCreate, ::cuMemAddressReserve, ::cuMemImportFromShareableHandle*/
fn cuMemMap(
- ptr: cuda_types::CUdeviceptr,
+ ptr: cuda_types::cuda::CUdeviceptr,
size: usize,
offset: usize,
- handle: cuda_types::CUmemGenericAllocationHandle,
+ handle: cuda_types::cuda::CUmemGenericAllocationHandle,
flags: ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Maps or unmaps subregions of sparse CUDA arrays and sparse CUDA mipmapped arrays
Performs map or unmap operations on subregions of sparse CUDA arrays and sparse CUDA mipmapped arrays.
@@ -7496,10 +7520,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMipmappedArrayCreate, ::cuArrayCreate, ::cuArray3DCreate, ::cuMemCreate, ::cuArrayGetSparseProperties, ::cuMipmappedArrayGetSparseProperties*/
fn cuMemMapArrayAsync_ptsz(
- mapInfoList: *mut cuda_types::CUarrayMapInfo,
+ mapInfoList: *mut cuda_types::cuda::CUarrayMapInfo,
count: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unmap the backing memory of a given address range.
The range must be the entire contiguous address range that was mapped to. In
@@ -7525,7 +7549,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\note_sync
\sa ::cuMemCreate, ::cuMemAddressReserve*/
- fn cuMemUnmap(ptr: cuda_types::CUdeviceptr, size: usize) -> cuda_types::CUresult;
+ fn cuMemUnmap(
+ ptr: cuda_types::cuda::CUdeviceptr,
+ size: usize,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Set the access flags for each location specified in \p desc for the given virtual address range
Given the virtual address range via \p ptr and \p size, and the locations
@@ -7557,11 +7584,11 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemSetAccess, ::cuMemCreate, ::cuMemMap*/
fn cuMemSetAccess(
- ptr: cuda_types::CUdeviceptr,
+ ptr: cuda_types::cuda::CUdeviceptr,
size: usize,
- desc: *const cuda_types::CUmemAccessDesc,
+ desc: *const cuda_types::cuda::CUmemAccessDesc,
count: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
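Taken together, ::cuMemCreate, ::cuMemAddressReserve, ::cuMemMap and ::cuMemSetAccess form the usual virtual-memory-management flow. A hypothetical end-to-end sketch: the allocation properties and access descriptor are passed in because their construction is outside this hunk, `size` is assumed to already be a multiple of the allocation granularity (see ::cuMemGetAllocationGranularity below), and error handling is elided:

unsafe fn vmm_roundtrip(
    size: usize,
    prop: *const cuda_types::cuda::CUmemAllocationProp,
    access: *const cuda_types::cuda::CUmemAccessDesc,
) {
    let mut handle: cuda_types::cuda::CUmemGenericAllocationHandle = core::mem::zeroed();
    let mut va: cuda_types::cuda::CUdeviceptr = core::mem::zeroed();
    // Physical backing first, then a virtual range to put it in (no fixed address).
    let _ = cuMemCreate(&mut handle, size, prop, 0);
    let _ = cuMemAddressReserve(&mut va, size, 0, core::mem::zeroed(), 0);
    // Map the whole handle at offset 0 and make it accessible per `access`.
    let _ = cuMemMap(va, size, 0, handle, 0);
    let _ = cuMemSetAccess(va, size, access, 1);
    // ... use `va` ...
    // Teardown mirrors the setup in reverse order.
    let _ = cuMemUnmap(va, size);
    let _ = cuMemAddressFree(va, size);
    let _ = cuMemRelease(handle);
}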
/** \brief Get the access \p flags set for the given \p location and \p ptr
\param[out] flags - Flags set for this location
@@ -7579,9 +7606,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemSetAccess*/
fn cuMemGetAccess(
flags: *mut ::core::ffi::c_ulonglong,
- location: *const cuda_types::CUmemLocation,
- ptr: cuda_types::CUdeviceptr,
- ) -> cuda_types::CUresult;
+ location: *const cuda_types::cuda::CUmemLocation,
+ ptr: cuda_types::cuda::CUdeviceptr,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Exports an allocation to a requested shareable handle type
Given a CUDA memory handle, create a shareable memory
@@ -7613,10 +7640,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemImportFromShareableHandle*/
fn cuMemExportToShareableHandle(
shareableHandle: *mut ::core::ffi::c_void,
- handle: cuda_types::CUmemGenericAllocationHandle,
- handleType: cuda_types::CUmemAllocationHandleType,
+ handle: cuda_types::cuda::CUmemGenericAllocationHandle,
+ handleType: cuda_types::cuda::CUmemAllocationHandleType,
flags: ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Imports an allocation from a requested shareable handle type.
If the current process cannot support the memory described by this shareable
@@ -7645,10 +7672,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemExportToShareableHandle, ::cuMemMap, ::cuMemRelease*/
fn cuMemImportFromShareableHandle(
- handle: *mut cuda_types::CUmemGenericAllocationHandle,
+ handle: *mut cuda_types::cuda::CUmemGenericAllocationHandle,
osHandle: *mut ::core::ffi::c_void,
- shHandleType: cuda_types::CUmemAllocationHandleType,
- ) -> cuda_types::CUresult;
+ shHandleType: cuda_types::cuda::CUmemAllocationHandleType,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Calculates either the minimal or recommended granularity
Calculates either the minimal or recommended granularity
@@ -7669,9 +7696,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemCreate, ::cuMemMap*/
fn cuMemGetAllocationGranularity(
granularity: *mut usize,
- prop: *const cuda_types::CUmemAllocationProp,
- option: cuda_types::CUmemAllocationGranularity_flags,
- ) -> cuda_types::CUresult;
+ prop: *const cuda_types::cuda::CUmemAllocationProp,
+ option: cuda_types::cuda::CUmemAllocationGranularity_flags,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Retrieve the contents of the property structure defining properties for this handle
\param[out] prop - Pointer to a properties structure which will hold the information about this handle
@@ -7686,9 +7713,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemCreate, ::cuMemImportFromShareableHandle*/
fn cuMemGetAllocationPropertiesFromHandle(
- prop: *mut cuda_types::CUmemAllocationProp,
- handle: cuda_types::CUmemGenericAllocationHandle,
- ) -> cuda_types::CUresult;
+ prop: *mut cuda_types::cuda::CUmemAllocationProp,
+ handle: cuda_types::cuda::CUmemGenericAllocationHandle,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Given an address \p addr, returns the allocation handle of the backing memory allocation.
The handle is guaranteed to be the same handle value used to map the memory. If the address
@@ -7710,9 +7737,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemCreate, ::cuMemRelease, ::cuMemMap*/
fn cuMemRetainAllocationHandle(
- handle: *mut cuda_types::CUmemGenericAllocationHandle,
+ handle: *mut cuda_types::cuda::CUmemGenericAllocationHandle,
addr: *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Frees memory with stream ordered semantics
Inserts a free operation into \p hStream.
@@ -7732,9 +7759,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::CUDA_ERROR_INVALID_CONTEXT (default stream specified with no current context),
::CUDA_ERROR_NOT_SUPPORTED*/
fn cuMemFreeAsync_ptsz(
- dptr: cuda_types::CUdeviceptr,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ dptr: cuda_types::cuda::CUdeviceptr,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allocates memory with stream ordered semantics
Inserts an allocation operation into \p hStream.
@@ -7765,10 +7792,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuDeviceGetDefaultMemPool, ::cuDeviceGetMemPool, ::cuMemPoolCreate,
::cuMemPoolSetAccess, ::cuMemPoolSetAttribute*/
fn cuMemAllocAsync_ptsz(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytesize: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
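::cuMemAllocAsync and ::cuMemFreeAsync are ordered against the stream like any other work, so allocation, use and release can all be enqueued back to back. A hypothetical sketch against the per-thread-stream variants declared here (error handling elided):

unsafe fn scratch_on_stream(bytes: usize, stream: cuda_types::cuda::CUstream) {
    let mut dptr: cuda_types::cuda::CUdeviceptr = core::mem::zeroed();
    // The allocation only becomes usable once the stream reaches this point.
    let _ = cuMemAllocAsync_ptsz(&mut dptr, bytes, stream);
    let _ = cuMemsetD8Async_ptsz(dptr, 0, bytes, stream);
    // Freeing is also stream-ordered; no synchronization is needed in between.
    let _ = cuMemFreeAsync_ptsz(dptr, stream);
    let _ = cuStreamSynchronize_ptsz(stream);
}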
/** \brief Tries to release memory back to the OS
Releases memory back to the OS until the pool contains fewer than minBytesToKeep
@@ -7792,9 +7819,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool,
::cuDeviceGetMemPool, ::cuMemPoolCreate*/
fn cuMemPoolTrimTo(
- pool: cuda_types::CUmemoryPool,
+ pool: cuda_types::cuda::CUmemoryPool,
minBytesToKeep: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets attributes of a memory pool
Supported attributes are:
@@ -7835,10 +7862,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool,
::cuDeviceGetMemPool, ::cuMemPoolCreate*/
fn cuMemPoolSetAttribute(
- pool: cuda_types::CUmemoryPool,
- attr: cuda_types::CUmemPool_attribute,
+ pool: cuda_types::cuda::CUmemoryPool,
+ attr: cuda_types::cuda::CUmemPool_attribute,
value: *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets attributes of a memory pool
Supported attributes are:
@@ -7883,10 +7910,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool,
::cuDeviceGetMemPool, ::cuMemPoolCreate*/
fn cuMemPoolGetAttribute(
- pool: cuda_types::CUmemoryPool,
- attr: cuda_types::CUmemPool_attribute,
+ pool: cuda_types::cuda::CUmemoryPool,
+ attr: cuda_types::cuda::CUmemPool_attribute,
value: *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Controls visibility of pools between devices
\param[in] pool - The pool being modified
@@ -7901,10 +7928,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool,
::cuDeviceGetMemPool, ::cuMemPoolCreate*/
fn cuMemPoolSetAccess(
- pool: cuda_types::CUmemoryPool,
- map: *const cuda_types::CUmemAccessDesc,
+ pool: cuda_types::cuda::CUmemoryPool,
+ map: *const cuda_types::cuda::CUmemAccessDesc,
count: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the accessibility of a pool from a device
Returns the accessibility of the pool's memory from the specified location.
@@ -7916,10 +7943,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool,
::cuDeviceGetMemPool, ::cuMemPoolCreate*/
fn cuMemPoolGetAccess(
- flags: *mut cuda_types::CUmemAccess_flags,
- memPool: cuda_types::CUmemoryPool,
- location: *mut cuda_types::CUmemLocation,
- ) -> cuda_types::CUresult;
+ flags: *mut cuda_types::cuda::CUmemAccess_flags,
+ memPool: cuda_types::cuda::CUmemoryPool,
+ location: *mut cuda_types::cuda::CUmemLocation,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a memory pool
Creates a CUDA memory pool and returns the handle in \p pool. The \p poolProps determines
@@ -7958,9 +7985,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuDeviceSetMemPool, ::cuDeviceGetMemPool, ::cuDeviceGetDefaultMemPool,
::cuMemAllocFromPoolAsync, ::cuMemPoolExportToShareableHandle*/
fn cuMemPoolCreate(
- pool: *mut cuda_types::CUmemoryPool,
- poolProps: *const cuda_types::CUmemPoolProps,
- ) -> cuda_types::CUresult;
+ pool: *mut cuda_types::cuda::CUmemoryPool,
+ poolProps: *const cuda_types::cuda::CUmemPoolProps,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys the specified memory pool
If any pointers obtained from this pool haven't been freed or
@@ -7980,7 +8007,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemFreeAsync, ::cuDeviceSetMemPool, ::cuDeviceGetMemPool,
::cuDeviceGetDefaultMemPool, ::cuMemPoolCreate*/
- fn cuMemPoolDestroy(pool: cuda_types::CUmemoryPool) -> cuda_types::CUresult;
+ fn cuMemPoolDestroy(
+ pool: cuda_types::cuda::CUmemoryPool,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allocates memory from a specified pool with stream ordered semantics.
Inserts an allocation operation into \p hStream.
@@ -8016,11 +8045,11 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuDeviceGetMemPool, ::cuMemPoolCreate, ::cuMemPoolSetAccess,
::cuMemPoolSetAttribute*/
fn cuMemAllocFromPoolAsync_ptsz(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytesize: usize,
- pool: cuda_types::CUmemoryPool,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ pool: cuda_types::cuda::CUmemoryPool,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
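Allocating from an explicit pool follows the same stream-ordered pattern, with ::cuMemPoolTrimTo available to return cached pages afterwards. A hypothetical sketch that borrows an existing pool (for instance one obtained via ::cuDeviceGetDefaultMemPool); error handling elided:

unsafe fn burst_from_pool(
    pool: cuda_types::cuda::CUmemoryPool,
    stream: cuda_types::cuda::CUstream,
) {
    let mut dptr: cuda_types::cuda::CUdeviceptr = core::mem::zeroed();
    let _ = cuMemAllocFromPoolAsync_ptsz(&mut dptr, 1 << 20, pool, stream);
    let _ = cuMemsetD8Async_ptsz(dptr, 0xFF, 1 << 20, stream);
    let _ = cuMemFreeAsync_ptsz(dptr, stream);
    let _ = cuStreamSynchronize_ptsz(stream);
    // With the default release threshold the freed memory stays cached in the pool;
    // trimming to zero hands everything not in use back to the OS.
    let _ = cuMemPoolTrimTo(pool, 0);
}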
/** \brief Exports a memory pool to the requested handle type.
Given an IPC capable mempool, create an OS handle to share the pool with another process.
@@ -8048,10 +8077,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuMemPoolSetAccess, ::cuMemPoolSetAttribute*/
fn cuMemPoolExportToShareableHandle(
handle_out: *mut ::core::ffi::c_void,
- pool: cuda_types::CUmemoryPool,
- handleType: cuda_types::CUmemAllocationHandleType,
+ pool: cuda_types::cuda::CUmemoryPool,
+ handleType: cuda_types::cuda::CUmemAllocationHandleType,
flags: ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Imports a memory pool from a shared handle.
Specific allocations can be imported from the imported pool with cuMemPoolImportPointer.
@@ -8078,11 +8107,11 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemPoolExportToShareableHandle, ::cuMemPoolExportPointer, ::cuMemPoolImportPointer*/
fn cuMemPoolImportFromShareableHandle(
- pool_out: *mut cuda_types::CUmemoryPool,
+ pool_out: *mut cuda_types::cuda::CUmemoryPool,
handle: *mut ::core::ffi::c_void,
- handleType: cuda_types::CUmemAllocationHandleType,
+ handleType: cuda_types::cuda::CUmemAllocationHandleType,
flags: ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Export data to share a memory pool allocation between processes.
Constructs \p shareData_out for sharing a specific allocation from an already shared memory pool.
@@ -8100,9 +8129,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemPoolExportToShareableHandle, ::cuMemPoolImportFromShareableHandle, ::cuMemPoolImportPointer*/
fn cuMemPoolExportPointer(
- shareData_out: *mut cuda_types::CUmemPoolPtrExportData,
- ptr: cuda_types::CUdeviceptr,
- ) -> cuda_types::CUresult;
+ shareData_out: *mut cuda_types::cuda::CUmemPoolPtrExportData,
+ ptr: cuda_types::cuda::CUdeviceptr,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Import a memory pool allocation from another process.
Returns in \p ptr_out a pointer to the imported memory.
@@ -8129,10 +8158,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMemPoolExportToShareableHandle, ::cuMemPoolImportFromShareableHandle, ::cuMemPoolExportPointer*/
fn cuMemPoolImportPointer(
- ptr_out: *mut cuda_types::CUdeviceptr,
- pool: cuda_types::CUmemoryPool,
- shareData: *mut cuda_types::CUmemPoolPtrExportData,
- ) -> cuda_types::CUresult;
+ ptr_out: *mut cuda_types::cuda::CUdeviceptr,
+ pool: cuda_types::cuda::CUmemoryPool,
+ shareData: *mut cuda_types::cuda::CUmemPoolPtrExportData,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a generic allocation handle representing a multicast object described by the given properties.
This creates a multicast object as described by \p prop. The number of
@@ -8173,9 +8202,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMulticastAddDevice, ::cuMulticastBindMem, ::cuMulticastBindAddr, ::cuMulticastUnbind
\sa ::cuMemCreate, ::cuMemRelease, ::cuMemExportToShareableHandle, ::cuMemImportFromShareableHandle*/
fn cuMulticastCreate(
- mcHandle: *mut cuda_types::CUmemGenericAllocationHandle,
- prop: *const cuda_types::CUmulticastObjectProp,
- ) -> cuda_types::CUresult;
+ mcHandle: *mut cuda_types::cuda::CUmemGenericAllocationHandle,
+ prop: *const cuda_types::cuda::CUmulticastObjectProp,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Associate a device to a multicast object.
Associates a device to a multicast object. The added device will be a part of
@@ -8206,9 +8235,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMulticastCreate, ::cuMulticastBindMem, ::cuMulticastBindAddr*/
fn cuMulticastAddDevice(
- mcHandle: cuda_types::CUmemGenericAllocationHandle,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ mcHandle: cuda_types::cuda::CUmemGenericAllocationHandle,
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Bind a memory allocation represented by a handle to a multicast object.
Binds a memory allocation specified by \p memHandle and created via
@@ -8255,13 +8284,13 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMulticastCreate, ::cuMulticastAddDevice, ::cuMemCreate*/
fn cuMulticastBindMem(
- mcHandle: cuda_types::CUmemGenericAllocationHandle,
+ mcHandle: cuda_types::cuda::CUmemGenericAllocationHandle,
mcOffset: usize,
- memHandle: cuda_types::CUmemGenericAllocationHandle,
+ memHandle: cuda_types::cuda::CUmemGenericAllocationHandle,
memOffset: usize,
size: usize,
flags: ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Bind a memory allocation represented by a virtual address to a multicast object.
Binds a memory allocation specified by its mapped address \p memptr to a
@@ -8306,12 +8335,12 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMulticastCreate, ::cuMulticastAddDevice, ::cuMemCreate*/
fn cuMulticastBindAddr(
- mcHandle: cuda_types::CUmemGenericAllocationHandle,
+ mcHandle: cuda_types::cuda::CUmemGenericAllocationHandle,
mcOffset: usize,
- memptr: cuda_types::CUdeviceptr,
+ memptr: cuda_types::cuda::CUdeviceptr,
size: usize,
flags: ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unbind any memory allocations bound to a multicast object at a given offset and up to a given size.
Unbinds any memory allocations hosted on \p dev and bound to a multicast
@@ -8343,11 +8372,11 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMulticastBindMem, ::cuMulticastBindAddr*/
fn cuMulticastUnbind(
- mcHandle: cuda_types::CUmemGenericAllocationHandle,
- dev: cuda_types::CUdevice,
+ mcHandle: cuda_types::cuda::CUmemGenericAllocationHandle,
+ dev: cuda_types::cuda::CUdevice,
mcOffset: usize,
size: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Calculates either the minimal or recommended granularity for a multicast object
Calculates either the minimal or recommended granularity for a given set of
@@ -8370,9 +8399,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
\sa ::cuMulticastCreate, ::cuMulticastBindMem, ::cuMulticastBindAddr, ::cuMulticastUnbind*/
fn cuMulticastGetGranularity(
granularity: *mut usize,
- prop: *const cuda_types::CUmulticastObjectProp,
- option: cuda_types::CUmulticastGranularity_flags,
- ) -> cuda_types::CUresult;
+ prop: *const cuda_types::cuda::CUmulticastObjectProp,
+ option: cuda_types::cuda::CUmulticastGranularity_flags,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns information about a pointer
The supported attributes are:
@@ -8561,9 +8590,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cudaPointerGetAttributes*/
fn cuPointerGetAttribute(
data: *mut ::core::ffi::c_void,
- attribute: cuda_types::CUpointer_attribute,
- ptr: cuda_types::CUdeviceptr,
- ) -> cuda_types::CUresult;
+ attribute: cuda_types::cuda::CUpointer_attribute,
+ ptr: cuda_types::cuda::CUdeviceptr,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Prefetches memory to the specified destination device
Note there is a later version of this API, ::cuMemPrefetchAsync_v2. It will
@@ -8632,11 +8661,11 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuMemcpy3DPeerAsync, ::cuMemAdvise, ::cuMemPrefetchAsync
::cudaMemPrefetchAsync_v2*/
fn cuMemPrefetchAsync_ptsz(
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- dstDevice: cuda_types::CUdevice,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ dstDevice: cuda_types::cuda::CUdevice,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Prefetches memory to the specified destination location
Prefetches memory to the specified destination location. \p devPtr is the
@@ -8711,12 +8740,12 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuMemcpy3DPeerAsync, ::cuMemAdvise, ::cuMemPrefetchAsync
::cudaMemPrefetchAsync_v2*/
fn cuMemPrefetchAsync_v2_ptsz(
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- location: cuda_types::CUmemLocation,
+ location: cuda_types::cuda::CUmemLocation,
flags: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Advise about the usage of a given memory range
Note there is a later version of this API, ::cuMemAdvise_v2. It will
@@ -8829,11 +8858,11 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuMemcpy3DPeerAsync, ::cuMemPrefetchAsync, ::cuMemAdvise_v2
::cudaMemAdvise*/
fn cuMemAdvise(
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- advice: cuda_types::CUmem_advise,
- device: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ advice: cuda_types::cuda::CUmem_advise,
+ device: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
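::cuMemAdvise and ::cuMemPrefetchAsync are commonly paired on managed ranges: the advice sets the long-term policy, the prefetch moves the pages now. A hypothetical helper where the managed pointer, the advice value and the target device are passed in rather than constructed here (error handling elided):

unsafe fn hint_and_prefetch(
    dev_ptr: cuda_types::cuda::CUdeviceptr, // managed allocation, e.g. from cuMemAllocManaged
    count: usize,
    advice: cuda_types::cuda::CUmem_advise,
    device: cuda_types::cuda::CUdevice,
    stream: cuda_types::cuda::CUstream,
) {
    // Record the policy first so the prefetch already honours it.
    let _ = cuMemAdvise(dev_ptr, count, advice, device);
    let _ = cuMemPrefetchAsync_ptsz(dev_ptr, count, device, stream);
}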
/** \brief Advise about the usage of a given memory range
Advise the Unified Memory subsystem about the usage pattern for the memory range
@@ -8952,11 +8981,11 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuMemcpy3DPeerAsync, ::cuMemPrefetchAsync, ::cuMemAdvise
::cudaMemAdvise*/
fn cuMemAdvise_v2(
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- advice: cuda_types::CUmem_advise,
- location: cuda_types::CUmemLocation,
- ) -> cuda_types::CUresult;
+ advice: cuda_types::cuda::CUmem_advise,
+ location: cuda_types::cuda::CUmemLocation,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Query an attribute of a given memory range
Query an attribute about the memory range starting at \p devPtr with a size of \p count bytes. The
@@ -9037,10 +9066,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
fn cuMemRangeGetAttribute(
data: *mut ::core::ffi::c_void,
dataSize: usize,
- attribute: cuda_types::CUmem_range_attribute,
- devPtr: cuda_types::CUdeviceptr,
+ attribute: cuda_types::cuda::CUmem_range_attribute,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Query attributes of a given memory range.
Query attributes of the memory range starting at \p devPtr with a size of \p count bytes. The
@@ -9084,11 +9113,11 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
fn cuMemRangeGetAttributes(
data: *mut *mut ::core::ffi::c_void,
dataSizes: *mut usize,
- attributes: *mut cuda_types::CUmem_range_attribute,
+ attributes: *mut cuda_types::cuda::CUmem_range_attribute,
numAttributes: usize,
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Set attributes on a previously allocated memory region
The supported attributes are:
@@ -9129,9 +9158,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuMemHostUnregister*/
fn cuPointerSetAttribute(
value: *const ::core::ffi::c_void,
- attribute: cuda_types::CUpointer_attribute,
- ptr: cuda_types::CUdeviceptr,
- ) -> cuda_types::CUresult;
+ attribute: cuda_types::cuda::CUpointer_attribute,
+ ptr: cuda_types::cuda::CUdeviceptr,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns information about a pointer.
The supported attributes are (refer to ::cuPointerGetAttribute for attribute descriptions and restrictions):
@@ -9179,10 +9208,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cudaPointerGetAttributes*/
fn cuPointerGetAttributes(
numAttributes: ::core::ffi::c_uint,
- attributes: *mut cuda_types::CUpointer_attribute,
+ attributes: *mut cuda_types::cuda::CUpointer_attribute,
data: *mut *mut ::core::ffi::c_void,
- ptr: cuda_types::CUdeviceptr,
- ) -> cuda_types::CUresult;
+ ptr: cuda_types::cuda::CUdeviceptr,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a stream
Creates a stream and returns a handle in \p phStream. The \p Flags argument
@@ -9217,9 +9246,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cudaStreamCreate,
::cudaStreamCreateWithFlags*/
fn cuStreamCreate(
- phStream: *mut cuda_types::CUstream,
+ phStream: *mut cuda_types::cuda::CUstream,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a stream with the given priority
Creates a stream with the specified priority and returns a handle in \p phStream.
@@ -9268,10 +9297,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuStreamAddCallback,
::cudaStreamCreateWithPriority*/
fn cuStreamCreateWithPriority(
- phStream: *mut cuda_types::CUstream,
+ phStream: *mut cuda_types::cuda::CUstream,
flags: ::core::ffi::c_uint,
priority: ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Query the priority of a given stream
Query the priority of a stream created using ::cuStreamCreate or ::cuStreamCreateWithPriority
@@ -9299,9 +9328,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuStreamGetFlags,
::cudaStreamGetPriority*/
fn cuStreamGetPriority_ptsz(
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
priority: *mut ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Query the flags of a given stream
Query the flags of a stream created using ::cuStreamCreate or ::cuStreamCreateWithPriority
@@ -9327,9 +9356,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuStreamGetPriority,
::cudaStreamGetFlags*/
fn cuStreamGetFlags_ptsz(
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
flags: *mut ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
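A created stream carries its flags and priority for its whole lifetime, and the two getters above simply read them back. A hypothetical create/query/destroy round trip; flag and priority values are plain integers here, and error handling is elided:

unsafe fn stream_lifecycle() {
    let mut stream: cuda_types::cuda::CUstream = core::mem::zeroed();
    // Flags = 0 is the default (blocking) behaviour; priority 0 is the middle of the range.
    let _ = cuStreamCreateWithPriority(&mut stream, 0, 0);
    let mut priority = 0;
    let mut flags = 0;
    let _ = cuStreamGetPriority_ptsz(stream, &mut priority);
    let _ = cuStreamGetFlags_ptsz(stream, &mut flags);
    // ... submit work ...
    let _ = cuStreamDestroy_v2(stream);
}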
/** \brief Returns the unique Id associated with the stream handle supplied
Returns in \p streamId the unique Id which is associated with the given stream handle.
@@ -9360,9 +9389,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuStreamGetPriority,
::cudaStreamGetId*/
fn cuStreamGetId_ptsz(
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
streamId: *mut ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Query the context associated with a stream
Returns the CUDA context that the stream is associated with.
@@ -9404,9 +9433,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cudaStreamCreate,
::cudaStreamCreateWithFlags*/
fn cuStreamGetCtx_ptsz(
- hStream: cuda_types::CUstream,
- pctx: *mut cuda_types::CUcontext,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ pctx: *mut cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Make a compute stream wait on an event
Makes all future work submitted to \p hStream wait for all work captured in
@@ -9441,10 +9470,10 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuStreamDestroy,
::cudaStreamWaitEvent*/
fn cuStreamWaitEvent_ptsz(
- hStream: cuda_types::CUstream,
- hEvent: cuda_types::CUevent,
+ hStream: cuda_types::cuda::CUstream,
+ hEvent: cuda_types::cuda::CUevent,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Add a callback to a compute stream
\note This function is slated for eventual deprecation and removal. If
@@ -9517,11 +9546,11 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuLaunchHostFunc,
::cudaStreamAddCallback*/
fn cuStreamAddCallback_ptsz(
- hStream: cuda_types::CUstream,
- callback: cuda_types::CUstreamCallback,
+ hStream: cuda_types::cuda::CUstream,
+ callback: cuda_types::cuda::CUstreamCallback,
userData: *mut ::core::ffi::c_void,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Begins graph capture on a stream
Begin graph capture on \p hStream. When a stream is in capture mode, all operations
@@ -9557,9 +9586,9 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuStreamEndCapture,
::cuThreadExchangeStreamCaptureMode*/
fn cuStreamBeginCapture_v2_ptsz(
- hStream: cuda_types::CUstream,
- mode: cuda_types::CUstreamCaptureMode,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ mode: cuda_types::cuda::CUstreamCaptureMode,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Begins graph capture on a stream to an existing graph
Begin graph capture on \p hStream, placing new nodes into an existing graph. When a stream is
@@ -9603,13 +9632,13 @@ CU_MEM_OPERATION_TYPE_UNMAP = 2
::cuThreadExchangeStreamCaptureMode,
::cuGraphAddNode,*/
fn cuStreamBeginCaptureToGraph_ptsz(
- hStream: cuda_types::CUstream,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
- dependencyData: *const cuda_types::CUgraphEdgeData,
+ hStream: cuda_types::cuda::CUstream,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
+ dependencyData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
- mode: cuda_types::CUstreamCaptureMode,
- ) -> cuda_types::CUresult;
+ mode: cuda_types::cuda::CUstreamCaptureMode,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Swaps the stream capture interaction mode for a thread
Sets the calling thread's stream capture interaction mode to the value contained
@@ -9660,8 +9689,8 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
\sa
::cuStreamBeginCapture*/
fn cuThreadExchangeStreamCaptureMode(
- mode: *mut cuda_types::CUstreamCaptureMode,
- ) -> cuda_types::CUresult;
+ mode: *mut cuda_types::cuda::CUstreamCaptureMode,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Ends capture on a stream, returning the captured graph
End capture on \p hStream, returning the captured graph via \p phGraph.
@@ -9690,9 +9719,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuStreamIsCapturing,
::cuGraphDestroy*/
fn cuStreamEndCapture_ptsz(
- hStream: cuda_types::CUstream,
- phGraph: *mut cuda_types::CUgraph,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ phGraph: *mut cuda_types::cuda::CUgraph,
+ ) -> cuda_types::cuda::CUresult;
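Everything launched into the stream between begin and end capture lands in the returned graph instead of executing. A hypothetical sketch that records a single memset; the capture mode and the destination buffer are parameters since their values are not fixed by this hunk (error handling elided):

unsafe fn capture_memset(
    stream: cuda_types::cuda::CUstream,
    mode: cuda_types::cuda::CUstreamCaptureMode,
    dptr: cuda_types::cuda::CUdeviceptr,
    bytes: usize,
) -> cuda_types::cuda::CUgraph {
    let _ = cuStreamBeginCapture_v2_ptsz(stream, mode);
    // Captured, not executed: this memset becomes a node of the graph.
    let _ = cuMemsetD8Async_ptsz(dptr, 0, bytes, stream);
    let mut graph: cuda_types::cuda::CUgraph = core::mem::zeroed();
    let _ = cuStreamEndCapture_ptsz(stream, &mut graph);
    graph
}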
/** \brief Returns a stream's capture status
Return the capture status of \p hStream via \p captureStatus. After a successful
@@ -9730,9 +9759,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuStreamBeginCapture,
::cuStreamEndCapture*/
fn cuStreamIsCapturing_ptsz(
- hStream: cuda_types::CUstream,
- captureStatus: *mut cuda_types::CUstreamCaptureStatus,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Query a stream's capture state
Query stream state related to stream capture.
@@ -9779,13 +9808,13 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuStreamIsCapturing,
::cuStreamUpdateCaptureDependencies*/
fn cuStreamGetCaptureInfo_v2_ptsz(
- hStream: cuda_types::CUstream,
- captureStatus_out: *mut cuda_types::CUstreamCaptureStatus,
- id_out: *mut cuda_types::cuuint64_t,
- graph_out: *mut cuda_types::CUgraph,
- dependencies_out: *mut *const cuda_types::CUgraphNode,
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus_out: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ id_out: *mut cuda_types::cuda::cuuint64_t,
+ graph_out: *mut cuda_types::cuda::CUgraph,
+ dependencies_out: *mut *const cuda_types::cuda::CUgraphNode,
numDependencies_out: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Query a stream's capture state (12.3+)
Query stream state related to stream capture.
@@ -9843,14 +9872,14 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuStreamIsCapturing,
::cuStreamUpdateCaptureDependencies*/
fn cuStreamGetCaptureInfo_v3_ptsz(
- hStream: cuda_types::CUstream,
- captureStatus_out: *mut cuda_types::CUstreamCaptureStatus,
- id_out: *mut cuda_types::cuuint64_t,
- graph_out: *mut cuda_types::CUgraph,
- dependencies_out: *mut *const cuda_types::CUgraphNode,
- edgeData_out: *mut *const cuda_types::CUgraphEdgeData,
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus_out: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ id_out: *mut cuda_types::cuda::cuuint64_t,
+ graph_out: *mut cuda_types::cuda::CUgraph,
+ dependencies_out: *mut *const cuda_types::cuda::CUgraphNode,
+ edgeData_out: *mut *const cuda_types::cuda::CUgraphEdgeData,
numDependencies_out: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Update the set of dependencies in a capturing stream (11.3+)
Modifies the dependency set of a capturing stream. The dependency set is the set
@@ -9884,11 +9913,11 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuStreamBeginCapture,
::cuStreamGetCaptureInfo,*/
fn cuStreamUpdateCaptureDependencies_ptsz(
- hStream: cuda_types::CUstream,
- dependencies: *mut cuda_types::CUgraphNode,
+ hStream: cuda_types::cuda::CUstream,
+ dependencies: *mut cuda_types::cuda::CUgraphNode,
numDependencies: usize,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Update the set of dependencies in a capturing stream (12.3+)
Modifies the dependency set of a capturing stream. The dependency set is the set
@@ -9921,12 +9950,12 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuStreamBeginCapture,
::cuStreamGetCaptureInfo,*/
fn cuStreamUpdateCaptureDependencies_v2_ptsz(
- hStream: cuda_types::CUstream,
- dependencies: *mut cuda_types::CUgraphNode,
- dependencyData: *const cuda_types::CUgraphEdgeData,
+ hStream: cuda_types::cuda::CUstream,
+ dependencies: *mut cuda_types::cuda::CUgraphNode,
+ dependencyData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Attach memory to a stream asynchronously
Enqueues an operation in \p hStream to specify stream association of
@@ -10012,11 +10041,11 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuMemAllocManaged,
::cudaStreamAttachMemAsync*/
fn cuStreamAttachMemAsync_ptsz(
- hStream: cuda_types::CUstream,
- dptr: cuda_types::CUdeviceptr,
+ hStream: cuda_types::cuda::CUstream,
+ dptr: cuda_types::cuda::CUdeviceptr,
length: usize,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Determine status of a compute stream
Returns ::CUDA_SUCCESS if all operations in the stream specified by
@@ -10043,7 +10072,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuStreamSynchronize,
::cuStreamAddCallback,
::cudaStreamQuery*/
- fn cuStreamQuery_ptsz(hStream: cuda_types::CUstream) -> cuda_types::CUresult;
+ fn cuStreamQuery_ptsz(
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Wait until a stream's tasks are completed
Waits until the device has completed all operations in the stream specified
@@ -10069,7 +10100,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuStreamQuery,
::cuStreamAddCallback,
::cudaStreamSynchronize*/
- fn cuStreamSynchronize_ptsz(hStream: cuda_types::CUstream) -> cuda_types::CUresult;
+ fn cuStreamSynchronize_ptsz(
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys a stream
Destroys the stream specified by \p hStream.
@@ -10096,7 +10129,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuStreamSynchronize,
::cuStreamAddCallback,
::cudaStreamDestroy*/
- fn cuStreamDestroy_v2(hStream: cuda_types::CUstream) -> cuda_types::CUresult;
+ fn cuStreamDestroy_v2(
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies attributes from source stream to destination stream.
Copies attributes from source stream \p src to destination stream \p dst.
@@ -10114,9 +10149,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
\sa
::CUaccessPolicyWindow*/
fn cuStreamCopyAttributes_ptsz(
- dst: cuda_types::CUstream,
- src: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ dst: cuda_types::cuda::CUstream,
+ src: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Queries stream attribute.
Queries attribute \p attr from \p hStream and stores it in corresponding
@@ -10135,10 +10170,10 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
\sa
::CUaccessPolicyWindow*/
fn cuStreamGetAttribute_ptsz(
- hStream: cuda_types::CUstream,
- attr: cuda_types::CUstreamAttrID,
- value_out: *mut cuda_types::CUstreamAttrValue,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ attr: cuda_types::cuda::CUstreamAttrID,
+ value_out: *mut cuda_types::cuda::CUstreamAttrValue,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets stream attribute.
Sets attribute \p attr on \p hStream from corresponding attribute of
@@ -10158,10 +10193,10 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
\sa
::CUaccessPolicyWindow*/
fn cuStreamSetAttribute_ptsz(
- hStream: cuda_types::CUstream,
- attr: cuda_types::CUstreamAttrID,
- value: *const cuda_types::CUstreamAttrValue,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ attr: cuda_types::cuda::CUstreamAttrID,
+ value: *const cuda_types::cuda::CUstreamAttrValue,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates an event
Creates an event *phEvent for the current context with the flags specified via
@@ -10200,9 +10235,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cudaEventCreate,
::cudaEventCreateWithFlags*/
fn cuEventCreate(
- phEvent: *mut cuda_types::CUevent,
+ phEvent: *mut cuda_types::cuda::CUevent,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Records an event
Captures in \p hEvent the contents of \p hStream at the time of this call.
@@ -10242,9 +10277,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cudaEventRecord,
::cuEventRecordWithFlags*/
fn cuEventRecord_ptsz(
- hEvent: cuda_types::CUevent,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hEvent: cuda_types::cuda::CUevent,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Records an event
Captures in \p hEvent the contents of \p hStream at the time of this call.
@@ -10291,10 +10326,10 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuEventRecord,
::cudaEventRecord*/
fn cuEventRecordWithFlags_ptsz(
- hEvent: cuda_types::CUevent,
- hStream: cuda_types::CUstream,
+ hEvent: cuda_types::cuda::CUevent,
+ hStream: cuda_types::cuda::CUstream,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Queries an event's status
Queries the status of all work currently captured by \p hEvent. See
@@ -10323,7 +10358,7 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuEventDestroy,
::cuEventElapsedTime,
::cudaEventQuery*/
- fn cuEventQuery(hEvent: cuda_types::CUevent) -> cuda_types::CUresult;
+ fn cuEventQuery(hEvent: cuda_types::cuda::CUevent) -> cuda_types::cuda::CUresult;
/** \brief Waits for an event to complete
Waits until the completion of all work currently captured in \p hEvent.
@@ -10351,7 +10386,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuEventDestroy,
::cuEventElapsedTime,
::cudaEventSynchronize*/
- fn cuEventSynchronize(hEvent: cuda_types::CUevent) -> cuda_types::CUresult;
+ fn cuEventSynchronize(
+ hEvent: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys an event
Destroys the event specified by \p hEvent.
@@ -10377,7 +10414,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cuEventSynchronize,
::cuEventElapsedTime,
::cudaEventDestroy*/
- fn cuEventDestroy_v2(hEvent: cuda_types::CUevent) -> cuda_types::CUresult;
+ fn cuEventDestroy_v2(
+ hEvent: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Computes the elapsed time between two events
Computes the elapsed time between two events (in milliseconds with a
@@ -10421,9 +10460,9 @@ cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
::cudaEventElapsedTime*/
fn cuEventElapsedTime(
pMilliseconds: *mut f32,
- hStart: cuda_types::CUevent,
- hEnd: cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ hStart: cuda_types::cuda::CUevent,
+ hEnd: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
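Taken together, the event bindings above cover the usual timing pattern. A minimal sketch, assuming a context is current and `stream` is a valid `cuda_types::cuda::CUstream` set up outside this hunk, with result codes left unchecked for brevity:

unsafe {
    let mut start = ::core::mem::MaybeUninit::<cuda_types::cuda::CUevent>::uninit();
    let mut stop = ::core::mem::MaybeUninit::<cuda_types::cuda::CUevent>::uninit();
    cuEventCreate(start.as_mut_ptr(), 0); // 0 == default event flags
    cuEventCreate(stop.as_mut_ptr(), 0);
    let (start, stop) = (start.assume_init(), stop.assume_init());

    cuEventRecord_ptsz(start, stream);
    // ... enqueue the work to be timed on `stream` ...
    cuEventRecord_ptsz(stop, stream);
    cuEventSynchronize(stop);

    let mut ms = 0.0f32;
    cuEventElapsedTime(&mut ms, start, stop);

    cuEventDestroy_v2(start);
    cuEventDestroy_v2(stop);
}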
/** \brief Imports an external memory object
Imports an externally allocated memory object and returns
@@ -10586,9 +10625,9 @@ CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF = 8
::cuExternalMemoryGetMappedBuffer,
::cuExternalMemoryGetMappedMipmappedArray*/
fn cuImportExternalMemory(
- extMem_out: *mut cuda_types::CUexternalMemory,
- memHandleDesc: *const cuda_types::CUDA_EXTERNAL_MEMORY_HANDLE_DESC,
- ) -> cuda_types::CUresult;
+ extMem_out: *mut cuda_types::cuda::CUexternalMemory,
+ memHandleDesc: *const cuda_types::cuda::CUDA_EXTERNAL_MEMORY_HANDLE_DESC,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Maps a buffer onto an imported memory object
Maps a buffer onto an imported memory object and returns a device
@@ -10640,10 +10679,10 @@ unsigned int flags;
::cuDestroyExternalMemory,
::cuExternalMemoryGetMappedMipmappedArray*/
fn cuExternalMemoryGetMappedBuffer(
- devPtr: *mut cuda_types::CUdeviceptr,
- extMem: cuda_types::CUexternalMemory,
- bufferDesc: *const cuda_types::CUDA_EXTERNAL_MEMORY_BUFFER_DESC,
- ) -> cuda_types::CUresult;
+ devPtr: *mut cuda_types::cuda::CUdeviceptr,
+ extMem: cuda_types::cuda::CUexternalMemory,
+ bufferDesc: *const cuda_types::cuda::CUDA_EXTERNAL_MEMORY_BUFFER_DESC,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Maps a CUDA mipmapped array onto an external memory object
Maps a CUDA mipmapped array onto an external object and returns a
@@ -10694,10 +10733,10 @@ unsigned int numLevels;
::cuDestroyExternalMemory,
::cuExternalMemoryGetMappedBuffer*/
fn cuExternalMemoryGetMappedMipmappedArray(
- mipmap: *mut cuda_types::CUmipmappedArray,
- extMem: cuda_types::CUexternalMemory,
- mipmapDesc: *const cuda_types::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC,
- ) -> cuda_types::CUresult;
+ mipmap: *mut cuda_types::cuda::CUmipmappedArray,
+ extMem: cuda_types::cuda::CUexternalMemory,
+ mipmapDesc: *const cuda_types::cuda::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys an external memory object.
Destroys the specified external memory object. Any existing buffers
@@ -10717,8 +10756,8 @@ unsigned int numLevels;
::cuExternalMemoryGetMappedBuffer,
::cuExternalMemoryGetMappedMipmappedArray*/
fn cuDestroyExternalMemory(
- extMem: cuda_types::CUexternalMemory,
- ) -> cuda_types::CUresult;
+ extMem: cuda_types::cuda::CUexternalMemory,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Imports an external semaphore
Imports an externally allocated synchronization object and returns
@@ -10874,9 +10913,9 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cuSignalExternalSemaphoresAsync,
::cuWaitExternalSemaphoresAsync*/
fn cuImportExternalSemaphore(
- extSem_out: *mut cuda_types::CUexternalSemaphore,
- semHandleDesc: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC,
- ) -> cuda_types::CUresult;
+ extSem_out: *mut cuda_types::cuda::CUexternalSemaphore,
+ semHandleDesc: *const cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Signals a set of external semaphore objects
Enqueues a signal operation on a set of externally allocated
@@ -10956,11 +10995,11 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cuDestroyExternalSemaphore,
::cuWaitExternalSemaphoresAsync*/
fn cuSignalExternalSemaphoresAsync_ptsz(
- extSemArray: *const cuda_types::CUexternalSemaphore,
- paramsArray: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,
+ extSemArray: *const cuda_types::cuda::CUexternalSemaphore,
+ paramsArray: *const cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,
numExtSems: ::core::ffi::c_uint,
- stream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ stream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Waits on a set of external semaphore objects
Enqueues a wait operation on a set of externally allocated
@@ -11034,11 +11073,11 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cuDestroyExternalSemaphore,
::cuSignalExternalSemaphoresAsync*/
fn cuWaitExternalSemaphoresAsync_ptsz(
- extSemArray: *const cuda_types::CUexternalSemaphore,
- paramsArray: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,
+ extSemArray: *const cuda_types::cuda::CUexternalSemaphore,
+ paramsArray: *const cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,
numExtSems: ::core::ffi::c_uint,
- stream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ stream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys an external semaphore
Destroys an external semaphore object and releases any references
@@ -11057,8 +11096,8 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cuSignalExternalSemaphoresAsync,
::cuWaitExternalSemaphoresAsync*/
fn cuDestroyExternalSemaphore(
- extSem: cuda_types::CUexternalSemaphore,
- ) -> cuda_types::CUresult;
+ extSem: cuda_types::cuda::CUexternalSemaphore,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Wait on a memory location
Enqueues a synchronization of the stream on the given memory location. Work
@@ -11100,11 +11139,11 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cuMemHostRegister,
::cuStreamWaitEvent*/
fn cuStreamWaitValue32_v2_ptsz(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Wait on a memory location
Enqueues a synchronization of the stream on the given memory location. Work
@@ -11145,11 +11184,11 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cuMemHostRegister,
::cuStreamWaitEvent*/
fn cuStreamWaitValue64_v2_ptsz(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Write a value to memory
Write a value to memory.
@@ -11176,11 +11215,11 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cuMemHostRegister,
::cuEventRecord*/
fn cuStreamWriteValue32_v2_ptsz(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
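A sketch of the stream-ordered handshake these memory operations enable, assuming `stream` and `addr` (a `cuda_types::cuda::CUdeviceptr` backed by suitably registered memory) exist, that `cuuint32_t` is the usual `u32` alias, and that a flag value of 0 selects the default comparison and write behaviour:

unsafe {
    // Hold back later work on `stream` until the 32-bit word at `addr`
    // satisfies the default comparison against 1 ...
    cuStreamWaitValue32_v2_ptsz(stream, addr, 1, 0);
    // ... then, further along the same stream, publish a new value.
    cuStreamWriteValue32_v2_ptsz(stream, addr, 2, 0);
}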
/** \brief Write a value to memory
Write a value to memory.
@@ -11209,11 +11248,11 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cuMemHostRegister,
::cuEventRecord*/
fn cuStreamWriteValue64_v2_ptsz(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Batch operations to synchronize the stream via memory operations
This is a batch version of ::cuStreamWaitValue32() and ::cuStreamWriteValue32().
@@ -11254,11 +11293,11 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cuStreamWriteValue64,
::cuMemHostRegister*/
fn cuStreamBatchMemOp_v2_ptsz(
- stream: cuda_types::CUstream,
+ stream: cuda_types::cuda::CUstream,
count: ::core::ffi::c_uint,
- paramArray: *mut cuda_types::CUstreamBatchMemOpParams,
+ paramArray: *mut cuda_types::cuda::CUstreamBatchMemOpParams,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns information about a function
Returns in \p *pi the integer value of the attribute \p attrib on the kernel
@@ -11353,9 +11392,9 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cuKernelGetAttribute*/
fn cuFuncGetAttribute(
pi: *mut ::core::ffi::c_int,
- attrib: cuda_types::CUfunction_attribute,
- hfunc: cuda_types::CUfunction,
- ) -> cuda_types::CUresult;
+ attrib: cuda_types::cuda::CUfunction_attribute,
+ hfunc: cuda_types::cuda::CUfunction,
+ ) -> cuda_types::cuda::CUresult;
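A sketch of querying one attribute through this binding; `f` is an assumed `CUfunction`, and the attribute constant path is assumed to follow the generated `CUfunction_attribute` layout in cuda_types:

unsafe {
    let mut max_threads = 0i32;
    // Query the maximum threads per block the kernel can be launched with.
    cuFuncGetAttribute(
        &mut max_threads,
        cuda_types::cuda::CUfunction_attribute::CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK,
        f,
    );
}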
/** \brief Sets information about a function
This call sets the value of a specified attribute \p attrib on the kernel given
@@ -11417,10 +11456,10 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cudaFuncSetAttribute,
::cuKernelSetAttribute*/
fn cuFuncSetAttribute(
- hfunc: cuda_types::CUfunction,
- attrib: cuda_types::CUfunction_attribute,
+ hfunc: cuda_types::cuda::CUfunction,
+ attrib: cuda_types::cuda::CUfunction_attribute,
value: ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the preferred cache configuration for a device function
On devices where the L1 cache and shared memory use the same hardware
@@ -11463,9 +11502,9 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
::cudaFuncSetCacheConfig,
::cuKernelSetCacheConfig*/
fn cuFuncSetCacheConfig(
- hfunc: cuda_types::CUfunction,
- config: cuda_types::CUfunc_cache,
- ) -> cuda_types::CUresult;
+ hfunc: cuda_types::cuda::CUfunction,
+ config: cuda_types::cuda::CUfunc_cache,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a module handle
Returns in \p *hmod the handle of the module that function \p hfunc
@@ -11489,9 +11528,9 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
\notefnerr
*/
fn cuFuncGetModule(
- hmod: *mut cuda_types::CUmodule,
- hfunc: cuda_types::CUfunction,
- ) -> cuda_types::CUresult;
+ hmod: *mut cuda_types::cuda::CUmodule,
+ hfunc: cuda_types::cuda::CUfunction,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the function name for a ::CUfunction handle
Returns in \p **name the function name associated with the function handle \p hfunc .
@@ -11511,8 +11550,8 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
*/
fn cuFuncGetName(
name: *mut *const ::core::ffi::c_char,
- hfunc: cuda_types::CUfunction,
- ) -> cuda_types::CUresult;
+ hfunc: cuda_types::cuda::CUfunction,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the offset and size of a kernel parameter in the device-side parameter layout
Queries the kernel parameter at \p paramIndex into \p func's list of parameters, and returns
@@ -11534,11 +11573,11 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
\sa ::cuKernelGetParamInfo*/
fn cuFuncGetParamInfo(
- func: cuda_types::CUfunction,
+ func: cuda_types::cuda::CUfunction,
paramIndex: usize,
paramOffset: *mut usize,
paramSize: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns if the function is loaded
Returns in \p state the loading state of \p function.
@@ -11554,9 +11593,9 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
\sa ::cuFuncLoad,
::cuModuleEnumerateFunctions*/
fn cuFuncIsLoaded(
- state: *mut cuda_types::CUfunctionLoadingState,
- function: cuda_types::CUfunction,
- ) -> cuda_types::CUresult;
+ state: *mut cuda_types::cuda::CUfunctionLoadingState,
+ function: cuda_types::cuda::CUfunction,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Loads a function
Finalizes function loading for \p function. Calling this API with a
@@ -11571,7 +11610,7 @@ CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
\sa ::cuModuleEnumerateFunctions,
::cuFuncIsLoaded*/
- fn cuFuncLoad(function: cuda_types::CUfunction) -> cuda_types::CUresult;
+ fn cuFuncLoad(function: cuda_types::cuda::CUfunction) -> cuda_types::cuda::CUresult;
/** \brief Launches a CUDA function ::CUfunction or a CUDA kernel ::CUkernel
Invokes the function ::CUfunction or the kernel ::CUkernel \p f
@@ -11690,7 +11729,7 @@ status = cuLaunchKernel(f, gx, gy, gz, bx, by, bz, sh, s, NULL, config);
::cuKernelGetAttribute,
::cuKernelSetAttribute*/
fn cuLaunchKernel_ptsz(
- f: cuda_types::CUfunction,
+ f: cuda_types::cuda::CUfunction,
gridDimX: ::core::ffi::c_uint,
gridDimY: ::core::ffi::c_uint,
gridDimZ: ::core::ffi::c_uint,
@@ -11698,10 +11737,10 @@ status = cuLaunchKernel(f, gx, gy, gz, bx, by, bz, sh, s, NULL, config);
blockDimY: ::core::ffi::c_uint,
blockDimZ: ::core::ffi::c_uint,
sharedMemBytes: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
kernelParams: *mut *mut ::core::ffi::c_void,
extra: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
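Mirroring the C example in the comment above, a minimal sketch of a launch through the `_ptsz` binding for a kernel taking a single device-pointer argument; `f`, `stream`, and `dptr` are assumed to come from module-loading and allocation code outside this hunk:

unsafe {
    let mut dptr_arg = dptr; // cuda_types::cuda::CUdeviceptr
    let mut kernel_params = [&mut dptr_arg as *mut _ as *mut ::core::ffi::c_void];
    cuLaunchKernel_ptsz(
        f,
        256, 1, 1, // grid dimensions
        128, 1, 1, // block dimensions
        0,         // dynamic shared memory bytes
        stream,
        kernel_params.as_mut_ptr(),
        ::core::ptr::null_mut(), // `extra` is unused when kernelParams is supplied
    );
}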
/** \brief Launches a CUDA function ::CUfunction or a CUDA kernel ::CUkernel with launch-time configuration
Invokes the function ::CUfunction or the kernel ::CUkernel \p f with the specified launch-time configuration
@@ -11936,11 +11975,11 @@ status = cuLaunchKernel(f, gx, gy, gz, bx, by, bz, sh, s, NULL, config);
::cuKernelGetAttribute,
::cuKernelSetAttribute*/
fn cuLaunchKernelEx_ptsz(
- config: *const cuda_types::CUlaunchConfig,
- f: cuda_types::CUfunction,
+ config: *const cuda_types::cuda::CUlaunchConfig,
+ f: cuda_types::cuda::CUfunction,
kernelParams: *mut *mut ::core::ffi::c_void,
extra: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Launches a CUDA function ::CUfunction or a CUDA kernel ::CUkernel where thread blocks
can cooperate and synchronize as they execute
@@ -12034,7 +12073,7 @@ status = cuLaunchKernel(f, gx, gy, gz, bx, by, bz, sh, s, NULL, config);
::cuKernelGetAttribute,
::cuKernelSetAttribute*/
fn cuLaunchCooperativeKernel_ptsz(
- f: cuda_types::CUfunction,
+ f: cuda_types::cuda::CUfunction,
gridDimX: ::core::ffi::c_uint,
gridDimY: ::core::ffi::c_uint,
gridDimZ: ::core::ffi::c_uint,
@@ -12042,9 +12081,9 @@ status = cuLaunchKernel(f, gx, gy, gz, bx, by, bz, sh, s, NULL, config);
blockDimY: ::core::ffi::c_uint,
blockDimZ: ::core::ffi::c_uint,
sharedMemBytes: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
kernelParams: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Launches CUDA functions on multiple devices where thread blocks can cooperate and synchronize as they execute
\deprecated This function is deprecated as of CUDA 11.3.
@@ -12182,10 +12221,10 @@ void **kernelParams;
::cuLaunchCooperativeKernel,
::cudaLaunchCooperativeKernelMultiDevice*/
fn cuLaunchCooperativeKernelMultiDevice(
- launchParamsList: *mut cuda_types::CUDA_LAUNCH_PARAMS,
+ launchParamsList: *mut cuda_types::cuda::CUDA_LAUNCH_PARAMS,
numDevices: ::core::ffi::c_uint,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Enqueues a host function call in a stream
Enqueues a host function to run in a stream. The function will be called
@@ -12248,10 +12287,10 @@ void **kernelParams;
::cuStreamAttachMemAsync,
::cuStreamAddCallback*/
fn cuLaunchHostFunc_ptsz(
- hStream: cuda_types::CUstream,
- fn_: cuda_types::CUhostFn,
+ hStream: cuda_types::cuda::CUstream,
+ fn_: cuda_types::cuda::CUhostFn,
userData: *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
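A sketch of enqueueing a host callback, assuming `CUhostFn` is the usual generated alias `Option<unsafe extern "C" fn(*mut c_void)>` and `stream` is a valid handle:

unsafe extern "C" fn host_done(_user_data: *mut ::core::ffi::c_void) {
    // Runs on a driver-managed thread once prior work on the stream completes.
}

unsafe {
    cuLaunchHostFunc_ptsz(stream, Some(host_done), ::core::ptr::null_mut());
}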
/** \brief Sets the block-dimensions for the function
\deprecated
@@ -12285,11 +12324,11 @@ void **kernelParams;
::cuLaunchGridAsync,
::cuLaunchKernel*/
fn cuFuncSetBlockShape(
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
x: ::core::ffi::c_int,
y: ::core::ffi::c_int,
z: ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the dynamic shared-memory size for the function
\deprecated
@@ -12321,9 +12360,9 @@ void **kernelParams;
::cuLaunchGridAsync,
::cuLaunchKernel*/
fn cuFuncSetSharedSize(
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
bytes: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the parameter size for the function
\deprecated
@@ -12353,9 +12392,9 @@ void **kernelParams;
::cuLaunchGridAsync,
::cuLaunchKernel*/
fn cuParamSetSize(
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
numbytes: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Adds an integer parameter to the function's argument list
\deprecated
@@ -12386,10 +12425,10 @@ void **kernelParams;
::cuLaunchGridAsync,
::cuLaunchKernel*/
fn cuParamSeti(
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
offset: ::core::ffi::c_int,
value: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Adds a floating-point parameter to the function's argument list
\deprecated
@@ -12420,10 +12459,10 @@ void **kernelParams;
::cuLaunchGridAsync,
::cuLaunchKernel*/
fn cuParamSetf(
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
offset: ::core::ffi::c_int,
value: f32,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Adds arbitrary data to the function's argument list
\deprecated
@@ -12456,11 +12495,11 @@ void **kernelParams;
::cuLaunchGridAsync,
::cuLaunchKernel*/
fn cuParamSetv(
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
offset: ::core::ffi::c_int,
ptr: *mut ::core::ffi::c_void,
numbytes: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Launches a CUDA function
\deprecated
@@ -12509,7 +12548,7 @@ void **kernelParams;
::cuLaunchGrid,
::cuLaunchGridAsync,
::cuLaunchKernel*/
- fn cuLaunch(f: cuda_types::CUfunction) -> cuda_types::CUresult;
+ fn cuLaunch(f: cuda_types::cuda::CUfunction) -> cuda_types::cuda::CUresult;
/** \brief Launches a CUDA function
\deprecated
@@ -12561,10 +12600,10 @@ void **kernelParams;
::cuLaunchGridAsync,
::cuLaunchKernel*/
fn cuLaunchGrid(
- f: cuda_types::CUfunction,
+ f: cuda_types::cuda::CUfunction,
grid_width: ::core::ffi::c_int,
grid_height: ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Launches a CUDA function
\deprecated
@@ -12624,11 +12663,11 @@ void **kernelParams;
::cuLaunchGrid,
::cuLaunchKernel*/
fn cuLaunchGridAsync(
- f: cuda_types::CUfunction,
+ f: cuda_types::cuda::CUfunction,
grid_width: ::core::ffi::c_int,
grid_height: ::core::ffi::c_int,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Adds a texture-reference to the function's argument list
\deprecated
@@ -12650,10 +12689,10 @@ void **kernelParams;
::CUDA_ERROR_INVALID_VALUE
\notefnerr*/
fn cuParamSetTexRef(
- hfunc: cuda_types::CUfunction,
+ hfunc: cuda_types::cuda::CUfunction,
texunit: ::core::ffi::c_int,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the shared memory configuration for a device function.
\deprecated
@@ -12705,9 +12744,9 @@ void **kernelParams;
::cuLaunchKernel,
::cudaFuncSetSharedMemConfig*/
fn cuFuncSetSharedMemConfig(
- hfunc: cuda_types::CUfunction,
- config: cuda_types::CUsharedconfig,
- ) -> cuda_types::CUresult;
+ hfunc: cuda_types::cuda::CUfunction,
+ config: cuda_types::cuda::CUsharedconfig,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a graph
Creates an empty graph, which is returned via \p phGraph.
@@ -12738,9 +12777,9 @@ void **kernelParams;
::cuGraphGetEdges,
::cuGraphClone*/
fn cuGraphCreate(
- phGraph: *mut cuda_types::CUgraph,
+ phGraph: *mut cuda_types::cuda::CUgraph,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a kernel execution node and adds it to a graph
Creates a new kernel execution node and adds it to \p hGraph with \p numDependencies
@@ -12841,12 +12880,12 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddKernelNode_v2(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ nodeParams: *const cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a kernel node's parameters
Returns the parameters of kernel node \p hNode in \p nodeParams.
@@ -12876,9 +12915,9 @@ void **kernelParams;
::cuGraphAddKernelNode,
::cuGraphKernelNodeSetParams*/
fn cuGraphKernelNodeGetParams_v2(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUDA_KERNEL_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets a kernel node's parameters
Sets the parameters of kernel node \p hNode to \p nodeParams.
@@ -12900,9 +12939,9 @@ void **kernelParams;
::cuGraphAddKernelNode,
::cuGraphKernelNodeGetParams*/
fn cuGraphKernelNodeSetParams_v2(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a memcpy node and adds it to a graph
Creates a new memcpy node and adds it to \p hGraph with \p numDependencies
@@ -12949,13 +12988,13 @@ void **kernelParams;
::cuGraphAddHostNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddMemcpyNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- copyParams: *const cuda_types::CUDA_MEMCPY3D,
- ctx: cuda_types::CUcontext,
- ) -> cuda_types::CUresult;
+ copyParams: *const cuda_types::cuda::CUDA_MEMCPY3D,
+ ctx: cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a memcpy node's parameters
Returns the parameters of memcpy node \p hNode in \p nodeParams.
@@ -12976,9 +13015,9 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphMemcpyNodeSetParams*/
fn cuGraphMemcpyNodeGetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUDA_MEMCPY3D,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUDA_MEMCPY3D,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets a memcpy node's parameters
Sets the parameters of memcpy node \p hNode to \p nodeParams.
@@ -13000,9 +13039,9 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphMemcpyNodeGetParams*/
fn cuGraphMemcpyNodeSetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_MEMCPY3D,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_MEMCPY3D,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a memset node and adds it to a graph
Creates a new memset node and adds it to \p hGraph with \p numDependencies
@@ -13043,13 +13082,13 @@ void **kernelParams;
::cuGraphAddHostNode,
::cuGraphAddMemcpyNode*/
fn cuGraphAddMemsetNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- memsetParams: *const cuda_types::CUDA_MEMSET_NODE_PARAMS,
- ctx: cuda_types::CUcontext,
- ) -> cuda_types::CUresult;
+ memsetParams: *const cuda_types::cuda::CUDA_MEMSET_NODE_PARAMS,
+ ctx: cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a memset node's parameters
Returns the parameters of memset node \p hNode in \p nodeParams.
@@ -13070,9 +13109,9 @@ void **kernelParams;
::cuGraphAddMemsetNode,
::cuGraphMemsetNodeSetParams*/
fn cuGraphMemsetNodeGetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUDA_MEMSET_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUDA_MEMSET_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets a memset node's parameters
Sets the parameters of memset node \p hNode to \p nodeParams.
@@ -13094,9 +13133,9 @@ void **kernelParams;
::cuGraphAddMemsetNode,
::cuGraphMemsetNodeGetParams*/
fn cuGraphMemsetNodeSetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_MEMSET_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_MEMSET_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a host execution node and adds it to a graph
Creates a new CPU execution node and adds it to \p hGraph with \p numDependencies
@@ -13136,12 +13175,12 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddHostNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *const cuda_types::CUDA_HOST_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ nodeParams: *const cuda_types::cuda::CUDA_HOST_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a host node's parameters
Returns the parameters of host node \p hNode in \p nodeParams.
@@ -13162,9 +13201,9 @@ void **kernelParams;
::cuGraphAddHostNode,
::cuGraphHostNodeSetParams*/
fn cuGraphHostNodeGetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUDA_HOST_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUDA_HOST_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets a host node's parameters
Sets the parameters of host node \p hNode to \p nodeParams.
@@ -13186,9 +13225,9 @@ void **kernelParams;
::cuGraphAddHostNode,
::cuGraphHostNodeGetParams*/
fn cuGraphHostNodeSetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_HOST_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_HOST_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a child graph node and adds it to a graph
Creates a new node which executes an embedded graph, and adds it to \p hGraph with
@@ -13227,12 +13266,12 @@ void **kernelParams;
::cuGraphAddMemsetNode,
::cuGraphClone*/
fn cuGraphAddChildGraphNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- childGraph: cuda_types::CUgraph,
- ) -> cuda_types::CUresult;
+ childGraph: cuda_types::cuda::CUgraph,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets a handle to the embedded graph of a child graph node
Gets a handle to the embedded graph in a child graph node. This call
@@ -13257,9 +13296,9 @@ void **kernelParams;
::cuGraphAddChildGraphNode,
::cuGraphNodeFindInClone*/
fn cuGraphChildGraphNodeGetGraph(
- hNode: cuda_types::CUgraphNode,
- phGraph: *mut cuda_types::CUgraph,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ phGraph: *mut cuda_types::cuda::CUgraph,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates an empty node and adds it to a graph
Creates a new node which performs no operation, and adds it to \p hGraph with
@@ -13296,11 +13335,11 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddEmptyNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
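A minimal sketch of starting a graph with these bindings, with result codes assumed to be checked in real code:

unsafe {
    let mut graph = ::core::mem::MaybeUninit::<cuda_types::cuda::CUgraph>::uninit();
    cuGraphCreate(graph.as_mut_ptr(), 0); // 0: no creation flags
    let graph = graph.assume_init();

    // A no-op root node: no dependencies, so the list is null and the count 0.
    let mut node = ::core::mem::MaybeUninit::<cuda_types::cuda::CUgraphNode>::uninit();
    cuGraphAddEmptyNode(node.as_mut_ptr(), graph, ::core::ptr::null(), 0);
}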
/** \brief Creates an event record node and adds it to a graph
Creates a new event record node and adds it to \p hGraph with \p numDependencies
@@ -13340,12 +13379,12 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddEventRecordNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- event: cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ event: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the event associated with an event record node
Returns the event of event record node \p hNode in \p event_out.
@@ -13368,9 +13407,9 @@ void **kernelParams;
::cuEventRecordWithFlags,
::cuStreamWaitEvent*/
fn cuGraphEventRecordNodeGetEvent(
- hNode: cuda_types::CUgraphNode,
- event_out: *mut cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ event_out: *mut cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets an event record node's event
Sets the event of event record node \p hNode to \p event.
@@ -13394,9 +13433,9 @@ void **kernelParams;
::cuEventRecordWithFlags,
::cuStreamWaitEvent*/
fn cuGraphEventRecordNodeSetEvent(
- hNode: cuda_types::CUgraphNode,
- event: cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ event: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates an event wait node and adds it to a graph
Creates a new event wait node and adds it to \p hGraph with \p numDependencies
@@ -13437,12 +13476,12 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddEventWaitNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- event: cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ event: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the event associated with an event wait node
Returns the event of event wait node \p hNode in \p event_out.
@@ -13465,9 +13504,9 @@ void **kernelParams;
::cuEventRecordWithFlags,
::cuStreamWaitEvent*/
fn cuGraphEventWaitNodeGetEvent(
- hNode: cuda_types::CUgraphNode,
- event_out: *mut cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ event_out: *mut cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets an event wait node's event
Sets the event of event wait node \p hNode to \p event.
@@ -13491,9 +13530,9 @@ void **kernelParams;
::cuEventRecordWithFlags,
::cuStreamWaitEvent*/
fn cuGraphEventWaitNodeSetEvent(
- hNode: cuda_types::CUgraphNode,
- event: cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ event: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates an external semaphore signal node and adds it to a graph
Creates a new external semaphore signal node and adds it to \p hGraph with \p
@@ -13540,12 +13579,12 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddExternalSemaphoresSignalNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *const cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ nodeParams: *const cuda_types::cuda::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns an external semaphore signal node's parameters
Returns the parameters of an external semaphore signal node \p hNode in \p params_out.
@@ -13574,9 +13613,9 @@ void **kernelParams;
::cuSignalExternalSemaphoresAsync,
::cuWaitExternalSemaphoresAsync*/
fn cuGraphExternalSemaphoresSignalNodeGetParams(
- hNode: cuda_types::CUgraphNode,
- params_out: *mut cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ params_out: *mut cuda_types::cuda::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets an external semaphore signal node's parameters
Sets the parameters of an external semaphore signal node \p hNode to \p nodeParams.
@@ -13600,9 +13639,9 @@ void **kernelParams;
::cuSignalExternalSemaphoresAsync,
::cuWaitExternalSemaphoresAsync*/
fn cuGraphExternalSemaphoresSignalNodeSetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates an external semaphore wait node and adds it to a graph
Creates a new external semaphore wait node and adds it to \p hGraph with \p numDependencies
@@ -13649,12 +13688,12 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddExternalSemaphoresWaitNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *const cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ nodeParams: *const cuda_types::cuda::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns an external semaphore wait node's parameters
Returns the parameters of an external semaphore wait node \p hNode in \p params_out.
@@ -13683,9 +13722,9 @@ void **kernelParams;
::cuSignalExternalSemaphoresAsync,
::cuWaitExternalSemaphoresAsync*/
fn cuGraphExternalSemaphoresWaitNodeGetParams(
- hNode: cuda_types::CUgraphNode,
- params_out: *mut cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ params_out: *mut cuda_types::cuda::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets an external semaphore wait node's parameters
Sets the parameters of an external semaphore wait node \p hNode to \p nodeParams.
@@ -13709,9 +13748,9 @@ void **kernelParams;
::cuSignalExternalSemaphoresAsync,
::cuWaitExternalSemaphoresAsync*/
fn cuGraphExternalSemaphoresWaitNodeSetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a batch memory operation node and adds it to a graph
Creates a new batch memory operation node and adds it to \p hGraph with \p
@@ -13765,12 +13804,12 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddBatchMemOpNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *const cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ nodeParams: *const cuda_types::cuda::CUDA_BATCH_MEM_OP_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a batch mem op node's parameters
Returns the parameters of batch mem op node \p hNode in \p nodeParams_out.
@@ -13796,9 +13835,9 @@ void **kernelParams;
::cuGraphAddBatchMemOpNode,
::cuGraphBatchMemOpNodeSetParams*/
fn cuGraphBatchMemOpNodeGetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams_out: *mut cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams_out: *mut cuda_types::cuda::CUDA_BATCH_MEM_OP_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets a batch mem op node's parameters
Sets the parameters of batch mem op node \p hNode to \p nodeParams.
@@ -13823,9 +13862,9 @@ void **kernelParams;
::cuGraphAddBatchMemOpNode,
::cuGraphBatchMemOpNodeGetParams*/
fn cuGraphBatchMemOpNodeSetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_BATCH_MEM_OP_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the parameters for a batch mem op node in the given graphExec
Sets the parameters of a batch mem op node in an executable graph \p hGraphExec.
@@ -13870,10 +13909,10 @@ void **kernelParams;
::cuGraphBatchMemOpNodeSetParams,
::cuGraphInstantiate*/
fn cuGraphExecBatchMemOpNodeSetParams(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_BATCH_MEM_OP_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates an allocation node and adds it to a graph
Creates a new allocation node and adds it to \p hGraph with \p numDependencies
@@ -13946,12 +13985,12 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddMemAllocNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *mut cuda_types::CUDA_MEM_ALLOC_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ nodeParams: *mut cuda_types::cuda::CUDA_MEM_ALLOC_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a memory alloc node's parameters
Returns the parameters of a memory alloc node \p hNode in \p params_out.
@@ -13974,9 +14013,9 @@ void **kernelParams;
::cuGraphAddMemAllocNode,
::cuGraphMemFreeNodeGetParams*/
fn cuGraphMemAllocNodeGetParams(
- hNode: cuda_types::CUgraphNode,
- params_out: *mut cuda_types::CUDA_MEM_ALLOC_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ params_out: *mut cuda_types::cuda::CUDA_MEM_ALLOC_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a memory free node and adds it to a graph
Creates a new memory free node and adds it to \p hGraph with \p numDependencies
@@ -14032,12 +14071,12 @@ void **kernelParams;
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
fn cuGraphAddMemFreeNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- dptr: cuda_types::CUdeviceptr,
- ) -> cuda_types::CUresult;
+ dptr: cuda_types::cuda::CUdeviceptr,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a memory free node's parameters
Returns the address of a memory free node \p hNode in \p dptr_out.
@@ -14057,9 +14096,9 @@ void **kernelParams;
::cuGraphAddMemFreeNode,
::cuGraphMemAllocNodeGetParams*/
fn cuGraphMemFreeNodeGetParams(
- hNode: cuda_types::CUgraphNode,
- dptr_out: *mut cuda_types::CUdeviceptr,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ dptr_out: *mut cuda_types::cuda::CUdeviceptr,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Free unused memory that was cached on the specified device for use with graphs back to the OS.
Blocks which are not in use by a graph that is either currently executing or scheduled to execute are
@@ -14076,7 +14115,9 @@ void **kernelParams;
::cuGraphAddMemFreeNode,
::cuDeviceSetGraphMemAttribute,
::cuDeviceGetGraphMemAttribute*/
- fn cuDeviceGraphMemTrim(device: cuda_types::CUdevice) -> cuda_types::CUresult;
+ fn cuDeviceGraphMemTrim(
+ device: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Query asynchronous allocation attributes related to graphs
Valid attributes are:
@@ -14102,10 +14143,10 @@ void **kernelParams;
::cuGraphAddMemAllocNode,
::cuGraphAddMemFreeNode*/
fn cuDeviceGetGraphMemAttribute(
- device: cuda_types::CUdevice,
- attr: cuda_types::CUgraphMem_attribute,
+ device: cuda_types::cuda::CUdevice,
+ attr: cuda_types::cuda::CUgraphMem_attribute,
value: *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Set asynchronous allocation attributes related to graphs
Valid attributes are:
@@ -14128,10 +14169,10 @@ void **kernelParams;
::cuGraphAddMemAllocNode,
::cuGraphAddMemFreeNode*/
fn cuDeviceSetGraphMemAttribute(
- device: cuda_types::CUdevice,
- attr: cuda_types::CUgraphMem_attribute,
+ device: cuda_types::cuda::CUdevice,
+ attr: cuda_types::cuda::CUgraphMem_attribute,
value: *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Clones a graph
This function creates a copy of \p originalGraph and returns it in \p phGraphClone.
@@ -14154,9 +14195,9 @@ void **kernelParams;
::cuGraphCreate,
::cuGraphNodeFindInClone*/
fn cuGraphClone(
- phGraphClone: *mut cuda_types::CUgraph,
- originalGraph: cuda_types::CUgraph,
- ) -> cuda_types::CUresult;
+ phGraphClone: *mut cuda_types::cuda::CUgraph,
+ originalGraph: cuda_types::cuda::CUgraph,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Finds a cloned version of a node
This function returns the node in \p hClonedGraph corresponding to \p hOriginalNode
@@ -14180,10 +14221,10 @@ void **kernelParams;
\sa
::cuGraphClone*/
fn cuGraphNodeFindInClone(
- phNode: *mut cuda_types::CUgraphNode,
- hOriginalNode: cuda_types::CUgraphNode,
- hClonedGraph: cuda_types::CUgraph,
- ) -> cuda_types::CUresult;
+ phNode: *mut cuda_types::cuda::CUgraphNode,
+ hOriginalNode: cuda_types::cuda::CUgraphNode,
+ hClonedGraph: cuda_types::cuda::CUgraph,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a node's type
Returns the node type of \p hNode in \p type.
@@ -14212,9 +14253,9 @@ void **kernelParams;
::cuGraphMemsetNodeGetParams,
::cuGraphMemsetNodeSetParams*/
fn cuGraphNodeGetType(
- hNode: cuda_types::CUgraphNode,
- type_: *mut cuda_types::CUgraphNodeType,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ type_: *mut cuda_types::cuda::CUgraphNodeType,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a graph's nodes
Returns a list of \p hGraph's nodes. \p nodes may be NULL, in which case this
@@ -14243,10 +14284,10 @@ void **kernelParams;
::cuGraphNodeGetDependencies,
::cuGraphNodeGetDependentNodes*/
fn cuGraphGetNodes(
- hGraph: cuda_types::CUgraph,
- nodes: *mut cuda_types::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ nodes: *mut cuda_types::cuda::CUgraphNode,
numNodes: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
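A sketch of the two-call pattern the comment describes, first querying the count with a null buffer and then fetching the handles; `graph` is an assumed `CUgraph`:

unsafe {
    let mut count = 0usize;
    cuGraphGetNodes(graph, ::core::ptr::null_mut(), &mut count);

    let mut nodes: Vec<cuda_types::cuda::CUgraphNode> = Vec::with_capacity(count);
    cuGraphGetNodes(graph, nodes.as_mut_ptr(), &mut count);
    nodes.set_len(count); // the driver wrote `count` handles into the buffer
}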
/** \brief Returns a graph's root nodes
Returns a list of \p hGraph's root nodes. \p rootNodes may be NULL, in which case this
@@ -14275,10 +14316,10 @@ void **kernelParams;
::cuGraphNodeGetDependencies,
::cuGraphNodeGetDependentNodes*/
fn cuGraphGetRootNodes(
- hGraph: cuda_types::CUgraph,
- rootNodes: *mut cuda_types::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ rootNodes: *mut cuda_types::cuda::CUgraphNode,
numRootNodes: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a graph's dependency edges
Returns a list of \p hGraph's dependency edges. Edges are returned via corresponding
@@ -14310,11 +14351,11 @@ void **kernelParams;
::cuGraphNodeGetDependencies,
::cuGraphNodeGetDependentNodes*/
fn cuGraphGetEdges(
- hGraph: cuda_types::CUgraph,
- from: *mut cuda_types::CUgraphNode,
- to: *mut cuda_types::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ from: *mut cuda_types::cuda::CUgraphNode,
+ to: *mut cuda_types::cuda::CUgraphNode,
numEdges: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a graph's dependency edges (12.3+)
Returns a list of \p hGraph's dependency edges. Edges are returned via corresponding
@@ -14352,12 +14393,12 @@ void **kernelParams;
::cuGraphNodeGetDependencies,
::cuGraphNodeGetDependentNodes*/
fn cuGraphGetEdges_v2(
- hGraph: cuda_types::CUgraph,
- from: *mut cuda_types::CUgraphNode,
- to: *mut cuda_types::CUgraphNode,
- edgeData: *mut cuda_types::CUgraphEdgeData,
+ hGraph: cuda_types::cuda::CUgraph,
+ from: *mut cuda_types::cuda::CUgraphNode,
+ to: *mut cuda_types::cuda::CUgraphNode,
+ edgeData: *mut cuda_types::cuda::CUgraphEdgeData,
numEdges: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a node's dependencies
Returns a list of \p node's dependencies. \p dependencies may be NULL, in which case this
@@ -14386,10 +14427,10 @@ void **kernelParams;
::cuGraphAddDependencies,
::cuGraphRemoveDependencies*/
fn cuGraphNodeGetDependencies(
- hNode: cuda_types::CUgraphNode,
- dependencies: *mut cuda_types::CUgraphNode,
+ hNode: cuda_types::cuda::CUgraphNode,
+ dependencies: *mut cuda_types::cuda::CUgraphNode,
numDependencies: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a node's dependencies (12.3+)
Returns a list of \p node's dependencies. \p dependencies may be NULL, in which case this
@@ -14424,11 +14465,11 @@ void **kernelParams;
::cuGraphAddDependencies,
::cuGraphRemoveDependencies*/
fn cuGraphNodeGetDependencies_v2(
- hNode: cuda_types::CUgraphNode,
- dependencies: *mut cuda_types::CUgraphNode,
- edgeData: *mut cuda_types::CUgraphEdgeData,
+ hNode: cuda_types::cuda::CUgraphNode,
+ dependencies: *mut cuda_types::cuda::CUgraphNode,
+ edgeData: *mut cuda_types::cuda::CUgraphEdgeData,
numDependencies: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a node's dependent nodes
Returns a list of \p node's dependent nodes. \p dependentNodes may be NULL, in which
@@ -14458,10 +14499,10 @@ void **kernelParams;
::cuGraphAddDependencies,
::cuGraphRemoveDependencies*/
fn cuGraphNodeGetDependentNodes(
- hNode: cuda_types::CUgraphNode,
- dependentNodes: *mut cuda_types::CUgraphNode,
+ hNode: cuda_types::cuda::CUgraphNode,
+ dependentNodes: *mut cuda_types::cuda::CUgraphNode,
numDependentNodes: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a node's dependent nodes (12.3+)
Returns a list of \p node's dependent nodes. \p dependentNodes may be NULL, in which
@@ -14497,11 +14538,11 @@ void **kernelParams;
::cuGraphAddDependencies,
::cuGraphRemoveDependencies*/
fn cuGraphNodeGetDependentNodes_v2(
- hNode: cuda_types::CUgraphNode,
- dependentNodes: *mut cuda_types::CUgraphNode,
- edgeData: *mut cuda_types::CUgraphEdgeData,
+ hNode: cuda_types::cuda::CUgraphNode,
+ dependentNodes: *mut cuda_types::cuda::CUgraphNode,
+ edgeData: *mut cuda_types::cuda::CUgraphEdgeData,
numDependentNodes: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Adds dependency edges to a graph
The number of dependencies to be added is defined by \p numDependencies
@@ -14528,11 +14569,11 @@ void **kernelParams;
::cuGraphNodeGetDependencies,
::cuGraphNodeGetDependentNodes*/
fn cuGraphAddDependencies(
- hGraph: cuda_types::CUgraph,
- from: *const cuda_types::CUgraphNode,
- to: *const cuda_types::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ from: *const cuda_types::cuda::CUgraphNode,
+ to: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Adds dependency edges to a graph (12.3+)
The number of dependencies to be added is defined by \p numDependencies
@@ -14560,12 +14601,12 @@ void **kernelParams;
::cuGraphNodeGetDependencies,
::cuGraphNodeGetDependentNodes*/
fn cuGraphAddDependencies_v2(
- hGraph: cuda_types::CUgraph,
- from: *const cuda_types::CUgraphNode,
- to: *const cuda_types::CUgraphNode,
- edgeData: *const cuda_types::CUgraphEdgeData,
+ hGraph: cuda_types::cuda::CUgraph,
+ from: *const cuda_types::cuda::CUgraphNode,
+ to: *const cuda_types::cuda::CUgraphNode,
+ edgeData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Removes dependency edges from a graph
The number of \p dependencies to be removed is defined by \p numDependencies.
@@ -14595,11 +14636,11 @@ void **kernelParams;
::cuGraphNodeGetDependencies,
::cuGraphNodeGetDependentNodes*/
fn cuGraphRemoveDependencies(
- hGraph: cuda_types::CUgraph,
- from: *const cuda_types::CUgraphNode,
- to: *const cuda_types::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ from: *const cuda_types::cuda::CUgraphNode,
+ to: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Removes dependency edges from a graph (12.3+)
The number of \p dependencies to be removed is defined by \p numDependencies.
@@ -14633,12 +14674,12 @@ void **kernelParams;
::cuGraphNodeGetDependencies,
::cuGraphNodeGetDependentNodes*/
fn cuGraphRemoveDependencies_v2(
- hGraph: cuda_types::CUgraph,
- from: *const cuda_types::CUgraphNode,
- to: *const cuda_types::CUgraphNode,
- edgeData: *const cuda_types::CUgraphEdgeData,
+ hGraph: cuda_types::cuda::CUgraph,
+ from: *const cuda_types::cuda::CUgraphNode,
+ to: *const cuda_types::cuda::CUgraphNode,
+ edgeData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Remove a node from the graph
Removes \p hNode from its graph. This operation also severs any dependencies of other nodes
@@ -14662,7 +14703,9 @@ void **kernelParams;
::cuGraphAddHostNode,
::cuGraphAddMemcpyNode,
::cuGraphAddMemsetNode*/
- fn cuGraphDestroyNode(hNode: cuda_types::CUgraphNode) -> cuda_types::CUresult;
+ fn cuGraphDestroyNode(
+ hNode: cuda_types::cuda::CUgraphNode,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates an executable graph from a graph
Instantiates \p hGraph as an executable graph. The graph is validated for any
@@ -14732,10 +14775,10 @@ void **kernelParams;
::cuGraphLaunch,
::cuGraphExecDestroy*/
fn cuGraphInstantiateWithFlags(
- phGraphExec: *mut cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
+ phGraphExec: *mut cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
flags: ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates an executable graph from a graph
Instantiates \p hGraph as an executable graph according to the \p instantiateParams structure.
@@ -14836,10 +14879,10 @@ CUgraphInstantiateResult result_out;
::cuGraphInstantiate,
::cuGraphExecDestroy*/
fn cuGraphInstantiateWithParams_ptsz(
- phGraphExec: *mut cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
- instantiateParams: *mut cuda_types::CUDA_GRAPH_INSTANTIATE_PARAMS,
- ) -> cuda_types::CUresult;
+ phGraphExec: *mut cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
+ instantiateParams: *mut cuda_types::cuda::CUDA_GRAPH_INSTANTIATE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Query the instantiation flags of an executable graph
Returns the flags that were passed to instantiation for the given executable graph.
@@ -14859,9 +14902,9 @@ CUgraphInstantiateResult result_out;
::cuGraphInstantiate,
::cuGraphInstantiateWithParams*/
fn cuGraphExecGetFlags(
- hGraphExec: cuda_types::CUgraphExec,
- flags: *mut cuda_types::cuuint64_t,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ flags: *mut cuda_types::cuda::cuuint64_t,
+ ) -> cuda_types::cuda::CUresult;
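A sketch tying instantiation to the flag query above, assuming `graph` is a fully built `CUgraph` and that `cuuint64_t` is the usual `u64` alias:

unsafe {
    let mut exec = ::core::mem::MaybeUninit::<cuda_types::cuda::CUgraphExec>::uninit();
    cuGraphInstantiateWithFlags(exec.as_mut_ptr(), graph, 0); // no instantiation flags
    let exec = exec.assume_init();

    let mut flags: cuda_types::cuda::cuuint64_t = 0;
    cuGraphExecGetFlags(exec, &mut flags); // reads back the flags passed above
}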
/** \brief Sets the parameters for a kernel node in the given graphExec
Sets the parameters of a kernel node in an executable graph \p hGraphExec.
@@ -14916,10 +14959,10 @@ CUgraphInstantiateResult result_out;
::cuGraphExecUpdate,
::cuGraphInstantiate*/
fn cuGraphExecKernelNodeSetParams_v2(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the parameters for a memcpy node in the given graphExec.
Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had
@@ -14964,11 +15007,11 @@ CUgraphInstantiateResult result_out;
::cuGraphExecUpdate,
::cuGraphInstantiate*/
fn cuGraphExecMemcpyNodeSetParams(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- copyParams: *const cuda_types::CUDA_MEMCPY3D,
- ctx: cuda_types::CUcontext,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ copyParams: *const cuda_types::cuda::CUDA_MEMCPY3D,
+ ctx: cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the parameters for a memset node in the given graphExec.
Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had
@@ -15013,11 +15056,11 @@ CUgraphInstantiateResult result_out;
::cuGraphExecUpdate,
::cuGraphInstantiate*/
fn cuGraphExecMemsetNodeSetParams(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- memsetParams: *const cuda_types::CUDA_MEMSET_NODE_PARAMS,
- ctx: cuda_types::CUcontext,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ memsetParams: *const cuda_types::cuda::CUDA_MEMSET_NODE_PARAMS,
+ ctx: cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the parameters for a host node in the given graphExec.
Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had
@@ -15053,10 +15096,10 @@ CUgraphInstantiateResult result_out;
::cuGraphExecUpdate,
::cuGraphInstantiate*/
fn cuGraphExecHostNodeSetParams(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_HOST_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_HOST_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Updates node parameters in the child graph node in the given graphExec.
Updates the work represented by \p hNode in \p hGraphExec as though the nodes contained
@@ -15098,10 +15141,10 @@ CUgraphInstantiateResult result_out;
::cuGraphExecUpdate,
::cuGraphInstantiate*/
fn cuGraphExecChildGraphNodeSetParams(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- childGraph: cuda_types::CUgraph,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ childGraph: cuda_types::cuda::CUgraph,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the event for an event record node in the given graphExec
Sets the event of an event record node in an executable graph \p hGraphExec.
@@ -15140,10 +15183,10 @@ CUgraphInstantiateResult result_out;
::cuGraphExecUpdate,
::cuGraphInstantiate*/
fn cuGraphExecEventRecordNodeSetEvent(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- event: cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ event: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the event for an event wait node in the given graphExec
Sets the event of an event wait node in an executable graph \p hGraphExec.
@@ -15182,10 +15225,10 @@ CUgraphInstantiateResult result_out;
::cuGraphExecUpdate,
::cuGraphInstantiate*/
fn cuGraphExecEventWaitNodeSetEvent(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- event: cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ event: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the parameters for an external semaphore signal node in the given graphExec
Sets the parameters of an external semaphore signal node in an executable graph \p hGraphExec.
@@ -15227,10 +15270,10 @@ CUgraphInstantiateResult result_out;
::cuGraphExecUpdate,
::cuGraphInstantiate*/
fn cuGraphExecExternalSemaphoresSignalNodeSetParams(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the parameters for an external semaphore wait node in the given graphExec
Sets the parameters of an external semaphore wait node in an executable graph \p hGraphExec.
@@ -15272,10 +15315,10 @@ CUgraphInstantiateResult result_out;
::cuGraphExecUpdate,
::cuGraphInstantiate*/
fn cuGraphExecExternalSemaphoresWaitNodeSetParams(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_EXT_SEM_WAIT_NODE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Enables or disables the specified node in the given graphExec
Sets \p hNode to be either enabled or disabled. Disabled nodes are functionally equivalent
@@ -15314,10 +15357,10 @@ CUgraphInstantiateResult result_out;
::cuGraphInstantiate
::cuGraphLaunch*/
fn cuGraphNodeSetEnabled(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
isEnabled: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Query whether a node in the given graphExec is enabled
Sets isEnabled to 1 if \p hNode is enabled, or 0 if \p hNode is disabled.
@@ -15346,10 +15389,10 @@ CUgraphInstantiateResult result_out;
::cuGraphInstantiate
::cuGraphLaunch*/
fn cuGraphNodeGetEnabled(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
isEnabled: *mut ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Uploads an executable graph in a stream
Uploads \p hGraphExec to the device in \p hStream without executing it. Uploads of
@@ -15373,9 +15416,9 @@ CUgraphInstantiateResult result_out;
::cuGraphLaunch,
::cuGraphExecDestroy*/
fn cuGraphUpload_ptsz(
- hGraphExec: cuda_types::CUgraphExec,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Launches an executable graph in a stream
Executes \p hGraphExec in \p hStream. Only one instance of \p hGraphExec may be executing
@@ -15403,9 +15446,9 @@ CUgraphInstantiateResult result_out;
::cuGraphUpload,
::cuGraphExecDestroy*/
fn cuGraphLaunch_ptsz(
- hGraphExec: cuda_types::CUgraphExec,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
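// Illustrative sketch (not part of this patch): the upload/launch pair as a
// downstream caller would now spell it, with every handle taken from
// `cuda_types::cuda` instead of the old `cuda_types` root. `driver` is a
// hypothetical re-export of the extern block above.
use cuda_types::cuda::{CUgraphExec, CUresult, CUstream};

unsafe fn upload_then_launch(exec: CUgraphExec, stream: CUstream) -> CUresult {
    // Uploading ahead of time is optional; a launch uploads implicitly if needed.
    let _ = driver::cuGraphUpload_ptsz(exec, stream);
    driver::cuGraphLaunch_ptsz(exec, stream)
}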
/** \brief Destroys an executable graph
Destroys the executable graph specified by \p hGraphExec, as well
@@ -15427,7 +15470,9 @@ CUgraphInstantiateResult result_out;
::cuGraphInstantiate,
::cuGraphUpload,
::cuGraphLaunch*/
- fn cuGraphExecDestroy(hGraphExec: cuda_types::CUgraphExec) -> cuda_types::CUresult;
+ fn cuGraphExecDestroy(
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys a graph
Destroys the graph specified by \p hGraph, as well as all of its nodes.
@@ -15444,7 +15489,7 @@ CUgraphInstantiateResult result_out;
\sa
::cuGraphCreate*/
- fn cuGraphDestroy(hGraph: cuda_types::CUgraph) -> cuda_types::CUresult;
+ fn cuGraphDestroy(hGraph: cuda_types::cuda::CUgraph) -> cuda_types::cuda::CUresult;
/** \brief Check whether an executable graph can be updated with a graph and perform the update if possible
Updates the node parameters in the instantiated graph specified by \p hGraphExec with the
@@ -15532,10 +15577,10 @@ CUgraphInstantiateResult result_out;
\sa
::cuGraphInstantiate*/
fn cuGraphExecUpdate_v2(
- hGraphExec: cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
- resultInfo: *mut cuda_types::CUgraphExecUpdateResultInfo,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
+ resultInfo: *mut cuda_types::cuda::CUgraphExecUpdateResultInfo,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Copies attributes from source node to destination node.
Copies attributes from source node \p src to destination node \p dst.
@@ -15553,9 +15598,9 @@ CUgraphInstantiateResult result_out;
\sa
::CUaccessPolicyWindow*/
fn cuGraphKernelNodeCopyAttributes(
- dst: cuda_types::CUgraphNode,
- src: cuda_types::CUgraphNode,
- ) -> cuda_types::CUresult;
+ dst: cuda_types::cuda::CUgraphNode,
+ src: cuda_types::cuda::CUgraphNode,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Queries node attribute.
Queries attribute \p attr from node \p hNode and stores it in corresponding
@@ -15574,10 +15619,10 @@ CUgraphInstantiateResult result_out;
\sa
::CUaccessPolicyWindow*/
fn cuGraphKernelNodeGetAttribute(
- hNode: cuda_types::CUgraphNode,
- attr: cuda_types::CUkernelNodeAttrID,
- value_out: *mut cuda_types::CUkernelNodeAttrValue,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ attr: cuda_types::cuda::CUkernelNodeAttrID,
+ value_out: *mut cuda_types::cuda::CUkernelNodeAttrValue,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets node attribute.
Sets attribute \p attr on node \p hNode from corresponding attribute of
@@ -15596,10 +15641,10 @@ CUgraphInstantiateResult result_out;
\sa
::CUaccessPolicyWindow*/
fn cuGraphKernelNodeSetAttribute(
- hNode: cuda_types::CUgraphNode,
- attr: cuda_types::CUkernelNodeAttrID,
- value: *const cuda_types::CUkernelNodeAttrValue,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ attr: cuda_types::cuda::CUkernelNodeAttrID,
+ value: *const cuda_types::cuda::CUkernelNodeAttrValue,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Write a DOT file describing graph structure
Using the provided \p hGraph, write to \p path a DOT formatted description of the graph.
@@ -15616,10 +15661,10 @@ CUgraphInstantiateResult result_out;
::CUDA_ERROR_INVALID_VALUE,
::CUDA_ERROR_OPERATING_SYSTEM*/
fn cuGraphDebugDotPrint(
- hGraph: cuda_types::CUgraph,
+ hGraph: cuda_types::cuda::CUgraph,
path: *const ::core::ffi::c_char,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
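// Illustrative sketch (not part of this patch): dumping a graph to a DOT file.
// The `driver` module and `dump_dot` helper are hypothetical; flag value 0
// requests the default level of detail.
use cuda_types::cuda::{CUgraph, CUresult};
use std::ffi::CString;

unsafe fn dump_dot(graph: CUgraph, path: &str) -> CUresult {
    // NUL bytes in `path` would make CString::new fail; the sketch just unwraps.
    let c_path = CString::new(path).unwrap();
    driver::cuGraphDebugDotPrint(graph, c_path.as_ptr(), 0)
}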
/** \brief Create a user object
Create a user object with the specified destructor callback and initial reference count. The
@@ -15653,12 +15698,12 @@ CUgraphInstantiateResult result_out;
::cuGraphReleaseUserObject,
::cuGraphCreate*/
fn cuUserObjectCreate(
- object_out: *mut cuda_types::CUuserObject,
+ object_out: *mut cuda_types::cuda::CUuserObject,
ptr: *mut ::core::ffi::c_void,
- destroy: cuda_types::CUhostFn,
+ destroy: cuda_types::cuda::CUhostFn,
initialRefcount: ::core::ffi::c_uint,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Retain a reference to a user object
Retains new references to a user object. The new references are owned by the caller.
@@ -15680,9 +15725,9 @@ CUgraphInstantiateResult result_out;
::cuGraphReleaseUserObject,
::cuGraphCreate*/
fn cuUserObjectRetain(
- object: cuda_types::CUuserObject,
+ object: cuda_types::cuda::CUuserObject,
count: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Release a reference to a user object
Releases user object references owned by the caller. The object's destructor is invoked if
@@ -15708,9 +15753,9 @@ CUgraphInstantiateResult result_out;
::cuGraphReleaseUserObject,
::cuGraphCreate*/
fn cuUserObjectRelease(
- object: cuda_types::CUuserObject,
+ object: cuda_types::cuda::CUuserObject,
count: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Retain a reference to a user object from a graph
Creates or moves user object references that will be owned by a CUDA graph.
@@ -15736,11 +15781,11 @@ CUgraphInstantiateResult result_out;
::cuGraphReleaseUserObject,
::cuGraphCreate*/
fn cuGraphRetainUserObject(
- graph: cuda_types::CUgraph,
- object: cuda_types::CUuserObject,
+ graph: cuda_types::cuda::CUgraph,
+ object: cuda_types::cuda::CUuserObject,
count: ::core::ffi::c_uint,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Release a user object reference from a graph
Releases user object references owned by a graph.
@@ -15763,10 +15808,10 @@ CUgraphInstantiateResult result_out;
::cuGraphRetainUserObject,
::cuGraphCreate*/
fn cuGraphReleaseUserObject(
- graph: cuda_types::CUgraph,
- object: cuda_types::CUuserObject,
+ graph: cuda_types::cuda::CUgraph,
+ object: cuda_types::cuda::CUuserObject,
count: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Adds a node of arbitrary type to a graph
Creates a new node in \p hGraph described by \p nodeParams with \p numDependencies
@@ -15804,12 +15849,12 @@ CUgraphInstantiateResult result_out;
::cuGraphNodeSetParams,
::cuGraphExecNodeSetParams*/
fn cuGraphAddNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *mut cuda_types::CUgraphNodeParams,
- ) -> cuda_types::CUresult;
+ nodeParams: *mut cuda_types::cuda::CUgraphNodeParams,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Adds a node of arbitrary type to a graph (12.3+)
Creates a new node in \p hGraph described by \p nodeParams with \p numDependencies
@@ -15849,13 +15894,13 @@ CUgraphInstantiateResult result_out;
::cuGraphNodeSetParams,
::cuGraphExecNodeSetParams*/
fn cuGraphAddNode_v2(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
- dependencyData: *const cuda_types::CUgraphEdgeData,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
+ dependencyData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
- nodeParams: *mut cuda_types::CUgraphNodeParams,
- ) -> cuda_types::CUresult;
+ nodeParams: *mut cuda_types::cuda::CUgraphNodeParams,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Updates a graph node's parameters
Sets the parameters of graph node \p hNode to \p nodeParams. The node type specified by
@@ -15879,9 +15924,9 @@ CUgraphInstantiateResult result_out;
::cuGraphAddNode,
::cuGraphExecNodeSetParams*/
fn cuGraphNodeSetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUgraphNodeParams,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUgraphNodeParams,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Updates a graph node's parameters in an instantiated graph
Sets the parameters of a node in an executable graph \p hGraphExec. The node is identified
@@ -15926,10 +15971,10 @@ CUgraphInstantiateResult result_out;
::cuGraphExecUpdate,
::cuGraphInstantiate*/
fn cuGraphExecNodeSetParams(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUgraphNodeParams,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUgraphNodeParams,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a conditional handle
Creates a conditional handle associated with \p hGraph.
@@ -15956,12 +16001,12 @@ CUgraphInstantiateResult result_out;
\sa
::cuGraphAddNode*/
fn cuGraphConditionalHandleCreate(
- pHandle_out: *mut cuda_types::CUgraphConditionalHandle,
- hGraph: cuda_types::CUgraph,
- ctx: cuda_types::CUcontext,
+ pHandle_out: *mut cuda_types::cuda::CUgraphConditionalHandle,
+ hGraph: cuda_types::cuda::CUgraph,
+ ctx: cuda_types::cuda::CUcontext,
defaultLaunchValue: ::core::ffi::c_uint,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns occupancy of a function
Returns in \p *numBlocks the number of the maximum active blocks per
@@ -15985,10 +16030,10 @@ CUgraphInstantiateResult result_out;
::cudaOccupancyMaxActiveBlocksPerMultiprocessor*/
fn cuOccupancyMaxActiveBlocksPerMultiprocessor(
numBlocks: *mut ::core::ffi::c_int,
- func: cuda_types::CUfunction,
+ func: cuda_types::cuda::CUfunction,
blockSize: ::core::ffi::c_int,
dynamicSMemSize: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns occupancy of a function
Returns in \p *numBlocks the number of the maximum active blocks per
@@ -16029,11 +16074,11 @@ CUgraphInstantiateResult result_out;
::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags*/
fn cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(
numBlocks: *mut ::core::ffi::c_int,
- func: cuda_types::CUfunction,
+ func: cuda_types::cuda::CUfunction,
blockSize: ::core::ffi::c_int,
dynamicSMemSize: usize,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Suggest a launch configuration with reasonable occupancy
Returns in \p *blockSize a reasonable block size that can achieve
@@ -16085,11 +16130,11 @@ CUgraphInstantiateResult result_out;
fn cuOccupancyMaxPotentialBlockSize(
minGridSize: *mut ::core::ffi::c_int,
blockSize: *mut ::core::ffi::c_int,
- func: cuda_types::CUfunction,
- blockSizeToDynamicSMemSize: cuda_types::CUoccupancyB2DSize,
+ func: cuda_types::cuda::CUfunction,
+ blockSizeToDynamicSMemSize: cuda_types::cuda::CUoccupancyB2DSize,
dynamicSMemSize: usize,
blockSizeLimit: ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
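// Illustrative sketch (not part of this patch): asking the occupancy API for a
// launch configuration. It assumes the bindgen-style alias in which
// CUoccupancyB2DSize is an Option-wrapped function pointer, so `None` means
// "no per-block-size shared memory callback"; `driver` is a hypothetical
// re-export of the extern block above.
use cuda_types::cuda::{CUfunction, CUresult};

unsafe fn suggested_launch(func: CUfunction) -> (i32, i32, CUresult) {
    let mut min_grid_size: i32 = 0;
    let mut block_size: i32 = 0;
    let result = driver::cuOccupancyMaxPotentialBlockSize(
        &mut min_grid_size,
        &mut block_size,
        func,
        None, // blockSizeToDynamicSMemSize
        0,    // dynamicSMemSize: no dynamic shared memory
        0,    // blockSizeLimit: 0 means no limit
    );
    (min_grid_size, block_size, result)
}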
/** \brief Suggest a launch configuration with reasonable occupancy
An extended version of ::cuOccupancyMaxPotentialBlockSize. In
@@ -16135,12 +16180,12 @@ CUgraphInstantiateResult result_out;
fn cuOccupancyMaxPotentialBlockSizeWithFlags(
minGridSize: *mut ::core::ffi::c_int,
blockSize: *mut ::core::ffi::c_int,
- func: cuda_types::CUfunction,
- blockSizeToDynamicSMemSize: cuda_types::CUoccupancyB2DSize,
+ func: cuda_types::cuda::CUfunction,
+ blockSizeToDynamicSMemSize: cuda_types::cuda::CUoccupancyB2DSize,
dynamicSMemSize: usize,
blockSizeLimit: ::core::ffi::c_int,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns dynamic shared memory available per block when launching \p numBlocks blocks on SM
Returns in \p *dynamicSmemSize the maximum size of dynamic shared memory to allow \p numBlocks blocks per SM.
@@ -16160,10 +16205,10 @@ CUgraphInstantiateResult result_out;
\notefnerr*/
fn cuOccupancyAvailableDynamicSMemPerBlock(
dynamicSmemSize: *mut usize,
- func: cuda_types::CUfunction,
+ func: cuda_types::cuda::CUfunction,
numBlocks: ::core::ffi::c_int,
blockSize: ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Given the kernel function (\p func) and launch configuration
(\p config), return the maximum cluster size in \p *clusterSize.
@@ -16197,9 +16242,9 @@ CUgraphInstantiateResult result_out;
::cuFuncGetAttribute*/
fn cuOccupancyMaxPotentialClusterSize(
clusterSize: *mut ::core::ffi::c_int,
- func: cuda_types::CUfunction,
- config: *const cuda_types::CUlaunchConfig,
- ) -> cuda_types::CUresult;
+ func: cuda_types::cuda::CUfunction,
+ config: *const cuda_types::cuda::CUlaunchConfig,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Given the kernel function (\p func) and launch configuration
(\p config), return the maximum number of clusters that could co-exist
on the target device in \p *numClusters.
@@ -16235,9 +16280,9 @@ CUgraphInstantiateResult result_out;
::cuFuncGetAttribute*/
fn cuOccupancyMaxActiveClusters(
numClusters: *mut ::core::ffi::c_int,
- func: cuda_types::CUfunction,
- config: *const cuda_types::CUlaunchConfig,
- ) -> cuda_types::CUresult;
+ func: cuda_types::cuda::CUfunction,
+ config: *const cuda_types::cuda::CUlaunchConfig,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Binds an array as a texture reference
\deprecated
@@ -16266,10 +16311,10 @@ CUgraphInstantiateResult result_out;
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetArray(
- hTexRef: cuda_types::CUtexref,
- hArray: cuda_types::CUarray,
+ hTexRef: cuda_types::cuda::CUtexref,
+ hArray: cuda_types::cuda::CUarray,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Binds a mipmapped array to a texture reference
\deprecated
@@ -16297,10 +16342,10 @@ CUgraphInstantiateResult result_out;
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetMipmappedArray(
- hTexRef: cuda_types::CUtexref,
- hMipmappedArray: cuda_types::CUmipmappedArray,
+ hTexRef: cuda_types::cuda::CUtexref,
+ hMipmappedArray: cuda_types::cuda::CUmipmappedArray,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Binds an address as a texture reference
\deprecated
@@ -16345,10 +16390,10 @@ CUgraphInstantiateResult result_out;
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetAddress_v2(
ByteOffset: *mut usize,
- hTexRef: cuda_types::CUtexref,
- dptr: cuda_types::CUdeviceptr,
+ hTexRef: cuda_types::cuda::CUtexref,
+ dptr: cuda_types::cuda::CUdeviceptr,
bytes: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Binds an address as a 2D texture reference
\deprecated
@@ -16401,11 +16446,11 @@ CUgraphInstantiateResult result_out;
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetAddress2D_v3(
- hTexRef: cuda_types::CUtexref,
- desc: *const cuda_types::CUDA_ARRAY_DESCRIPTOR,
- dptr: cuda_types::CUdeviceptr,
+ hTexRef: cuda_types::cuda::CUtexref,
+ desc: *const cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR,
+ dptr: cuda_types::cuda::CUdeviceptr,
Pitch: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the format for a texture reference
\deprecated
@@ -16435,10 +16480,10 @@ CUgraphInstantiateResult result_out;
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,
::cudaCreateChannelDesc*/
fn cuTexRefSetFormat(
- hTexRef: cuda_types::CUtexref,
- fmt: cuda_types::CUarray_format,
+ hTexRef: cuda_types::cuda::CUtexref,
+ fmt: cuda_types::cuda::CUarray_format,
NumPackedComponents: ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the addressing mode for a texture reference
\deprecated
@@ -16479,10 +16524,10 @@ CU_TR_ADDRESS_MODE_BORDER = 3
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetAddressMode(
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
dim: ::core::ffi::c_int,
- am: cuda_types::CUaddress_mode,
- ) -> cuda_types::CUresult;
+ am: cuda_types::cuda::CUaddress_mode,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the filtering mode for a texture reference
\deprecated
@@ -16516,9 +16561,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetFilterMode(
- hTexRef: cuda_types::CUtexref,
- fm: cuda_types::CUfilter_mode,
- ) -> cuda_types::CUresult;
+ hTexRef: cuda_types::cuda::CUtexref,
+ fm: cuda_types::cuda::CUfilter_mode,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the mipmap filtering mode for a texture reference
\deprecated
@@ -16552,9 +16597,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetMipmapFilterMode(
- hTexRef: cuda_types::CUtexref,
- fm: cuda_types::CUfilter_mode,
- ) -> cuda_types::CUresult;
+ hTexRef: cuda_types::cuda::CUtexref,
+ fm: cuda_types::cuda::CUfilter_mode,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the mipmap level bias for a texture reference
\deprecated
@@ -16581,9 +16626,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetMipmapLevelBias(
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
bias: f32,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the mipmap min/max mipmap level clamps for a texture reference
\deprecated
@@ -16612,10 +16657,10 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetMipmapLevelClamp(
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
minMipmapLevelClamp: f32,
maxMipmapLevelClamp: f32,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the maximum anisotropy for a texture reference
\deprecated
@@ -16642,9 +16687,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetMaxAnisotropy(
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
maxAniso: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the border color for a texture reference
\deprecated
@@ -16675,9 +16720,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefSetAddressMode,
::cuTexRefGetAddressMode, ::cuTexRefGetBorderColor*/
fn cuTexRefSetBorderColor(
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
pBorderColor: *mut f32,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the flags for a texture reference
\deprecated
@@ -16717,9 +16762,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefSetFlags(
- hTexRef: cuda_types::CUtexref,
+ hTexRef: cuda_types::cuda::CUtexref,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the address associated with a texture reference
\deprecated
@@ -16744,9 +16789,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefGetAddress_v2(
- pdptr: *mut cuda_types::CUdeviceptr,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ pdptr: *mut cuda_types::cuda::CUdeviceptr,
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the array bound to a texture reference
\deprecated
@@ -16771,9 +16816,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefGetArray(
- phArray: *mut cuda_types::CUarray,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ phArray: *mut cuda_types::cuda::CUarray,
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the mipmapped array bound to a texture reference
\deprecated
@@ -16798,9 +16843,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefGetMipmappedArray(
- phMipmappedArray: *mut cuda_types::CUmipmappedArray,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ phMipmappedArray: *mut cuda_types::cuda::CUmipmappedArray,
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the addressing mode used by a texture reference
\deprecated
@@ -16826,10 +16871,10 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefGetAddressMode(
- pam: *mut cuda_types::CUaddress_mode,
- hTexRef: cuda_types::CUtexref,
+ pam: *mut cuda_types::cuda::CUaddress_mode,
+ hTexRef: cuda_types::cuda::CUtexref,
dim: ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the filter-mode used by a texture reference
\deprecated
@@ -16853,9 +16898,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefGetFilterMode(
- pfm: *mut cuda_types::CUfilter_mode,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ pfm: *mut cuda_types::cuda::CUfilter_mode,
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the format used by a texture reference
\deprecated
@@ -16881,10 +16926,10 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags*/
fn cuTexRefGetFormat(
- pFormat: *mut cuda_types::CUarray_format,
+ pFormat: *mut cuda_types::cuda::CUarray_format,
pNumChannels: *mut ::core::ffi::c_int,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the mipmap filtering mode for a texture reference
\deprecated
@@ -16908,9 +16953,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefGetMipmapFilterMode(
- pfm: *mut cuda_types::CUfilter_mode,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ pfm: *mut cuda_types::cuda::CUfilter_mode,
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the mipmap level bias for a texture reference
\deprecated
@@ -16935,8 +16980,8 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefGetMipmapLevelBias(
pbias: *mut f32,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the min/max mipmap level clamps for a texture reference
\deprecated
@@ -16963,8 +17008,8 @@ CU_TR_FILTER_MODE_LINEAR = 1
fn cuTexRefGetMipmapLevelClamp(
pminMipmapLevelClamp: *mut f32,
pmaxMipmapLevelClamp: *mut f32,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the maximum anisotropy for a texture reference
\deprecated
@@ -16989,8 +17034,8 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/
fn cuTexRefGetMaxAnisotropy(
pmaxAniso: *mut ::core::ffi::c_int,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the border color used by a texture reference
\deprecated
@@ -17018,8 +17063,8 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefSetAddressMode, ::cuTexRefSetBorderColor*/
fn cuTexRefGetBorderColor(
pBorderColor: *mut f32,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the flags used by a texture reference
\deprecated
@@ -17043,8 +17088,8 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuTexRefGetFilterMode, ::cuTexRefGetFormat*/
fn cuTexRefGetFlags(
pFlags: *mut ::core::ffi::c_uint,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a texture reference
\deprecated
@@ -17066,7 +17111,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::CUDA_ERROR_INVALID_VALUE
\sa ::cuTexRefDestroy*/
- fn cuTexRefCreate(pTexRef: *mut cuda_types::CUtexref) -> cuda_types::CUresult;
+ fn cuTexRefCreate(
+ pTexRef: *mut cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys a texture reference
\deprecated
@@ -17083,7 +17130,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
::CUDA_ERROR_INVALID_VALUE
\sa ::cuTexRefCreate*/
- fn cuTexRefDestroy(hTexRef: cuda_types::CUtexref) -> cuda_types::CUresult;
+ fn cuTexRefDestroy(
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Sets the CUDA array for a surface reference.
\deprecated
@@ -17109,10 +17158,10 @@ CU_TR_FILTER_MODE_LINEAR = 1
::cuModuleGetSurfRef,
::cuSurfRefGetArray*/
fn cuSurfRefSetArray(
- hSurfRef: cuda_types::CUsurfref,
- hArray: cuda_types::CUarray,
+ hSurfRef: cuda_types::cuda::CUsurfref,
+ hArray: cuda_types::cuda::CUarray,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Passes back the CUDA array bound to a surface reference.
\deprecated
@@ -17133,9 +17182,9 @@ CU_TR_FILTER_MODE_LINEAR = 1
\sa ::cuModuleGetSurfRef, ::cuSurfRefSetArray*/
fn cuSurfRefGetArray(
- phArray: *mut cuda_types::CUarray,
- hSurfRef: cuda_types::CUsurfref,
- ) -> cuda_types::CUresult;
+ phArray: *mut cuda_types::cuda::CUarray,
+ hSurfRef: cuda_types::cuda::CUsurfref,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a texture object
Creates a texture object and returns it in \p pTexObject. \p pResDesc describes
@@ -17358,11 +17407,11 @@ unsigned int lastLayer;
::cuTexObjectDestroy,
::cudaCreateTextureObject*/
fn cuTexObjectCreate(
- pTexObject: *mut cuda_types::CUtexObject,
- pResDesc: *const cuda_types::CUDA_RESOURCE_DESC,
- pTexDesc: *const cuda_types::CUDA_TEXTURE_DESC,
- pResViewDesc: *const cuda_types::CUDA_RESOURCE_VIEW_DESC,
- ) -> cuda_types::CUresult;
+ pTexObject: *mut cuda_types::cuda::CUtexObject,
+ pResDesc: *const cuda_types::cuda::CUDA_RESOURCE_DESC,
+ pTexDesc: *const cuda_types::cuda::CUDA_TEXTURE_DESC,
+ pResViewDesc: *const cuda_types::cuda::CUDA_RESOURCE_VIEW_DESC,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys a texture object
Destroys the texture object specified by \p texObject.
@@ -17379,7 +17428,9 @@ unsigned int lastLayer;
\sa
::cuTexObjectCreate,
::cudaDestroyTextureObject*/
- fn cuTexObjectDestroy(texObject: cuda_types::CUtexObject) -> cuda_types::CUresult;
+ fn cuTexObjectDestroy(
+ texObject: cuda_types::cuda::CUtexObject,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a texture object's resource descriptor
Returns the resource descriptor for the texture object specified by \p texObject.
@@ -17398,9 +17449,9 @@ unsigned int lastLayer;
::cuTexObjectCreate,
::cudaGetTextureObjectResourceDesc,*/
fn cuTexObjectGetResourceDesc(
- pResDesc: *mut cuda_types::CUDA_RESOURCE_DESC,
- texObject: cuda_types::CUtexObject,
- ) -> cuda_types::CUresult;
+ pResDesc: *mut cuda_types::cuda::CUDA_RESOURCE_DESC,
+ texObject: cuda_types::cuda::CUtexObject,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a texture object's texture descriptor
Returns the texture descriptor for the texture object specified by \p texObject.
@@ -17419,9 +17470,9 @@ unsigned int lastLayer;
::cuTexObjectCreate,
::cudaGetTextureObjectTextureDesc*/
fn cuTexObjectGetTextureDesc(
- pTexDesc: *mut cuda_types::CUDA_TEXTURE_DESC,
- texObject: cuda_types::CUtexObject,
- ) -> cuda_types::CUresult;
+ pTexDesc: *mut cuda_types::cuda::CUDA_TEXTURE_DESC,
+ texObject: cuda_types::cuda::CUtexObject,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a texture object's resource view descriptor
Returns the resource view descriptor for the texture object specified by \p texObject.
@@ -17441,9 +17492,9 @@ unsigned int lastLayer;
::cuTexObjectCreate,
::cudaGetTextureObjectResourceViewDesc*/
fn cuTexObjectGetResourceViewDesc(
- pResViewDesc: *mut cuda_types::CUDA_RESOURCE_VIEW_DESC,
- texObject: cuda_types::CUtexObject,
- ) -> cuda_types::CUresult;
+ pResViewDesc: *mut cuda_types::cuda::CUDA_RESOURCE_VIEW_DESC,
+ texObject: cuda_types::cuda::CUtexObject,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a surface object
Creates a surface object and returns it in \p pSurfObject. \p pResDesc describes
@@ -17469,9 +17520,9 @@ unsigned int lastLayer;
::cuSurfObjectDestroy,
::cudaCreateSurfaceObject*/
fn cuSurfObjectCreate(
- pSurfObject: *mut cuda_types::CUsurfObject,
- pResDesc: *const cuda_types::CUDA_RESOURCE_DESC,
- ) -> cuda_types::CUresult;
+ pSurfObject: *mut cuda_types::cuda::CUsurfObject,
+ pResDesc: *const cuda_types::cuda::CUDA_RESOURCE_DESC,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys a surface object
Destroys the surface object specified by \p surfObject.
@@ -17488,7 +17539,9 @@ unsigned int lastLayer;
\sa
::cuSurfObjectCreate,
::cudaDestroySurfaceObject*/
- fn cuSurfObjectDestroy(surfObject: cuda_types::CUsurfObject) -> cuda_types::CUresult;
+ fn cuSurfObjectDestroy(
+ surfObject: cuda_types::cuda::CUsurfObject,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns a surface object's resource descriptor
Returns the resource descriptor for the surface object specified by \p surfObject.
@@ -17507,9 +17560,9 @@ unsigned int lastLayer;
::cuSurfObjectCreate,
::cudaGetSurfaceObjectResourceDesc*/
fn cuSurfObjectGetResourceDesc(
- pResDesc: *mut cuda_types::CUDA_RESOURCE_DESC,
- surfObject: cuda_types::CUsurfObject,
- ) -> cuda_types::CUresult;
+ pResDesc: *mut cuda_types::cuda::CUDA_RESOURCE_DESC,
+ surfObject: cuda_types::cuda::CUsurfObject,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a tensor map descriptor object representing tiled memory region
Creates a descriptor for Tensor Memory Access (TMA) object specified
@@ -17649,19 +17702,19 @@ CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA
::cuTensorMapEncodeIm2col,
::cuTensorMapReplaceAddress*/
fn cuTensorMapEncodeTiled(
- tensorMap: *mut cuda_types::CUtensorMap,
- tensorDataType: cuda_types::CUtensorMapDataType,
- tensorRank: cuda_types::cuuint32_t,
+ tensorMap: *mut cuda_types::cuda::CUtensorMap,
+ tensorDataType: cuda_types::cuda::CUtensorMapDataType,
+ tensorRank: cuda_types::cuda::cuuint32_t,
globalAddress: *mut ::core::ffi::c_void,
- globalDim: *const cuda_types::cuuint64_t,
- globalStrides: *const cuda_types::cuuint64_t,
- boxDim: *const cuda_types::cuuint32_t,
- elementStrides: *const cuda_types::cuuint32_t,
- interleave: cuda_types::CUtensorMapInterleave,
- swizzle: cuda_types::CUtensorMapSwizzle,
- l2Promotion: cuda_types::CUtensorMapL2promotion,
- oobFill: cuda_types::CUtensorMapFloatOOBfill,
- ) -> cuda_types::CUresult;
+ globalDim: *const cuda_types::cuda::cuuint64_t,
+ globalStrides: *const cuda_types::cuda::cuuint64_t,
+ boxDim: *const cuda_types::cuda::cuuint32_t,
+ elementStrides: *const cuda_types::cuda::cuuint32_t,
+ interleave: cuda_types::cuda::CUtensorMapInterleave,
+ swizzle: cuda_types::cuda::CUtensorMapSwizzle,
+ l2Promotion: cuda_types::cuda::CUtensorMapL2promotion,
+ oobFill: cuda_types::cuda::CUtensorMapFloatOOBfill,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a tensor map descriptor object representing im2col memory region
Creates a descriptor for Tensor Memory Access (TMA) object specified
@@ -17816,22 +17869,22 @@ CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA
::cuTensorMapEncodeTiled,
::cuTensorMapReplaceAddress*/
fn cuTensorMapEncodeIm2col(
- tensorMap: *mut cuda_types::CUtensorMap,
- tensorDataType: cuda_types::CUtensorMapDataType,
- tensorRank: cuda_types::cuuint32_t,
+ tensorMap: *mut cuda_types::cuda::CUtensorMap,
+ tensorDataType: cuda_types::cuda::CUtensorMapDataType,
+ tensorRank: cuda_types::cuda::cuuint32_t,
globalAddress: *mut ::core::ffi::c_void,
- globalDim: *const cuda_types::cuuint64_t,
- globalStrides: *const cuda_types::cuuint64_t,
+ globalDim: *const cuda_types::cuda::cuuint64_t,
+ globalStrides: *const cuda_types::cuda::cuuint64_t,
pixelBoxLowerCorner: *const ::core::ffi::c_int,
pixelBoxUpperCorner: *const ::core::ffi::c_int,
- channelsPerPixel: cuda_types::cuuint32_t,
- pixelsPerColumn: cuda_types::cuuint32_t,
- elementStrides: *const cuda_types::cuuint32_t,
- interleave: cuda_types::CUtensorMapInterleave,
- swizzle: cuda_types::CUtensorMapSwizzle,
- l2Promotion: cuda_types::CUtensorMapL2promotion,
- oobFill: cuda_types::CUtensorMapFloatOOBfill,
- ) -> cuda_types::CUresult;
+ channelsPerPixel: cuda_types::cuda::cuuint32_t,
+ pixelsPerColumn: cuda_types::cuda::cuuint32_t,
+ elementStrides: *const cuda_types::cuda::cuuint32_t,
+ interleave: cuda_types::cuda::CUtensorMapInterleave,
+ swizzle: cuda_types::cuda::CUtensorMapSwizzle,
+ l2Promotion: cuda_types::cuda::CUtensorMapL2promotion,
+ oobFill: cuda_types::cuda::CUtensorMapFloatOOBfill,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Modify an existing tensor map descriptor with an updated global address
Modifies the descriptor for Tensor Memory Access (TMA) object passed in \p tensorMap with
@@ -17855,9 +17908,9 @@ CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA
::cuTensorMapEncodeTiled,
::cuTensorMapEncodeIm2col*/
fn cuTensorMapReplaceAddress(
- tensorMap: *mut cuda_types::CUtensorMap,
+ tensorMap: *mut cuda_types::cuda::CUtensorMap,
globalAddress: *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Queries if a device may directly access a peer device's memory.
Returns in \p *canAccessPeer a value of 1 if contexts on \p dev are capable of
@@ -17884,9 +17937,9 @@ CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA
::cudaDeviceCanAccessPeer*/
fn cuDeviceCanAccessPeer(
canAccessPeer: *mut ::core::ffi::c_int,
- dev: cuda_types::CUdevice,
- peerDev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ peerDev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
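// Illustrative sketch (not part of this patch): the peer-access query with the
// re-pathed device handle type. `driver` is a hypothetical re-export of the
// extern block above.
use cuda_types::cuda::{CUdevice, CUresult};

unsafe fn can_access_peer(dev: CUdevice, peer: CUdevice) -> (bool, CUresult) {
    let mut flag: i32 = 0;
    let result = driver::cuDeviceCanAccessPeer(&mut flag, dev, peer);
    (flag == 1, result)
}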
/** \brief Enables direct access to memory allocations in a peer context.
If both the current context and \p peerContext are on devices which support unified
@@ -17937,9 +17990,9 @@ CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA
::cuCtxDisablePeerAccess,
::cudaDeviceEnablePeerAccess*/
fn cuCtxEnablePeerAccess(
- peerContext: cuda_types::CUcontext,
+ peerContext: cuda_types::cuda::CUcontext,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Disables direct access to memory allocations in a peer context and
unregisters any registered allocations.
@@ -17964,8 +18017,8 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuCtxEnablePeerAccess,
::cudaDeviceDisablePeerAccess*/
fn cuCtxDisablePeerAccess(
- peerContext: cuda_types::CUcontext,
- ) -> cuda_types::CUresult;
+ peerContext: cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Queries attributes of the link between two devices.
Returns in \p *value the value of the requested attribute \p attrib of the
@@ -18004,10 +18057,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cudaDeviceGetP2PAttribute*/
fn cuDeviceGetP2PAttribute(
value: *mut ::core::ffi::c_int,
- attrib: cuda_types::CUdevice_P2PAttribute,
- srcDevice: cuda_types::CUdevice,
- dstDevice: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ attrib: cuda_types::cuda::CUdevice_P2PAttribute,
+ srcDevice: cuda_types::cuda::CUdevice,
+ dstDevice: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unregisters a graphics resource for access by CUDA
Unregisters the graphics resource \p resource so it is not accessible by
@@ -18035,8 +18088,8 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsGLRegisterImage,
::cudaGraphicsUnregisterResource*/
fn cuGraphicsUnregisterResource(
- resource: cuda_types::CUgraphicsResource,
- ) -> cuda_types::CUresult;
+ resource: cuda_types::cuda::CUgraphicsResource,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Get an array through which to access a subresource of a mapped graphics resource.
Returns in \p *pArray an array through which the subresource of the mapped
@@ -18074,11 +18127,11 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsResourceGetMappedPointer,
::cudaGraphicsSubResourceGetMappedArray*/
fn cuGraphicsSubResourceGetMappedArray(
- pArray: *mut cuda_types::CUarray,
- resource: cuda_types::CUgraphicsResource,
+ pArray: *mut cuda_types::cuda::CUarray,
+ resource: cuda_types::cuda::CUgraphicsResource,
arrayIndex: ::core::ffi::c_uint,
mipLevel: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Get a mipmapped array through which to access a mapped graphics resource.
Returns in \p *pMipmappedArray a mipmapped array through which the mapped graphics
@@ -18107,9 +18160,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsResourceGetMappedPointer,
::cudaGraphicsResourceGetMappedMipmappedArray*/
fn cuGraphicsResourceGetMappedMipmappedArray(
- pMipmappedArray: *mut cuda_types::CUmipmappedArray,
- resource: cuda_types::CUgraphicsResource,
- ) -> cuda_types::CUresult;
+ pMipmappedArray: *mut cuda_types::cuda::CUmipmappedArray,
+ resource: cuda_types::cuda::CUgraphicsResource,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Get a device pointer through which to access a mapped graphics resource.
Returns in \p *pDevPtr a pointer through which the mapped graphics resource
@@ -18141,10 +18194,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsSubResourceGetMappedArray,
::cudaGraphicsResourceGetMappedPointer*/
fn cuGraphicsResourceGetMappedPointer_v2(
- pDevPtr: *mut cuda_types::CUdeviceptr,
+ pDevPtr: *mut cuda_types::cuda::CUdeviceptr,
pSize: *mut usize,
- resource: cuda_types::CUgraphicsResource,
- ) -> cuda_types::CUresult;
+ resource: cuda_types::cuda::CUgraphicsResource,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Set usage flags for mapping a graphics resource
Set \p flags for mapping the graphics resource \p resource.
@@ -18183,9 +18236,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsMapResources,
::cudaGraphicsResourceSetMapFlags*/
fn cuGraphicsResourceSetMapFlags_v2(
- resource: cuda_types::CUgraphicsResource,
+ resource: cuda_types::cuda::CUgraphicsResource,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Map graphics resources for access by CUDA
Maps the \p count graphics resources in \p resources for access by CUDA.
@@ -18224,9 +18277,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cudaGraphicsMapResources*/
fn cuGraphicsMapResources_ptsz(
count: ::core::ffi::c_uint,
- resources: *mut cuda_types::CUgraphicsResource,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ resources: *mut cuda_types::cuda::CUgraphicsResource,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unmap graphics resources.
Unmaps the \p count graphics resources in \p resources.
@@ -18262,9 +18315,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cudaGraphicsUnmapResources*/
fn cuGraphicsUnmapResources_ptsz(
count: ::core::ffi::c_uint,
- resources: *mut cuda_types::CUgraphicsResource,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ resources: *mut cuda_types::cuda::CUgraphicsResource,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Returns the requested driver API function pointer
Returns in \p **pfn the address of the CUDA driver function for the requested
@@ -18328,9 +18381,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
symbol: *const ::core::ffi::c_char,
pfn: *mut *mut ::core::ffi::c_void,
cudaVersion: ::core::ffi::c_int,
- flags: cuda_types::cuuint64_t,
- symbolStatus: *mut cuda_types::CUdriverProcAddressQueryResult,
- ) -> cuda_types::CUresult;
+ flags: cuda_types::cuda::cuuint64_t,
+ symbolStatus: *mut cuda_types::cuda::CUdriverProcAddressQueryResult,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allows caller to fetch a coredump attribute value for the current context
Returns in \p *value the requested value specified by \p attrib. It is up to the caller
@@ -18380,10 +18433,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuCoredumpSetAttribute,
::cuCoredumpSetAttributeGlobal*/
fn cuCoredumpGetAttribute(
- attrib: cuda_types::CUcoredumpSettings,
+ attrib: cuda_types::cuda::CUcoredumpSettings,
value: *mut ::core::ffi::c_void,
size: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allows caller to fetch a coredump attribute value for the entire application
Returns in \p *value the requested value specified by \p attrib. It is up to the caller
@@ -18426,10 +18479,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuCoredumpSetAttribute,
::cuCoredumpSetAttributeGlobal*/
fn cuCoredumpGetAttributeGlobal(
- attrib: cuda_types::CUcoredumpSettings,
+ attrib: cuda_types::cuda::CUcoredumpSettings,
value: *mut ::core::ffi::c_void,
size: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allows caller to set a coredump attribute value for the current context
This function should be considered an alternate interface to the CUDA-GDB environment
@@ -18485,10 +18538,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuCoredumpGetAttribute,
::cuCoredumpSetAttributeGlobal*/
fn cuCoredumpSetAttribute(
- attrib: cuda_types::CUcoredumpSettings,
+ attrib: cuda_types::cuda::CUcoredumpSettings,
value: *mut ::core::ffi::c_void,
size: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Allows caller to set a coredump attribute value globally
This function should be considered an alternate interface to the CUDA-GDB environment
@@ -18541,15 +18594,15 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuCoredumpGetAttributeGlobal,
::cuCoredumpSetAttribute*/
fn cuCoredumpSetAttributeGlobal(
- attrib: cuda_types::CUcoredumpSettings,
+ attrib: cuda_types::cuda::CUcoredumpSettings,
value: *mut ::core::ffi::c_void,
size: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/// @}
fn cuGetExportTable(
ppExportTable: *mut *const ::core::ffi::c_void,
- pExportTableId: *const cuda_types::CUuuid,
- ) -> cuda_types::CUresult;
+ pExportTableId: *const cuda_types::cuda::CUuuid,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates a green context with a specified set of resources.
This API creates a green context with the resources specified in the descriptor \p desc and
@@ -18593,11 +18646,11 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuCtxCreate,
::cuCtxCreate_v3*/
fn cuGreenCtxCreate(
- phCtx: *mut cuda_types::CUgreenCtx,
- desc: cuda_types::CUdevResourceDesc,
- dev: cuda_types::CUdevice,
+ phCtx: *mut cuda_types::cuda::CUgreenCtx,
+ desc: cuda_types::cuda::CUdevResourceDesc,
+ dev: cuda_types::cuda::CUdevice,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Destroys a green context
Destroys the green context, releasing the primary context of the device that this green context was created for.
@@ -18615,7 +18668,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\sa
::cuGreenCtxCreate,
::cuCtxDestroy*/
- fn cuGreenCtxDestroy(hCtx: cuda_types::CUgreenCtx) -> cuda_types::CUresult;
+ fn cuGreenCtxDestroy(
+ hCtx: cuda_types::cuda::CUgreenCtx,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Converts a green context into the primary context
The API converts a green context into the primary context returned in \p pContext. It is important
@@ -18640,9 +18695,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\sa
::cuGreenCtxCreate*/
fn cuCtxFromGreenCtx(
- pContext: *mut cuda_types::CUcontext,
- hCtx: cuda_types::CUgreenCtx,
- ) -> cuda_types::CUresult;
+ pContext: *mut cuda_types::cuda::CUcontext,
+ hCtx: cuda_types::cuda::CUgreenCtx,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Get device resources
Get the \p type resources available to the \p device.
@@ -18665,10 +18720,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\sa
::cuDevResourceGenerateDesc*/
fn cuDeviceGetDevResource(
- device: cuda_types::CUdevice,
- resource: *mut cuda_types::CUdevResource,
- type_: cuda_types::CUdevResourceType,
- ) -> cuda_types::CUresult;
+ device: cuda_types::cuda::CUdevice,
+ resource: *mut cuda_types::cuda::CUdevResource,
+ type_: cuda_types::cuda::CUdevResourceType,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Get context resources
Get the \p type resources available to the context represented by \p hCtx
@@ -18690,10 +18745,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\sa
::cuDevResourceGenerateDesc*/
fn cuCtxGetDevResource(
- hCtx: cuda_types::CUcontext,
- resource: *mut cuda_types::CUdevResource,
- type_: cuda_types::CUdevResourceType,
- ) -> cuda_types::CUresult;
+ hCtx: cuda_types::cuda::CUcontext,
+ resource: *mut cuda_types::cuda::CUdevResource,
+ type_: cuda_types::cuda::CUdevResourceType,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Get green context resources
Get the \p type resources available to the green context represented by \p hCtx
@@ -18712,10 +18767,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\sa
::cuDevResourceGenerateDesc*/
fn cuGreenCtxGetDevResource(
- hCtx: cuda_types::CUgreenCtx,
- resource: *mut cuda_types::CUdevResource,
- type_: cuda_types::CUdevResourceType,
- ) -> cuda_types::CUresult;
+ hCtx: cuda_types::cuda::CUgreenCtx,
+ resource: *mut cuda_types::cuda::CUdevResource,
+ type_: cuda_types::cuda::CUdevResourceType,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Splits \p CU_DEV_RESOURCE_TYPE_SM resources.
Splits \p CU_DEV_RESOURCE_TYPE_SM resources into \p nbGroups, adhering to the minimum SM count specified in \p minCount
@@ -18768,13 +18823,13 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuCtxGetDevResource,
::cuDeviceGetDevResource*/
fn cuDevSmResourceSplitByCount(
- result: *mut cuda_types::CUdevResource,
+ result: *mut cuda_types::cuda::CUdevResource,
nbGroups: *mut ::core::ffi::c_uint,
- input: *const cuda_types::CUdevResource,
- remaining: *mut cuda_types::CUdevResource,
+ input: *const cuda_types::cuda::CUdevResource,
+ remaining: *mut cuda_types::cuda::CUdevResource,
useFlags: ::core::ffi::c_uint,
minCount: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Generate a resource descriptor
Generates a resource descriptor with the set of resources specified in \p resources.
@@ -18799,10 +18854,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\sa
::cuDevSmResourceSplitByCount*/
fn cuDevResourceGenerateDesc(
- phDesc: *mut cuda_types::CUdevResourceDesc,
- resources: *mut cuda_types::CUdevResource,
+ phDesc: *mut cuda_types::cuda::CUdevResourceDesc,
+ resources: *mut cuda_types::cuda::CUdevResource,
nbResources: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Records an event.
Captures in \p hEvent all the activities of the green context of \p hCtx
@@ -18829,9 +18884,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGreenCtxWaitEvent,
::cuEventRecord*/
fn cuGreenCtxRecordEvent(
- hCtx: cuda_types::CUgreenCtx,
- hEvent: cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ hCtx: cuda_types::cuda::CUgreenCtx,
+ hEvent: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Make a green context wait on an event
Makes all future work submitted to green context \p hCtx wait for all work
@@ -18856,9 +18911,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGreenCtxRecordEvent,
::cuStreamWaitEvent*/
fn cuGreenCtxWaitEvent(
- hCtx: cuda_types::CUgreenCtx,
- hEvent: cuda_types::CUevent,
- ) -> cuda_types::CUresult;
+ hCtx: cuda_types::cuda::CUgreenCtx,
+ hEvent: cuda_types::cuda::CUevent,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Query the green context associated with a stream
Returns the CUDA green context that the stream is associated with, or NULL if the stream
@@ -18903,516 +18958,534 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cudaStreamCreate,
::cudaStreamCreateWithFlags*/
fn cuStreamGetGreenCtx(
- hStream: cuda_types::CUstream,
- phCtx: *mut cuda_types::CUgreenCtx,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ phCtx: *mut cuda_types::cuda::CUgreenCtx,
+ ) -> cuda_types::cuda::CUresult;
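// Illustrative sketch (not part of this patch): querying which green context,
// if any, a stream belongs to; per the documentation above, a null result means
// the stream is not associated with one. `driver` is a hypothetical re-export
// of the extern block above, and CUgreenCtx is assumed to be the usual opaque
// pointer typedef.
use cuda_types::cuda::{CUgreenCtx, CUresult, CUstream};

unsafe fn green_ctx_of(stream: CUstream) -> (CUgreenCtx, CUresult) {
    let mut ctx: CUgreenCtx = std::ptr::null_mut();
    let result = driver::cuStreamGetGreenCtx(stream, &mut ctx);
    (ctx, result)
}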
fn cuMemHostRegister(
p: *mut ::core::ffi::c_void,
bytesize: usize,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphicsResourceSetMapFlags(
- resource: cuda_types::CUgraphicsResource,
+ resource: cuda_types::cuda::CUgraphicsResource,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuLinkCreate(
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
- stateOut: *mut cuda_types::CUlinkState,
- ) -> cuda_types::CUresult;
+ stateOut: *mut cuda_types::cuda::CUlinkState,
+ ) -> cuda_types::cuda::CUresult;
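// Illustrative sketch (not part of this patch): creating a JIT linker state
// with no options through the legacy (un-suffixed) entry point declared above.
// `driver` is a hypothetical re-export of the extern block, and CUlinkState is
// assumed to be the usual opaque pointer typedef.
use cuda_types::cuda::{CUlinkState, CUresult};

unsafe fn create_linker() -> (CUlinkState, CUresult) {
    let mut state: CUlinkState = std::ptr::null_mut();
    let result = driver::cuLinkCreate(
        0,                    // numOptions
        std::ptr::null_mut(), // options
        std::ptr::null_mut(), // optionValues
        &mut state,
    );
    (state, result)
}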
fn cuLinkAddData(
- state: cuda_types::CUlinkState,
- type_: cuda_types::CUjitInputType,
+ state: cuda_types::cuda::CUlinkState,
+ type_: cuda_types::cuda::CUjitInputType,
data: *mut ::core::ffi::c_void,
size: usize,
name: *const ::core::ffi::c_char,
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuLinkAddFile(
- state: cuda_types::CUlinkState,
- type_: cuda_types::CUjitInputType,
+ state: cuda_types::cuda::CUlinkState,
+ type_: cuda_types::cuda::CUjitInputType,
path: *const ::core::ffi::c_char,
numOptions: ::core::ffi::c_uint,
- options: *mut cuda_types::CUjit_option,
+ options: *mut cuda_types::cuda::CUjit_option,
optionValues: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuTexRefSetAddress2D_v2(
- hTexRef: cuda_types::CUtexref,
- desc: *const cuda_types::CUDA_ARRAY_DESCRIPTOR,
- dptr: cuda_types::CUdeviceptr,
+ hTexRef: cuda_types::cuda::CUtexref,
+ desc: *const cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR,
+ dptr: cuda_types::cuda::CUdeviceptr,
Pitch: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuDeviceTotalMem(
bytes: *mut ::core::ffi::c_uint,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
fn cuCtxCreate(
- pctx: *mut cuda_types::CUcontext,
+ pctx: *mut cuda_types::cuda::CUcontext,
flags: ::core::ffi::c_uint,
- dev: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
fn cuModuleGetGlobal(
- dptr: *mut cuda_types::CUdeviceptr_v1,
+ dptr: *mut cuda_types::cuda::CUdeviceptr_v1,
bytes: *mut ::core::ffi::c_uint,
- hmod: cuda_types::CUmodule,
+ hmod: cuda_types::cuda::CUmodule,
name: *const ::core::ffi::c_char,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemGetInfo(
free: *mut ::core::ffi::c_uint,
total: *mut ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemAlloc(
- dptr: *mut cuda_types::CUdeviceptr_v1,
+ dptr: *mut cuda_types::cuda::CUdeviceptr_v1,
bytesize: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemAllocPitch(
- dptr: *mut cuda_types::CUdeviceptr_v1,
+ dptr: *mut cuda_types::cuda::CUdeviceptr_v1,
pPitch: *mut ::core::ffi::c_uint,
WidthInBytes: ::core::ffi::c_uint,
Height: ::core::ffi::c_uint,
ElementSizeBytes: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
- fn cuMemFree(dptr: cuda_types::CUdeviceptr_v1) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
+ fn cuMemFree(dptr: cuda_types::cuda::CUdeviceptr_v1) -> cuda_types::cuda::CUresult;
fn cuMemGetAddressRange(
- pbase: *mut cuda_types::CUdeviceptr_v1,
+ pbase: *mut cuda_types::cuda::CUdeviceptr_v1,
psize: *mut ::core::ffi::c_uint,
- dptr: cuda_types::CUdeviceptr_v1,
- ) -> cuda_types::CUresult;
+ dptr: cuda_types::cuda::CUdeviceptr_v1,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemAllocHost(
pp: *mut *mut ::core::ffi::c_void,
bytesize: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemHostGetDevicePointer(
- pdptr: *mut cuda_types::CUdeviceptr_v1,
+ pdptr: *mut cuda_types::cuda::CUdeviceptr_v1,
p: *mut ::core::ffi::c_void,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyHtoD(
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
srcHost: *const ::core::ffi::c_void,
ByteCount: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyDtoH(
dstHost: *mut ::core::ffi::c_void,
- srcDevice: cuda_types::CUdeviceptr_v1,
+ srcDevice: cuda_types::cuda::CUdeviceptr_v1,
ByteCount: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyDtoD(
- dstDevice: cuda_types::CUdeviceptr_v1,
- srcDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
+ srcDevice: cuda_types::cuda::CUdeviceptr_v1,
ByteCount: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyDtoA(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: ::core::ffi::c_uint,
- srcDevice: cuda_types::CUdeviceptr_v1,
+ srcDevice: cuda_types::cuda::CUdeviceptr_v1,
ByteCount: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyAtoD(
- dstDevice: cuda_types::CUdeviceptr_v1,
- srcArray: cuda_types::CUarray,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: ::core::ffi::c_uint,
ByteCount: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyHtoA(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: ::core::ffi::c_uint,
srcHost: *const ::core::ffi::c_void,
ByteCount: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyAtoH(
dstHost: *mut ::core::ffi::c_void,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: ::core::ffi::c_uint,
ByteCount: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyAtoA(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: ::core::ffi::c_uint,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: ::core::ffi::c_uint,
ByteCount: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyHtoAAsync(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: ::core::ffi::c_uint,
srcHost: *const ::core::ffi::c_void,
ByteCount: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyAtoHAsync(
dstHost: *mut ::core::ffi::c_void,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: ::core::ffi::c_uint,
ByteCount: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
- fn cuMemcpy2D(pCopy: *const cuda_types::CUDA_MEMCPY2D_v1) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
+ fn cuMemcpy2D(
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D_v1,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpy2DUnaligned(
- pCopy: *const cuda_types::CUDA_MEMCPY2D_v1,
- ) -> cuda_types::CUresult;
- fn cuMemcpy3D(pCopy: *const cuda_types::CUDA_MEMCPY3D_v1) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D_v1,
+ ) -> cuda_types::cuda::CUresult;
+ fn cuMemcpy3D(
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D_v1,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyHtoDAsync(
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
srcHost: *const ::core::ffi::c_void,
ByteCount: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyDtoHAsync(
dstHost: *mut ::core::ffi::c_void,
- srcDevice: cuda_types::CUdeviceptr_v1,
+ srcDevice: cuda_types::cuda::CUdeviceptr_v1,
ByteCount: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyDtoDAsync(
- dstDevice: cuda_types::CUdeviceptr_v1,
- srcDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
+ srcDevice: cuda_types::cuda::CUdeviceptr_v1,
ByteCount: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpy2DAsync(
- pCopy: *const cuda_types::CUDA_MEMCPY2D_v1,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D_v1,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpy3DAsync(
- pCopy: *const cuda_types::CUDA_MEMCPY3D_v1,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D_v1,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD8(
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
uc: ::core::ffi::c_uchar,
N: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD16(
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
us: ::core::ffi::c_ushort,
N: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD32(
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
ui: ::core::ffi::c_uint,
N: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD2D8(
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
dstPitch: ::core::ffi::c_uint,
uc: ::core::ffi::c_uchar,
Width: ::core::ffi::c_uint,
Height: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD2D16(
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
dstPitch: ::core::ffi::c_uint,
us: ::core::ffi::c_ushort,
Width: ::core::ffi::c_uint,
Height: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD2D32(
- dstDevice: cuda_types::CUdeviceptr_v1,
+ dstDevice: cuda_types::cuda::CUdeviceptr_v1,
dstPitch: ::core::ffi::c_uint,
ui: ::core::ffi::c_uint,
Width: ::core::ffi::c_uint,
Height: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuArrayCreate(
- pHandle: *mut cuda_types::CUarray,
- pAllocateArray: *const cuda_types::CUDA_ARRAY_DESCRIPTOR_v1,
- ) -> cuda_types::CUresult;
+ pHandle: *mut cuda_types::cuda::CUarray,
+ pAllocateArray: *const cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR_v1,
+ ) -> cuda_types::cuda::CUresult;
fn cuArrayGetDescriptor(
- pArrayDescriptor: *mut cuda_types::CUDA_ARRAY_DESCRIPTOR_v1,
- hArray: cuda_types::CUarray,
- ) -> cuda_types::CUresult;
+ pArrayDescriptor: *mut cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR_v1,
+ hArray: cuda_types::cuda::CUarray,
+ ) -> cuda_types::cuda::CUresult;
fn cuArray3DCreate(
- pHandle: *mut cuda_types::CUarray,
- pAllocateArray: *const cuda_types::CUDA_ARRAY3D_DESCRIPTOR_v1,
- ) -> cuda_types::CUresult;
+ pHandle: *mut cuda_types::cuda::CUarray,
+ pAllocateArray: *const cuda_types::cuda::CUDA_ARRAY3D_DESCRIPTOR_v1,
+ ) -> cuda_types::cuda::CUresult;
fn cuArray3DGetDescriptor(
- pArrayDescriptor: *mut cuda_types::CUDA_ARRAY3D_DESCRIPTOR_v1,
- hArray: cuda_types::CUarray,
- ) -> cuda_types::CUresult;
+ pArrayDescriptor: *mut cuda_types::cuda::CUDA_ARRAY3D_DESCRIPTOR_v1,
+ hArray: cuda_types::cuda::CUarray,
+ ) -> cuda_types::cuda::CUresult;
fn cuTexRefSetAddress(
ByteOffset: *mut ::core::ffi::c_uint,
- hTexRef: cuda_types::CUtexref,
- dptr: cuda_types::CUdeviceptr_v1,
+ hTexRef: cuda_types::cuda::CUtexref,
+ dptr: cuda_types::cuda::CUdeviceptr_v1,
bytes: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuTexRefSetAddress2D(
- hTexRef: cuda_types::CUtexref,
- desc: *const cuda_types::CUDA_ARRAY_DESCRIPTOR_v1,
- dptr: cuda_types::CUdeviceptr_v1,
+ hTexRef: cuda_types::cuda::CUtexref,
+ desc: *const cuda_types::cuda::CUDA_ARRAY_DESCRIPTOR_v1,
+ dptr: cuda_types::cuda::CUdeviceptr_v1,
Pitch: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuTexRefGetAddress(
- pdptr: *mut cuda_types::CUdeviceptr_v1,
- hTexRef: cuda_types::CUtexref,
- ) -> cuda_types::CUresult;
+ pdptr: *mut cuda_types::cuda::CUdeviceptr_v1,
+ hTexRef: cuda_types::cuda::CUtexref,
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphicsResourceGetMappedPointer(
- pDevPtr: *mut cuda_types::CUdeviceptr_v1,
+ pDevPtr: *mut cuda_types::cuda::CUdeviceptr_v1,
pSize: *mut ::core::ffi::c_uint,
- resource: cuda_types::CUgraphicsResource,
- ) -> cuda_types::CUresult;
- fn cuCtxDestroy(ctx: cuda_types::CUcontext) -> cuda_types::CUresult;
- fn cuCtxPopCurrent(pctx: *mut cuda_types::CUcontext) -> cuda_types::CUresult;
- fn cuCtxPushCurrent(ctx: cuda_types::CUcontext) -> cuda_types::CUresult;
- fn cuStreamDestroy(hStream: cuda_types::CUstream) -> cuda_types::CUresult;
- fn cuEventDestroy(hEvent: cuda_types::CUevent) -> cuda_types::CUresult;
- fn cuDevicePrimaryCtxRelease(dev: cuda_types::CUdevice) -> cuda_types::CUresult;
- fn cuDevicePrimaryCtxReset(dev: cuda_types::CUdevice) -> cuda_types::CUresult;
+ resource: cuda_types::cuda::CUgraphicsResource,
+ ) -> cuda_types::cuda::CUresult;
+ fn cuCtxDestroy(ctx: cuda_types::cuda::CUcontext) -> cuda_types::cuda::CUresult;
+ fn cuCtxPopCurrent(
+ pctx: *mut cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
+ fn cuCtxPushCurrent(ctx: cuda_types::cuda::CUcontext) -> cuda_types::cuda::CUresult;
+ fn cuStreamDestroy(
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
+ fn cuEventDestroy(hEvent: cuda_types::cuda::CUevent) -> cuda_types::cuda::CUresult;
+ fn cuDevicePrimaryCtxRelease(
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
+ fn cuDevicePrimaryCtxReset(
+ dev: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
fn cuDevicePrimaryCtxSetFlags(
- dev: cuda_types::CUdevice,
+ dev: cuda_types::cuda::CUdevice,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyHtoD_v2(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyDtoH_v2(
dstHost: *mut ::core::ffi::c_void,
- srcDevice: cuda_types::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyDtoD_v2(
- dstDevice: cuda_types::CUdeviceptr,
- srcDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyDtoA_v2(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
- srcDevice: cuda_types::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyAtoD_v2(
- dstDevice: cuda_types::CUdeviceptr,
- srcArray: cuda_types::CUarray,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyHtoA_v2(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyAtoH_v2(
dstHost: *mut ::core::ffi::c_void,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyAtoA_v2(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyHtoAAsync_v2(
- dstArray: cuda_types::CUarray,
+ dstArray: cuda_types::cuda::CUarray,
dstOffset: usize,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyAtoHAsync_v2(
dstHost: *mut ::core::ffi::c_void,
- srcArray: cuda_types::CUarray,
+ srcArray: cuda_types::cuda::CUarray,
srcOffset: usize,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
- fn cuMemcpy2D_v2(pCopy: *const cuda_types::CUDA_MEMCPY2D) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
+ fn cuMemcpy2D_v2(
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpy2DUnaligned_v2(
- pCopy: *const cuda_types::CUDA_MEMCPY2D,
- ) -> cuda_types::CUresult;
- fn cuMemcpy3D_v2(pCopy: *const cuda_types::CUDA_MEMCPY3D) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D,
+ ) -> cuda_types::cuda::CUresult;
+ fn cuMemcpy3D_v2(
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyHtoDAsync_v2(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
srcHost: *const ::core::ffi::c_void,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyDtoHAsync_v2(
dstHost: *mut ::core::ffi::c_void,
- srcDevice: cuda_types::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyDtoDAsync_v2(
- dstDevice: cuda_types::CUdeviceptr,
- srcDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpy2DAsync_v2(
- pCopy: *const cuda_types::CUDA_MEMCPY2D,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY2D,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpy3DAsync_v2(
- pCopy: *const cuda_types::CUDA_MEMCPY3D,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD8_v2(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
uc: ::core::ffi::c_uchar,
N: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD16_v2(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
us: ::core::ffi::c_ushort,
N: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD32_v2(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
ui: ::core::ffi::c_uint,
N: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD2D8_v2(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
uc: ::core::ffi::c_uchar,
Width: usize,
Height: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD2D16_v2(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
us: ::core::ffi::c_ushort,
Width: usize,
Height: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD2D32_v2(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
ui: ::core::ffi::c_uint,
Width: usize,
Height: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpy(
- dst: cuda_types::CUdeviceptr,
- src: cuda_types::CUdeviceptr,
+ dst: cuda_types::cuda::CUdeviceptr,
+ src: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyAsync(
- dst: cuda_types::CUdeviceptr,
- src: cuda_types::CUdeviceptr,
+ dst: cuda_types::cuda::CUdeviceptr,
+ src: cuda_types::cuda::CUdeviceptr,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyPeer(
- dstDevice: cuda_types::CUdeviceptr,
- dstContext: cuda_types::CUcontext,
- srcDevice: cuda_types::CUdeviceptr,
- srcContext: cuda_types::CUcontext,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ dstContext: cuda_types::cuda::CUcontext,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
+ srcContext: cuda_types::cuda::CUcontext,
ByteCount: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpyPeerAsync(
- dstDevice: cuda_types::CUdeviceptr,
- dstContext: cuda_types::CUcontext,
- srcDevice: cuda_types::CUdeviceptr,
- srcContext: cuda_types::CUcontext,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
+ dstContext: cuda_types::cuda::CUcontext,
+ srcDevice: cuda_types::cuda::CUdeviceptr,
+ srcContext: cuda_types::cuda::CUcontext,
ByteCount: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpy3DPeer(
- pCopy: *const cuda_types::CUDA_MEMCPY3D_PEER,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D_PEER,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemcpy3DPeerAsync(
- pCopy: *const cuda_types::CUDA_MEMCPY3D_PEER,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ pCopy: *const cuda_types::cuda::CUDA_MEMCPY3D_PEER,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD8Async(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
uc: ::core::ffi::c_uchar,
N: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD16Async(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
us: ::core::ffi::c_ushort,
N: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD32Async(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
ui: ::core::ffi::c_uint,
N: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD2D8Async(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
uc: ::core::ffi::c_uchar,
Width: usize,
Height: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD2D16Async(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
us: ::core::ffi::c_ushort,
Width: usize,
Height: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemsetD2D32Async(
- dstDevice: cuda_types::CUdeviceptr,
+ dstDevice: cuda_types::cuda::CUdeviceptr,
dstPitch: usize,
ui: ::core::ffi::c_uint,
Width: usize,
Height: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamGetPriority(
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
priority: *mut ::core::ffi::c_int,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamGetId(
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
streamId: *mut ::core::ffi::c_ulonglong,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamGetFlags(
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
flags: *mut ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamGetCtx(
- hStream: cuda_types::CUstream,
- pctx: *mut cuda_types::CUcontext,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ pctx: *mut cuda_types::cuda::CUcontext,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWaitEvent(
- hStream: cuda_types::CUstream,
- hEvent: cuda_types::CUevent,
+ hStream: cuda_types::cuda::CUstream,
+ hEvent: cuda_types::cuda::CUevent,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamAddCallback(
- hStream: cuda_types::CUstream,
- callback: cuda_types::CUstreamCallback,
+ hStream: cuda_types::cuda::CUstream,
+ callback: cuda_types::cuda::CUstreamCallback,
userData: *mut ::core::ffi::c_void,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamAttachMemAsync(
- hStream: cuda_types::CUstream,
- dptr: cuda_types::CUdeviceptr,
+ hStream: cuda_types::cuda::CUstream,
+ dptr: cuda_types::cuda::CUdeviceptr,
length: usize,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
- fn cuStreamQuery(hStream: cuda_types::CUstream) -> cuda_types::CUresult;
- fn cuStreamSynchronize(hStream: cuda_types::CUstream) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
+ fn cuStreamQuery(hStream: cuda_types::cuda::CUstream) -> cuda_types::cuda::CUresult;
+ fn cuStreamSynchronize(
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuEventRecord(
- hEvent: cuda_types::CUevent,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hEvent: cuda_types::cuda::CUevent,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuEventRecordWithFlags(
- hEvent: cuda_types::CUevent,
- hStream: cuda_types::CUstream,
+ hEvent: cuda_types::cuda::CUevent,
+ hStream: cuda_types::cuda::CUstream,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuLaunchKernel(
- f: cuda_types::CUfunction,
+ f: cuda_types::cuda::CUfunction,
gridDimX: ::core::ffi::c_uint,
gridDimY: ::core::ffi::c_uint,
gridDimZ: ::core::ffi::c_uint,
@@ -19420,136 +19493,136 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
blockDimY: ::core::ffi::c_uint,
blockDimZ: ::core::ffi::c_uint,
sharedMemBytes: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
kernelParams: *mut *mut ::core::ffi::c_void,
extra: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuLaunchKernelEx(
- config: *const cuda_types::CUlaunchConfig,
- f: cuda_types::CUfunction,
+ config: *const cuda_types::cuda::CUlaunchConfig,
+ f: cuda_types::cuda::CUfunction,
kernelParams: *mut *mut ::core::ffi::c_void,
extra: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuLaunchHostFunc(
- hStream: cuda_types::CUstream,
- fn_: cuda_types::CUhostFn,
+ hStream: cuda_types::cuda::CUstream,
+ fn_: cuda_types::cuda::CUhostFn,
userData: *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphicsMapResources(
count: ::core::ffi::c_uint,
- resources: *mut cuda_types::CUgraphicsResource,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ resources: *mut cuda_types::cuda::CUgraphicsResource,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphicsUnmapResources(
count: ::core::ffi::c_uint,
- resources: *mut cuda_types::CUgraphicsResource,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ resources: *mut cuda_types::cuda::CUgraphicsResource,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWriteValue32(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWaitValue32(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWriteValue64(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWaitValue64(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamBatchMemOp(
- stream: cuda_types::CUstream,
+ stream: cuda_types::cuda::CUstream,
count: ::core::ffi::c_uint,
- paramArray: *mut cuda_types::CUstreamBatchMemOpParams,
+ paramArray: *mut cuda_types::cuda::CUstreamBatchMemOpParams,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWriteValue32_ptsz(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWaitValue32_ptsz(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWriteValue64_ptsz(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWaitValue64_ptsz(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamBatchMemOp_ptsz(
- stream: cuda_types::CUstream,
+ stream: cuda_types::cuda::CUstream,
count: ::core::ffi::c_uint,
- paramArray: *mut cuda_types::CUstreamBatchMemOpParams,
+ paramArray: *mut cuda_types::cuda::CUstreamBatchMemOpParams,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWriteValue32_v2(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWaitValue32_v2(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint32_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint32_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWriteValue64_v2(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamWaitValue64_v2(
- stream: cuda_types::CUstream,
- addr: cuda_types::CUdeviceptr,
- value: cuda_types::cuuint64_t,
+ stream: cuda_types::cuda::CUstream,
+ addr: cuda_types::cuda::CUdeviceptr,
+ value: cuda_types::cuda::cuuint64_t,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamBatchMemOp_v2(
- stream: cuda_types::CUstream,
+ stream: cuda_types::cuda::CUstream,
count: ::core::ffi::c_uint,
- paramArray: *mut cuda_types::CUstreamBatchMemOpParams,
+ paramArray: *mut cuda_types::cuda::CUstreamBatchMemOpParams,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemPrefetchAsync(
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- dstDevice: cuda_types::CUdevice,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ dstDevice: cuda_types::cuda::CUdevice,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemPrefetchAsync_v2(
- devPtr: cuda_types::CUdeviceptr,
+ devPtr: cuda_types::cuda::CUdeviceptr,
count: usize,
- location: cuda_types::CUmemLocation,
+ location: cuda_types::cuda::CUmemLocation,
flags: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuLaunchCooperativeKernel(
- f: cuda_types::CUfunction,
+ f: cuda_types::cuda::CUfunction,
gridDimX: ::core::ffi::c_uint,
gridDimY: ::core::ffi::c_uint,
gridDimZ: ::core::ffi::c_uint,
@@ -19557,181 +19630,185 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
blockDimY: ::core::ffi::c_uint,
blockDimZ: ::core::ffi::c_uint,
sharedMemBytes: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
+ hStream: cuda_types::cuda::CUstream,
kernelParams: *mut *mut ::core::ffi::c_void,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuSignalExternalSemaphoresAsync(
- extSemArray: *const cuda_types::CUexternalSemaphore,
- paramsArray: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,
+ extSemArray: *const cuda_types::cuda::CUexternalSemaphore,
+ paramsArray: *const cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,
numExtSems: ::core::ffi::c_uint,
- stream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ stream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuWaitExternalSemaphoresAsync(
- extSemArray: *const cuda_types::CUexternalSemaphore,
- paramsArray: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,
+ extSemArray: *const cuda_types::cuda::CUexternalSemaphore,
+ paramsArray: *const cuda_types::cuda::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,
numExtSems: ::core::ffi::c_uint,
- stream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
- fn cuStreamBeginCapture(hStream: cuda_types::CUstream) -> cuda_types::CUresult;
- fn cuStreamBeginCapture_ptsz(hStream: cuda_types::CUstream) -> cuda_types::CUresult;
+ stream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
+ fn cuStreamBeginCapture(
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
+ fn cuStreamBeginCapture_ptsz(
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamBeginCapture_v2(
- hStream: cuda_types::CUstream,
- mode: cuda_types::CUstreamCaptureMode,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ mode: cuda_types::cuda::CUstreamCaptureMode,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamBeginCaptureToGraph(
- hStream: cuda_types::CUstream,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
- dependencyData: *const cuda_types::CUgraphEdgeData,
+ hStream: cuda_types::cuda::CUstream,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
+ dependencyData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
- mode: cuda_types::CUstreamCaptureMode,
- ) -> cuda_types::CUresult;
+ mode: cuda_types::cuda::CUstreamCaptureMode,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamEndCapture(
- hStream: cuda_types::CUstream,
- phGraph: *mut cuda_types::CUgraph,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ phGraph: *mut cuda_types::cuda::CUgraph,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamIsCapturing(
- hStream: cuda_types::CUstream,
- captureStatus: *mut cuda_types::CUstreamCaptureStatus,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamGetCaptureInfo(
- hStream: cuda_types::CUstream,
- captureStatus_out: *mut cuda_types::CUstreamCaptureStatus,
- id_out: *mut cuda_types::cuuint64_t,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus_out: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ id_out: *mut cuda_types::cuda::cuuint64_t,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamGetCaptureInfo_ptsz(
- hStream: cuda_types::CUstream,
- captureStatus_out: *mut cuda_types::CUstreamCaptureStatus,
- id_out: *mut cuda_types::cuuint64_t,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus_out: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ id_out: *mut cuda_types::cuda::cuuint64_t,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamGetCaptureInfo_v2(
- hStream: cuda_types::CUstream,
- captureStatus_out: *mut cuda_types::CUstreamCaptureStatus,
- id_out: *mut cuda_types::cuuint64_t,
- graph_out: *mut cuda_types::CUgraph,
- dependencies_out: *mut *const cuda_types::CUgraphNode,
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus_out: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ id_out: *mut cuda_types::cuda::cuuint64_t,
+ graph_out: *mut cuda_types::cuda::CUgraph,
+ dependencies_out: *mut *const cuda_types::cuda::CUgraphNode,
numDependencies_out: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamGetCaptureInfo_v3(
- hStream: cuda_types::CUstream,
- captureStatus_out: *mut cuda_types::CUstreamCaptureStatus,
- id_out: *mut cuda_types::cuuint64_t,
- graph_out: *mut cuda_types::CUgraph,
- dependencies_out: *mut *const cuda_types::CUgraphNode,
- edgeData_out: *mut *const cuda_types::CUgraphEdgeData,
+ hStream: cuda_types::cuda::CUstream,
+ captureStatus_out: *mut cuda_types::cuda::CUstreamCaptureStatus,
+ id_out: *mut cuda_types::cuda::cuuint64_t,
+ graph_out: *mut cuda_types::cuda::CUgraph,
+ dependencies_out: *mut *const cuda_types::cuda::CUgraphNode,
+ edgeData_out: *mut *const cuda_types::cuda::CUgraphEdgeData,
numDependencies_out: *mut usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphAddKernelNode(
- phGraphNode: *mut cuda_types::CUgraphNode,
- hGraph: cuda_types::CUgraph,
- dependencies: *const cuda_types::CUgraphNode,
+ phGraphNode: *mut cuda_types::cuda::CUgraphNode,
+ hGraph: cuda_types::cuda::CUgraph,
+ dependencies: *const cuda_types::cuda::CUgraphNode,
numDependencies: usize,
- nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS_v1,
- ) -> cuda_types::CUresult;
+ nodeParams: *const cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS_v1,
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphKernelNodeGetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *mut cuda_types::CUDA_KERNEL_NODE_PARAMS_v1,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *mut cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS_v1,
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphKernelNodeSetParams(
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS_v1,
- ) -> cuda_types::CUresult;
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS_v1,
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphExecKernelNodeSetParams(
- hGraphExec: cuda_types::CUgraphExec,
- hNode: cuda_types::CUgraphNode,
- nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS_v1,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hNode: cuda_types::cuda::CUgraphNode,
+ nodeParams: *const cuda_types::cuda::CUDA_KERNEL_NODE_PARAMS_v1,
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphInstantiateWithParams(
- phGraphExec: *mut cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
- instantiateParams: *mut cuda_types::CUDA_GRAPH_INSTANTIATE_PARAMS,
- ) -> cuda_types::CUresult;
+ phGraphExec: *mut cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
+ instantiateParams: *mut cuda_types::cuda::CUDA_GRAPH_INSTANTIATE_PARAMS,
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphExecUpdate(
- hGraphExec: cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
- hErrorNode_out: *mut cuda_types::CUgraphNode,
- updateResult_out: *mut cuda_types::CUgraphExecUpdateResult,
- ) -> cuda_types::CUresult;
+ hGraphExec: cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
+ hErrorNode_out: *mut cuda_types::cuda::CUgraphNode,
+ updateResult_out: *mut cuda_types::cuda::CUgraphExecUpdateResult,
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphUpload(
- hGraph: cuda_types::CUgraphExec,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hGraph: cuda_types::cuda::CUgraphExec,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphLaunch(
- hGraph: cuda_types::CUgraphExec,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hGraph: cuda_types::cuda::CUgraphExec,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamCopyAttributes(
- dstStream: cuda_types::CUstream,
- srcStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ dstStream: cuda_types::cuda::CUstream,
+ srcStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamGetAttribute(
- hStream: cuda_types::CUstream,
- attr: cuda_types::CUstreamAttrID,
- value: *mut cuda_types::CUstreamAttrValue,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ attr: cuda_types::cuda::CUstreamAttrID,
+ value: *mut cuda_types::cuda::CUstreamAttrValue,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamSetAttribute(
- hStream: cuda_types::CUstream,
- attr: cuda_types::CUstreamAttrID,
- param: *const cuda_types::CUstreamAttrValue,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ attr: cuda_types::cuda::CUstreamAttrID,
+ param: *const cuda_types::cuda::CUstreamAttrValue,
+ ) -> cuda_types::cuda::CUresult;
fn cuIpcOpenMemHandle(
- pdptr: *mut cuda_types::CUdeviceptr,
- handle: cuda_types::CUipcMemHandle,
+ pdptr: *mut cuda_types::cuda::CUdeviceptr,
+ handle: cuda_types::cuda::CUipcMemHandle,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphInstantiate(
- phGraphExec: *mut cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
- phErrorNode: *mut cuda_types::CUgraphNode,
+ phGraphExec: *mut cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
+ phErrorNode: *mut cuda_types::cuda::CUgraphNode,
logBuffer: *mut ::core::ffi::c_char,
bufferSize: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuGraphInstantiate_v2(
- phGraphExec: *mut cuda_types::CUgraphExec,
- hGraph: cuda_types::CUgraph,
- phErrorNode: *mut cuda_types::CUgraphNode,
+ phGraphExec: *mut cuda_types::cuda::CUgraphExec,
+ hGraph: cuda_types::cuda::CUgraph,
+ phErrorNode: *mut cuda_types::cuda::CUgraphNode,
logBuffer: *mut ::core::ffi::c_char,
bufferSize: usize,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuMemMapArrayAsync(
- mapInfoList: *mut cuda_types::CUarrayMapInfo,
+ mapInfoList: *mut cuda_types::cuda::CUarrayMapInfo,
count: ::core::ffi::c_uint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemFreeAsync(
- dptr: cuda_types::CUdeviceptr,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ dptr: cuda_types::cuda::CUdeviceptr,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemAllocAsync(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytesize: usize,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuMemAllocFromPoolAsync(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
bytesize: usize,
- pool: cuda_types::CUmemoryPool,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ pool: cuda_types::cuda::CUmemoryPool,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamUpdateCaptureDependencies(
- hStream: cuda_types::CUstream,
- dependencies: *mut cuda_types::CUgraphNode,
+ hStream: cuda_types::cuda::CUstream,
+ dependencies: *mut cuda_types::cuda::CUgraphNode,
numDependencies: usize,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuStreamUpdateCaptureDependencies_v2(
- hStream: cuda_types::CUstream,
- dependencies: *mut cuda_types::CUgraphNode,
- dependencyData: *const cuda_types::CUgraphEdgeData,
+ hStream: cuda_types::cuda::CUstream,
+ dependencies: *mut cuda_types::cuda::CUgraphNode,
+ dependencyData: *const cuda_types::cuda::CUgraphEdgeData,
numDependencies: usize,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuGetProcAddress(
symbol: *const ::core::ffi::c_char,
pfn: *mut *mut ::core::ffi::c_void,
cudaVersion: ::core::ffi::c_int,
- flags: cuda_types::cuuint64_t,
- ) -> cuda_types::CUresult;
+ flags: cuda_types::cuda::cuuint64_t,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Initialize the profiling.
\deprecated
@@ -19783,8 +19860,8 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
fn cuProfilerInitialize(
configFile: *const ::core::ffi::c_char,
outputFile: *const ::core::ffi::c_char,
- outputMode: cuda_types::CUoutput_mode,
- ) -> cuda_types::CUresult;
+ outputMode: cuda_types::cuda::CUoutput_mode,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Enable profiling.
Enables profile collection by the active profiling tool for the
@@ -19805,7 +19882,7 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuProfilerInitialize,
::cuProfilerStop,
::cudaProfilerStart*/
- fn cuProfilerStart() -> cuda_types::CUresult;
+ fn cuProfilerStart() -> cuda_types::cuda::CUresult;
/** \brief Disable profiling.
Disables profile collection by the active profiling tool for the
@@ -19825,7 +19902,7 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuProfilerInitialize,
::cuProfilerStart,
::cudaProfilerStop*/
- fn cuProfilerStop() -> cuda_types::CUresult;
+ fn cuProfilerStop() -> cuda_types::cuda::CUresult;
/** \brief Registers an OpenGL buffer object
Registers the buffer object specified by \p buffer for access by
@@ -19861,10 +19938,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsResourceGetMappedPointer,
::cudaGraphicsGLRegisterBuffer*/
fn cuGraphicsGLRegisterBuffer(
- pCudaResource: *mut cuda_types::CUgraphicsResource,
- buffer: cuda_types::GLuint,
+ pCudaResource: *mut cuda_types::cuda::CUgraphicsResource,
+ buffer: cuda_types::cuda::GLuint,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Register an OpenGL texture or renderbuffer object
Registers the texture or renderbuffer object specified by \p image for access by CUDA.
@@ -19921,11 +19998,11 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsSubResourceGetMappedArray,
::cudaGraphicsGLRegisterImage*/
fn cuGraphicsGLRegisterImage(
- pCudaResource: *mut cuda_types::CUgraphicsResource,
- image: cuda_types::GLuint,
- target: cuda_types::GLenum,
+ pCudaResource: *mut cuda_types::cuda::CUgraphicsResource,
+ image: cuda_types::cuda::GLuint,
+ target: cuda_types::cuda::GLenum,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the CUDA devices associated with the current OpenGL context
Returns in \p *pCudaDeviceCount the number of CUDA-compatible devices
@@ -19962,10 +20039,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cudaGLGetDevices*/
fn cuGLGetDevices_v2(
pCudaDeviceCount: *mut ::core::ffi::c_uint,
- pCudaDevices: *mut cuda_types::CUdevice,
+ pCudaDevices: *mut cuda_types::cuda::CUdevice,
cudaDeviceCount: ::core::ffi::c_uint,
- deviceList: cuda_types::CUGLDeviceList,
- ) -> cuda_types::CUresult;
+ deviceList: cuda_types::cuda::CUGLDeviceList,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a CUDA context for interoperability with OpenGL
\deprecated This function is deprecated as of Cuda 5.0.
@@ -19993,10 +20070,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGLUnmapBufferObjectAsync, ::cuGLSetBufferObjectMapFlags,
::cuWGLGetDevice*/
fn cuGLCtxCreate_v2(
- pCtx: *mut cuda_types::CUcontext,
+ pCtx: *mut cuda_types::cuda::CUcontext,
Flags: ::core::ffi::c_uint,
- device: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ device: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Initializes OpenGL interoperability
\deprecated This function is deprecated as of Cuda 3.0.
@@ -20018,7 +20095,7 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGLUnregisterBufferObject, ::cuGLMapBufferObjectAsync,
::cuGLUnmapBufferObjectAsync, ::cuGLSetBufferObjectMapFlags,
::cuWGLGetDevice*/
- fn cuGLInit() -> cuda_types::CUresult;
+ fn cuGLInit() -> cuda_types::cuda::CUresult;
/** \brief Registers an OpenGL buffer object
\deprecated This function is deprecated as of Cuda 3.0.
@@ -20040,7 +20117,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\notefnerr
\sa ::cuGraphicsGLRegisterBuffer*/
- fn cuGLRegisterBufferObject(buffer: cuda_types::GLuint) -> cuda_types::CUresult;
+ fn cuGLRegisterBufferObject(
+ buffer: cuda_types::cuda::GLuint,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Maps an OpenGL buffer object
\deprecated This function is deprecated as of Cuda 3.0.
@@ -20072,10 +20151,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\sa ::cuGraphicsMapResources*/
fn cuGLMapBufferObject_v2_ptds(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
size: *mut usize,
- buffer: cuda_types::GLuint,
- ) -> cuda_types::CUresult;
+ buffer: cuda_types::cuda::GLuint,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unmaps an OpenGL buffer object
\deprecated This function is deprecated as of Cuda 3.0.
@@ -20101,7 +20180,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\notefnerr
\sa ::cuGraphicsUnmapResources*/
- fn cuGLUnmapBufferObject(buffer: cuda_types::GLuint) -> cuda_types::CUresult;
+ fn cuGLUnmapBufferObject(
+ buffer: cuda_types::cuda::GLuint,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unregister an OpenGL buffer object
\deprecated This function is deprecated as of Cuda 3.0.
@@ -20127,7 +20208,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\notefnerr
\sa ::cuGraphicsUnregisterResource*/
- fn cuGLUnregisterBufferObject(buffer: cuda_types::GLuint) -> cuda_types::CUresult;
+ fn cuGLUnregisterBufferObject(
+ buffer: cuda_types::cuda::GLuint,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Set the map flags for an OpenGL buffer object
\deprecated This function is deprecated as of Cuda 3.0.
@@ -20168,9 +20251,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\sa ::cuGraphicsResourceSetMapFlags*/
fn cuGLSetBufferObjectMapFlags(
- buffer: cuda_types::GLuint,
+ buffer: cuda_types::cuda::GLuint,
Flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Maps an OpenGL buffer object
\deprecated This function is deprecated as of Cuda 3.0.
@@ -20203,11 +20286,11 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\sa ::cuGraphicsMapResources*/
fn cuGLMapBufferObjectAsync_v2_ptsz(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
size: *mut usize,
- buffer: cuda_types::GLuint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ buffer: cuda_types::cuda::GLuint,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Unmaps an OpenGL buffer object
\deprecated This function is deprecated as of Cuda 3.0.
@@ -20235,42 +20318,42 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
\sa ::cuGraphicsUnmapResources*/
fn cuGLUnmapBufferObjectAsync(
- buffer: cuda_types::GLuint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ buffer: cuda_types::cuda::GLuint,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuGLGetDevices(
pCudaDeviceCount: *mut ::core::ffi::c_uint,
- pCudaDevices: *mut cuda_types::CUdevice,
+ pCudaDevices: *mut cuda_types::cuda::CUdevice,
cudaDeviceCount: ::core::ffi::c_uint,
- deviceList: cuda_types::CUGLDeviceList,
- ) -> cuda_types::CUresult;
+ deviceList: cuda_types::cuda::CUGLDeviceList,
+ ) -> cuda_types::cuda::CUresult;
fn cuGLMapBufferObject_v2(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
size: *mut usize,
- buffer: cuda_types::GLuint,
- ) -> cuda_types::CUresult;
+ buffer: cuda_types::cuda::GLuint,
+ ) -> cuda_types::cuda::CUresult;
fn cuGLMapBufferObjectAsync_v2(
- dptr: *mut cuda_types::CUdeviceptr,
+ dptr: *mut cuda_types::cuda::CUdeviceptr,
size: *mut usize,
- buffer: cuda_types::GLuint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ buffer: cuda_types::cuda::GLuint,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
fn cuGLCtxCreate(
- pCtx: *mut cuda_types::CUcontext,
+ pCtx: *mut cuda_types::cuda::CUcontext,
Flags: ::core::ffi::c_uint,
- device: cuda_types::CUdevice,
- ) -> cuda_types::CUresult;
+ device: cuda_types::cuda::CUdevice,
+ ) -> cuda_types::cuda::CUresult;
fn cuGLMapBufferObject(
- dptr: *mut cuda_types::CUdeviceptr_v1,
+ dptr: *mut cuda_types::cuda::CUdeviceptr_v1,
size: *mut ::core::ffi::c_uint,
- buffer: cuda_types::GLuint,
- ) -> cuda_types::CUresult;
+ buffer: cuda_types::cuda::GLuint,
+ ) -> cuda_types::cuda::CUresult;
fn cuGLMapBufferObjectAsync(
- dptr: *mut cuda_types::CUdeviceptr_v1,
+ dptr: *mut cuda_types::cuda::CUdeviceptr_v1,
size: *mut ::core::ffi::c_uint,
- buffer: cuda_types::GLuint,
- hStream: cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ buffer: cuda_types::cuda::GLuint,
+ hStream: cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Registers an EGL image
Registers the EGLImageKHR specified by \p image for access by
@@ -20317,10 +20400,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsUnmapResources,
::cudaGraphicsEGLRegisterImage*/
fn cuGraphicsEGLRegisterImage(
- pCudaResource: *mut cuda_types::CUgraphicsResource,
- image: cuda_types::EGLImageKHR,
+ pCudaResource: *mut cuda_types::cuda::CUgraphicsResource,
+ image: cuda_types::cuda::EGLImageKHR,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Connect CUDA to EGLStream as a consumer.
Connect CUDA as a consumer to EGLStreamKHR specified by \p stream.
@@ -20340,9 +20423,9 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame,
::cudaEGLStreamConsumerConnect*/
fn cuEGLStreamConsumerConnect(
- conn: *mut cuda_types::CUeglStreamConnection,
- stream: cuda_types::EGLStreamKHR,
- ) -> cuda_types::CUresult;
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ stream: cuda_types::cuda::EGLStreamKHR,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Connect CUDA to EGLStream as a consumer with given flags.
Connect CUDA as a consumer to EGLStreamKHR specified by \p stream with specified \p flags defined by CUeglResourceLocationFlags.
@@ -20363,10 +20446,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame,
::cudaEGLStreamConsumerConnectWithFlags*/
fn cuEGLStreamConsumerConnectWithFlags(
- conn: *mut cuda_types::CUeglStreamConnection,
- stream: cuda_types::EGLStreamKHR,
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ stream: cuda_types::cuda::EGLStreamKHR,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Disconnect CUDA as a consumer to EGLStream .
Disconnect CUDA as a consumer to EGLStreamKHR.
@@ -20382,8 +20465,8 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame,
::cudaEGLStreamConsumerDisconnect*/
fn cuEGLStreamConsumerDisconnect(
- conn: *mut cuda_types::CUeglStreamConnection,
- ) -> cuda_types::CUresult;
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Acquire an image frame from the EGLStream with CUDA as a consumer.
Acquire an image frame from EGLStreamKHR. This API can also acquire an old frame presented
@@ -20410,11 +20493,11 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame,
::cudaEGLStreamConsumerAcquireFrame*/
fn cuEGLStreamConsumerAcquireFrame(
- conn: *mut cuda_types::CUeglStreamConnection,
- pCudaResource: *mut cuda_types::CUgraphicsResource,
- pStream: *mut cuda_types::CUstream,
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ pCudaResource: *mut cuda_types::cuda::CUgraphicsResource,
+ pStream: *mut cuda_types::cuda::CUstream,
timeout: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Releases the last frame acquired from the EGLStream.
Release the acquired image frame specified by \p pCudaResource to EGLStreamKHR.
@@ -20434,10 +20517,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame,
::cudaEGLStreamConsumerReleaseFrame*/
fn cuEGLStreamConsumerReleaseFrame(
- conn: *mut cuda_types::CUeglStreamConnection,
- pCudaResource: cuda_types::CUgraphicsResource,
- pStream: *mut cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ pCudaResource: cuda_types::cuda::CUgraphicsResource,
+ pStream: *mut cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
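The consumer-side declarations above (connect, acquire, release, disconnect) are used as a cycle. Below is a schematic sketch of that call order only, with locally declared externs and opaque pointer stand-ins instead of the real cuda_types::cuda handle types; the #[link(name = "cuda")] attribute, the loop bound and the timeout value are assumptions made for illustration, not taken from this diff.

use core::ffi::{c_int, c_uint, c_void};

// Opaque stand-ins for the cuda_types::cuda handle types used above.
pub type CUeglStreamConnection = *mut c_void;
pub type EGLStreamKHR = *mut c_void;
pub type CUgraphicsResource = *mut c_void;
pub type CUstream = *mut c_void;

#[allow(non_snake_case)]
#[link(name = "cuda")] // assumed link target for the driver API
extern "system" {
    fn cuEGLStreamConsumerConnect(conn: *mut CUeglStreamConnection, stream: EGLStreamKHR) -> c_int;
    fn cuEGLStreamConsumerAcquireFrame(
        conn: *mut CUeglStreamConnection,
        pCudaResource: *mut CUgraphicsResource,
        pStream: *mut CUstream,
        timeout: c_uint,
    ) -> c_int;
    fn cuEGLStreamConsumerReleaseFrame(
        conn: *mut CUeglStreamConnection,
        pCudaResource: CUgraphicsResource,
        pStream: *mut CUstream,
    ) -> c_int;
    fn cuEGLStreamConsumerDisconnect(conn: *mut CUeglStreamConnection) -> c_int;
}

pub unsafe fn consume_frames(egl_stream: EGLStreamKHR, mut cuda_stream: CUstream) -> c_int {
    let mut conn: CUeglStreamConnection = core::ptr::null_mut();
    let mut rc = cuEGLStreamConsumerConnect(&mut conn, egl_stream);
    if rc != 0 {
        return rc;
    }
    for _ in 0..16 {
        let mut resource: CUgraphicsResource = core::ptr::null_mut();
        rc = cuEGLStreamConsumerAcquireFrame(&mut conn, &mut resource, &mut cuda_stream, 1_000);
        if rc != 0 {
            break;
        }
        // ... map `resource` and launch work on `cuda_stream` here ...
        rc = cuEGLStreamConsumerReleaseFrame(&mut conn, resource, &mut cuda_stream);
        if rc != 0 {
            break;
        }
    }
    cuEGLStreamConsumerDisconnect(&mut conn);
    rc
}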
/** \brief Connect CUDA to EGLStream as a producer.
Connect CUDA as a producer to EGLStreamKHR specified by \p stream.
@@ -20459,11 +20542,11 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuEGLStreamProducerPresentFrame,
::cudaEGLStreamProducerConnect*/
fn cuEGLStreamProducerConnect(
- conn: *mut cuda_types::CUeglStreamConnection,
- stream: cuda_types::EGLStreamKHR,
- width: cuda_types::EGLint,
- height: cuda_types::EGLint,
- ) -> cuda_types::CUresult;
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ stream: cuda_types::cuda::EGLStreamKHR,
+ width: cuda_types::cuda::EGLint,
+ height: cuda_types::cuda::EGLint,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Disconnect CUDA as a producer to EGLStream .
Disconnect CUDA as a producer to EGLStreamKHR.
@@ -20479,8 +20562,8 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuEGLStreamProducerPresentFrame,
::cudaEGLStreamProducerDisconnect*/
fn cuEGLStreamProducerDisconnect(
- conn: *mut cuda_types::CUeglStreamConnection,
- ) -> cuda_types::CUresult;
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Present a CUDA eglFrame to the EGLStream with CUDA as a producer.
When a frame is presented by the producer, it gets associated with the EGLStream
@@ -20526,10 +20609,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuEGLStreamProducerReturnFrame,
::cudaEGLStreamProducerPresentFrame*/
fn cuEGLStreamProducerPresentFrame(
- conn: *mut cuda_types::CUeglStreamConnection,
- eglframe: cuda_types::CUeglFrame,
- pStream: *mut cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ eglframe: cuda_types::cuda::CUeglFrame,
+ pStream: *mut cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Return the CUDA eglFrame to the EGLStream released by the consumer.
This API can potentially return CUDA_ERROR_LAUNCH_TIMEOUT if the consumer has not
@@ -20548,10 +20631,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuEGLStreamProducerPresentFrame,
::cudaEGLStreamProducerReturnFrame*/
fn cuEGLStreamProducerReturnFrame(
- conn: *mut cuda_types::CUeglStreamConnection,
- eglframe: *mut cuda_types::CUeglFrame,
- pStream: *mut cuda_types::CUstream,
- ) -> cuda_types::CUresult;
+ conn: *mut cuda_types::cuda::CUeglStreamConnection,
+ eglframe: *mut cuda_types::cuda::CUeglFrame,
+ pStream: *mut cuda_types::cuda::CUstream,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Get an eglFrame through which to access a registered EGL graphics resource.
Returns in \p *eglFrame an eglFrame pointer through which the registered graphics resource
@@ -20599,11 +20682,11 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsResourceGetMappedPointer,
::cudaGraphicsResourceGetMappedEglFrame*/
fn cuGraphicsResourceGetMappedEglFrame(
- eglFrame: *mut cuda_types::CUeglFrame,
- resource: cuda_types::CUgraphicsResource,
+ eglFrame: *mut cuda_types::cuda::CUeglFrame,
+ resource: cuda_types::cuda::CUgraphicsResource,
index: ::core::ffi::c_uint,
mipLevel: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Creates an event from EGLSync object
Creates an event *phEvent from an EGLSyncKHR eglSync with the flags specified
@@ -20639,10 +20722,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuEventSynchronize,
::cuEventDestroy*/
fn cuEventCreateFromEGLSync(
- phEvent: *mut cuda_types::CUevent,
- eglSync: cuda_types::EGLSyncKHR,
+ phEvent: *mut cuda_types::cuda::CUevent,
+ eglSync: cuda_types::cuda::EGLSyncKHR,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Gets the CUDA device associated with a VDPAU device
Returns in \p *pDevice the CUDA device associated with a \p vdpDevice, if
@@ -20666,10 +20749,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
::cudaVDPAUGetDevice*/
fn cuVDPAUGetDevice(
- pDevice: *mut cuda_types::CUdevice,
- vdpDevice: cuda_types::VdpDevice,
- vdpGetProcAddress: cuda_types::VdpGetProcAddress,
- ) -> cuda_types::CUresult;
+ pDevice: *mut cuda_types::cuda::CUdevice,
+ vdpDevice: cuda_types::cuda::VdpDevice,
+ vdpGetProcAddress: cuda_types::cuda::VdpGetProcAddress,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Create a CUDA context for interoperability with VDPAU
Creates a new CUDA context, initializes VDPAU interoperability, and
@@ -20699,12 +20782,12 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
::cuVDPAUGetDevice*/
fn cuVDPAUCtxCreate_v2(
- pCtx: *mut cuda_types::CUcontext,
+ pCtx: *mut cuda_types::cuda::CUcontext,
flags: ::core::ffi::c_uint,
- device: cuda_types::CUdevice,
- vdpDevice: cuda_types::VdpDevice,
- vdpGetProcAddress: cuda_types::VdpGetProcAddress,
- ) -> cuda_types::CUresult;
+ device: cuda_types::cuda::CUdevice,
+ vdpDevice: cuda_types::cuda::VdpDevice,
+ vdpGetProcAddress: cuda_types::cuda::VdpGetProcAddress,
+ ) -> cuda_types::cuda::CUresult;
/** \brief Registers a VDPAU VdpVideoSurface object
Registers the VdpVideoSurface specified by \p vdpSurface for access by
@@ -20776,10 +20859,10 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuVDPAUGetDevice,
::cudaGraphicsVDPAURegisterVideoSurface*/
fn cuGraphicsVDPAURegisterVideoSurface(
- pCudaResource: *mut cuda_types::CUgraphicsResource,
- vdpSurface: cuda_types::VdpVideoSurface,
+ pCudaResource: *mut cuda_types::cuda::CUgraphicsResource,
+ vdpSurface: cuda_types::cuda::VdpVideoSurface,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
/** \brief Registers a VDPAU VdpOutputSurface object
Registers the VdpOutputSurface specified by \p vdpSurface for access by
@@ -20838,15 +20921,15 @@ Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
::cuVDPAUGetDevice,
::cudaGraphicsVDPAURegisterOutputSurface*/
fn cuGraphicsVDPAURegisterOutputSurface(
- pCudaResource: *mut cuda_types::CUgraphicsResource,
- vdpSurface: cuda_types::VdpOutputSurface,
+ pCudaResource: *mut cuda_types::cuda::CUgraphicsResource,
+ vdpSurface: cuda_types::cuda::VdpOutputSurface,
flags: ::core::ffi::c_uint,
- ) -> cuda_types::CUresult;
+ ) -> cuda_types::cuda::CUresult;
fn cuVDPAUCtxCreate(
- pCtx: *mut cuda_types::CUcontext,
+ pCtx: *mut cuda_types::cuda::CUcontext,
flags: ::core::ffi::c_uint,
- device: cuda_types::CUdevice,
- vdpDevice: cuda_types::VdpDevice,
- vdpGetProcAddress: cuda_types::VdpGetProcAddress,
- ) -> cuda_types::CUresult;
+ device: cuda_types::cuda::CUdevice,
+ vdpDevice: cuda_types::cuda::VdpDevice,
+ vdpGetProcAddress: cuda_types::cuda::VdpGetProcAddress,
+ ) -> cuda_types::cuda::CUresult;
}
diff --git a/cuda_base/src/lib.rs b/cuda_base/src/lib.rs
index 833d372..58f5eae 100644
--- a/cuda_base/src/lib.rs
+++ b/cuda_base/src/lib.rs
@@ -14,6 +14,7 @@ use syn::{
};
const CUDA_RS: &'static str = include_str! {"cuda.rs"};
+const NVML_RS: &'static str = include_str! {"nvml.rs"};
// This macro accepts following arguments:
// * `normal_macro`: ident for a normal macro
@@ -31,9 +32,13 @@ const CUDA_RS: &'static str = include_str! {"cuda.rs"};
// Additionally, it does a fixup of CUDA types so they get prefixed with `type_path`
#[proc_macro]
pub fn cuda_function_declarations(tokens: TokenStream) -> TokenStream {
+ function_declarations(tokens, CUDA_RS)
+}
+
+fn function_declarations(tokens: TokenStream, module: &str) -> TokenStream {
let input = parse_macro_input!(tokens as FnDeclInput);
+ let mut cuda_module = syn::parse_str::<File>(module).unwrap();
let mut choose_macro = ChooseMacro::new(input);
- let mut cuda_module = syn::parse_str::<File>(CUDA_RS).unwrap();
syn::visit_mut::visit_file_mut(&mut FixFnSignatures, &mut cuda_module);
let extern_ = if let Item::ForeignMod(extern_) = cuda_module.items.pop().unwrap() {
extern_
@@ -68,6 +73,11 @@ pub fn cuda_function_declarations(tokens: TokenStream) -> TokenStream {
}
result.into()
}
+
+#[proc_macro]
+pub fn nvml_function_declarations(tokens: TokenStream) -> TokenStream {
+ function_declarations(tokens, NVML_RS)
+}
struct FnDeclInput {
normal_macro: Path,
overrides: Punctuated<OverrideMacro, Token![,]>,
@@ -193,6 +203,7 @@ fn join(fn_: Vec<String>, find_module: bool) -> Punctuated<Ident, Token![::]> {
"func" => &["function"],
"mem" => &["memory"],
"memcpy" => &["memory", "copy"],
+ "memset" => &["memory", "set"],
_ => return None,
})
}
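The new "memset" arm extends the abbreviation-to-module table that join consults when splitting a function name into module path segments. A standalone sketch of that lookup (the helper name module_segments and the asserts are illustrative only; the real join builds syn identifiers rather than returning string slices):

// Illustrative mirror of the mapping table shown in the hunk above.
fn module_segments(prefix: &str) -> Option<&'static [&'static str]> {
    Some(match prefix {
        "func" => &["function"],
        "mem" => &["memory"],
        "memcpy" => &["memory", "copy"],
        "memset" => &["memory", "set"], // the arm added in this change
        _ => return None,
    })
}

fn main() {
    assert_eq!(module_segments("memset"), Some(&["memory", "set"][..]));
    assert_eq!(module_segments("unknown"), None);
}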
diff --git a/cuda_base/src/nvml.rs b/cuda_base/src/nvml.rs
new file mode 100644
index 0000000..b89ef7a
--- /dev/null
+++ b/cuda_base/src/nvml.rs
@@ -0,0 +1,7857 @@
+// Generated automatically by zluda_bindgen
+// DO NOT EDIT MANUALLY
+#![allow(warnings)]
+extern "system" {
+ #[must_use]
+ /** Initialize NVML, but don't initialize any GPUs yet.
+
+ \note nvmlInit_v3 introduces a "flags" argument that allows passing boolean values
+ modifying the behaviour of nvmlInit().
+ \note In NVML 5.319 new nvmlInit_v2 has replaced nvmlInit"_v1" (default in NVML 4.304 and older) that
+ did initialize all GPU devices in the system.
+
+ This allows NVML to communicate with a GPU
+ when other GPUs in the system are unstable or in a bad state. When using this API, GPUs are
+ discovered and initialized in nvmlDeviceGetHandleBy* functions instead.
+
+ \note To contrast nvmlInit_v2 with nvmlInit"_v1", NVML 4.304 nvmlInit"_v1" will fail when any detected GPU is in
+ a bad or unstable state.
+
+ For all products.
+
+ This method should be called once before invoking any other methods in the library.
+ A reference count of the number of initializations is maintained. Shutdown only occurs
+ when the reference count reaches zero.
+
+ @return
+ - \ref NVML_SUCCESS if NVML has been properly initialized
+ - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running
+ - \ref NVML_ERROR_NO_PERMISSION if NVML does not have permission to talk to the driver
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlInit_v2() -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** nvmlInitWithFlags is a variant of nvmlInit() that allows passing a set of boolean values
+ modifying the behaviour of nvmlInit().
+ Other than the "flags" parameter it is completely similar to \ref nvmlInit_v2.
+
+ For all products.
+
+ @param flags behaviour modifier flags
+
+ @return
+ - \ref NVML_SUCCESS if NVML has been properly initialized
+ - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running
+ - \ref NVML_ERROR_NO_PERMISSION if NVML does not have permission to talk to the driver
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlInitWithFlags(flags: ::core::ffi::c_uint) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Shut down NVML by releasing all GPU resources previously allocated with \ref nvmlInit_v2().
+
+ For all products.
+
+ This method should be called after NVML work is done, once for each call to \ref nvmlInit_v2().
+ A reference count of the number of initializations is maintained. Shutdown only occurs
+ when the reference count reaches zero. For backwards compatibility, no error is reported if
+ nvmlShutdown() is called more times than nvmlInit().
+
+ @return
+ - \ref NVML_SUCCESS if NVML has been properly shut down
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlShutdown() -> cuda_types::nvml::nvmlReturn_t;
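Because initialization is reference-counted as described above, a process typically brackets all NVML work between one init and one shutdown call. A minimal sketch of that bracket (the locally declared extern block, the "nvidia-ml" link name and treating 0 as NVML_SUCCESS are assumptions for illustration; the generated bindings use cuda_types::nvml::nvmlReturn_t instead):

use core::ffi::c_int;

#[link(name = "nvidia-ml")] // assumed: the system NVML shared library
extern "system" {
    fn nvmlInit_v2() -> c_int;
    fn nvmlShutdown() -> c_int;
}

fn main() {
    unsafe {
        // nvmlInit_v2 does not initialize GPUs up front; devices are brought
        // up later by the nvmlDeviceGetHandleBy* calls.
        if nvmlInit_v2() != 0 {
            eprintln!("NVML initialization failed");
            return;
        }
        // ... query the system and devices here ...
        // Shutdown decrements the reference count; resources are released
        // only when it reaches zero.
        let _ = nvmlShutdown();
    }
}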
+ /** Helper method for converting NVML error codes into readable strings.
+
+ For all products.
+
+ @param result NVML error code to convert
+
+ @return String representation of the error.
+*/
+ fn nvmlErrorString(
+ result: cuda_types::nvml::nvmlReturn_t,
+ ) -> *const ::core::ffi::c_char;
+ #[must_use]
+ /** Retrieves the version of the system's graphics driver.
+
+ For all products.
+
+ The version identifier is an alphanumeric string. It will not exceed 80 characters in length
+ (including the NULL terminator). See \ref nvmlConstants::NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE.
+
+ @param version Reference in which to return the version identifier
+ @param length The maximum allowed length of the string returned in \a version
+
+ @return
+ - \ref NVML_SUCCESS if \a version has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small*/
+ fn nvmlSystemGetDriverVersion(
+ version: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the version of the NVML library.
+
+ For all products.
+
+ The version identifier is an alphanumeric string. It will not exceed 80 characters in length
+ (including the NULL terminator). See \ref nvmlConstants::NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE.
+
+ @param version Reference in which to return the version identifier
+ @param length The maximum allowed length of the string returned in \a version
+
+ @return
+ - \ref NVML_SUCCESS if \a version has been set
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small*/
+ fn nvmlSystemGetNVMLVersion(
+ version: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the version of the CUDA driver.
+
+ For all products.
+
+ The returned CUDA driver version will be retrieved from the currently installed version of CUDA.
+ If the cuda library is not found, this function will return a known supported version number.
+
+ @param cudaDriverVersion Reference in which to return the version identifier
+
+ @return
+ - \ref NVML_SUCCESS if \a cudaDriverVersion has been set
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a cudaDriverVersion is NULL*/
+ fn nvmlSystemGetCudaDriverVersion(
+ cudaDriverVersion: *mut ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the version of the CUDA driver from the shared library.
+
+ For all products.
+
+ The CUDA driver version is returned by calling cuDriverGetVersion() from the shared library.
+
+ @param cudaDriverVersion Reference in which to return the version identifier
+
+ @return
+ - \ref NVML_SUCCESS if \a cudaDriverVersion has been set
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a cudaDriverVersion is NULL
+ - \ref NVML_ERROR_LIBRARY_NOT_FOUND if \a libcuda.so.1 or libcuda.dll is not found
+ - \ref NVML_ERROR_FUNCTION_NOT_FOUND if \a cuDriverGetVersion() is not found in the shared library*/
+ fn nvmlSystemGetCudaDriverVersion_v2(
+ cudaDriverVersion: *mut ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Gets name of the process with provided process id
+
+ For all products.
+
+ The returned process name is cropped to the provided length.
+ The \a name string is encoded in ANSI.
+
+ @param pid The identifier of the process
+ @param name Reference in which to return the process name
+ @param length The maximum allowed length of the string returned in \a name
+
+ @return
+ - \ref NVML_SUCCESS if \a name has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a name is NULL or \a length is 0.
+ - \ref NVML_ERROR_NOT_FOUND if the process doesn't exist
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlSystemGetProcessName(
+ pid: ::core::ffi::c_uint,
+ name: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the IDs and firmware versions for any Host Interface Cards (HICs) in the system.
+
+ For S-class products.
+
+ The \a hwbcCount argument is expected to be set to the size of the input \a hwbcEntries array.
+ The HIC must be connected to an S-class system for it to be reported by this function.
+
+ @param hwbcCount Size of hwbcEntries array
+ @param hwbcEntries Array holding information about hwbc
+
+ @return
+ - \ref NVML_SUCCESS if \a hwbcCount and \a hwbcEntries have been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if either \a hwbcCount or \a hwbcEntries is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a hwbcCount indicates that the \a hwbcEntries array is too small*/
+ fn nvmlSystemGetHicVersion(
+ hwbcCount: *mut ::core::ffi::c_uint,
+ hwbcEntries: *mut cuda_types::nvml::nvmlHwbcEntry_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the set of GPUs that have a CPU affinity with the given CPU number.
+ For all products.
+ Supported on Linux only.
+
+ @param cpuNumber The CPU number
+ @param count When zero, is set to the number of matching GPUs such that \a deviceArray
+ can be malloc'd. When non-zero, \a deviceArray will be filled with \a count
+ number of device handles.
+ @param deviceArray An array of device handles for GPUs found with affinity to \a cpuNumber
+
+ @return
+ - \ref NVML_SUCCESS if \a deviceArray or \a count (if initially zero) has been set
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a cpuNumber, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature
+ - \ref NVML_ERROR_UNKNOWN if an error has occurred in underlying topology discovery*/
+ fn nvmlSystemGetTopologyGpuSet(
+ cpuNumber: ::core::ffi::c_uint,
+ count: *mut ::core::ffi::c_uint,
+ deviceArray: *mut cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
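The count parameter above follows the size-query convention spelled out in the comment: call once with *count == 0 to learn how many handles match, then call again with a buffer of that size. A sketch of that two-call pattern (locally declared extern, opaque pointer stand-in for nvmlDevice_t, "nvidia-ml" link name and 0-as-success assumed for illustration):

use core::ffi::{c_int, c_uint, c_void};

pub type NvmlDevice = *mut c_void; // opaque stand-in for cuda_types::nvml::nvmlDevice_t

#[allow(non_snake_case)]
#[link(name = "nvidia-ml")] // assumed link target
extern "system" {
    fn nvmlSystemGetTopologyGpuSet(
        cpuNumber: c_uint,
        count: *mut c_uint,
        deviceArray: *mut NvmlDevice,
    ) -> c_int;
}

pub unsafe fn gpus_near_cpu(cpu: c_uint) -> Vec<NvmlDevice> {
    // First call: *count == 0 asks NVML how many handles match this CPU.
    let mut count: c_uint = 0;
    if nvmlSystemGetTopologyGpuSet(cpu, &mut count, core::ptr::null_mut()) != 0 {
        return Vec::new();
    }
    // Second call: pass a buffer with exactly `count` entries to be filled.
    let mut handles: Vec<NvmlDevice> = vec![core::ptr::null_mut(); count as usize];
    if nvmlSystemGetTopologyGpuSet(cpu, &mut count, handles.as_mut_ptr()) != 0 {
        return Vec::new();
    }
    handles
}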
+ #[must_use]
+ /** Retrieves the number of units in the system.
+
+ For S-class products.
+
+ @param unitCount Reference in which to return the number of units
+
+ @return
+ - \ref NVML_SUCCESS if \a unitCount has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a unitCount is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlUnitGetCount(
+ unitCount: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Acquire the handle for a particular unit, based on its index.
+
+ For S-class products.
+
+ Valid indices are derived from the \a unitCount returned by \ref nvmlUnitGetCount().
+ For example, if \a unitCount is 2 the valid indices are 0 and 1, corresponding to UNIT 0 and UNIT 1.
+
+ The order in which NVML enumerates units has no guarantees of consistency between reboots.
+
+ @param index The index of the target unit, >= 0 and < \a unitCount
+ @param unit Reference in which to return the unit handle
+
+ @return
+ - \ref NVML_SUCCESS if \a unit has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a unit is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlUnitGetHandleByIndex(
+ index: ::core::ffi::c_uint,
+ unit: *mut cuda_types::nvml::nvmlUnit_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the static information associated with a unit.
+
+ For S-class products.
+
+ See \ref nvmlUnitInfo_t for details on available unit info.
+
+ @param unit The identifier of the target unit
+ @param info Reference in which to return the unit information
+
+ @return
+ - \ref NVML_SUCCESS if \a info has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a info is NULL*/
+ fn nvmlUnitGetUnitInfo(
+ unit: cuda_types::nvml::nvmlUnit_t,
+ info: *mut cuda_types::nvml::nvmlUnitInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the LED state associated with this unit.
+
+ For S-class products.
+
+ See \ref nvmlLedState_t for details on allowed states.
+
+ @param unit The identifier of the target unit
+ @param state Reference in which to return the current LED state
+
+ @return
+ - \ref NVML_SUCCESS if \a state has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a state is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlUnitSetLedState()*/
+ fn nvmlUnitGetLedState(
+ unit: cuda_types::nvml::nvmlUnit_t,
+ state: *mut cuda_types::nvml::nvmlLedState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the PSU stats for the unit.
+
+ For S-class products.
+
+ See \ref nvmlPSUInfo_t for details on available PSU info.
+
+ @param unit The identifier of the target unit
+ @param psu Reference in which to return the PSU information
+
+ @return
+ - \ref NVML_SUCCESS if \a psu has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a psu is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlUnitGetPsuInfo(
+ unit: cuda_types::nvml::nvmlUnit_t,
+ psu: *mut cuda_types::nvml::nvmlPSUInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the temperature readings for the unit, in degrees C.
+
+ For S-class products.
+
+ Depending on the product, readings may be available for intake (type=0),
+ exhaust (type=1) and board (type=2).
+
+ @param unit The identifier of the target unit
+ @param type The type of reading to take
+ @param temp Reference in which to return the intake temperature
+
+ @return
+ - \ref NVML_SUCCESS if \a temp has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit or \a type is invalid or \a temp is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlUnitGetTemperature(
+ unit: cuda_types::nvml::nvmlUnit_t,
+ type_: ::core::ffi::c_uint,
+ temp: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the fan speed readings for the unit.
+
+ For S-class products.
+
+ See \ref nvmlUnitFanSpeeds_t for details on available fan speed info.
+
+ @param unit The identifier of the target unit
+ @param fanSpeeds Reference in which to return the fan speed information
+
+ @return
+ - \ref NVML_SUCCESS if \a fanSpeeds has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a fanSpeeds is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlUnitGetFanSpeedInfo(
+ unit: cuda_types::nvml::nvmlUnit_t,
+ fanSpeeds: *mut cuda_types::nvml::nvmlUnitFanSpeeds_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the set of GPU devices that are attached to the specified unit.
+
+ For S-class products.
+
+ The \a deviceCount argument is expected to be set to the size of the input \a devices array.
+
+ @param unit The identifier of the target unit
+ @param deviceCount Reference in which to provide the \a devices array size, and
+ to return the number of attached GPU devices
+ @param devices Reference in which to return the references to the attached GPU devices
+
+ @return
+ - \ref NVML_SUCCESS if \a deviceCount and \a devices have been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a deviceCount indicates that the \a devices array is too small
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid, or either of \a deviceCount or \a devices is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlUnitGetDevices(
+ unit: cuda_types::nvml::nvmlUnit_t,
+ deviceCount: *mut ::core::ffi::c_uint,
+ devices: *mut cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the number of compute devices in the system. A compute device is a single GPU.
+
+ For all products.
+
+ Note: New nvmlDeviceGetCount_v2 (default in NVML 5.319) returns count of all devices in the system
+ even if nvmlDeviceGetHandleByIndex_v2 returns NVML_ERROR_NO_PERMISSION for such device.
+ Update your code to handle this error, or use NVML 4.304 or older nvml header file.
+ For backward binary compatibility reasons _v1 version of the API is still present in the shared
+ library.
+ Old _v1 version of nvmlDeviceGetCount doesn't count devices that NVML has no permission to talk to.
+
+ @param deviceCount Reference in which to return the number of accessible devices
+
+ @return
+ - \ref NVML_SUCCESS if \a deviceCount has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a deviceCount is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetCount_v2(
+ deviceCount: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get attributes (engine counts etc.) for the given NVML device handle.
+
+ @note This API currently only supports MIG device handles.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device NVML device handle
+ @param attributes Device attributes
+
+ @return
+ - \ref NVML_SUCCESS if \a device attributes were successfully retrieved
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device handle is invalid
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetAttributes_v2(
+ device: cuda_types::nvml::nvmlDevice_t,
+ attributes: *mut cuda_types::nvml::nvmlDeviceAttributes_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Acquire the handle for a particular device, based on its index.
+
+ For all products.
+
+ Valid indices are derived from the \a accessibleDevices count returned by
+ \ref nvmlDeviceGetCount_v2(). For example, if \a accessibleDevices is 2 the valid indices
+ are 0 and 1, corresponding to GPU 0 and GPU 1.
+
+ The order in which NVML enumerates devices has no guarantees of consistency between reboots. For that reason it
+ is recommended that devices be looked up by their PCI ids or UUID. See
+ \ref nvmlDeviceGetHandleByUUID() and \ref nvmlDeviceGetHandleByPciBusId_v2().
+
+ Note: The NVML index may not correlate with other APIs, such as the CUDA device index.
+
+ Starting from NVML 5, this API causes NVML to initialize the target GPU
+ NVML may initialize additional GPUs if:
+ - The target GPU is an SLI slave
+
+ Note: New nvmlDeviceGetCount_v2 (default in NVML 5.319) returns count of all devices in the system
+ even if nvmlDeviceGetHandleByIndex_v2 returns NVML_ERROR_NO_PERMISSION for such device.
+ Update your code to handle this error, or use NVML 4.304 or older nvml header file.
+ For backward binary compatibility reasons _v1 version of the API is still present in the shared
+ library.
+ Old _v1 version of nvmlDeviceGetCount doesn't count devices that NVML has no permission to talk to.
+
+ This means that nvmlDeviceGetHandleByIndex_v2 and _v1 can return different devices for the same index.
+ If you don't touch macros that map old (_v1) versions to _v2 versions at the top of the file you don't
+ need to worry about that.
+
+ @param index The index of the target GPU, >= 0 and < \a accessibleDevices
+ @param device Reference in which to return the device handle
+
+ @return
+ - \ref NVML_SUCCESS if \a device has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a device is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to talk to this device
+ - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetIndex
+ @see nvmlDeviceGetCount*/
+ fn nvmlDeviceGetHandleByIndex_v2(
+ index: ::core::ffi::c_uint,
+ device: *mut cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Acquire the handle for a particular device, based on its board serial number.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ This number corresponds to the value printed directly on the board, and to the value returned by
+ \ref nvmlDeviceGetSerial().
+
+ @deprecated Since more than one GPU can exist on a single board this function is deprecated in favor
+ of \ref nvmlDeviceGetHandleByUUID.
+ For dual GPU boards this function will return NVML_ERROR_INVALID_ARGUMENT.
+
+ Starting from NVML 5, this API causes NVML to initialize the target GPU
+ NVML may initialize additional GPUs as it searches for the target GPU
+
+ @param serial The board serial number of the target GPU
+ @param device Reference in which to return the device handle
+
+ @return
+ - \ref NVML_SUCCESS if \a device has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a serial is invalid, \a device is NULL or more than one
+ device has the same serial (dual GPU boards)
+ - \ref NVML_ERROR_NOT_FOUND if \a serial does not match a valid device on the system
+ - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables
+ - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs
+ - \ref NVML_ERROR_GPU_IS_LOST if any GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetSerial
+ @see nvmlDeviceGetHandleByUUID*/
+ fn nvmlDeviceGetHandleBySerial(
+ serial: *const ::core::ffi::c_char,
+ device: *mut cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Acquire the handle for a particular device, based on its globally unique immutable UUID associated with each device.
+
+ For all products.
+
+ @param uuid The UUID of the target GPU or MIG instance
+ @param device Reference in which to return the device handle or MIG device handle
+
+ Starting from NVML 5, this API causes NVML to initialize the target GPU
+ NVML may initialize additional GPUs as it searches for the target GPU
+
+ @return
+ - \ref NVML_SUCCESS if \a device has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a uuid is invalid or \a device is null
+ - \ref NVML_ERROR_NOT_FOUND if \a uuid does not match a valid device on the system
+ - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables
+ - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs
+ - \ref NVML_ERROR_GPU_IS_LOST if any GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetUUID*/
+ fn nvmlDeviceGetHandleByUUID(
+ uuid: *const ::core::ffi::c_char,
+ device: *mut cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Acquire the handle for a particular device, based on its PCI bus id.
+
+ For all products.
+
+ This value corresponds to the nvmlPciInfo_t::busId returned by \ref nvmlDeviceGetPciInfo_v3().
+
+ Starting from NVML 5, this API causes NVML to initialize the target GPU
+ NVML may initialize additional GPUs if:
+ - The target GPU is an SLI slave
+
+ \note NVML 4.304 and older version of nvmlDeviceGetHandleByPciBusId"_v1" returns NVML_ERROR_NOT_FOUND
+ instead of NVML_ERROR_NO_PERMISSION.
+
+ @param pciBusId The PCI bus id of the target GPU
+ @param device Reference in which to return the device handle
+
+ @return
+ - \ref NVML_SUCCESS if \a device has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciBusId is invalid or \a device is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a pciBusId does not match a valid device on the system
+ - \ref NVML_ERROR_INSUFFICIENT_POWER if the attached device has improperly attached external power cables
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to talk to this device
+ - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetHandleByPciBusId_v2(
+ pciBusId: *const ::core::ffi::c_char,
+ device: *mut cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the name of this device.
+
+ For all products.
+
+ The name is an alphanumeric string that denotes a particular product, e.g. Tesla &tm; C2070. It will not
+ exceed 96 characters in length (including the NULL terminator). See \ref
+ nvmlConstants::NVML_DEVICE_NAME_V2_BUFFER_SIZE.
+
+ When used with MIG device handles the API returns MIG device names which can be used to identify devices
+ based on their attributes.
+
+ @param device The identifier of the target device
+ @param name Reference in which to return the product name
+ @param length The maximum allowed length of the string returned in \a name
+
+ @return
+ - \ref NVML_SUCCESS if \a name has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a name is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetName(
+ device: cuda_types::nvml::nvmlDevice_t,
+ name: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
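Together with nvmlDeviceGetCount_v2 and nvmlDeviceGetHandleByIndex_v2 above, nvmlDeviceGetName gives the usual enumeration loop. A sketch of that loop under the same illustrative assumptions as before (local externs, opaque handle type, 0 treated as NVML_SUCCESS; the 96-byte buffer follows the NVML_DEVICE_NAME_V2_BUFFER_SIZE note in the comment):

use core::ffi::{c_char, c_int, c_uint, c_void};
use std::ffi::CStr;

pub type NvmlDevice = *mut c_void; // opaque stand-in for cuda_types::nvml::nvmlDevice_t

#[allow(non_snake_case)]
#[link(name = "nvidia-ml")] // assumed link target
extern "system" {
    fn nvmlInit_v2() -> c_int;
    fn nvmlShutdown() -> c_int;
    fn nvmlDeviceGetCount_v2(deviceCount: *mut c_uint) -> c_int;
    fn nvmlDeviceGetHandleByIndex_v2(index: c_uint, device: *mut NvmlDevice) -> c_int;
    fn nvmlDeviceGetName(device: NvmlDevice, name: *mut c_char, length: c_uint) -> c_int;
}

fn main() {
    unsafe {
        if nvmlInit_v2() != 0 {
            return;
        }
        let mut count: c_uint = 0;
        if nvmlDeviceGetCount_v2(&mut count) == 0 {
            for i in 0..count {
                let mut dev: NvmlDevice = core::ptr::null_mut();
                // Per the comment above, individual indices can fail with
                // NVML_ERROR_NO_PERMISSION, so errors are handled per device.
                if nvmlDeviceGetHandleByIndex_v2(i, &mut dev) != 0 {
                    continue;
                }
                let mut name = [0 as c_char; 96]; // NVML_DEVICE_NAME_V2_BUFFER_SIZE
                if nvmlDeviceGetName(dev, name.as_mut_ptr(), name.len() as c_uint) == 0 {
                    println!("GPU {i}: {}", CStr::from_ptr(name.as_ptr()).to_string_lossy());
                }
            }
        }
        let _ = nvmlShutdown();
    }
}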
+ #[must_use]
+ /** Retrieves the brand of this device.
+
+ For all products.
+
+ The type is a member of \ref nvmlBrandType_t defined above.
+
+ @param device The identifier of the target device
+ @param type Reference in which to return the product brand type
+
+ @return
+ - \ref NVML_SUCCESS if \a type has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a type is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetBrand(
+ device: cuda_types::nvml::nvmlDevice_t,
+ type_: *mut cuda_types::nvml::nvmlBrandType_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the NVML index of this device.
+
+ For all products.
+
+ Valid indices are derived from the \a accessibleDevices count returned by
+ \ref nvmlDeviceGetCount_v2(). For example, if \a accessibleDevices is 2 the valid indices
+ are 0 and 1, corresponding to GPU 0 and GPU 1.
+
+ The order in which NVML enumerates devices has no guarantees of consistency between reboots. For that reason it
+ is recommended that devices be looked up by their PCI ids or GPU UUID. See
+ \ref nvmlDeviceGetHandleByPciBusId_v2() and \ref nvmlDeviceGetHandleByUUID().
+
+ When used with MIG device handles this API returns indices that can be
+ passed to \ref nvmlDeviceGetMigDeviceHandleByIndex to retrieve an identical handle.
+ MIG device indices are unique within a device.
+
+ Note: The NVML index may not correlate with other APIs, such as the CUDA device index.
+
+ @param device The identifier of the target device
+ @param index Reference in which to return the NVML index of the device
+
+ @return
+ - \ref NVML_SUCCESS if \a index has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a index is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetHandleByIndex()
+ @see nvmlDeviceGetCount()*/
+ fn nvmlDeviceGetIndex(
+ device: cuda_types::nvml::nvmlDevice_t,
+ index: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the globally unique board serial number associated with this device's board.
+
+ For all products with an inforom.
+
+ The serial number is an alphanumeric string that will not exceed 30 characters (including the NULL terminator).
+ This number matches the serial number tag that is physically attached to the board. See \ref
+ nvmlConstants::NVML_DEVICE_SERIAL_BUFFER_SIZE.
+
+ @param device The identifier of the target device
+ @param serial Reference in which to return the board/module serial number
+ @param length The maximum allowed length of the string returned in \a serial
+
+ @return
+ - \ref NVML_SUCCESS if \a serial has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a serial is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetSerial(
+ device: cuda_types::nvml::nvmlDevice_t,
+ serial: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ fn nvmlDeviceGetModuleId(
+ device: cuda_types::nvml::nvmlDevice_t,
+ moduleId: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the Device's C2C Mode information
+
+ @param device The identifier of the target device
+ @param c2cModeInfo Output struct containing the device's C2C Mode info
+
+ @return
+ - \ref NVML_SUCCESS if the C2C Mode Info query is successful
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a c2cModeInfo is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetC2cModeInfoV(
+ device: cuda_types::nvml::nvmlDevice_t,
+ c2cModeInfo: *mut cuda_types::nvml::nvmlC2cModeInfo_v1_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves an array of unsigned ints (sized to nodeSetSize) of bitmasks with
+ the ideal memory affinity within node or socket for the device.
+ For example, if NUMA nodes 0 and 1 are ideal within the socket for the device and nodeSetSize == 1,
+ result[0] = 0x3
+
+ \note If requested scope is not applicable to the target topology, the API
+ will fall back to reporting the memory affinity for the immediate non-I/O
+ ancestor of the device.
+
+ For Kepler &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device The identifier of the target device
+ @param nodeSetSize The size of the nodeSet array that is safe to access
+ @param nodeSet Array reference in which to return a bitmask of NODEs, 64 NODEs per
+ unsigned long on 64-bit machines, 32 on 32-bit machines
+ @param scope Scope that changes the default behavior
+
+ @return
+ - \ref NVML_SUCCESS if \a NUMA node Affinity has been filled
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, nodeSetSize == 0, nodeSet is NULL or scope is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMemoryAffinity(
+ device: cuda_types::nvml::nvmlDevice_t,
+ nodeSetSize: ::core::ffi::c_uint,
+ nodeSet: *mut ::core::ffi::c_ulong,
+ scope: cuda_types::nvml::nvmlAffinityScope_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves an array of unsigned ints (sized to cpuSetSize) of bitmasks with the
+ ideal CPU affinity within node or socket for the device.
+ For example, if processors 0, 1, 32, and 33 are ideal for the device and cpuSetSize == 2,
+ result[0] = 0x3, result[1] = 0x3
+
+ \note If requested scope is not applicable to the target topology, the API
+ will fall back to reporting the CPU affinity for the immediate non-I/O
+ ancestor of the device.
+
+ For Kepler &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device The identifier of the target device
+ @param cpuSetSize The size of the cpuSet array that is safe to access
+ @param cpuSet Array reference in which to return a bitmask of CPUs, 64 CPUs per
+ unsigned long on 64-bit machines, 32 on 32-bit machines
+ @param scope Scope that changes the default behavior
+
+ @return
+ - \ref NVML_SUCCESS if \a cpuAffinity has been filled
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, cpuSetSize == 0, cpuSet is NULL or scope is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetCpuAffinityWithinScope(
+ device: cuda_types::nvml::nvmlDevice_t,
+ cpuSetSize: ::core::ffi::c_uint,
+ cpuSet: *mut ::core::ffi::c_ulong,
+ scope: cuda_types::nvml::nvmlAffinityScope_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves an array of unsigned ints (sized to cpuSetSize) of bitmasks with the ideal CPU affinity for the device.
+ For example, if processors 0, 1, 32, and 33 are ideal for the device and cpuSetSize == 2,
+ result[0] = 0x3, result[1] = 0x3
+ This is equivalent to calling \ref nvmlDeviceGetCpuAffinityWithinScope with \ref NVML_AFFINITY_SCOPE_NODE.
+
+ For Kepler &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device The identifier of the target device
+ @param cpuSetSize The size of the cpuSet array that is safe to access
+ @param cpuSet Array reference in which to return a bitmask of CPUs, 64 CPUs per
+ unsigned long on 64-bit machines, 32 on 32-bit machines
+
+ @return
+ - \ref NVML_SUCCESS if \a cpuAffinity has been filled
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, cpuSetSize == 0, or cpuSet is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetCpuAffinity(
+ device: cuda_types::nvml::nvmlDevice_t,
+ cpuSetSize: ::core::ffi::c_uint,
+ cpuSet: *mut ::core::ffi::c_ulong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
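The cpuSet output above is a packed bitmask: 64 CPUs per unsigned long on 64-bit machines, 32 on 32-bit ones. A small helper sketch for decoding such a mask into CPU indices (plain bit manipulation, no NVML calls; the word size is passed in explicitly so the documentation's example, which assumes 32-bit words, can be reproduced):

/// Decode an affinity mask as filled in by nvmlDeviceGetCpuAffinity:
/// bit N of word W set  =>  CPU (W * bits_per_word + N) is ideal for the device.
fn cpus_from_mask(mask: &[u64], bits_per_word: usize) -> Vec<usize> {
    let mut cpus = Vec::new();
    for (word_idx, &word) in mask.iter().enumerate() {
        for bit in 0..bits_per_word {
            if word & (1u64 << bit) != 0 {
                cpus.push(word_idx * bits_per_word + bit);
            }
        }
    }
    cpus
}

fn main() {
    // The documentation's example (32 CPUs per word): processors 0, 1, 32 and 33
    // are ideal, cpuSetSize == 2, result[0] = 0x3, result[1] = 0x3.
    assert_eq!(cpus_from_mask(&[0x3, 0x3], 32), vec![0, 1, 32, 33]);
    // On a 64-bit machine each unsigned long covers 64 CPUs instead.
    assert_eq!(cpus_from_mask(&[0x3], 64), vec![0, 1]);
}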
+ #[must_use]
+ /** Sets the ideal affinity for the calling thread and device using the guidelines
+ given in nvmlDeviceGetCpuAffinity(). Note, this is a change as of version 8.0.
+ Older versions set the affinity for a calling process and all children.
+ Currently supports up to 1024 processors.
+
+ For Kepler &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device The identifier of the target device
+
+ @return
+ - \ref NVML_SUCCESS if the calling process has been successfully bound
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetCpuAffinity(
+ device: cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Clear all affinity bindings for the calling thread. Note, this is a change as of version
+ 8.0 as older versions cleared the affinity for a calling process and all children.
+
+ For Kepler &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device The identifier of the target device
+
+ @return
+ - \ref NVML_SUCCESS if the calling process has been successfully unbound
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceClearCpuAffinity(
+ device: cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get the NUMA node of the given GPU device.
+ This only applies to platforms where the GPUs are NUMA nodes.
+
+ @param[in] device The device handle
+ @param[out] node NUMA node ID of the device
+
+ @returns
+ - \ref NVML_SUCCESS if the NUMA node is retrieved successfully
+ - \ref NVML_ERROR_NOT_SUPPORTED if request is not supported on the current platform
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a node is invalid*/
+ fn nvmlDeviceGetNumaNodeId(
+ device: cuda_types::nvml::nvmlDevice_t,
+ node: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /// @}
+ fn nvmlDeviceGetTopologyCommonAncestor(
+ device1: cuda_types::nvml::nvmlDevice_t,
+ device2: cuda_types::nvml::nvmlDevice_t,
+ pathInfo: *mut cuda_types::nvml::nvmlGpuTopologyLevel_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the set of GPUs that are nearest to a given device at a specific interconnectivity level.
+ For all products.
+ Supported on Linux only.
+
+ @param device The identifier of the first device
+ @param level The \ref nvmlGpuTopologyLevel_t level to search for other GPUs
+ @param count When zero, is set to the number of matching GPUs such that \a deviceArray
+ can be malloc'd. When non-zero, \a deviceArray will be filled with \a count
+ number of device handles.
+ @param deviceArray An array of device handles for GPUs found at \a level
+
+ @return
+ - \ref NVML_SUCCESS if \a deviceArray or \a count (if initially zero) has been set
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a level, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature
+ - \ref NVML_ERROR_UNKNOWN if an error has occurred in underlying topology discovery*/
+ fn nvmlDeviceGetTopologyNearestGpus(
+ device: cuda_types::nvml::nvmlDevice_t,
+ level: cuda_types::nvml::nvmlGpuTopologyLevel_t,
+ count: *mut ::core::ffi::c_uint,
+ deviceArray: *mut cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the status for a given p2p capability index between a given pair of GPU
+
+ @param device1 The first device
+ @param device2 The second device
+ @param p2pIndex p2p Capability Index being looked for between \a device1 and \a device2
+ @param p2pStatus Reference in which to return the status of the \a p2pIndex
+ between \a device1 and \a device2
+ @return
+ - \ref NVML_SUCCESS if \a p2pStatus has been populated
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device1 or \a device2 or \a p2pIndex is invalid or \a p2pStatus is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetP2PStatus(
+ device1: cuda_types::nvml::nvmlDevice_t,
+ device2: cuda_types::nvml::nvmlDevice_t,
+ p2pIndex: cuda_types::nvml::nvmlGpuP2PCapsIndex_t,
+ p2pStatus: *mut cuda_types::nvml::nvmlGpuP2PStatus_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the globally unique immutable UUID associated with this device, as a 5 part hexadecimal string,
+ that augments the immutable, board serial identifier.
+
+ For all products.
+
+ The UUID is a globally unique identifier. It is the only available identifier for pre-Fermi-architecture products.
+ It does NOT correspond to any identifier printed on the board. It will not exceed 96 characters in length
+ (including the NULL terminator). See \ref nvmlConstants::NVML_DEVICE_UUID_V2_BUFFER_SIZE.
+
+ When used with MIG device handles the API returns globally unique UUIDs which can be used to identify MIG
+ devices across both GPU and MIG devices. UUIDs are immutable for the lifetime of a MIG device.
+
+ @param device The identifier of the target device
+ @param uuid Reference in which to return the GPU UUID
+ @param length The maximum allowed length of the string returned in \a uuid
+
+ @return
+ - \ref NVML_SUCCESS if \a uuid has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a uuid is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetUUID(
+ device: cuda_types::nvml::nvmlDevice_t,
+ uuid: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
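Since enumeration order is not stable across reboots, the comments above recommend re-resolving devices by UUID. A sketch of pairing nvmlDeviceGetUUID with nvmlDeviceGetHandleByUUID for that purpose (same illustrative assumptions: local externs, opaque handle type, 0 as success, 96-byte buffer per NVML_DEVICE_UUID_V2_BUFFER_SIZE):

use core::ffi::{c_char, c_int, c_uint, c_void};

pub type NvmlDevice = *mut c_void; // opaque stand-in for cuda_types::nvml::nvmlDevice_t

#[link(name = "nvidia-ml")] // assumed link target
extern "system" {
    fn nvmlDeviceGetUUID(device: NvmlDevice, uuid: *mut c_char, length: c_uint) -> c_int;
    fn nvmlDeviceGetHandleByUUID(uuid: *const c_char, device: *mut NvmlDevice) -> c_int;
}

/// Read a device's UUID and resolve it back to a handle, the lookup the
/// comments above recommend instead of relying on enumeration order.
pub unsafe fn reacquire_by_uuid(dev: NvmlDevice) -> Option<NvmlDevice> {
    let mut uuid = [0 as c_char; 96]; // NVML_DEVICE_UUID_V2_BUFFER_SIZE
    if nvmlDeviceGetUUID(dev, uuid.as_mut_ptr(), uuid.len() as c_uint) != 0 {
        return None;
    }
    let mut handle: NvmlDevice = core::ptr::null_mut();
    if nvmlDeviceGetHandleByUUID(uuid.as_ptr(), &mut handle) != 0 {
        return None;
    }
    Some(handle)
}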
+ #[must_use]
+ /** Retrieves minor number for the device. The minor number for the device is such that the Nvidia device node file for
+ each GPU will have the form /dev/nvidia[minor number].
+
+ For all products.
+ Supported only for Linux
+
+ @param device The identifier of the target device
+ @param minorNumber Reference in which to return the minor number for the device
+ @return
+ - \ref NVML_SUCCESS if the minor number is successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minorNumber is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMinorNumber(
+ device: cuda_types::nvml::nvmlDevice_t,
+ minorNumber: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the device board part number which is programmed into the board's InfoROM
+
+ For all products.
+
+ @param device Identifier of the target device
+ @param partNumber Reference to the buffer to return
+ @param length Length of the buffer reference
+
+ @return
+ - \ref NVML_SUCCESS if \a partNumber has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_NOT_SUPPORTED if the needed VBIOS fields have not been filled
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a partNumber is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetBoardPartNumber(
+ device: cuda_types::nvml::nvmlDevice_t,
+ partNumber: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the version information for the device's infoROM object.
+
+ For all products with an inforom.
+
+ Fermi and higher parts have non-volatile on-board memory for persisting device info, such as aggregate
+ ECC counts. The version of the data structures in this memory may change from time to time. It will not
+ exceed 16 characters in length (including the NULL terminator).
+ See \ref nvmlConstants::NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE.
+
+ See \ref nvmlInforomObject_t for details on the available infoROM objects.
+
+ @param device The identifier of the target device
+ @param object The target infoROM object
+ @param version Reference in which to return the infoROM version
+ @param length The maximum allowed length of the string returned in \a version
+
+ @return
+ - \ref NVML_SUCCESS if \a version has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have an infoROM
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetInforomImageVersion*/
+ fn nvmlDeviceGetInforomVersion(
+ device: cuda_types::nvml::nvmlDevice_t,
+ object: cuda_types::nvml::nvmlInforomObject_t,
+ version: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the global infoROM image version
+
+ For all products with an inforom.
+
+ Image version, just like VBIOS version, uniquely describes the exact version of the infoROM flashed on the board,
+ in contrast to infoROM object version, which is only an indicator of supported features.
+ Version string will not exceed 16 characters in length (including the NULL terminator).
+ See \ref nvmlConstants::NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE.
+
+ @param device The identifier of the target device
+ @param version Reference in which to return the infoROM image version
+ @param length The maximum allowed length of the string returned in \a version
+
+ @return
+ - \ref NVML_SUCCESS if \a version has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have an infoROM
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetInforomVersion*/
+ fn nvmlDeviceGetInforomImageVersion(
+ device: cuda_types::nvml::nvmlDevice_t,
+ version: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the checksum of the configuration stored in the device's infoROM.
+
+ For all products with an inforom.
+
+ Can be used to make sure that two GPUs have the exact same configuration.
+ Current checksum takes into account configuration stored in PWR and ECC infoROM objects.
+ Checksum can change between driver releases or when the user changes configuration (e.g. disabling/enabling ECC).
+
+ @param device The identifier of the target device
+ @param checksum Reference in which to return the infoROM configuration checksum
+
+ @return
+ - \ref NVML_SUCCESS if \a checksum has been set
+ - \ref NVML_ERROR_CORRUPTED_INFOROM if the device's checksum couldn't be retrieved due to infoROM corruption
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a checksum is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetInforomConfigurationChecksum(
+ device: cuda_types::nvml::nvmlDevice_t,
+ checksum: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Reads the infoROM from the flash and verifies the checksums.
+
+ For all products with an inforom.
+
+ @param device The identifier of the target device
+
+ @return
+ - \ref NVML_SUCCESS if infoROM is not corrupted
+ - \ref NVML_ERROR_CORRUPTED_INFOROM if the device's infoROM is corrupted
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceValidateInforom(
+ device: cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the timestamp and the duration of the last flush of the BBX (blackbox) infoROM object during the current run.
+
+ For all products with an inforom.
+
+ @param device The identifier of the target device
+ @param timestamp The start timestamp of the last BBX Flush
+ @param durationUs The duration (us) of the last BBX Flush
+
+ @return
+ - \ref NVML_SUCCESS if \a timestamp and \a durationUs are successfully retrieved
+ - \ref NVML_ERROR_NOT_READY if the BBX object has not been flushed yet
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have an infoROM
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetInforomVersion*/
+ fn nvmlDeviceGetLastBBXFlushTime(
+ device: cuda_types::nvml::nvmlDevice_t,
+ timestamp: *mut ::core::ffi::c_ulonglong,
+ durationUs: *mut ::core::ffi::c_ulong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the display mode for the device.
+
+ For all products.
+
+ This method indicates whether a physical display (e.g. monitor) is currently connected to
+ any of the device's connectors.
+
+ See \ref nvmlEnableState_t for details on allowed modes.
+
+ @param device The identifier of the target device
+ @param display Reference in which to return the display mode
+
+ @return
+ - \ref NVML_SUCCESS if \a display has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a display is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetDisplayMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ display: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the display active state for the device.
+
+ For all products.
+
+ This method indicates whether a display is initialized on the device.
+ For example, whether an X server is attached to this device and has allocated memory for the screen.
+
+ Display can be active even when no monitor is physically attached.
+
+ See \ref nvmlEnableState_t for details on allowed modes.
+
+ @param device The identifier of the target device
+ @param isActive Reference in which to return the display active state
+
+ @return
+ - \ref NVML_SUCCESS if \a isActive has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isActive is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetDisplayActive(
+ device: cuda_types::nvml::nvmlDevice_t,
+ isActive: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the persistence mode associated with this device.
+
+ For all products.
+ For Linux only.
+
+ When driver persistence mode is enabled the driver software state is not torn down when the last
+ client disconnects. By default this feature is disabled.
+
+ See \ref nvmlEnableState_t for details on allowed modes.
+
+ @param device The identifier of the target device
+ @param mode Reference in which to return the current driver persistence mode
+
+ @return
+ - \ref NVML_SUCCESS if \a mode has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceSetPersistenceMode()*/
+ fn nvmlDeviceGetPersistenceMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ mode: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves PCI attributes of this device.
+
+ For all products.
+
+ See \ref nvmlPciInfoExt_t for details on the available PCI info.
+
+ @param device The identifier of the target device
+ @param pci Reference in which to return the PCI info
+
+ @return
+ - \ref NVML_SUCCESS if \a pci has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pci is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPciInfoExt(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pci: *mut cuda_types::nvml::nvmlPciInfoExt_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the PCI attributes of this device.
+
+ For all products.
+
+ See \ref nvmlPciInfo_t for details on the available PCI info.
+
+ @param device The identifier of the target device
+ @param pci Reference in which to return the PCI info
+
+ @return
+ - \ref NVML_SUCCESS if \a pci has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pci is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPciInfo_v3(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pci: *mut cuda_types::nvml::nvmlPciInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the maximum PCIe link generation possible with this device and system
+
+ I.e., for a generation 2 PCIe device attached to a generation 1 PCIe bus, the max link generation this function will
+ report is generation 1.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param maxLinkGen Reference in which to return the max PCIe link generation
+
+ @return
+ - \ref NVML_SUCCESS if \a maxLinkGen has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a maxLinkGen is null
+ - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMaxPcieLinkGeneration(
+ device: cuda_types::nvml::nvmlDevice_t,
+ maxLinkGen: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the maximum PCIe link generation supported by this device
+
+ For Fermi &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param maxLinkGenDevice Reference in which to return the max PCIe link generation
+
+ @return
+ - \ref NVML_SUCCESS if \a maxLinkGenDevice has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a maxLinkGenDevice is null
+ - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetGpuMaxPcieLinkGeneration(
+ device: cuda_types::nvml::nvmlDevice_t,
+ maxLinkGenDevice: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the maximum PCIe link width possible with this device and system
+
+ I.e., for a device with a 16x PCIe bus width attached to an 8x PCIe system bus, this function will report
+ a max link width of 8.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param maxLinkWidth                        Reference in which to return the max PCIe link width
+
+ @return
+ - \ref NVML_SUCCESS if \a maxLinkWidth has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a maxLinkWidth is null
+ - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMaxPcieLinkWidth(
+ device: cuda_types::nvml::nvmlDevice_t,
+ maxLinkWidth: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current PCIe link generation
+
+ For Fermi &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param currLinkGen Reference in which to return the current PCIe link generation
+
+ @return
+ - \ref NVML_SUCCESS if \a currLinkGen has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a currLinkGen is null
+ - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetCurrPcieLinkGeneration(
+ device: cuda_types::nvml::nvmlDevice_t,
+ currLinkGen: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current PCIe link width
+
+ For Fermi &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param currLinkWidth                       Reference in which to return the current PCIe link width
+
+ @return
+ - \ref NVML_SUCCESS if \a currLinkWidth has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a currLinkWidth is null
+ - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetCurrPcieLinkWidth(
+ device: cuda_types::nvml::nvmlDevice_t,
+ currLinkWidth: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
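Under the same assumptions as the infoROM sketch above (raw link against libnvidia-ml, nvmlReturn_t treated as u32 with NVML_SUCCESS == 0, a device handle already obtained via nvmlDeviceGetHandleByIndex_v2), the four link queries pair naturally into a current-vs-maximum report:

use std::ffi::c_void;
use std::os::raw::c_uint;

#[link(name = "nvidia-ml")]
extern "system" {
    fn nvmlDeviceGetCurrPcieLinkGeneration(device: *mut c_void, v: *mut c_uint) -> u32;
    fn nvmlDeviceGetCurrPcieLinkWidth(device: *mut c_void, v: *mut c_uint) -> u32;
    fn nvmlDeviceGetMaxPcieLinkGeneration(device: *mut c_void, v: *mut c_uint) -> u32;
    fn nvmlDeviceGetMaxPcieLinkWidth(device: *mut c_void, v: *mut c_uint) -> u32;
}

// Prints "PCIe gen X xW (max gen Y xZ)" for an already-initialized device handle.
fn print_pcie_link(device: *mut c_void) {
    let (mut cur_gen, mut cur_width, mut max_gen, mut max_width) = (0, 0, 0, 0);
    let ok = unsafe {
        nvmlDeviceGetCurrPcieLinkGeneration(device, &mut cur_gen) == 0
            && nvmlDeviceGetCurrPcieLinkWidth(device, &mut cur_width) == 0
            && nvmlDeviceGetMaxPcieLinkGeneration(device, &mut max_gen) == 0
            && nvmlDeviceGetMaxPcieLinkWidth(device, &mut max_width) == 0
    };
    if ok {
        println!("PCIe gen {cur_gen} x{cur_width} (max gen {max_gen} x{max_width})");
    }
}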
+ #[must_use]
+ /** Retrieve PCIe utilization information.
+ This function queries a byte counter over a 20ms interval and thus reports the
+ PCIe throughput over that interval.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ This method is not supported in virtual machines running virtual GPU (vGPU).
+
+ @param device The identifier of the target device
+ @param counter The specific counter that should be queried \ref nvmlPcieUtilCounter_t
+ @param value Reference in which to return throughput in KB/s
+
+ @return
+ - \ref NVML_SUCCESS if \a value has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a counter is invalid, or \a value is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPcieThroughput(
+ device: cuda_types::nvml::nvmlDevice_t,
+ counter: cuda_types::nvml::nvmlPcieUtilCounter_t,
+ value: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
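Because each call samples a byte counter over a roughly 20 ms window, reading both directions back to back takes about 40 ms. A sketch under the same linking assumptions as the earlier examples; the nvmlPcieUtilCounter_t values are assumed, as noted in the comment:

use std::ffi::c_void;
use std::os::raw::c_uint;

#[link(name = "nvidia-ml")]
extern "system" {
    fn nvmlDeviceGetPcieThroughput(device: *mut c_void, counter: c_uint, value: *mut c_uint) -> u32;
}

// Returns (tx, rx) throughput in KB/s, or None on any error.
fn pcie_throughput_kbps(device: *mut c_void) -> Option<(u32, u32)> {
    // nvmlPcieUtilCounter_t values assumed: TX_BYTES = 0, RX_BYTES = 1.
    let mut tx = 0;
    let mut rx = 0;
    unsafe {
        if nvmlDeviceGetPcieThroughput(device, 0, &mut tx) == 0
            && nvmlDeviceGetPcieThroughput(device, 1, &mut rx) == 0
        {
            return Some((tx, rx));
        }
    }
    None
}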
+ #[must_use]
+ /** Retrieve the PCIe replay counter.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param value Reference in which to return the counter's value
+
+ @return
+ - \ref NVML_SUCCESS if \a value has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a value is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPcieReplayCounter(
+ device: cuda_types::nvml::nvmlDevice_t,
+ value: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current clock speeds for the device.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ See \ref nvmlClockType_t for details on available clock information.
+
+ @param device The identifier of the target device
+ @param type Identify which clock domain to query
+ @param clock Reference in which to return the clock speed in MHz
+
+ @return
+ - \ref NVML_SUCCESS if \a clock has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device cannot report the specified clock
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetClockInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ type_: cuda_types::nvml::nvmlClockType_t,
+ clock: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
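A small sketch of querying a clock domain's current and maximum speed together, under the same linking assumptions as the earlier examples; the numeric nvmlClockType_t values are assumed, as noted in the comment:

use std::ffi::c_void;
use std::os::raw::c_uint;

#[link(name = "nvidia-ml")]
extern "system" {
    fn nvmlDeviceGetClockInfo(device: *mut c_void, clock_type: c_uint, clock: *mut c_uint) -> u32;
    fn nvmlDeviceGetMaxClockInfo(device: *mut c_void, clock_type: c_uint, clock: *mut c_uint) -> u32;
}

// nvmlClockType_t values assumed: GRAPHICS = 0, SM = 1, MEM = 2, VIDEO = 3.
fn print_clock(device: *mut c_void, clock_type: c_uint, label: &str) {
    let (mut cur, mut max) = (0, 0);
    unsafe {
        if nvmlDeviceGetClockInfo(device, clock_type, &mut cur) == 0
            && nvmlDeviceGetMaxClockInfo(device, clock_type, &mut max) == 0
        {
            println!("{label}: {cur} MHz (max {max} MHz)");
        }
    }
}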
+ #[must_use]
+ /** Retrieves the maximum clock speeds for the device.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ See \ref nvmlClockType_t for details on available clock information.
+
+ \note On GPUs from the Fermi family, current P0 clocks (reported by \ref nvmlDeviceGetClockInfo) can differ from max clocks
+ by a few MHz.
+
+ @param device The identifier of the target device
+ @param type Identify which clock domain to query
+ @param clock Reference in which to return the clock speed in MHz
+
+ @return
+ - \ref NVML_SUCCESS if \a clock has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device cannot report the specified clock
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMaxClockInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ type_: cuda_types::nvml::nvmlClockType_t,
+ clock: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the GPCCLK VF offset value
+ @param[in] device The identifier of the target device
+ @param[out] offset The retrieved GPCCLK VF offset value
+
+ @return
+ - \ref NVML_SUCCESS if \a offset has been successfully queried
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetGpcClkVfOffset(
+ device: cuda_types::nvml::nvmlDevice_t,
+ offset: *mut ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current setting of a clock that applications will use unless an overspec situation occurs.
+ Can be changed using \ref nvmlDeviceSetApplicationsClocks.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param clockType Identify which clock domain to query
+ @param clockMHz Reference in which to return the clock in MHz
+
+ @return
+ - \ref NVML_SUCCESS if \a clockMHz has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetApplicationsClock(
+ device: cuda_types::nvml::nvmlDevice_t,
+ clockType: cuda_types::nvml::nvmlClockType_t,
+ clockMHz: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the default applications clock that GPU boots with or
+ defaults to after \ref nvmlDeviceResetApplicationsClocks call.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param clockType Identify which clock domain to query
+ @param clockMHz Reference in which to return the default clock in MHz
+
+ @return
+ - \ref NVML_SUCCESS if \a clockMHz has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ \see nvmlDeviceGetApplicationsClock*/
+ fn nvmlDeviceGetDefaultApplicationsClock(
+ device: cuda_types::nvml::nvmlDevice_t,
+ clockType: cuda_types::nvml::nvmlClockType_t,
+ clockMHz: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the clock speed for the clock specified by the clock type and clock ID.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param clockType Identify which clock domain to query
+ @param clockId Identify which clock in the domain to query
+ @param clockMHz Reference in which to return the clock in MHz
+
+ @return
+ - \ref NVML_SUCCESS if \a clockMHz has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetClock(
+ device: cuda_types::nvml::nvmlDevice_t,
+ clockType: cuda_types::nvml::nvmlClockType_t,
+ clockId: cuda_types::nvml::nvmlClockId_t,
+ clockMHz: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the customer defined maximum boost clock speed specified by the given clock type.
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param clockType Identify which clock domain to query
+ @param clockMHz Reference in which to return the clock in MHz
+
+ @return
+ - \ref NVML_SUCCESS if \a clockMHz has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device or the \a clockType on this device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMaxCustomerBoostClock(
+ device: cuda_types::nvml::nvmlDevice_t,
+ clockType: cuda_types::nvml::nvmlClockType_t,
+ clockMHz: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the list of possible memory clocks that can be used as an argument for \ref nvmlDeviceSetApplicationsClocks.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param count Reference in which to provide the \a clocksMHz array size, and
+ to return the number of elements
+ @param clocksMHz Reference in which to return the clock in MHz
+
+ @return
+ - \ref NVML_SUCCESS if \a count and \a clocksMHz have been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a count is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to the number of
+ required elements)
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceSetApplicationsClocks
+ @see nvmlDeviceGetSupportedGraphicsClocks*/
+ fn nvmlDeviceGetSupportedMemoryClocks(
+ device: cuda_types::nvml::nvmlDevice_t,
+ count: *mut ::core::ffi::c_uint,
+ clocksMHz: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the list of possible graphics clocks that can be used as an argument for \ref nvmlDeviceSetApplicationsClocks.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param memoryClockMHz Memory clock for which to return possible graphics clocks
+ @param count Reference in which to provide the \a clocksMHz array size, and
+ to return the number of elements
+ @param clocksMHz Reference in which to return the clocks in MHz
+
+ @return
+ - \ref NVML_SUCCESS if \a count and \a clocksMHz have been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_NOT_FOUND if the specified \a memoryClockMHz is not a supported frequency
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceSetApplicationsClocks
+ @see nvmlDeviceGetSupportedMemoryClocks*/
+ fn nvmlDeviceGetSupportedGraphicsClocks(
+ device: cuda_types::nvml::nvmlDevice_t,
+ memoryClockMHz: ::core::ffi::c_uint,
+ count: *mut ::core::ffi::c_uint,
+ clocksMHz: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
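Both of these functions use NVML's size-query handshake: pass a count of 0 (here with a null buffer, which is assumed to be accepted) to learn the required element count via NVML_ERROR_INSUFFICIENT_SIZE, then call again with a buffer of that size. A sketch for the memory-clock variant, with the error-code value assumed as noted:

use std::ffi::c_void;
use std::os::raw::c_uint;

const NVML_ERROR_INSUFFICIENT_SIZE: u32 = 7; // assumed value of the enum constant

#[link(name = "nvidia-ml")]
extern "system" {
    fn nvmlDeviceGetSupportedMemoryClocks(
        device: *mut c_void,
        count: *mut c_uint,
        clocks_mhz: *mut c_uint,
    ) -> u32;
}

// Returns the supported memory clocks in MHz, or None on any error.
fn supported_memory_clocks(device: *mut c_void) -> Option<Vec<u32>> {
    let mut count: c_uint = 0;
    unsafe {
        // First call: learn how many elements are required.
        match nvmlDeviceGetSupportedMemoryClocks(device, &mut count, std::ptr::null_mut()) {
            0 => return Some(Vec::new()),
            NVML_ERROR_INSUFFICIENT_SIZE => {}
            _ => return None,
        }
        // Second call: fetch the list into a buffer of the reported size.
        let mut clocks = vec![0u32; count as usize];
        if nvmlDeviceGetSupportedMemoryClocks(device, &mut count, clocks.as_mut_ptr()) == 0 {
            clocks.truncate(count as usize);
            return Some(clocks);
        }
    }
    None
}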
+ #[must_use]
+ /** Retrieve the current state of Auto Boosted clocks on a device and store it in \a isEnabled
+
+ For Kepler &tm; or newer fully supported devices.
+
+ Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates
+ to maximize performance as thermal limits allow.
+
+ On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks.
+ Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost
+ behavior.
+
+ @param device The identifier of the target device
+ @param isEnabled Where to store the current state of Auto Boosted clocks of the target device
+ @param defaultIsEnabled Where to store the default Auto Boosted clocks behavior of the target device that the device will
+ revert to when no applications are using the GPU
+
+ @return
+ - \ref NVML_SUCCESS If \a isEnabled has been been set with the Auto Boosted clocks state of \a device
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isEnabled is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+*/
+ fn nvmlDeviceGetAutoBoostedClocksEnabled(
+ device: cuda_types::nvml::nvmlDevice_t,
+ isEnabled: *mut cuda_types::nvml::nvmlEnableState_t,
+ defaultIsEnabled: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the intended operating speed of the device's fan.
+
+ Note: The reported speed is the intended fan speed. If the fan is physically blocked and unable to spin, the
+ output will not match the actual fan speed.
+
+ For all discrete products with dedicated fans.
+
+ The fan speed is expressed as a percentage of the product's maximum noise tolerance fan speed.
+ This value may exceed 100% in certain cases.
+
+ @param device The identifier of the target device
+ @param speed Reference in which to return the fan speed percentage
+
+ @return
+ - \ref NVML_SUCCESS if \a speed has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a speed is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a fan
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetFanSpeed(
+ device: cuda_types::nvml::nvmlDevice_t,
+ speed: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the intended operating speed of the device's specified fan.
+
+ Note: The reported speed is the intended fan speed. If the fan is physically blocked and unable to spin, the
+ output will not match the actual fan speed.
+
+ For all discrete products with dedicated fans.
+
+ The fan speed is expressed as a percentage of the product's maximum noise tolerance fan speed.
+ This value may exceed 100% in certain cases.
+
+ @param device The identifier of the target device
+ @param fan The index of the target fan, zero indexed.
+ @param speed Reference in which to return the fan speed percentage
+
+ @return
+ - \ref NVML_SUCCESS if \a speed has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a fan is not an acceptable index, or \a speed is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a fan or is newer than Maxwell
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetFanSpeed_v2(
+ device: cuda_types::nvml::nvmlDevice_t,
+ fan: ::core::ffi::c_uint,
+ speed: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
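A sketch that combines this call with nvmlDeviceGetNumFans (declared further down in this patch) to report every fan, under the same linking assumptions as the earlier examples:

use std::ffi::c_void;
use std::os::raw::c_uint;

#[link(name = "nvidia-ml")]
extern "system" {
    fn nvmlDeviceGetNumFans(device: *mut c_void, num_fans: *mut c_uint) -> u32;
    fn nvmlDeviceGetFanSpeed_v2(device: *mut c_void, fan: c_uint, speed: *mut c_uint) -> u32;
}

// Reports the intended speed of every fan; values are a percentage of the
// maximum noise-tolerant speed and may legitimately exceed 100%.
fn print_fan_speeds(device: *mut c_void) {
    let mut num_fans = 0;
    unsafe {
        if nvmlDeviceGetNumFans(device, &mut num_fans) != 0 {
            return;
        }
        for fan in 0..num_fans {
            let mut speed = 0;
            if nvmlDeviceGetFanSpeed_v2(device, fan, &mut speed) == 0 {
                println!("fan {fan}: {speed}%");
            }
        }
    }
}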
+ #[must_use]
+ /** Retrieves the intended target speed of the device's specified fan.
+
+ Normally, the driver dynamically adjusts the fan based on
+ the needs of the GPU. But when the user sets a fan speed using nvmlDeviceSetFanSpeed_v2,
+ the driver will attempt to make the fan achieve that setting. The actual current speed
+ of the fan is reported by nvmlDeviceGetFanSpeed_v2.
+
+ For all discrete products with dedicated fans.
+
+ The fan speed is expressed as a percentage of the product's maximum noise tolerance fan speed.
+ This value may exceed 100% in certain cases.
+
+ @param device The identifier of the target device
+ @param fan The index of the target fan, zero indexed.
+ @param targetSpeed Reference in which to return the fan speed percentage
+
+ @return
+ - \ref NVML_SUCCESS if \a targetSpeed has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a fan is not an acceptable index, or \a targetSpeed is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a fan or is newer than Maxwell
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetTargetFanSpeed(
+ device: cuda_types::nvml::nvmlDevice_t,
+ fan: ::core::ffi::c_uint,
+ targetSpeed: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the min and max fan speed that user can set for the GPU fan.
+
+ For all CUDA-capable discrete products with fans.
+
+ @param device The identifier of the target device
+ @param minSpeed The minimum speed allowed to set
+ @param maxSpeed The maximum speed allowed to set
+
+ @return
+ - \ref NVML_SUCCESS if \a minSpeed and \a maxSpeed have been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ (doesn't have fans)
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ fn nvmlDeviceGetMinMaxFanSpeed(
+ device: cuda_types::nvml::nvmlDevice_t,
+ minSpeed: *mut ::core::ffi::c_uint,
+ maxSpeed: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Gets current fan control policy.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ For all CUDA-capable discrete products with fans.
+
+ @param device                               The identifier of the target device
+ @param fan                                  The index of the target fan, zero indexed.
+ @param policy                               Reference in which to return the fan control policy
+
+ @return
+ - \ref NVML_SUCCESS if \a policy has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a policy is null or the \a fan given doesn't reference
+ a fan that exists.
+ - \ref NVML_ERROR_NOT_SUPPORTED if the \a device is older than Maxwell
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ fn nvmlDeviceGetFanControlPolicy_v2(
+ device: cuda_types::nvml::nvmlDevice_t,
+ fan: ::core::ffi::c_uint,
+ policy: *mut cuda_types::nvml::nvmlFanControlPolicy_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the number of fans on the device.
+
+ For all discrete products with dedicated fans.
+
+ @param device The identifier of the target device
+ @param numFans The number of fans
+
+ @return
+ - \ref NVML_SUCCESS if \a fan number query was successful
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a numFans is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a fan
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetNumFans(
+ device: cuda_types::nvml::nvmlDevice_t,
+ numFans: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current temperature readings for the device, in degrees C.
+
+ For all products.
+
+ See \ref nvmlTemperatureSensors_t for details on available temperature sensors.
+
+ @param device The identifier of the target device
+ @param sensorType Flag that indicates which sensor reading to retrieve
+ @param temp Reference in which to return the temperature reading
+
+ @return
+ - \ref NVML_SUCCESS if \a temp has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a sensorType is invalid or \a temp is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have the specified sensor
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetTemperature(
+ device: cuda_types::nvml::nvmlDevice_t,
+ sensorType: cuda_types::nvml::nvmlTemperatureSensors_t,
+ temp: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
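A one-call sketch under the same linking assumptions as the earlier examples; the NVML_TEMPERATURE_GPU sensor value is assumed to be 0:

use std::ffi::c_void;
use std::os::raw::c_uint;

#[link(name = "nvidia-ml")]
extern "system" {
    fn nvmlDeviceGetTemperature(device: *mut c_void, sensor: c_uint, temp: *mut c_uint) -> u32;
}

// Returns the on-die GPU temperature in degrees C, or None on any error.
fn gpu_temperature_c(device: *mut c_void) -> Option<u32> {
    let mut temp = 0;
    unsafe { (nvmlDeviceGetTemperature(device, 0, &mut temp) == 0).then_some(temp) }
}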
+ #[must_use]
+ /** Retrieves the temperature threshold for the GPU with the specified threshold type in degrees C.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ See \ref nvmlTemperatureThresholds_t for details on available temperature thresholds.
+
+ Note: This API is no longer the preferred interface for retrieving the following temperature thresholds
+ on Ada and later architectures: NVML_TEMPERATURE_THRESHOLD_SHUTDOWN, NVML_TEMPERATURE_THRESHOLD_SLOWDOWN,
+ NVML_TEMPERATURE_THRESHOLD_MEM_MAX and NVML_TEMPERATURE_THRESHOLD_GPU_MAX.
+
+ Support for reading these temperature thresholds for Ada and later architectures would be removed from this
+ API in future releases. Please use \ref nvmlDeviceGetFieldValues with NVML_FI_DEV_TEMPERATURE_* fields to retrieve
+ temperature thresholds on these architectures.
+
+ @param device The identifier of the target device
+ @param thresholdType The type of threshold value queried
+ @param temp Reference in which to return the temperature reading
+ @return
+ - \ref NVML_SUCCESS if \a temp has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a thresholdType is invalid or \a temp is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a temperature sensor or is unsupported
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetTemperatureThreshold(
+ device: cuda_types::nvml::nvmlDevice_t,
+ thresholdType: cuda_types::nvml::nvmlTemperatureThresholds_t,
+ temp: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Used to execute a list of thermal system instructions.
+
+ @param device The identifier of the target device
+ @param sensorIndex The index of the thermal sensor
+ @param pThermalSettings Reference in which to return the thermal sensor information
+
+ @return
+ - \ref NVML_SUCCESS if \a pThermalSettings has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pThermalSettings is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetThermalSettings(
+ device: cuda_types::nvml::nvmlDevice_t,
+ sensorIndex: ::core::ffi::c_uint,
+ pThermalSettings: *mut cuda_types::nvml::nvmlGpuThermalSettings_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current performance state for the device.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ See \ref nvmlPstates_t for details on allowed performance states.
+
+ @param device The identifier of the target device
+ @param pState Reference in which to return the performance state reading
+
+ @return
+ - \ref NVML_SUCCESS if \a pState has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pState is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPerformanceState(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pState: *mut cuda_types::nvml::nvmlPstates_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves current clocks event reasons.
+
+ For all fully supported products.
+
+ \note More than one bit can be enabled at the same time. Multiple reasons can be affecting clocks at once.
+
+ @param device The identifier of the target device
+ @param clocksEventReasons Reference in which to return bitmask of active clocks event
+ reasons
+
+ @return
+ - \ref NVML_SUCCESS if \a clocksEventReasons has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clocksEventReasons is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlClocksEventReasons
+ @see nvmlDeviceGetSupportedClocksEventReasons*/
+ fn nvmlDeviceGetCurrentClocksEventReasons(
+ device: cuda_types::nvml::nvmlDevice_t,
+ clocksEventReasons: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
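Since the result is a bitmask, callers typically AND it against the nvmlClocksEventReason* constants. The bit values in this sketch are assumptions taken from nvml.h and should be checked against the generated bindings:

use std::ffi::c_void;
use std::os::raw::c_ulonglong;

#[link(name = "nvidia-ml")]
extern "system" {
    fn nvmlDeviceGetCurrentClocksEventReasons(
        device: *mut c_void,
        reasons: *mut c_ulonglong,
    ) -> u32;
}

// Bit values assumed: GpuIdle = 0x1, ApplicationsClocksSetting = 0x2,
// SwPowerCap = 0x4, HwSlowdown = 0x8.
fn print_clock_event_reasons(device: *mut c_void) {
    let mut mask: c_ulonglong = 0;
    if unsafe { nvmlDeviceGetCurrentClocksEventReasons(device, &mut mask) } != 0 {
        return;
    }
    for (bit, name) in [
        (0x1u64, "GPU idle"),
        (0x2, "applications clocks setting"),
        (0x4, "SW power cap"),
        (0x8, "HW slowdown"),
    ] {
        if mask & bit != 0 {
            println!("clock event reason active: {name}");
        }
    }
}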
+ #[must_use]
+ /// @deprecated Use \ref nvmlDeviceGetCurrentClocksEventReasons instead
+ fn nvmlDeviceGetCurrentClocksThrottleReasons(
+ device: cuda_types::nvml::nvmlDevice_t,
+ clocksThrottleReasons: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves bitmask of supported clocks event reasons that can be returned by
+ \ref nvmlDeviceGetCurrentClocksEventReasons
+
+ For all fully supported products.
+
+ This method is not supported in virtual machines running virtual GPU (vGPU).
+
+ @param device The identifier of the target device
+ @param supportedClocksEventReasons Reference in which to return bitmask of supported
+ clocks event reasons
+
+ @return
+ - \ref NVML_SUCCESS if \a supportedClocksEventReasons has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a supportedClocksEventReasons is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlClocksEventReasons
+ @see nvmlDeviceGetCurrentClocksEventReasons*/
+ fn nvmlDeviceGetSupportedClocksEventReasons(
+ device: cuda_types::nvml::nvmlDevice_t,
+ supportedClocksEventReasons: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /// @deprecated Use \ref nvmlDeviceGetSupportedClocksEventReasons instead
+ fn nvmlDeviceGetSupportedClocksThrottleReasons(
+ device: cuda_types::nvml::nvmlDevice_t,
+ supportedClocksThrottleReasons: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Deprecated: Use \ref nvmlDeviceGetPerformanceState. This function exposes an incorrect generalization.
+
+ Retrieve the current performance state for the device.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ See \ref nvmlPstates_t for details on allowed performance states.
+
+ @param device The identifier of the target device
+ @param pState Reference in which to return the performance state reading
+
+ @return
+ - \ref NVML_SUCCESS if \a pState has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pState is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPowerState(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pState: *mut cuda_types::nvml::nvmlPstates_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve performance monitor samples from the associated subdevice.
+
+ @param device                              The identifier of the target device
+ @param pDynamicPstatesInfo                 Reference in which to return the performance monitor samples
+
+ @return
+ - \ref NVML_SUCCESS if \a pDynamicPstatesInfo has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pDynamicPstatesInfo is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetDynamicPstatesInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pDynamicPstatesInfo: *mut cuda_types::nvml::nvmlGpuDynamicPstatesInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the MemClk (Memory Clock) VF offset value.
+ @param[in] device The identifier of the target device
+ @param[out] offset The retrieved MemClk VF offset value
+
+ @return
+ - \ref NVML_SUCCESS if \a offset has been successfully queried
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMemClkVfOffset(
+ device: cuda_types::nvml::nvmlDevice_t,
+ offset: *mut ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve min and max clocks of some clock domain for a given PState
+
+ @param device The identifier of the target device
+ @param type Clock domain
+ @param pstate PState to query
+ @param minClockMHz Reference in which to return min clock frequency
+ @param maxClockMHz Reference in which to return max clock frequency
+
+ @return
+ - \ref NVML_SUCCESS if everything worked
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a type or \a pstate are invalid or both
+ \a minClockMHz and \a maxClockMHz are NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature*/
+ fn nvmlDeviceGetMinMaxClockOfPState(
+ device: cuda_types::nvml::nvmlDevice_t,
+ type_: cuda_types::nvml::nvmlClockType_t,
+ pstate: cuda_types::nvml::nvmlPstates_t,
+ minClockMHz: *mut ::core::ffi::c_uint,
+ maxClockMHz: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get all supported Performance States (P-States) for the device.
+
+ The returned array will contain a contiguous list of valid P-States supported by
+ the device. If the number of supported P-States is fewer than the size of the array
+ supplied, the remaining elements will contain \a NVML_PSTATE_UNKNOWN.
+
+ The number of elements in the returned list will never exceed \a NVML_MAX_GPU_PERF_PSTATES.
+
+ @param device The identifier of the target device
+ @param pstates Container to return the list of performance states
+ supported by device
+ @param size Size of the supplied \a pstates array in bytes
+
+ @return
+ - \ref NVML_SUCCESS if \a pstates array has been retrieved
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if the container supplied was not large enough to
+ hold the resulting list
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a pstates is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support performance state readings
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetSupportedPerformanceStates(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pstates: *mut cuda_types::nvml::nvmlPstates_t,
+ size: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the GPCCLK min max VF offset value.
+ @param[in] device The identifier of the target device
+ @param[out] minOffset The retrieved GPCCLK VF min offset value
+ @param[out] maxOffset The retrieved GPCCLK VF max offset value
+
+ @return
+ - \ref NVML_SUCCESS if \a offset has been successfully queried
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetGpcClkMinMaxVfOffset(
+ device: cuda_types::nvml::nvmlDevice_t,
+ minOffset: *mut ::core::ffi::c_int,
+ maxOffset: *mut ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the MemClk (Memory Clock) min max VF offset value.
+ @param[in] device The identifier of the target device
+ @param[out] minOffset The retrieved MemClk VF min offset value
+ @param[out] maxOffset The retrieved MemClk VF max offset value
+
+ @return
+ - \ref NVML_SUCCESS if \a offset has been successfully queried
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMemClkMinMaxVfOffset(
+ device: cuda_types::nvml::nvmlDevice_t,
+ minOffset: *mut ::core::ffi::c_int,
+ maxOffset: *mut ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** This API has been deprecated.
+
+ Retrieves the power management mode associated with this device.
+
+ For products from the Fermi family.
+ - Requires \a NVML_INFOROM_POWER version 3.0 or higher.
+
+ For products from the Kepler or newer families.
+ - Does not require \a NVML_INFOROM_POWER object.
+
+ This flag indicates whether any power management algorithm is currently active on the device. An
+ enabled state does not necessarily mean the device is being actively throttled -- only
+ that the driver will do so if the appropriate conditions are met.
+
+ See \ref nvmlEnableState_t for details on allowed modes.
+
+ @param device The identifier of the target device
+ @param mode Reference in which to return the current power management mode
+
+ @return
+ - \ref NVML_SUCCESS if \a mode has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPowerManagementMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ mode: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the power management limit associated with this device.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ The power limit defines the upper boundary for the card's power draw. If
+ the card's total power draw reaches this limit the power management algorithm kicks in.
+
+ This reading is only available if power management mode is supported.
+ See \ref nvmlDeviceGetPowerManagementMode.
+
+ @param device The identifier of the target device
+ @param limit Reference in which to return the power management limit in milliwatts
+
+ @return
+ - \ref NVML_SUCCESS if \a limit has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a limit is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPowerManagementLimit(
+ device: cuda_types::nvml::nvmlDevice_t,
+ limit: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves information about possible values of power management limits on this device.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param minLimit Reference in which to return the minimum power management limit in milliwatts
+ @param maxLimit Reference in which to return the maximum power management limit in milliwatts
+
+ @return
+ - \ref NVML_SUCCESS if \a minLimit and \a maxLimit have been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minLimit or \a maxLimit is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceSetPowerManagementLimit*/
+ fn nvmlDeviceGetPowerManagementLimitConstraints(
+ device: cuda_types::nvml::nvmlDevice_t,
+ minLimit: *mut ::core::ffi::c_uint,
+ maxLimit: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves default power management limit on this device, in milliwatts.
+ Default power management limit is a power management limit that the device boots with.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param defaultLimit Reference in which to return the default power management limit in milliwatts
+
+ @return
+ - \ref NVML_SUCCESS if \a defaultLimit has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a defaultLimit is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPowerManagementDefaultLimit(
+ device: cuda_types::nvml::nvmlDevice_t,
+ defaultLimit: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory)
+
+ For Fermi &tm; or newer fully supported devices.
+
+ On Fermi and Kepler GPUs the reading is accurate to within +/- 5% of current power draw. On Ampere
+ (except GA100) or newer GPUs, the API returns power averaged over 1 sec interval. On GA100 and
+ older architectures, instantaneous power is returned.
+
+ See \ref NVML_FI_DEV_POWER_AVERAGE and \ref NVML_FI_DEV_POWER_INSTANT to query specific power
+ values.
+
+ It is only available if power management mode is supported. See \ref nvmlDeviceGetPowerManagementMode.
+
+ @param device The identifier of the target device
+ @param power Reference in which to return the power usage information
+
+ @return
+ - \ref NVML_SUCCESS if \a power has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a power is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support power readings
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPowerUsage(
+ device: cuda_types::nvml::nvmlDevice_t,
+ power: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
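A sketch that pairs the power reading with the enforced limit declared further down in this patch, under the same linking assumptions as the earlier examples; both values are in milliwatts:

use std::ffi::c_void;
use std::os::raw::c_uint;

#[link(name = "nvidia-ml")]
extern "system" {
    fn nvmlDeviceGetPowerUsage(device: *mut c_void, power_mw: *mut c_uint) -> u32;
    fn nvmlDeviceGetEnforcedPowerLimit(device: *mut c_void, limit_mw: *mut c_uint) -> u32;
}

// Both readings are in milliwatts; divide by 1000.0 for watts.
fn print_power(device: *mut c_void) {
    let (mut usage_mw, mut limit_mw) = (0, 0);
    unsafe {
        if nvmlDeviceGetPowerUsage(device, &mut usage_mw) == 0
            && nvmlDeviceGetEnforcedPowerLimit(device, &mut limit_mw) == 0
        {
            println!(
                "power: {:.1} W / {:.1} W enforced limit",
                usage_mw as f64 / 1000.0,
                limit_mw as f64 / 1000.0
            );
        }
    }
}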
+ #[must_use]
+ /** Retrieves total energy consumption for this GPU in millijoules (mJ) since the driver was last reloaded
+
+ For Volta &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param energy Reference in which to return the energy consumption information
+
+ @return
+ - \ref NVML_SUCCESS if \a energy has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a energy is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support energy readings
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetTotalEnergyConsumption(
+ device: cuda_types::nvml::nvmlDevice_t,
+ energy: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get the effective power limit that the driver enforces after taking into account all limiters
+
+ Note: This can be different from the \ref nvmlDeviceGetPowerManagementLimit if other limits are set elsewhere.
+ This includes the out-of-band power limit interface.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The device to communicate with
+ @param limit Reference in which to return the power management limit in milliwatts
+
+ @return
+ - \ref NVML_SUCCESS if \a limit has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a limit is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetEnforcedPowerLimit(
+ device: cuda_types::nvml::nvmlDevice_t,
+ limit: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current GOM and pending GOM (the one that GPU will switch to after reboot).
+
+ For GK110 M-class and X-class Tesla &tm; products from the Kepler family.
+ Modes \ref NVML_GOM_LOW_DP and \ref NVML_GOM_ALL_ON are supported on fully supported GeForce products.
+ Not supported on Quadro &reg; and Tesla &tm; C-class products.
+
+ @param device The identifier of the target device
+ @param current Reference in which to return the current GOM
+ @param pending Reference in which to return the pending GOM
+
+ @return
+ - \ref NVML_SUCCESS if \a current and \a pending have been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a current or \a pending is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlGpuOperationMode_t
+ @see nvmlDeviceSetGpuOperationMode*/
+ fn nvmlDeviceGetGpuOperationMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ current: *mut cuda_types::nvml::nvmlGpuOperationMode_t,
+ pending: *mut cuda_types::nvml::nvmlGpuOperationMode_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the amount of used, free, reserved and total memory available on the device, in bytes.
+ The reserved amount is supported on version 2 only.
+
+ For all products.
+
+ Enabling ECC reduces the amount of total available memory, due to the extra required parity bits.
+ Under WDDM most device memory is allocated and managed on startup by Windows.
+
+ Under Linux and Windows TCC, the reported amount of used memory is equal to the sum of memory allocated
+ by all active channels on the device.
+
+ See \ref nvmlMemory_v2_t for details on available memory info.
+
+ @note In MIG mode, if device handle is provided, the API returns aggregate
+ information, only if the caller has appropriate privileges. Per-instance
+ information can be queried by using specific MIG device handles.
+
+ @note nvmlDeviceGetMemoryInfo_v2 adds additional memory information.
+
+ @note On systems where GPUs are NUMA nodes, the accuracy of FB memory utilization
+ provided by this API depends on the memory accounting of the operating system.
+ This is because FB memory is managed by the operating system instead of the NVIDIA GPU driver.
+ Typically, pages allocated from FB memory are not released even after
+ the process terminates to enhance performance. In scenarios where
+ the operating system is under memory pressure, it may resort to utilizing FB memory.
+ Such actions can result in discrepancies in the accuracy of memory reporting.
+
+ @param device The identifier of the target device
+ @param memory Reference in which to return the memory information
+
+ @return
+ - \ref NVML_SUCCESS if \a memory has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memory is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMemoryInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ memory: *mut cuda_types::nvml::nvmlMemory_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ fn nvmlDeviceGetMemoryInfo_v2(
+ device: cuda_types::nvml::nvmlDevice_t,
+ memory: *mut cuda_types::nvml::nvmlMemory_v2_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
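+    // Illustrative usage sketch (not part of the generated bindings): querying
+    // memory usage through the v1 entry point with a zero-initialized struct,
+    // assuming an initialized library and a valid `device` handle. For the _v2
+    // variant, NVML additionally expects the struct's version field to be set
+    // before the call (see the nvml.h matching the target driver for the constant).
+    //
+    //     let mut mem: cuda_types::nvml::nvmlMemory_t = unsafe { std::mem::zeroed() };
+    //     let status = unsafe { nvmlDeviceGetMemoryInfo(device, &mut mem) };
+    //     // On NVML_SUCCESS, `mem.total`, `mem.free` and `mem.used` are in bytes.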
+ #[must_use]
+ /** Retrieves the current compute mode for the device.
+
+ For all products.
+
+ See \ref nvmlComputeMode_t for details on allowed compute modes.
+
+ @param device The identifier of the target device
+ @param mode Reference in which to return the current compute mode
+
+ @return
+ - \ref NVML_SUCCESS if \a mode has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceSetComputeMode()*/
+ fn nvmlDeviceGetComputeMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ mode: *mut cuda_types::nvml::nvmlComputeMode_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the CUDA compute capability of the device.
+
+ For all products.
+
+ Returns the major and minor compute capability version numbers of the
+ device. The major and minor versions are equivalent to the
+ CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR and
+ CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR attributes that would be
+ returned by CUDA's cuDeviceGetAttribute().
+
+ @param device The identifier of the target device
+ @param major Reference in which to return the major CUDA compute capability
+ @param minor Reference in which to return the minor CUDA compute capability
+
+ @return
+ - \ref NVML_SUCCESS if \a major and \a minor have been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a major or \a minor are NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetCudaComputeCapability(
+ device: cuda_types::nvml::nvmlDevice_t,
+ major: *mut ::core::ffi::c_int,
+ minor: *mut ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
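+    // Illustrative usage sketch (not part of the generated bindings), assuming an
+    // initialized library and a valid `device` handle:
+    //
+    //     let (mut major, mut minor): (::core::ffi::c_int, ::core::ffi::c_int) = (0, 0);
+    //     let status = unsafe {
+    //         nvmlDeviceGetCudaComputeCapability(device, &mut major, &mut minor)
+    //     };
+    //     // On NVML_SUCCESS, e.g. major = 8, minor = 6 for an Ampere GA102 part.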
+ #[must_use]
+ /** Retrieves the current and pending ECC modes for the device.
+
+ For Fermi &tm; or newer fully supported devices.
+ Only applicable to devices with ECC.
+ Requires \a NVML_INFOROM_ECC version 1.0 or higher.
+
+ Changing ECC modes requires a reboot. The "pending" ECC mode refers to the target mode following
+ the next reboot.
+
+ See \ref nvmlEnableState_t for details on allowed modes.
+
+ @param device The identifier of the target device
+ @param current Reference in which to return the current ECC mode
+ @param pending Reference in which to return the pending ECC mode
+
+ @return
+ - \ref NVML_SUCCESS if \a current and \a pending have been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or either \a current or \a pending is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceSetEccMode()*/
+ fn nvmlDeviceGetEccMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ current: *mut cuda_types::nvml::nvmlEnableState_t,
+ pending: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the default ECC modes for the device.
+
+ For Fermi &tm; or newer fully supported devices.
+ Only applicable to devices with ECC.
+ Requires \a NVML_INFOROM_ECC version 1.0 or higher.
+
+ See \ref nvmlEnableState_t for details on allowed modes.
+
+ @param device The identifier of the target device
+ @param defaultMode Reference in which to return the default ECC mode
+
+ @return
+ - \ref NVML_SUCCESS if \a defaultMode has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a defaultMode is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceSetEccMode()*/
+ fn nvmlDeviceGetDefaultEccMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ defaultMode: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the device boardId from 0-N.
+ Devices with the same boardId indicate GPUs connected to the same PLX. Use in conjunction with
+ \ref nvmlDeviceGetMultiGpuBoard() to decide if they are on the same board as well.
+ The boardId returned is a unique ID for the current configuration. Uniqueness and ordering across
+ reboots and system configurations is not guaranteed (i.e. if a Tesla K40c returns 0x100 and
+ the two GPUs on a Tesla K10 in the same system return 0x200, it is not guaranteed that they will
+ always return those values, but they will always be different from each other).
+
+
+ For Fermi &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param boardId Reference in which to return the device's board ID
+
+ @return
+ - \ref NVML_SUCCESS if \a boardId has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a boardId is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetBoardId(
+ device: cuda_types::nvml::nvmlDevice_t,
+ boardId: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves whether the device is on a Multi-GPU Board
+ Devices that are on multi-GPU boards will set \a multiGpuBool to a non-zero value.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param multiGpuBool Reference in which to return a zero or non-zero value
+ to indicate whether the device is on a multi GPU board
+
+ @return
+ - \ref NVML_SUCCESS if \a multiGpuBool has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a multiGpuBool is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMultiGpuBoard(
+ device: cuda_types::nvml::nvmlDevice_t,
+ multiGpuBool: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the total ECC error counts for the device.
+
+ For Fermi &tm; or newer fully supported devices.
+ Only applicable to devices with ECC.
+ Requires \a NVML_INFOROM_ECC version 1.0 or higher.
+ Requires ECC Mode to be enabled.
+
+ The total error count is the sum of errors across each of the separate memory systems, i.e. the total set of
+ errors across the entire device.
+
+ See \ref nvmlMemoryErrorType_t for a description of available error types.\n
+ See \ref nvmlEccCounterType_t for a description of available counter types.
+
+ @param device The identifier of the target device
+ @param errorType Flag that specifies the type of the errors.
+ @param counterType Flag that specifies the counter-type of the errors.
+ @param eccCounts Reference in which to return the specified ECC errors
+
+ @return
+ - \ref NVML_SUCCESS if \a eccCounts has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType or \a counterType is invalid, or \a eccCounts is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceClearEccErrorCounts()*/
+ fn nvmlDeviceGetTotalEccErrors(
+ device: cuda_types::nvml::nvmlDevice_t,
+ errorType: cuda_types::nvml::nvmlMemoryErrorType_t,
+ counterType: cuda_types::nvml::nvmlEccCounterType_t,
+ eccCounts: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the detailed ECC error counts for the device.
+
+ @deprecated This API supports only a fixed set of ECC error locations.
+ On different GPU architectures different locations are supported.
+ See \ref nvmlDeviceGetMemoryErrorCounter
+
+ For Fermi &tm; or newer fully supported devices.
+ Only applicable to devices with ECC.
+ Requires \a NVML_INFOROM_ECC version 2.0 or higher to report aggregate location-based ECC counts.
+ Requires \a NVML_INFOROM_ECC version 1.0 or higher to report all other ECC counts.
+ Requires ECC Mode to be enabled.
+
+ Detailed errors provide separate ECC counts for specific parts of the memory system.
+
+ Reports zero for unsupported ECC error counters when a subset of ECC error counters are supported.
+
+ See \ref nvmlMemoryErrorType_t for a description of available bit types.\n
+ See \ref nvmlEccCounterType_t for a description of available counter types.\n
+ See \ref nvmlEccErrorCounts_t for a description of provided detailed ECC counts.
+
+ @param device The identifier of the target device
+ @param errorType Flag that specifies the type of the errors.
+ @param counterType Flag that specifies the counter-type of the errors.
+ @param eccCounts Reference in which to return the specified ECC errors
+
+ @return
+ - \ref NVML_SUCCESS if \a eccCounts has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType or \a counterType is invalid, or \a eccCounts is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceClearEccErrorCounts()*/
+ fn nvmlDeviceGetDetailedEccErrors(
+ device: cuda_types::nvml::nvmlDevice_t,
+ errorType: cuda_types::nvml::nvmlMemoryErrorType_t,
+ counterType: cuda_types::nvml::nvmlEccCounterType_t,
+ eccCounts: *mut cuda_types::nvml::nvmlEccErrorCounts_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the requested memory error counter for the device.
+
+ For Fermi &tm; or newer fully supported devices.
+ Requires \a NVML_INFOROM_ECC version 2.0 or higher to report aggregate location-based memory error counts.
+ Requires \a NVML_INFOROM_ECC version 1.0 or higher to report all other memory error counts.
+
+ Only applicable to devices with ECC.
+
+ Requires ECC Mode to be enabled.
+
+ @note On MIG-enabled GPUs, per instance information can be queried using specific
+ MIG device handles. Per instance information is currently only supported for
+ non-DRAM uncorrectable volatile errors. Querying volatile errors using device
+ handles is currently not supported.
+
+ See \ref nvmlMemoryErrorType_t for a description of available memory error types.\n
+ See \ref nvmlEccCounterType_t for a description of available counter types.\n
+ See \ref nvmlMemoryLocation_t for a description of available counter locations.\n
+
+ @param device The identifier of the target device
+ @param errorType Flag that specifies the type of error.
+ @param counterType Flag that specifies the counter-type of the errors.
+ @param locationType Specifies the location of the counter.
+ @param count Reference in which to return the ECC counter
+
+ @return
+ - \ref NVML_SUCCESS if \a count has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType, \a counterType or \a locationType is
+ invalid, or \a count is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support ECC error reporting in the specified memory
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMemoryErrorCounter(
+ device: cuda_types::nvml::nvmlDevice_t,
+ errorType: cuda_types::nvml::nvmlMemoryErrorType_t,
+ counterType: cuda_types::nvml::nvmlEccCounterType_t,
+ locationType: cuda_types::nvml::nvmlMemoryLocation_t,
+ count: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current utilization rates for the device's major subsystems.
+
+ For Fermi &tm; or newer fully supported devices.
+
+ See \ref nvmlUtilization_t for details on available utilization rates.
+
+ \note During driver initialization when ECC is enabled one can see high GPU and Memory Utilization readings.
+ This is caused by the ECC Memory Scrubbing mechanism that is performed during driver initialization.
+
+ @note On MIG-enabled GPUs, querying device utilization rates is not currently supported.
+
+ @param device The identifier of the target device
+ @param utilization Reference in which to return the utilization information
+
+ @return
+ - \ref NVML_SUCCESS if \a utilization has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a utilization is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetUtilizationRates(
+ device: cuda_types::nvml::nvmlDevice_t,
+ utilization: *mut cuda_types::nvml::nvmlUtilization_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
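+    // Illustrative usage sketch (not part of the generated bindings): the `gpu`
+    // and `memory` fields of nvmlUtilization_t are percentages over the last
+    // sampling interval. Assumes an initialized library and a valid `device`.
+    //
+    //     let mut util: cuda_types::nvml::nvmlUtilization_t = unsafe { std::mem::zeroed() };
+    //     let status = unsafe { nvmlDeviceGetUtilizationRates(device, &mut util) };
+    //     // On NVML_SUCCESS, inspect util.gpu and util.memory.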
+ #[must_use]
+ /** Retrieves the current utilization and sampling size in microseconds for the Encoder
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @note On MIG-enabled GPUs, querying encoder utilization is not currently supported.
+
+ @param device The identifier of the target device
+ @param utilization Reference to an unsigned int for encoder utilization info
+ @param samplingPeriodUs Reference to an unsigned int for the sampling period in US
+
+ @return
+ - \ref NVML_SUCCESS if \a utilization has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetEncoderUtilization(
+ device: cuda_types::nvml::nvmlDevice_t,
+ utilization: *mut ::core::ffi::c_uint,
+ samplingPeriodUs: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current capacity of the device's encoder, as a percentage of maximum encoder capacity with valid values in the range 0-100.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param encoderQueryType Type of encoder to query
+ @param encoderCapacity Reference to an unsigned int for the encoder capacity
+
+ @return
+ - \ref NVML_SUCCESS if \a encoderCapacity is fetched
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a encoderCapacity is NULL, or \a device or \a encoderQueryType
+ are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support the encoder specified in \a encoderQueryType
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetEncoderCapacity(
+ device: cuda_types::nvml::nvmlDevice_t,
+ encoderQueryType: cuda_types::nvml::nvmlEncoderType_t,
+ encoderCapacity: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current encoder statistics for a given device.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param sessionCount Reference to an unsigned int for count of active encoder sessions
+ @param averageFps Reference to an unsigned int for trailing average FPS of all active sessions
+ @param averageLatency Reference to an unsigned int for encode latency in microseconds
+
+ @return
+ - \ref NVML_SUCCESS if \a sessionCount, \a averageFps and \a averageLatency is fetched
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a sessionCount, \a averageFps,
+ or \a averageLatency is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetEncoderStats(
+ device: cuda_types::nvml::nvmlDevice_t,
+ sessionCount: *mut ::core::ffi::c_uint,
+ averageFps: *mut ::core::ffi::c_uint,
+ averageLatency: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves information about active encoder sessions on a target device.
+
+ An array of active encoder sessions is returned in the caller-supplied buffer pointed at by \a sessionInfos. The
+ array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions
+ written to the buffer.
+
+ If the supplied buffer is not large enough to accommodate the active session array, the function returns
+ NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlEncoderSessionInfo_t array required in \a sessionCount.
+ To query the number of active encoder sessions, call this function with *sessionCount = 0. The code will return
+ NVML_SUCCESS with number of active encoder sessions updated in *sessionCount.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param sessionCount Reference to caller supplied array size, and returns the number of sessions.
+ @param sessionInfos Reference in which to return the session information
+
+ @return
+ - \ref NVML_SUCCESS if \a sessionInfos is fetched
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is returned in \a sessionCount
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL.
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetEncoderSessions(
+ device: cuda_types::nvml::nvmlDevice_t,
+ sessionCount: *mut ::core::ffi::c_uint,
+ sessionInfos: *mut cuda_types::nvml::nvmlEncoderSessionInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
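+    // Illustrative sketch (not part of the generated bindings) of the two-call
+    // pattern described above: query the count with *sessionCount = 0, then
+    // allocate and fetch. Assumes an initialized library and a valid `device`.
+    //
+    //     let mut count: ::core::ffi::c_uint = 0;
+    //     unsafe { nvmlDeviceGetEncoderSessions(device, &mut count, std::ptr::null_mut()) };
+    //     let mut sessions: Vec<cuda_types::nvml::nvmlEncoderSessionInfo_t> =
+    //         (0..count).map(|_| unsafe { std::mem::zeroed() }).collect();
+    //     let status = unsafe {
+    //         nvmlDeviceGetEncoderSessions(device, &mut count, sessions.as_mut_ptr())
+    //     };
+    //     // On NVML_SUCCESS, the first `count` elements of `sessions` are valid.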
+ #[must_use]
+ /** Retrieves the current utilization and sampling size in microseconds for the Decoder
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @note On MIG-enabled GPUs, querying decoder utilization is not currently supported.
+
+ @param device The identifier of the target device
+ @param utilization Reference to an unsigned int for decoder utilization info
+ @param samplingPeriodUs Reference to an unsigned int for the sampling period in US
+
+ @return
+ - \ref NVML_SUCCESS if \a utilization has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetDecoderUtilization(
+ device: cuda_types::nvml::nvmlDevice_t,
+ utilization: *mut ::core::ffi::c_uint,
+ samplingPeriodUs: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current utilization and sampling size in microseconds for the JPG
+
+ %TURING_OR_NEWER%
+
+ @note On MIG-enabled GPUs, querying decoder utilization is not currently supported.
+
+ @param device The identifier of the target device
+ @param utilization Reference to an unsigned int for jpg utilization info
+ @param samplingPeriodUs Reference to an unsigned int for the sampling period in US
+
+ @return
+ - \ref NVML_SUCCESS if \a utilization has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetJpgUtilization(
+ device: cuda_types::nvml::nvmlDevice_t,
+ utilization: *mut ::core::ffi::c_uint,
+ samplingPeriodUs: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current utilization and sampling size in microseconds for the OFA (Optical Flow Accelerator)
+
+ %TURING_OR_NEWER%
+
+ @note On MIG-enabled GPUs, querying decoder utilization is not currently supported.
+
+ @param device The identifier of the target device
+ @param utilization Reference to an unsigned int for ofa utilization info
+ @param samplingPeriodUs Reference to an unsigned int for the sampling period in US
+
+ @return
+ - \ref NVML_SUCCESS if \a utilization has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetOfaUtilization(
+ device: cuda_types::nvml::nvmlDevice_t,
+ utilization: *mut ::core::ffi::c_uint,
+ samplingPeriodUs: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the active frame buffer capture sessions statistics for a given device.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param fbcStats Reference to nvmlFBCStats_t structure containing NvFBC stats
+
+ @return
+ - \ref NVML_SUCCESS if \a fbcStats is fetched
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a fbcStats is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetFBCStats(
+ device: cuda_types::nvml::nvmlDevice_t,
+ fbcStats: *mut cuda_types::nvml::nvmlFBCStats_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves information about active frame buffer capture sessions on a target device.
+
+ An array of active FBC sessions is returned in the caller-supplied buffer pointed at by \a sessionInfo. The
+ array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions
+ written to the buffer.
+
+ If the supplied buffer is not large enough to accommodate the active session array, the function returns
+ NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlFBCSessionInfo_t array required in \a sessionCount.
+ To query the number of active FBC sessions, call this function with *sessionCount = 0. The code will return
+ NVML_SUCCESS with number of active FBC sessions updated in *sessionCount.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @note hResolution, vResolution, averageFPS and averageLatency data for a FBC session returned in \a sessionInfo may
+ be zero if there are no new frames captured since the session started.
+
+ @param device The identifier of the target device
+ @param sessionCount Reference to caller supplied array size, and returns the number of sessions.
+ @param sessionInfo Reference in which to return the session information
+
+ @return
+ - \ref NVML_SUCCESS if \a sessionInfo is fetched
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is returned in \a sessionCount
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL.
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetFBCSessions(
+ device: cuda_types::nvml::nvmlDevice_t,
+ sessionCount: *mut ::core::ffi::c_uint,
+ sessionInfo: *mut cuda_types::nvml::nvmlFBCSessionInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current and pending driver model for the device.
+
+ For Fermi &tm; or newer fully supported devices.
+ For Windows only.
+
+ On Windows platforms the device driver can run in either WDDM or WDM (TCC) mode. If a display is attached
+ to the device it must run in WDDM mode. TCC mode is preferred if a display is not attached.
+
+ See \ref nvmlDriverModel_t for details on available driver models.
+
+ @param device The identifier of the target device
+ @param current Reference in which to return the current driver model
+ @param pending Reference in which to return the pending driver model
+
+ @return
+ - \ref NVML_SUCCESS if either \a current and/or \a pending have been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or both \a current and \a pending are NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the platform is not Windows
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceSetDriverModel()*/
+ fn nvmlDeviceGetDriverModel(
+ device: cuda_types::nvml::nvmlDevice_t,
+ current: *mut cuda_types::nvml::nvmlDriverModel_t,
+ pending: *mut cuda_types::nvml::nvmlDriverModel_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get VBIOS version of the device.
+
+ For all products.
+
+ The VBIOS version may change from time to time. It will not exceed 32 characters in length
+ (including the NULL terminator). See \ref nvmlConstants::NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE.
+
+ @param device The identifier of the target device
+ @param version Reference to which to return the VBIOS version
+ @param length The maximum allowed length of the string returned in \a version
+
+ @return
+ - \ref NVML_SUCCESS if \a version has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a version is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVbiosVersion(
+ device: cuda_types::nvml::nvmlDevice_t,
+ version: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
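+    // Illustrative usage sketch (not part of the generated bindings): the VBIOS
+    // version is a NUL-terminated string of at most 32 bytes
+    // (NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE), so a fixed buffer suffices.
+    // Assumes an initialized library and a valid `device`.
+    //
+    //     let mut buf = [0 as ::core::ffi::c_char; 32];
+    //     let status = unsafe {
+    //         nvmlDeviceGetVbiosVersion(device, buf.as_mut_ptr(), buf.len() as ::core::ffi::c_uint)
+    //     };
+    //     // On NVML_SUCCESS, convert with std::ffi::CStr::from_ptr(buf.as_ptr()).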
+ #[must_use]
+ /** Get Bridge Chip Information for all the bridge chips on the board.
+
+ For all fully supported products.
+ Only applicable to multi-GPU products.
+
+ @param device The identifier of the target device
+ @param bridgeHierarchy Reference to the returned bridge chip Hierarchy
+
+ @return
+ - \ref NVML_SUCCESS if bridge chip exists
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a bridgeHierarchy is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if bridge chip not supported on the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+*/
+ fn nvmlDeviceGetBridgeChipInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ bridgeHierarchy: *mut cuda_types::nvml::nvmlBridgeChipHierarchy_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get information about processes with a compute context on a device
+
+ For Fermi &tm; or newer fully supported devices.
+
+ This function returns information only about running compute processes (e.g. CUDA applications which have
+ an active context). Any graphics applications (e.g. those using OpenGL, DirectX) won't be listed by this function.
+
+ To query the current number of running compute processes, call this function with *infoCount = 0. The
+ return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call
+ \a infos is allowed to be NULL.
+
+ The usedGpuMemory field returned is all of the memory used by the application.
+
+ Keep in mind that information returned by this call is dynamic and the number of elements might change in
+ time. Allocate more space for \a infos table in case new compute processes are spawned.
+
+ @note In MIG mode, if device handle is provided, the API returns aggregate information, only if
+ the caller has appropriate privileges. Per-instance information can be queried by using
+ specific MIG device handles.
+ Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode.
+
+ @param device The device handle or MIG device handle
+ @param infoCount Reference in which to provide the \a infos array size, and
+ to return the number of returned elements
+ @param infos Reference in which to return the process information
+
+ @return
+ - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small
+ \a infoCount will contain minimal amount of space necessary for
+ the call to complete
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see \ref nvmlSystemGetProcessName*/
+ fn nvmlDeviceGetComputeRunningProcesses_v3(
+ device: cuda_types::nvml::nvmlDevice_t,
+ infoCount: *mut ::core::ffi::c_uint,
+ infos: *mut cuda_types::nvml::nvmlProcessInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
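+    // Illustrative sketch (not part of the generated bindings) of the sizing
+    // protocol described above: a first call with *infoCount = 0 reports the
+    // required element count via NVML_ERROR_INSUFFICIENT_SIZE; the second call
+    // fills the buffer. Assumes an initialized library and a valid `device`.
+    //
+    //     let mut info_count: ::core::ffi::c_uint = 0;
+    //     unsafe { nvmlDeviceGetComputeRunningProcesses_v3(device, &mut info_count, std::ptr::null_mut()) };
+    //     info_count += 8; // headroom in case new processes appear between the calls
+    //     let mut infos: Vec<cuda_types::nvml::nvmlProcessInfo_t> =
+    //         (0..info_count).map(|_| unsafe { std::mem::zeroed() }).collect();
+    //     let status = unsafe {
+    //         nvmlDeviceGetComputeRunningProcesses_v3(device, &mut info_count, infos.as_mut_ptr())
+    //     };
+    //     // On NVML_SUCCESS, `info_count` holds the number of valid entries in `infos`.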
+ #[must_use]
+ /** Get information about processes with a graphics context on a device
+
+ For Kepler &tm; or newer fully supported devices.
+
+ This function returns information only about graphics-based processes
+ (e.g. applications using OpenGL, DirectX).
+
+ To query the current number of running graphics processes, call this function with *infoCount = 0. The
+ return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call
+ \a infos is allowed to be NULL.
+
+ The usedGpuMemory field returned is all of the memory used by the application.
+
+ Keep in mind that information returned by this call is dynamic and the number of elements might change in
+ time. Allocate more space for \a infos table in case new graphics processes are spawned.
+
+ @note In MIG mode, if device handle is provided, the API returns aggregate information, only if
+ the caller has appropriate privileges. Per-instance information can be queried by using
+ specific MIG device handles.
+ Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode.
+
+ @param device The device handle or MIG device handle
+ @param infoCount Reference in which to provide the \a infos array size, and
+ to return the number of returned elements
+ @param infos Reference in which to return the process information
+
+ @return
+ - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small
+ \a infoCount will contain minimal amount of space necessary for
+ the call to complete
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see \ref nvmlSystemGetProcessName*/
+ fn nvmlDeviceGetGraphicsRunningProcesses_v3(
+ device: cuda_types::nvml::nvmlDevice_t,
+ infoCount: *mut ::core::ffi::c_uint,
+ infos: *mut cuda_types::nvml::nvmlProcessInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get information about processes with a MPS compute context on a device
+
+ For Volta &tm; or newer fully supported devices.
+
+ This function returns information only about running compute processes (e.g. CUDA applications which have
+ an active context) utilizing MPS. Any graphics applications (e.g. those using OpenGL, DirectX) won't be listed by
+ this function.
+
+ To query the current number of running compute processes, call this function with *infoCount = 0. The
+ return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call
+ \a infos is allowed to be NULL.
+
+ The usedGpuMemory field returned is all of the memory used by the application.
+
+ Keep in mind that information returned by this call is dynamic and the number of elements might change in
+ time. Allocate more space for \a infos table in case new compute processes are spawned.
+
+ @note In MIG mode, if device handle is provided, the API returns aggregate information, only if
+ the caller has appropriate privileges. Per-instance information can be queried by using
+ specific MIG device handles.
+ Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode.
+
+ @param device The device handle or MIG device handle
+ @param infoCount Reference in which to provide the \a infos array size, and
+ to return the number of returned elements
+ @param infos Reference in which to return the process information
+
+ @return
+ - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small
+ \a infoCount will contain minimal amount of space necessary for
+ the call to complete
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see \ref nvmlSystemGetProcessName*/
+ fn nvmlDeviceGetMPSComputeRunningProcesses_v3(
+ device: cuda_types::nvml::nvmlDevice_t,
+ infoCount: *mut ::core::ffi::c_uint,
+ infos: *mut cuda_types::nvml::nvmlProcessInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get information about running processes on a device for input context
+
+ %HOPPER_OR_NEWER%
+
+ This function returns information only about running processes (e.g. CUDA applications which have
+ an active context).
+
+ To determine the size of the @ref plist->procArray array to allocate, call the function with
+ @ref plist->numProcArrayEntries set to zero and @ref plist->procArray set to NULL. The return
+ code will be either NVML_ERROR_INSUFFICIENT_SIZE (if there are valid processes of type
+ @ref plist->mode to report on, in which case the @ref plist->numProcArrayEntries field will
+ indicate the required number of entries in the array) or NVML_SUCCESS (if no processes of type
+ @ref plist->mode exist).
+
+ The usedGpuMemory field returned is all of the memory used by the application.
+ The usedGpuCcProtectedMemory field returned is all of the protected memory used by the application.
+
+ Keep in mind that information returned by this call is dynamic and the number of elements might change in
+ time. Allocate more space for \a plist->procArray table in case new processes are spawned.
+
+ @note In MIG mode, if device handle is provided, the API returns aggregate information, only if
+ the caller has appropriate privileges. Per-instance information can be queried by using
+ specific MIG device handles.
+ Querying per-instance information using MIG device handles is not supported if the device is in
+ vGPU Host virtualization mode.
+ Protected memory usage is currently not available in MIG mode and on Windows.
+
+ @param device The device handle or MIG device handle
+ @param plist Reference in which to return the process detail list
+ @param plist->version The api version
+ @param plist->mode The process mode
+ @param plist->procArray Reference in which to return the process information
+ @param plist->numProcArrayEntries Proc array size of returned entries
+
+ @return
+ - \ref NVML_SUCCESS if \a plist->numProcArrayEntries and \a plist->procArray have been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a plist->numProcArrayEntries indicates that the \a plist->procArray is too small
+ \a plist->numProcArrayEntries will contain the minimal amount of space necessary for
+ the call to complete
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a plist is NULL, \a plist->version is invalid,
+ or \a plist->mode is invalid
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+*/
+ fn nvmlDeviceGetRunningProcessDetailList(
+ device: cuda_types::nvml::nvmlDevice_t,
+ plist: *mut cuda_types::nvml::nvmlProcessDetailList_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
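+    // Illustrative sketch (not part of the generated bindings) of the sizing flow
+    // described above. Field names follow the parameter documentation; the exact
+    // version constant to store in `version` is an assumption and should be taken
+    // from the nvml.h matching the target driver. `mode` selects which process
+    // class to report on and is left at its zeroed default here.
+    //
+    //     let mut plist: cuda_types::nvml::nvmlProcessDetailList_t = unsafe { std::mem::zeroed() };
+    //     plist.version = 1; // placeholder for the nvmlProcessDetailList version constant
+    //     plist.numProcArrayEntries = 0;
+    //     plist.procArray = std::ptr::null_mut();
+    //     let sizing = unsafe { nvmlDeviceGetRunningProcessDetailList(device, &mut plist) };
+    //     // NVML_ERROR_INSUFFICIENT_SIZE: allocate plist.numProcArrayEntries entries,
+    //     // point plist.procArray at them and call again.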
+ #[must_use]
+ /** Check if the GPU devices are on the same physical board.
+
+ For all fully supported products.
+
+ @param device1 The first GPU device
+ @param device2 The second GPU device
+ @param onSameBoard Reference in which to return the status.
+ Non-zero indicates that the GPUs are on the same board.
+
+ @return
+ - \ref NVML_SUCCESS if \a onSameBoard has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device1 or \a device2 is invalid or \a onSameBoard is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this check is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if either GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceOnSameBoard(
+ device1: cuda_types::nvml::nvmlDevice_t,
+ device2: cuda_types::nvml::nvmlDevice_t,
+ onSameBoard: *mut ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the root/admin permissions on the target API. See \a nvmlRestrictedAPI_t for the list of supported APIs.
+ If an API is restricted only root users can call that API. See \a nvmlDeviceSetAPIRestriction to change current permissions.
+
+ For all fully supported products.
+
+ @param device The identifier of the target device
+ @param apiType Target API type for this operation
+ @param isRestricted Reference in which to return the current restriction
+ NVML_FEATURE_ENABLED indicates that the API is root-only
+ NVML_FEATURE_DISABLED indicates that the API is accessible to all users
+
+ @return
+ - \ref NVML_SUCCESS if \a isRestricted has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a apiType incorrect or \a isRestricted is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device or the device does not support
+ the feature that is being queried (e.g. enabling/disabling Auto Boosted clocks is
+ not supported by the device)
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlRestrictedAPI_t*/
+ fn nvmlDeviceGetAPIRestriction(
+ device: cuda_types::nvml::nvmlDevice_t,
+ apiType: cuda_types::nvml::nvmlRestrictedAPI_t,
+ isRestricted: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Gets recent samples for the GPU.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ Based on type, this method can be used to fetch the power, utilization or clock samples maintained in the buffer by
+ the driver.
+
+ Power, Utilization and Clock samples are returned as type "unsigned int" for the union nvmlValue_t.
+
+ To get the number of samples that the user needs to allocate, the method is invoked with \a samples set to NULL.
+ The returned \a sampleCount will provide the number of samples that can be queried. The user needs to
+ allocate the buffer with size sampleCount * sizeof(nvmlSample_t).
+
+ lastSeenTimeStamp represents CPU timestamp in microseconds. Set it to 0 to fetch all the samples maintained by the
+ underlying buffer. Set lastSeenTimeStamp to one of the timeStamps retrieved from the date of the previous query
+ to get more recent samples.
+
+ This method fetches the number of entries which can be accommodated in the provided samples array, and the
+ reference \a sampleCount is updated to indicate how many samples were actually retrieved. The advantage of using this
+ method for samples in contrast to polling via existing methods is to get higher frequency data at lower polling cost.
+
+ @note On MIG-enabled GPUs, querying the following sample types, NVML_GPU_UTILIZATION_SAMPLES, NVML_MEMORY_UTILIZATION_SAMPLES
+ NVML_ENC_UTILIZATION_SAMPLES and NVML_DEC_UTILIZATION_SAMPLES, is not currently supported.
+
+ @param device The identifier for the target device
+ @param type Type of sampling event
+ @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp.
+ @param sampleValType Output parameter to represent the type of sample value as described in nvmlSampleVal_t
+ @param sampleCount Reference to provide the number of elements which can be queried in samples array
+ @param samples Reference in which samples are returned
+
+ @return
+ - \ref NVML_SUCCESS if samples are successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a sampleCount is NULL or
+ the value referenced by \a sampleCount is 0 for a non-null \a samples
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_NOT_FOUND if sample entries are not found
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetSamples(
+ device: cuda_types::nvml::nvmlDevice_t,
+ type_: cuda_types::nvml::nvmlSamplingType_t,
+ lastSeenTimeStamp: ::core::ffi::c_ulonglong,
+ sampleValType: *mut cuda_types::nvml::nvmlValueType_t,
+ sampleCount: *mut ::core::ffi::c_uint,
+ samples: *mut cuda_types::nvml::nvmlSample_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
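+    // Illustrative sketch (not part of the generated bindings) of the sizing flow
+    // described above. `sample_type` stands for any valid nvmlSamplingType_t value
+    // (an assumption), and lastSeenTimeStamp = 0 requests all buffered samples.
+    //
+    //     let mut val_type: cuda_types::nvml::nvmlValueType_t = unsafe { std::mem::zeroed() };
+    //     let mut sample_count: ::core::ffi::c_uint = 0;
+    //     unsafe {
+    //         nvmlDeviceGetSamples(device, sample_type, 0, &mut val_type, &mut sample_count, std::ptr::null_mut())
+    //     };
+    //     let mut samples: Vec<cuda_types::nvml::nvmlSample_t> =
+    //         (0..sample_count).map(|_| unsafe { std::mem::zeroed() }).collect();
+    //     let status = unsafe {
+    //         nvmlDeviceGetSamples(device, sample_type, 0, &mut val_type, &mut sample_count, samples.as_mut_ptr())
+    //     };
+    //     // On NVML_SUCCESS, `sample_count` holds the number of samples written.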
+ #[must_use]
+ /** Gets Total, Available and Used size of BAR1 memory.
+
+ BAR1 is used to map the FB (device memory) so that it can be directly accessed by the CPU or by 3rd party
+ devices (peer-to-peer on the PCIE bus).
+
+ @note In MIG mode, if device handle is provided, the API returns aggregate
+ information, only if the caller has appropriate privileges. Per-instance
+ information can be queried by using specific MIG device handles.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param bar1Memory Reference in which BAR1 memory
+ information is returned.
+
+ @return
+ - \ref NVML_SUCCESS if BAR1 memory is successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a bar1Memory is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+*/
+ fn nvmlDeviceGetBAR1MemoryInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ bar1Memory: *mut cuda_types::nvml::nvmlBAR1Memory_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Gets the duration of time during which the device was throttled (lower than requested clocks) due to power
+ or thermal constraints.
+
+ The method is important to users who are trying to understand if their GPUs throttle at any point during their applications. The
+ difference in violation times at two different reference times indicates that a GPU throttling event occurred between them.
+
+ Violation for thermal capping is not supported at this time.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param perfPolicyType Represents Performance policy which can trigger GPU throttling
+ @param violTime Reference to which violation time related information is returned
+
+
+ @return
+ - \ref NVML_SUCCESS if violation time is successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a perfPolicyType is invalid, or \a violTime is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+*/
+ fn nvmlDeviceGetViolationStatus(
+ device: cuda_types::nvml::nvmlDevice_t,
+ perfPolicyType: cuda_types::nvml::nvmlPerfPolicyType_t,
+ violTime: *mut cuda_types::nvml::nvmlViolationTime_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
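+    // Illustrative sketch (not part of the generated bindings) of the differencing
+    // approach described above: take two readings some time apart; growth of the
+    // violation counter between them indicates throttling during that window.
+    // `policy` stands for any valid nvmlPerfPolicyType_t value (an assumption),
+    // and field names follow the C nvmlViolationTime_st layout.
+    //
+    //     let mut before: cuda_types::nvml::nvmlViolationTime_t = unsafe { std::mem::zeroed() };
+    //     unsafe { nvmlDeviceGetViolationStatus(device, policy, &mut before) };
+    //     std::thread::sleep(std::time::Duration::from_secs(1));
+    //     let mut after: cuda_types::nvml::nvmlViolationTime_t = unsafe { std::mem::zeroed() };
+    //     unsafe { nvmlDeviceGetViolationStatus(device, policy, &mut after) };
+    //     let throttled = after.violationTime > before.violationTime;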
+ #[must_use]
+ /** Gets the device's interrupt number
+
+ @param device The identifier of the target device
+ @param irqNum The interrupt number associated with the specified device
+
+ @return
+ - \ref NVML_SUCCESS if irq number is successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a irqNum is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+*/
+ fn nvmlDeviceGetIrqNum(
+ device: cuda_types::nvml::nvmlDevice_t,
+ irqNum: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Gets the device's core count
+
+ @param device The identifier of the target device
+ @param numCores The number of cores for the specified device
+
+ @return
+ - \ref NVML_SUCCESS if the GPU core count is successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a numCores is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+*/
+ fn nvmlDeviceGetNumGpuCores(
+ device: cuda_types::nvml::nvmlDevice_t,
+ numCores: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Gets the device's power source
+
+ @param device The identifier of the target device
+ @param powerSource The power source of the device
+
+ @return
+ - \ref NVML_SUCCESS if the current power source was successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a powerSource is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+*/
+ fn nvmlDeviceGetPowerSource(
+ device: cuda_types::nvml::nvmlDevice_t,
+ powerSource: *mut cuda_types::nvml::nvmlPowerSource_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Gets the device's memory bus width
+
+ @param device The identifier of the target device
+ @param busWidth The device's memory bus width
+
+ @return
+ - \ref NVML_SUCCESS if the memory bus width is successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a busWidth is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+*/
+ fn nvmlDeviceGetMemoryBusWidth(
+ device: cuda_types::nvml::nvmlDevice_t,
+ busWidth: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Gets the device's PCIE Max Link speed in MBPS
+
+ @param device The identifier of the target device
+ @param maxSpeed The device's PCIE Max Link speed in MBPS
+
+ @return
+ - \ref NVML_SUCCESS if the PCIe max link speed is successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a maxSpeed is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+*/
+ fn nvmlDeviceGetPcieLinkMaxSpeed(
+ device: cuda_types::nvml::nvmlDevice_t,
+ maxSpeed: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Gets the device's PCIe Link speed in Mbps
+
+ @param device The identifier of the target device
+ @param pcieSpeed The device's PCIe link speed in Mbps
+
+ @return
+ - \ref NVML_SUCCESS if \a pcieSpeed has been retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pcieSpeed is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support PCIe speed getting
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPcieSpeed(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pcieSpeed: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Gets the device's Adaptive Clock status
+
+ @param device The identifier of the target device
+ @param adaptiveClockStatus The current adaptive clocking status, either
+ @ref NVML_ADAPTIVE_CLOCKING_INFO_STATUS_DISABLED
+ or @ref NVML_ADAPTIVE_CLOCKING_INFO_STATUS_ENABLED
+
+ @return
+ - \ref NVML_SUCCESS if the current adaptive clocking status is successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a adaptiveClockStatus is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+*/
+ fn nvmlDeviceGetAdaptiveClockInfoStatus(
+ device: cuda_types::nvml::nvmlDevice_t,
+ adaptiveClockStatus: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get the type of the GPU Bus (PCIe, PCI, ...)
+
+ @param device The identifier of the target device
+ @param type The PCI Bus type
+
+ @return
+ - \ref NVML_SUCCESS if the bus \a type is successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a type is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetBusType(
+ device: cuda_types::nvml::nvmlDevice_t,
+ type_: *mut cuda_types::nvml::nvmlBusType_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Deprecated: Will be deprecated in a future release. Use \ref nvmlDeviceGetGpuFabricInfoV instead
+
+ Get fabric information associated with the device.
+
+ %HOPPER_OR_NEWER%
+
+ On Hopper + NVSwitch systems, the GPU is registered with the NVIDIA Fabric Manager.
+ Upon successful registration, the GPU is added to the NVLink fabric to enable
+ peer-to-peer communication.
+ This API reports the current state of the GPU in the NVLink fabric
+ along with other useful information.
+
+
+ @param device The identifier of the target device
+ @param gpuFabricInfo Information about GPU fabric state
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support gpu fabric*/
+ fn nvmlDeviceGetGpuFabricInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ gpuFabricInfo: *mut cuda_types::nvml::nvmlGpuFabricInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Versioned wrapper around \ref nvmlDeviceGetGpuFabricInfo that accepts a versioned
+ \ref nvmlGpuFabricInfo_v2_t or later output structure.
+
+ @note The caller must set the \ref nvmlGpuFabricInfoV_t.version field to the
+ appropriate version prior to calling this function. For example:
+ \code
+ nvmlGpuFabricInfoV_t fabricInfo = { .version = nvmlGpuFabricInfo_v2 };
+ nvmlReturn_t result = nvmlDeviceGetGpuFabricInfoV(device, &fabricInfo);
+ \endcode
+
+ %HOPPER_OR_NEWER%
+
+ @param device The identifier of the target device
+ @param gpuFabricInfo Information about GPU fabric state
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support gpu fabric*/
+ fn nvmlDeviceGetGpuFabricInfoV(
+ device: cuda_types::nvml::nvmlDevice_t,
+ gpuFabricInfo: *mut cuda_types::nvml::nvmlGpuFabricInfoV_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get Conf Computing System capabilities.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ @param capabilities System CC capabilities
+
+ @return
+ - \ref NVML_SUCCESS if \a capabilities were successfully queried
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a capabilities is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device*/
+ fn nvmlSystemGetConfComputeCapabilities(
+ capabilities: *mut cuda_types::nvml::nvmlConfComputeSystemCaps_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get Conf Computing System State.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ @param state System CC State
+
+ @return
+ - \ref NVML_SUCCESS if \a state was successfully queried
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a state is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device*/
+ fn nvmlSystemGetConfComputeState(
+ state: *mut cuda_types::nvml::nvmlConfComputeSystemState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get Conf Computing Protected and Unprotected Memory Sizes.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ @param device Device handle
+ @param memInfo Protected/Unprotected Memory sizes
+
+ @return
+ - \ref NVML_SUCCESS if \a memInfo was successfully queried
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a memInfo or \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device*/
+ fn nvmlDeviceGetConfComputeMemSizeInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ memInfo: *mut cuda_types::nvml::nvmlConfComputeMemSizeInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get Conf Computing GPUs ready state.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ @param isAcceptingWork Returns GPU current work accepting state,
+ NVML_CC_ACCEPTING_CLIENT_REQUESTS_TRUE or
+ NVML_CC_ACCEPTING_CLIENT_REQUESTS_FALSE
+
+ @return
+ - \ref NVML_SUCCESS if the current GPUs ready state was successfully queried
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a isAcceptingWork is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device*/
+ fn nvmlSystemGetConfComputeGpusReadyState(
+ isAcceptingWork: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get Conf Computing protected memory usage.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ @param device The identifier of the target device
+ @param memory Reference in which to return the memory information
+
+ @return
+ - \ref NVML_SUCCESS if \a memory has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memory is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetConfComputeProtectedMemoryUsage(
+ device: cuda_types::nvml::nvmlDevice_t,
+ memory: *mut cuda_types::nvml::nvmlMemory_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get Conf Computing Gpu certificate details.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ @param device The identifier of the target device
+ @param gpuCert Reference in which to return the gpu certificate information
+
+ @return
+ - \ref NVML_SUCCESS if the GPU certificate info in \a gpuCert has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a gpuCert is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetConfComputeGpuCertificate(
+ device: cuda_types::nvml::nvmlDevice_t,
+ gpuCert: *mut cuda_types::nvml::nvmlConfComputeGpuCertificate_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get Conf Computing Gpu attestation report.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ @param device The identifier of the target device
+ @param gpuAtstReport Reference in which to return the gpu attestation report
+
+ @return
+ - \ref NVML_SUCCESS if the GPU attestation report in \a gpuAtstReport has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a gpuAtstReport is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetConfComputeGpuAttestationReport(
+ device: cuda_types::nvml::nvmlDevice_t,
+ gpuAtstReport: *mut cuda_types::nvml::nvmlConfComputeGpuAttestationReport_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get Conf Computing key rotation threshold detail.
+
+ %HOPPER_OR_NEWER%
+ Supported on Linux, Windows TCC.
+
+ @param pKeyRotationThrInfo Reference in which to return the key rotation threshold data
+
+ @return
+ - \ref NVML_SUCCESS if the key rotation threshold info in \a pKeyRotationThrInfo has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pKeyRotationThrInfo is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlSystemGetConfComputeKeyRotationThresholdInfo(
+ pKeyRotationThrInfo: *mut cuda_types::nvml::nvmlConfComputeGetKeyRotationThresholdInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get Conf Computing System Settings.
+
+ %HOPPER_OR_NEWER%
+ Supported on Linux, Windows TCC.
+
+ @param settings System CC settings
+
+ @return
+ - \ref NVML_SUCCESS if the query was successful
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a settings is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH if the provided version is invalid/unsupported
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlSystemGetConfComputeSettings(
+ settings: *mut cuda_types::nvml::nvmlSystemConfComputeSettings_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve GSP firmware version.
+
+ The caller passes in a buffer via \a version and the corresponding GSP firmware version
+ is returned in the same parameter as a string.
+
+ @param device Device handle
+ @param version The retrieved GSP firmware version
+
+ @return
+ - \ref NVML_SUCCESS if GSP firmware version is successfully retrieved
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or GSP \a version pointer is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if GSP firmware is not enabled for GPU
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetGspFirmwareVersion(
+ device: cuda_types::nvml::nvmlDevice_t,
+ version: *mut ::core::ffi::c_char,
+ ) -> cuda_types::nvml::nvmlReturn_t;
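+ // Usage sketch (not part of the generated bindings): the docs above only say the caller
+ // supplies the buffer; the 0x40 size below is an assumption mirroring the C header's
+ // NVML_GSP_FIRMWARE_VERSION_BUF_SIZE constant.
+ //
+ //     let mut version = [0 as ::core::ffi::c_char; 0x40]; // assumed buffer size
+ //     let ret = unsafe { nvmlDeviceGetGspFirmwareVersion(device, version.as_mut_ptr()) };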
+ #[must_use]
+ /** Retrieve GSP firmware mode.
+
+ The caller passes in integer pointers. GSP firmware enablement and default mode information is returned with
+ the corresponding parameters. The returned values in \a isEnabled and \a defaultMode should be treated as booleans.
+
+ @param device Device handle
+ @param isEnabled Pointer to specify if GSP firmware is enabled
+ @param defaultMode Pointer to specify if GSP firmware is supported by default on \a device
+
+ @return
+ - \ref NVML_SUCCESS if GSP firmware mode is successfully retrieved
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or any of \a isEnabled or \a defaultMode is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetGspFirmwareMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ isEnabled: *mut ::core::ffi::c_uint,
+ defaultMode: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Queries the state of per process accounting mode.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ See \ref nvmlDeviceGetAccountingStats for more details.
+ See \ref nvmlDeviceSetAccountingMode
+
+ @param device The identifier of the target device
+ @param mode Reference in which to return the current accounting mode
+
+ @return
+ - \ref NVML_SUCCESS if the mode has been successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetAccountingMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ mode: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Queries process's accounting stats.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ Accounting stats capture GPU utilization and other statistics across the lifetime of a process.
+ Accounting stats can be queried during the lifetime of the process and after its termination.
+ The time field in \ref nvmlAccountingStats_t is reported as 0 during the lifetime of the process and
+ updated to actual running time after its termination.
+ Accounting stats are kept in a circular buffer; newly created processes overwrite information about old
+ processes.
+
+ See \ref nvmlAccountingStats_t for description of each returned metric.
+ List of processes that can be queried can be retrieved from \ref nvmlDeviceGetAccountingPids.
+
+ @note Accounting Mode needs to be on. See \ref nvmlDeviceGetAccountingMode.
+ @note Only compute and graphics applications stats can be queried. Monitoring applications stats can't be
+ queried since they don't contribute to GPU utilization.
+ @note In case of pid collision, only the stats of the latest process (the one that terminated last) will be reported.
+
+ @warning On Kepler devices per process statistics are accurate only if there's one process running on a GPU.
+
+ @param device The identifier of the target device
+ @param pid Process Id of the target process to query stats for
+ @param stats Reference in which to return the process's accounting stats
+
+ @return
+ - \ref NVML_SUCCESS if stats have been successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a stats is NULL
+ - \ref NVML_ERROR_NOT_FOUND if process stats were not found
+ - \ref NVML_ERROR_NOT_SUPPORTED if \a device doesn't support this feature or accounting mode is disabled
+ or on vGPU host.
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetAccountingBufferSize*/
+ fn nvmlDeviceGetAccountingStats(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pid: ::core::ffi::c_uint,
+ stats: *mut cuda_types::nvml::nvmlAccountingStats_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Queries list of processes that can be queried for accounting stats. The list of processes returned
+ can be in running or terminated state.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ To just query the number of processes ready to be queried, call this function with *count = 0 and
+ pids = NULL. The return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if the list is empty.
+
+ For more details see \ref nvmlDeviceGetAccountingStats.
+
+ @note In case of PID collision some processes might not be accessible before the circular buffer is full.
+
+ @param device The identifier of the target device
+ @param count Reference in which to provide the \a pids array size, and
+ to return the number of elements ready to be queried
+ @param pids Reference in which to return list of process ids
+
+ @return
+ - \ref NVML_SUCCESS if pids were successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a count is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if \a device doesn't support this feature or accounting mode is disabled
+ or on vGPU host.
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to
+ expected value)
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetAccountingBufferSize*/
+ fn nvmlDeviceGetAccountingPids(
+ device: cuda_types::nvml::nvmlDevice_t,
+ count: *mut ::core::ffi::c_uint,
+ pids: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
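+ // Usage sketch (not part of the generated bindings): the two-pass pattern described
+ // above — first query the required size with count = 0 and pids = NULL, then call again
+ // with a buffer of that size. Error handling is elided.
+ //
+ //     let mut count: ::core::ffi::c_uint = 0;
+ //     unsafe { nvmlDeviceGetAccountingPids(device, &mut count, ::core::ptr::null_mut()) };
+ //     let mut pids = vec![0u32; count as usize];
+ //     unsafe { nvmlDeviceGetAccountingPids(device, &mut count, pids.as_mut_ptr()) };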
+ #[must_use]
+ /** Returns the number of processes that the circular buffer with accounting pids can hold.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ This is the maximum number of processes that accounting information will be stored for before information
+ about oldest processes will get overwritten by information about new processes.
+
+ @param device The identifier of the target device
+ @param bufferSize Reference in which to provide the size (in number of elements)
+ of the circular buffer for accounting stats.
+
+ @return
+ - \ref NVML_SUCCESS if buffer size was successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a bufferSize is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature or accounting mode is disabled
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetAccountingStats
+ @see nvmlDeviceGetAccountingPids*/
+ fn nvmlDeviceGetAccountingBufferSize(
+ device: cuda_types::nvml::nvmlDevice_t,
+ bufferSize: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Returns the list of retired pages by source, including pages that are pending retirement.
+ The address information provided from this API is the hardware address of the page that was retired. Note
+ that this does not match the virtual address used in CUDA, but will match the address information in XID 63.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param cause Filter page addresses by cause of retirement
+ @param pageCount Reference in which to provide the \a addresses buffer size, and
+ to return the number of retired pages that match \a cause
+ Set to 0 to query the size without allocating an \a addresses buffer
+ @param addresses Buffer to write the page addresses into
+
+ @return
+ - \ref NVML_SUCCESS if \a pageCount was populated and \a addresses was filled
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a pageCount indicates the buffer is not large enough to store all the
+ matching page addresses. \a pageCount is set to the needed size.
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a pageCount is NULL, \a cause is invalid, or
+ \a addresses is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetRetiredPages(
+ device: cuda_types::nvml::nvmlDevice_t,
+ cause: cuda_types::nvml::nvmlPageRetirementCause_t,
+ pageCount: *mut ::core::ffi::c_uint,
+ addresses: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
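+ // Usage sketch (not part of the generated bindings): size query first (pageCount = 0,
+ // addresses = NULL), then a second call with an allocated buffer, as the docs describe.
+ // `cause` is whichever nvmlPageRetirementCause_t value the caller filters on.
+ //
+ //     let mut page_count: ::core::ffi::c_uint = 0;
+ //     unsafe { nvmlDeviceGetRetiredPages(device, cause, &mut page_count, ::core::ptr::null_mut()) };
+ //     let mut addresses = vec![0u64; page_count as usize];
+ //     unsafe { nvmlDeviceGetRetiredPages(device, cause, &mut page_count, addresses.as_mut_ptr()) };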
+ #[must_use]
+ /** Returns the list of retired pages by source, including pages that are pending retirement.
+ The address information provided from this API is the hardware address of the page that was retired. Note
+ that this does not match the virtual address used in CUDA, but will match the address information in XID 63.
+
+ \note nvmlDeviceGetRetiredPages_v2 adds an additional timestamps parameter to return the time of each page's
+ retirement.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param cause Filter page addresses by cause of retirement
+ @param pageCount Reference in which to provide the \a addresses buffer size, and
+ to return the number of retired pages that match \a cause
+ Set to 0 to query the size without allocating an \a addresses buffer
+ @param addresses Buffer to write the page addresses into
+ @param timestamps Buffer to write the timestamps of page retirement, additional for _v2
+
+ @return
+ - \ref NVML_SUCCESS if \a pageCount was populated and \a addresses was filled
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a pageCount indicates the buffer is not large enough to store all the
+ matching page addresses. \a pageCount is set to the needed size.
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a pageCount is NULL, \a cause is invalid, or
+ \a addresses is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetRetiredPages_v2(
+ device: cuda_types::nvml::nvmlDevice_t,
+ cause: cuda_types::nvml::nvmlPageRetirementCause_t,
+ pageCount: *mut ::core::ffi::c_uint,
+ addresses: *mut ::core::ffi::c_ulonglong,
+ timestamps: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Check if any pages are pending retirement and need a reboot to fully retire.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param isPending Reference in which to return the pending status
+
+ @return
+ - \ref NVML_SUCCESS if \a isPending was populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isPending is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetRetiredPagesPendingStatus(
+ device: cuda_types::nvml::nvmlDevice_t,
+ isPending: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get number of remapped rows. The number of rows reported will be based on
+ the cause of the remapping. isPending indicates whether or not there are
+ pending remappings. A reset will be required to actually remap the row.
+ failureOccurred will be set if a row remapping ever failed in the past. A
+ pending remapping won't affect future work on the GPU since
+ error-containment and dynamic page blacklisting will take care of that.
+
+ @note On MIG-enabled GPUs with active instances, querying the number of
+ remapped rows is not supported
+
+ For Ampere &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param corrRows Reference for number of rows remapped due to correctable errors
+ @param uncRows Reference for number of rows remapped due to uncorrectable errors
+ @param isPending Reference for whether or not remappings are pending
+ @param failureOccurred Reference that is set when a remapping has failed in the past
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a corrRows, \a uncRows, \a isPending or \a failureOccurred is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If MIG is enabled or if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN Unexpected error*/
+ fn nvmlDeviceGetRemappedRows(
+ device: cuda_types::nvml::nvmlDevice_t,
+ corrRows: *mut ::core::ffi::c_uint,
+ uncRows: *mut ::core::ffi::c_uint,
+ isPending: *mut ::core::ffi::c_uint,
+ failureOccurred: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get the row remapper histogram. Returns the remap availability for each bank
+ on the GPU.
+
+ @param device Device handle
+ @param values Histogram values
+
+ @return
+ - \ref NVML_SUCCESS On success
+ - \ref NVML_ERROR_UNKNOWN On any unexpected error*/
+ fn nvmlDeviceGetRowRemapperHistogram(
+ device: cuda_types::nvml::nvmlDevice_t,
+ values: *mut cuda_types::nvml::nvmlRowRemapperHistogramValues_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get architecture for device
+
+ @param device The identifier of the target device
+ @param arch Reference where architecture is returned, if call successful.
+ Set to NVML_DEVICE_ARCH_* upon success
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device or \a arch (output reference) are invalid*/
+ fn nvmlDeviceGetArchitecture(
+ device: cuda_types::nvml::nvmlDevice_t,
+ arch: *mut cuda_types::nvml::nvmlDeviceArchitecture_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the frequency monitor fault status for the device.
+
+ For Ampere &tm; or newer fully supported devices.
+ Requires root user.
+
+ See \ref nvmlClkMonStatus_t for details on decoding the status output.
+
+ @param device The identifier of the target device
+ @param status Reference in which to return the clkmon fault status
+
+ @return
+ - \ref NVML_SUCCESS if \a status has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a status is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetClkMonStatus()*/
+ fn nvmlDeviceGetClkMonStatus(
+ device: cuda_types::nvml::nvmlDevice_t,
+ status: *mut cuda_types::nvml::nvmlClkMonStatus_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current utilization and process ID
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for processes running.
+ Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer pointed at
+ by \a utilization. One utilization sample structure is returned per running process that had some non-zero utilization
+ during the last sample period. It includes the CPU timestamp at which the samples were recorded. Individual utilization values
+ are returned as "unsigned int" values. If no valid sample entries are found since the lastSeenTimeStamp, NVML_ERROR_NOT_FOUND
+ is returned.
+
+ To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with
+ \a utilization set to NULL. The caller should allocate a buffer of size
+ processSamplesCount * sizeof(nvmlProcessUtilizationSample_t). Invoke the function again with the allocated buffer passed
+ in \a utilization, and \a processSamplesCount set to the number of entries the buffer is sized for.
+
+ On successful return, the function updates \a processSamplesCount with the number of process utilization sample
+ structures that were actually written. This may differ from a previously read value as instances are created or
+ destroyed.
+
+ lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0
+ to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp
+ to a timeStamp retrieved from a previous query to read utilization since the previous query.
+
+ @note On MIG-enabled GPUs, querying process utilization is not currently supported.
+
+ @param device The identifier of the target device
+ @param utilization Pointer to caller-supplied buffer in which guest process utilization samples are returned
+ @param processSamplesCount Pointer to caller-supplied array size, and returns number of processes running
+ @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp.
+
+ @return
+ - \ref NVML_SUCCESS if \a utilization has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a processSamplesCount is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_NOT_FOUND if sample entries are not found
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetProcessUtilization(
+ device: cuda_types::nvml::nvmlDevice_t,
+ utilization: *mut cuda_types::nvml::nvmlProcessUtilizationSample_t,
+ processSamplesCount: *mut ::core::ffi::c_uint,
+ lastSeenTimeStamp: ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
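+ // Usage sketch (not part of the generated bindings): the two-step buffer sizing the docs
+ // describe — a first call with `utilization` = NULL to learn the sample count, then a
+ // second call with a buffer of that size. Zero-initializing the sample structs is an
+ // assumption; error handling is elided.
+ //
+ //     let mut n: ::core::ffi::c_uint = 0;
+ //     unsafe { nvmlDeviceGetProcessUtilization(device, ::core::ptr::null_mut(), &mut n, 0) };
+ //     let mut samples: Vec<cuda_types::nvml::nvmlProcessUtilizationSample_t> =
+ //         vec![unsafe { ::core::mem::zeroed() }; n as usize];
+ //     unsafe { nvmlDeviceGetProcessUtilization(device, samples.as_mut_ptr(), &mut n, 0) };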
+ #[must_use]
+ /** Retrieves the recent utilization and process ID for all running processes
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder, jpeg decoder, OFA (Optical Flow Accelerator)
+ for all running processes. Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer pointed at
+ by \a procesesUtilInfo->procUtilArray. One utilization sample structure is returned per running process that had some non-zero utilization
+ during the last sample period. It includes the CPU timestamp at which the samples were recorded. Individual utilization values
+ are returned as "unsigned int" values.
+
+ The caller should allocate a buffer of size processSamplesCount * sizeof(nvmlProcessUtilizationInfo_t). If the buffer is too small, the API will
+ return \a NVML_ERROR_INSUFFICIENT_SIZE, with the recommended minimal buffer size at \a procesesUtilInfo->processSamplesCount. The caller should
+ invoke the function again with the allocated buffer passed in \a procesesUtilInfo->procUtilArray, and \a procesesUtilInfo->processSamplesCount
+ set to a value no less than the one recommended by the previous call.
+
+ On successful return, the function updates \a procesesUtilInfo->processSamplesCount with the number of process utilization info structures
+ that were actually written. This may differ from a previously read value as instances are created or destroyed.
+
+ \a procesesUtilInfo->lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0
+ to read utilization based on all the samples maintained by the driver's internal sample buffer. Set \a procesesUtilInfo->lastSeenTimeStamp
+ to a timeStamp retrieved from a previous query to read utilization since the previous query.
+
+ \a procesesUtilInfo->version is the version number of the structure nvmlProcessesUtilizationInfo_t, the caller should set the correct version
+ number to retrieve the specific version of processes utilization information.
+
+ @note On MIG-enabled GPUs, querying process utilization is not currently supported.
+
+ @param device The identifier of the target device
+ @param procesesUtilInfo Pointer to the caller-provided structure of nvmlProcessesUtilizationInfo_t.
+
+ @return
+ - \ref NVML_SUCCESS if \a procesesUtilInfo->procUtilArray has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a procesesUtilInfo is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_NOT_FOUND if sample entries are not found
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_VERSION_MISMATCH if the version of \a procesesUtilInfo is invalid
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a procesesUtilInfo->procUtilArray is NULL, or the buffer size of procesesUtilInfo->procUtilArray is too small.
+ The caller should check the minimal array size from the returned procesesUtilInfo->processSamplesCount, and call
+ the function again with a buffer no smaller than procesesUtilInfo->processSamplesCount * sizeof(nvmlProcessUtilizationInfo_t)
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetProcessesUtilizationInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ procesesUtilInfo: *mut cuda_types::nvml::nvmlProcessesUtilizationInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
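+ // Usage sketch (not part of the generated bindings): as with the other versioned-struct
+ // APIs, the caller sets `.version` first and, on NVML_ERROR_INSUFFICIENT_SIZE, retries with
+ // a `procUtilArray` sized from the returned `processSamplesCount`. The constant name
+ // `nvmlProcessesUtilizationInfo_v1` is an assumption about these bindings.
+ //
+ //     let mut info: cuda_types::nvml::nvmlProcessesUtilizationInfo_t = unsafe { std::mem::zeroed() };
+ //     info.version = cuda_types::nvml::nvmlProcessesUtilizationInfo_v1; // assumed constant name
+ //     let ret = unsafe { nvmlDeviceGetProcessesUtilizationInfo(device, &mut info) };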
+ #[must_use]
+ /** Set the LED state for the unit. The LED can be either green (0) or amber (1).
+
+ For S-class products.
+ Requires root/admin permissions.
+
+ This operation takes effect immediately.
+
+
+ <b>Current S-Class products don't provide unique LEDs for each unit. As such, both front
+ and back LEDs will be toggled in unison regardless of which unit is specified with this command.</b>
+
+ See \ref nvmlLedColor_t for available colors.
+
+ @param unit The identifier of the target unit
+ @param color The target LED color
+
+ @return
+ - \ref NVML_SUCCESS if the LED color has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit or \a color is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlUnitGetLedState()*/
+ fn nvmlUnitSetLedState(
+ unit: cuda_types::nvml::nvmlUnit_t,
+ color: cuda_types::nvml::nvmlLedColor_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set the persistence mode for the device.
+
+ For all products.
+ For Linux only.
+ Requires root/admin permissions.
+
+ The persistence mode determines whether the GPU driver software is torn down after the last client
+ exits.
+
+ This operation takes effect immediately. It is not persistent across reboots. After each reboot the
+ persistence mode is reset to "Disabled".
+
+ See \ref nvmlEnableState_t for available modes.
+
+ After calling this API with mode set to NVML_FEATURE_DISABLED on a device that has its own NUMA
+ memory, the given device handle will no longer be valid, and to continue to interact with this
+ device, a new handle should be obtained from one of the nvmlDeviceGetHandleBy*() APIs. This
+ limitation is currently only applicable to devices that have a coherent NVLink connection to
+ system memory.
+
+ @param device The identifier of the target device
+ @param mode The target persistence mode
+
+ @return
+ - \ref NVML_SUCCESS if the persistence mode was set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetPersistenceMode()*/
+ fn nvmlDeviceSetPersistenceMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ mode: cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set the compute mode for the device.
+
+ For all products.
+ Requires root/admin permissions.
+
+ The compute mode determines whether a GPU can be used for compute operations and whether it can
+ be shared across contexts.
+
+ This operation takes effect immediately. Under Linux it is not persistent across reboots and
+ always resets to "Default". Under Windows it is persistent.
+
+ Under Windows, compute mode may only be set to DEFAULT when running in WDDM.
+
+ @note On MIG-enabled GPUs, compute mode would be set to DEFAULT and changing it is not supported.
+
+ See \ref nvmlComputeMode_t for details on available compute modes.
+
+ @param device The identifier of the target device
+ @param mode The target compute mode
+
+ @return
+ - \ref NVML_SUCCESS if the compute mode was set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetComputeMode()*/
+ fn nvmlDeviceSetComputeMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ mode: cuda_types::nvml::nvmlComputeMode_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set the ECC mode for the device.
+
+ For Kepler &tm; or newer fully supported devices.
+ Only applicable to devices with ECC.
+ Requires \a NVML_INFOROM_ECC version 1.0 or higher.
+ Requires root/admin permissions.
+
+ The ECC mode determines whether the GPU enables its ECC support.
+
+ This operation takes effect after the next reboot.
+
+ See \ref nvmlEnableState_t for details on available modes.
+
+ @param device The identifier of the target device
+ @param ecc The target ECC mode
+
+ @return
+ - \ref NVML_SUCCESS if the ECC mode was set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a ecc is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetEccMode()*/
+ fn nvmlDeviceSetEccMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ ecc: cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Clear the ECC error and other memory error counts for the device.
+
+ For Kepler &tm; or newer fully supported devices.
+ Only applicable to devices with ECC.
+ Requires \a NVML_INFOROM_ECC version 2.0 or higher to clear aggregate location-based ECC counts.
+ Requires \a NVML_INFOROM_ECC version 1.0 or higher to clear all other ECC counts.
+ Requires root/admin permissions.
+ Requires ECC Mode to be enabled.
+
+ Sets all of the specified ECC counters to 0, including both detailed and total counts.
+
+ This operation takes effect immediately.
+
+ See \ref nvmlMemoryErrorType_t for details on available counter types.
+
+ @param device The identifier of the target device
+ @param counterType Flag that indicates which type of errors should be cleared.
+
+ @return
+ - \ref NVML_SUCCESS if the error counts were cleared
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a counterType is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see
+ - nvmlDeviceGetDetailedEccErrors()
+ - nvmlDeviceGetTotalEccErrors()*/
+ fn nvmlDeviceClearEccErrorCounts(
+ device: cuda_types::nvml::nvmlDevice_t,
+ counterType: cuda_types::nvml::nvmlEccCounterType_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set the driver model for the device.
+
+ For Fermi &tm; or newer fully supported devices.
+ For Windows only.
+ Requires root/admin permissions.
+
+ On Windows platforms the device driver can run in either WDDM or WDM (TCC) mode. If a display is attached
+ to the device it must run in WDDM mode.
+
+ It is possible to force the change to WDM (TCC) while the display is still attached with a force flag (nvmlFlagForce).
+ This should only be done if the host is subsequently powered down and the display is detached from the device
+ before the next reboot.
+
+ This operation takes effect after the next reboot.
+
+ Windows driver model may only be set to WDDM when running in DEFAULT compute mode.
+
+ Changing the driver model to WDDM is not supported when the GPU doesn't support graphics acceleration or
+ will not support it after reboot. See \ref nvmlDeviceSetGpuOperationMode.
+
+ See \ref nvmlDriverModel_t for details on available driver models.
+ See \ref nvmlFlagDefault and \ref nvmlFlagForce
+
+ @param device The identifier of the target device
+ @param driverModel The target driver model
+ @param flags Flags that change the default behavior
+
+ @return
+ - \ref NVML_SUCCESS if the driver model has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a driverModel is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the platform is not windows or the device does not support this feature
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetDriverModel()*/
+ fn nvmlDeviceSetDriverModel(
+ device: cuda_types::nvml::nvmlDevice_t,
+ driverModel: cuda_types::nvml::nvmlDriverModel_t,
+ flags: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set clocks that device will lock to.
+
+ Sets the clocks that the device will be running at to the value in the range of minGpuClockMHz to maxGpuClockMHz.
+ Setting this will supersede application clock values and take effect regardless of whether a CUDA app is running.
+ See \ref nvmlDeviceSetApplicationsClocks.
+
+ Can be used as a setting to request constant performance.
+
+ This can be called with a pair of integer clock frequencies in MHz, or a pair of \ref nvmlClockLimitId_t values.
+ See the table below for valid combinations of these values.
+
+ minGpuClock | maxGpuClock | Effect
+ ------------+-------------+--------------------------------------------------
+ tdp | tdp | Lock clock to TDP
+ unlimited | tdp | Upper bound is TDP but clock may drift below this
+ tdp | unlimited | Lower bound is TDP but clock may boost above this
+ unlimited | unlimited | Unlocked (== nvmlDeviceResetGpuLockedClocks)
+
+ If one arg takes one of these values, the other must be one of these values as
+ well. Mixed numeric and symbolic calls return NVML_ERROR_INVALID_ARGUMENT.
+
+ Requires root/admin permissions.
+
+ After system reboot or driver reload applications clocks go back to their default value.
+ See \ref nvmlDeviceResetGpuLockedClocks.
+
+ For Volta &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param minGpuClockMHz Requested minimum gpu clock in MHz
+ @param maxGpuClockMHz Requested maximum gpu clock in MHz
+
+ @return
+ - \ref NVML_SUCCESS if new settings were successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minGpuClockMHz and \a maxGpuClockMHz
+ is not a valid clock combination
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetGpuLockedClocks(
+ device: cuda_types::nvml::nvmlDevice_t,
+ minGpuClockMHz: ::core::ffi::c_uint,
+ maxGpuClockMHz: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
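+ // Usage sketch (not part of the generated bindings): lock the GPU clock to a numeric range
+ // and undo it afterwards, per the table above. The MHz values are placeholders; the reset
+ // call is declared just below.
+ //
+ //     unsafe { nvmlDeviceSetGpuLockedClocks(device, 1200, 1500) }; // example min/max MHz
+ //     // ... run the workload at the pinned clocks ...
+ //     unsafe { nvmlDeviceResetGpuLockedClocks(device) };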
+ #[must_use]
+ /** Resets the gpu clock to the default value
+
+ This is the gpu clock that will be used after system reboot or driver reload.
+ Default values are idle clocks, but the current values can be changed using \ref nvmlDeviceSetApplicationsClocks.
+
+ @see nvmlDeviceSetGpuLockedClocks
+
+ For Volta &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+
+ @return
+ - \ref NVML_SUCCESS if new settings were successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceResetGpuLockedClocks(
+ device: cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set memory clocks that device will lock to.
+
+ Sets the device's memory clocks to the value in the range of minMemClockMHz to maxMemClockMHz.
+ Setting this will supersede application clock values and take effect regardless of whether a CUDA app is running.
+ See \ref nvmlDeviceSetApplicationsClocks.
+
+ Can be used as a setting to request constant performance.
+
+ Requires root/admin permissions.
+
+ After system reboot or driver reload applications clocks go back to their default value.
+ See \ref nvmlDeviceResetMemoryLockedClocks.
+
+ For Ampere &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param minMemClockMHz Requested minimum memory clock in MHz
+ @param maxMemClockMHz Requested maximum memory clock in MHz
+
+ @return
+ - \ref NVML_SUCCESS if new settings were successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minMemClockMHz and \a maxMemClockMHz
+ is not a valid clock combination
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetMemoryLockedClocks(
+ device: cuda_types::nvml::nvmlDevice_t,
+ minMemClockMHz: ::core::ffi::c_uint,
+ maxMemClockMHz: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Resets the memory clock to the default value
+
+ This is the memory clock that will be used after system reboot or driver reload.
+ Default values are idle clocks, but the current values can be changed using \ref nvmlDeviceSetApplicationsClocks.
+
+ @see nvmlDeviceSetMemoryLockedClocks
+
+ For Ampere &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+
+ @return
+ - \ref NVML_SUCCESS if new settings were successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceResetMemoryLockedClocks(
+ device: cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set clocks that applications will lock to.
+
+ Sets the clocks that compute and graphics applications will be running at.
+ e.g. the CUDA driver requests these clocks during context creation, which means this property
+ defines the clocks at which CUDA applications will be running unless some overspec event
+ occurs (e.g. over power, over thermal or external HW brake).
+
+ Can be used as a setting to request constant performance.
+
+ On Pascal and newer hardware, this will automatically disable automatic boosting of clocks.
+
+ On K80 and newer Kepler and Maxwell GPUs, users desiring fixed performance should also call
+ \ref nvmlDeviceSetAutoBoostedClocksEnabled to prevent clocks from automatically boosting
+ above the clock value being set.
+
+ For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices.
+ Requires root/admin permissions.
+
+ See \ref nvmlDeviceGetSupportedMemoryClocks and \ref nvmlDeviceGetSupportedGraphicsClocks
+ for details on how to list available clocks combinations.
+
+ After system reboot or driver reload applications clocks go back to their default value.
+ See \ref nvmlDeviceResetApplicationsClocks.
+
+ @param device The identifier of the target device
+ @param memClockMHz Requested memory clock in MHz
+ @param graphicsClockMHz Requested graphics clock in MHz
+
+ @return
+ - \ref NVML_SUCCESS if new settings were successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memClockMHz and \a graphicsClockMHz
+ is not a valid clock combination
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetApplicationsClocks(
+ device: cuda_types::nvml::nvmlDevice_t,
+ memClockMHz: ::core::ffi::c_uint,
+ graphicsClockMHz: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
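+ // Usage sketch (not part of the generated bindings): pick a memory/graphics pair from the
+ // supported-clocks queries referenced above and apply it; the MHz values here are
+ // placeholders, and the reset call (declared just below) restores the defaults.
+ //
+ //     unsafe { nvmlDeviceSetApplicationsClocks(device, 5001, 1380) }; // example mem/graphics MHz
+ //     unsafe { nvmlDeviceResetApplicationsClocks(device) };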
+ #[must_use]
+ /** Resets the application clock to the default value
+
+ This is the applications clock that will be used after system reboot or driver reload.
+ Default value is constant, but the current value can be changed using \ref nvmlDeviceSetApplicationsClocks.
+
+ On Pascal and newer hardware, if clocks were previously locked with \ref nvmlDeviceSetApplicationsClocks,
+ this call will unlock clocks. This returns clocks to their default behavior of automatically boosting above
+ base clocks as thermal limits allow.
+
+ @see nvmlDeviceGetApplicationsClock
+ @see nvmlDeviceSetApplicationsClocks
+
+ For Fermi &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices.
+
+ @param device The identifier of the target device
+
+ @return
+ - \ref NVML_SUCCESS if new settings were successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceResetApplicationsClocks(
+ device: cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Try to set the current state of Auto Boosted clocks on a device.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates
+ to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock
+ rates are desired.
+
+ Non-root users may use this API by default but can be restricted by root from using this API by calling
+ \ref nvmlDeviceSetAPIRestriction with apiType=NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS.
+ Note: Persistence Mode is required to modify current Auto Boost settings; therefore, it must be enabled.
+
+ On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks.
+ Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost
+ behavior.
+
+ @param device The identifier of the target device
+ @param enabled What state to try to set Auto Boosted clocks of the target device to
+
+ @return
+ - \ref NVML_SUCCESS If the Auto Boosted clocks were successfully set to the state specified by \a enabled
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+*/
+ fn nvmlDeviceSetAutoBoostedClocksEnabled(
+ device: cuda_types::nvml::nvmlDevice_t,
+ enabled: cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Try to set the default state of Auto Boosted clocks on a device. This is the default state that Auto Boosted clocks will
+ return to when no compute processes (e.g. CUDA applications which have an active context) are running.
+
+ For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices.
+ Requires root/admin permissions.
+
+ Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates
+ to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock
+ rates are desired.
+
+ On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks.
+ Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost
+ behavior.
+
+ @param device The identifier of the target device
+ @param enabled What state to try to set default Auto Boosted clocks of the target device to
+ @param flags Flags that change the default behavior. Currently Unused.
+
+ @return
+ - \ref NVML_SUCCESS If the Auto Boosted clock's default state was successfully set to the state specified by \a enabled
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_NO_PERMISSION If the calling user does not have permission to change Auto Boosted clock's default state.
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+*/
+ fn nvmlDeviceSetDefaultAutoBoostedClocksEnabled(
+ device: cuda_types::nvml::nvmlDevice_t,
+ enabled: cuda_types::nvml::nvmlEnableState_t,
+ flags: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Sets the speed of the fan control policy to default.
+
+ For all cuda-capable discrete products with fans
+
+ @param device The identifier of the target device
+ @param fan The index of the fan, starting at zero
+
+ @return
+ - \ref NVML_SUCCESS if speed has been adjusted
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this
+ (doesn't have fans)
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetDefaultFanSpeed_v2(
+ device: cuda_types::nvml::nvmlDevice_t,
+ fan: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Sets current fan control policy.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ Requires privileged user.
+
+ For all cuda-capable discrete products with fans
+
+ @param device The identifier of the target device
+ @param fan The index of the fan, starting at zero
+ @param policy The fan control policy to set
+
+ @return
+ - \ref NVML_SUCCESS if \a policy has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a policy is invalid, or the \a fan given doesn't reference
+ a fan that exists.
+ - \ref NVML_ERROR_NOT_SUPPORTED if the \a device is older than Maxwell
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetFanControlPolicy(
+ device: cuda_types::nvml::nvmlDevice_t,
+ fan: ::core::ffi::c_uint,
+ policy: cuda_types::nvml::nvmlFanControlPolicy_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Sets the temperature threshold for the GPU with the specified threshold type in degrees C.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ See \ref nvmlTemperatureThresholds_t for details on available temperature thresholds.
+
+ @param device The identifier of the target device
+ @param thresholdType The type of threshold value to be set
+ @param temp Reference which holds the value to be set
+ @return
+ - \ref NVML_SUCCESS if \a temp has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a thresholdType is invalid or \a temp is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a temperature sensor or is unsupported
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetTemperatureThreshold(
+ device: cuda_types::nvml::nvmlDevice_t,
+ thresholdType: cuda_types::nvml::nvmlTemperatureThresholds_t,
+ temp: *mut ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set new power limit of this device.
+
+ For Kepler &tm; or newer fully supported devices.
+ Requires root/admin permissions.
+
+ See \ref nvmlDeviceGetPowerManagementLimitConstraints to check the allowed ranges of values.
+
+ \note Limit is not persistent across reboots or driver unloads.
+ Enable persistent mode to prevent driver from unloading when no application is using the device.
+
+ @param device The identifier of the target device
+ @param limit Power management limit in milliwatts to set
+
+ @return
+ - \ref NVML_SUCCESS if \a limit has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a limit is out of range
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceGetPowerManagementLimitConstraints
+ @see nvmlDeviceGetPowerManagementDefaultLimit*/
+ fn nvmlDeviceSetPowerManagementLimit(
+ device: cuda_types::nvml::nvmlDevice_t,
+ limit: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Sets new GOM. See \a nvmlGpuOperationMode_t for details.
+
+ For GK110 M-class and X-class Tesla &tm; products from the Kepler family.
+ Modes \ref NVML_GOM_LOW_DP and \ref NVML_GOM_ALL_ON are supported on fully supported GeForce products.
+ Not supported on Quadro &reg; and Tesla &tm; C-class products.
+ Requires root/admin permissions.
+
+ Changing GOMs requires a reboot.
+ The reboot requirement might be removed in the future.
+
+ Compute only GOMs don't support graphics acceleration. Under Windows, switching to these GOMs when the
+ pending driver model is WDDM is not supported. See \ref nvmlDeviceSetDriverModel.
+
+ @param device The identifier of the target device
+ @param mode Target GOM
+
+ @return
+ - \ref NVML_SUCCESS if \a mode has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode incorrect
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support GOM or specific mode
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlGpuOperationMode_t
+ @see nvmlDeviceGetGpuOperationMode*/
+ fn nvmlDeviceSetGpuOperationMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ mode: cuda_types::nvml::nvmlGpuOperationMode_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Changes the root/admin restrictions on certain APIs. See \a nvmlRestrictedAPI_t for the list of supported APIs.
+ This method can be used by a root/admin user to give non-root/admin access to certain otherwise-restricted APIs.
+ The new setting lasts for the lifetime of the NVIDIA driver; it is not persistent. See \a nvmlDeviceGetAPIRestriction
+ to query the current restriction settings.
+
+ For Kepler &tm; or newer fully supported devices.
+ Requires root/admin permissions.
+
+ @param device The identifier of the target device
+ @param apiType Target API type for this operation
+ @param isRestricted The target restriction
+
+ @return
+ - \ref NVML_SUCCESS if \a isRestricted has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a apiType incorrect
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support changing API restrictions or the device does not support
+ the feature that API restrictions are being set for (e.g. enabling/disabling auto
+ boosted clocks is not supported by the device)
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlRestrictedAPI_t*/
+ fn nvmlDeviceSetAPIRestriction(
+ device: cuda_types::nvml::nvmlDevice_t,
+ apiType: cuda_types::nvml::nvmlRestrictedAPI_t,
+ isRestricted: cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Sets the speed of a specified fan.
+
+ WARNING: This function changes the fan control policy to manual. It means that YOU have to monitor
+ the temperature and adjust the fan speed accordingly.
+ If you set the fan speed too low you can burn your GPU!
+ Use nvmlDeviceSetDefaultFanSpeed_v2 to restore default control policy.
+
+ For all CUDA-capable discrete products with fans that are Maxwell or newer.
+
+ @param device The identifier of the target device
+ @param fan The index of the fan, starting at zero
+ @param speed The target speed of the fan [0-100] in % of max speed
+
+ @return
+ - \ref NVML_SUCCESS if the fan speed has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if the device is not valid, or the speed is outside acceptable ranges,
+ or if the fan index doesn't reference an actual fan
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device is older than Maxwell
+ - \ref NVML_ERROR_UNKNOWN if there was an unexpected error*/
+ fn nvmlDeviceSetFanSpeed_v2(
+ device: cuda_types::nvml::nvmlDevice_t,
+ fan: ::core::ffi::c_uint,
+ speed: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
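+ // Illustrative usage sketch (not part of the generated bindings): pinning a fan to a
+ // manual speed and later handing control back to the driver. nvmlDeviceSetDefaultFanSpeed_v2
+ // is the restore call referenced in the warning above and is assumed to be declared
+ // elsewhere in this file; `device` is assumed to be a valid handle with at least one fan:
+ //
+ //     unsafe {
+ //         // fan 0 at 80% of maximum speed; from here on the caller must watch temperatures
+ //         let _ = nvmlDeviceSetFanSpeed_v2(device, 0, 80);
+ //         // ... run the workload, monitoring the device temperature ...
+ //         // restore the automatic fan control policy
+ //         let _ = nvmlDeviceSetDefaultFanSpeed_v2(device, 0);
+ //     }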
+ #[must_use]
+ /** Set the GPCCLK VF offset value
+ @param[in] device The identifier of the target device
+ @param[in] offset The GPCCLK VF offset value to set
+
+ @return
+ - \ref NVML_SUCCESS if \a offset has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is out of range
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetGpcClkVfOffset(
+ device: cuda_types::nvml::nvmlDevice_t,
+ offset: ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set the MemClk (Memory Clock) VF offset value. It requires elevated privileges.
+ @param[in] device The identifier of the target device
+ @param[in] offset The MemClk VF offset value to set
+
+ @return
+ - \ref NVML_SUCCESS if \a offset has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is out of range
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetMemClkVfOffset(
+ device: cuda_types::nvml::nvmlDevice_t,
+ offset: ::core::ffi::c_int,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set Conf Computing Unprotected Memory Size.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ @param device Device Handle
+ @param sizeKiB Unprotected Memory size to be set in KiB
+
+ @return
+ - \ref NVML_SUCCESS if \a sizeKiB successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device*/
+ fn nvmlDeviceSetConfComputeUnprotectedMemSize(
+ device: cuda_types::nvml::nvmlDevice_t,
+ sizeKiB: ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set Conf Computing GPUs ready state.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ @param isAcceptingWork GPU accepting new work, NVML_CC_ACCEPTING_CLIENT_REQUESTS_TRUE or
+ NVML_CC_ACCEPTING_CLIENT_REQUESTS_FALSE
+
+ @return
+ - \ref NVML_SUCCESS if the GPUs ready state is successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a isAcceptingWork is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device*/
+ fn nvmlSystemSetConfComputeGpusReadyState(
+ isAcceptingWork: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set Conf Computing key rotation threshold.
+
+ For Hopper &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ This function is to set the confidential compute key rotation threshold parameters.
+ @ref pKeyRotationThrInfo->maxAttackerAdvantage should be in the range from
+ NVML_CC_KEY_ROTATION_THRESHOLD_ATTACKER_ADVANTAGE_MIN to NVML_CC_KEY_ROTATION_THRESHOLD_ATTACKER_ADVANTAGE_MAX.
+ Default value is 60.
+
+ @param pKeyRotationThrInfo Reference to the key rotation threshold data
+
+ @return
+ - \ref NVML_SUCCESS if the key rotation threshold max attacker advantage has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pKeyRotationThrInfo is NULL or its contents are invalid
+ - \ref NVML_ERROR_INVALID_STATE if confidential compute GPU ready state is enabled
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlSystemSetConfComputeKeyRotationThresholdInfo(
+ pKeyRotationThrInfo: *mut cuda_types::nvml::nvmlConfComputeSetKeyRotationThresholdInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Enables or disables per process accounting.
+
+ For Kepler &tm; or newer fully supported devices.
+ Requires root/admin permissions.
+
+ @note This setting is not persistent and will default to disabled after driver unloads.
+ Enable persistence mode to keep the setting from reverting to disabled.
+
+ @note Enabling accounting mode has no negative impact on the GPU performance.
+
+ @note Disabling accounting clears all accounting pids information.
+
+ @note On MIG-enabled GPUs, accounting mode would be set to DISABLED and changing it is not supported.
+
+ See \ref nvmlDeviceGetAccountingMode
+ See \ref nvmlDeviceGetAccountingStats
+ See \ref nvmlDeviceClearAccountingPids
+
+ @param device The identifier of the target device
+ @param mode The target accounting mode
+
+ @return
+ - \ref NVML_SUCCESS if the new mode has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a mode are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetAccountingMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ mode: cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Clears accounting information about all processes that have already terminated.
+
+ For Kepler &tm; or newer fully supported devices.
+ Requires root/admin permissions.
+
+ See \ref nvmlDeviceGetAccountingMode
+ See \ref nvmlDeviceGetAccountingStats
+ See \ref nvmlDeviceSetAccountingMode
+
+ @param device The identifier of the target device
+
+ @return
+ - \ref NVML_SUCCESS if accounting information has been cleared
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceClearAccountingPids(
+ device: cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the state of the device's NvLink for the link specified
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param link Specifies the NvLink link to be queried
+ @param isActive \a nvmlEnableState_t where NVML_FEATURE_ENABLED indicates that
+ the link is active and NVML_FEATURE_DISABLED indicates it
+ is inactive
+
+ @return
+ - \ref NVML_SUCCESS if \a isActive has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a isActive is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetNvLinkState(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ isActive: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the version of the device's NvLink for the link specified
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param link Specifies the NvLink link to be queried
+ @param version Requested NvLink version
+
+ @return
+ - \ref NVML_SUCCESS if \a version has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a version is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetNvLinkVersion(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ version: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
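+ // Illustrative usage sketch (not part of the generated bindings): probing every link
+ // index with the getters above. The upper bound should be NVML_NVLINK_MAX_LINKS from
+ // the NVML headers (18 in recent versions); a literal is used here to keep the sketch
+ // self-contained:
+ //
+ //     for link in 0u32..18 {
+ //         let mut version: ::core::ffi::c_uint = 0;
+ //         let status = unsafe { nvmlDeviceGetNvLinkVersion(device, link, &mut version) };
+ //         // anything other than success (e.g. NVML_ERROR_INVALID_ARGUMENT or
+ //         // NVML_ERROR_NOT_SUPPORTED) just means this link index is not usable
+ //     }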
+ #[must_use]
+ /** Retrieves the requested capability from the device's NvLink for the link specified
+ Please refer to the \a nvmlNvLinkCapability_t structure for the specific caps that can be queried
+ The return value should be treated as a boolean.
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param link Specifies the NvLink link to be queried
+ @param capability Specifies the \a nvmlNvLinkCapability_t to be queried
+ @param capResult A boolean for the queried capability indicating that feature is available
+
+ @return
+ - \ref NVML_SUCCESS if \a capResult has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a capability is invalid or \a capResult is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetNvLinkCapability(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ capability: cuda_types::nvml::nvmlNvLinkCapability_t,
+ capResult: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the PCI information for the remote node on a NvLink link
+ Note: pciSubSystemId is not filled in this function and is indeterminate
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param link Specifies the NvLink link to be queried
+ @param pci \a nvmlPciInfo_t of the remote node for the specified link
+
+ @return
+ - \ref NVML_SUCCESS if \a pci has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a pci is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetNvLinkRemotePciInfo_v2(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ pci: *mut cuda_types::nvml::nvmlPciInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the specified error counter value
+ Please refer to \a nvmlNvLinkErrorCounter_t for error counters that are available
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param link Specifies the NvLink link to be queried
+ @param counter Specifies the NvLink counter to be queried
+ @param counterValue Returned counter value
+
+ @return
+ - \ref NVML_SUCCESS if \a counterValue has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a counter is invalid or \a counterValue is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetNvLinkErrorCounter(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ counter: cuda_types::nvml::nvmlNvLinkErrorCounter_t,
+ counterValue: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Resets all error counters to zero
+ Please refer to \a nvmlNvLinkErrorCounter_t for the list of error counters that are reset
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param link Specifies the NvLink link to be queried
+
+ @return
+ - \ref NVML_SUCCESS if the reset is successful
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceResetNvLinkErrorCounters(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Deprecated: Setting utilization counter control is no longer supported.
+
+ Set the NVLINK utilization counter control information for the specified counter, 0 or 1.
+ Please refer to \a nvmlNvLinkUtilizationControl_t for the structure definition. Performs a reset
+ of the counters if the reset parameter is non-zero.
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param counter Specifies the counter that should be set (0 or 1).
+ @param link Specifies the NvLink link to be queried
+ @param control A reference to the \a nvmlNvLinkUtilizationControl_t to set
+ @param reset Resets the counters on set if non-zero
+
+ @return
+ - \ref NVML_SUCCESS if the control has been set successfully
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, \a link, or \a control is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetNvLinkUtilizationControl(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ counter: ::core::ffi::c_uint,
+ control: *mut cuda_types::nvml::nvmlNvLinkUtilizationControl_t,
+ reset: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Deprecated: Getting utilization counter control is no longer supported.
+
+ Get the NVLINK utilization counter control information for the specified counter, 0 or 1.
+ Please refer to \a nvmlNvLinkUtilizationControl_t for the structure definition
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param counter Specifies the counter that should be set (0 or 1).
+ @param link Specifies the NvLink link to be queried
+ @param control A reference to the \a nvmlNvLinkUtilizationControl_t to place information
+
+ @return
+ - \ref NVML_SUCCESS if \a control has been successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, \a link, or \a control is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetNvLinkUtilizationControl(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ counter: ::core::ffi::c_uint,
+ control: *mut cuda_types::nvml::nvmlNvLinkUtilizationControl_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Deprecated: Use \ref nvmlDeviceGetFieldValues with NVML_FI_DEV_NVLINK_THROUGHPUT_* as field values instead.
+
+ Retrieve the NVLINK utilization counter based on the current control for a specified counter.
+ In general it is good practice to use \a nvmlDeviceSetNvLinkUtilizationControl
+ before reading the utilization counters as they have no default state
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param link Specifies the NvLink link to be queried
+ @param counter Specifies the counter that should be read (0 or 1).
+ @param rxcounter Receive counter return value
+ @param txcounter Transmit counter return value
+
+ @return
+ - \ref NVML_SUCCESS if \a rxcounter and \a txcounter have been successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, or \a link is invalid or \a rxcounter or \a txcounter are NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetNvLinkUtilizationCounter(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ counter: ::core::ffi::c_uint,
+ rxcounter: *mut ::core::ffi::c_ulonglong,
+ txcounter: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Deprecated: Freezing NVLINK utilization counters is no longer supported.
+
+ Freeze the NVLINK utilization counters
+ Both the receive and transmit counters are operated on by this function
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param link Specifies the NvLink link to be queried
+ @param counter Specifies the counter that should be frozen (0 or 1).
+ @param freeze NVML_FEATURE_ENABLED = freeze the receive and transmit counters
+ NVML_FEATURE_DISABLED = unfreeze the receive and transmit counters
+
+ @return
+ - \ref NVML_SUCCESS if counters were successfully frozen or unfrozen
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, \a counter, or \a freeze is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceFreezeNvLinkUtilizationCounter(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ counter: ::core::ffi::c_uint,
+ freeze: cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Deprecated: Resetting NVLINK utilization counters is no longer supported.
+
+ Reset the NVLINK utilization counters
+ Both the receive and transmit counters are operated on by this function
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param link Specifies the NvLink link to be reset
+ @param counter Specifies the counter that should be reset (0 or 1)
+
+ @return
+ - \ref NVML_SUCCESS if counters were successfully reset
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a counter is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceResetNvLinkUtilizationCounter(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ counter: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get the NVLink device type of the remote device connected over the given link.
+
+ @param device The device handle of the target GPU
+ @param link The NVLink link index on the target GPU
+ @param pNvLinkDeviceType Pointer in which the output remote device type is returned
+
+ @return
+ - \ref NVML_SUCCESS if \a pNvLinkDeviceType has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_NOT_SUPPORTED if NVLink is not supported
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid, or
+ \a pNvLinkDeviceType is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is
+ otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetNvLinkRemoteDeviceType(
+ device: cuda_types::nvml::nvmlDevice_t,
+ link: ::core::ffi::c_uint,
+ pNvLinkDeviceType: *mut cuda_types::nvml::nvmlIntNvLinkDeviceType_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Create an empty set of events.
+ Event set should be freed by \ref nvmlEventSetFree
+
+ For Fermi &tm; or newer fully supported devices.
+ @param set Reference in which to return the event handle
+
+ @return
+ - \ref NVML_SUCCESS if the event set has been created
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a set is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlEventSetFree*/
+ fn nvmlEventSetCreate(
+ set: *mut cuda_types::nvml::nvmlEventSet_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Starts recording of events on a specified device and adds the events to the specified \ref nvmlEventSet_t
+
+ For Fermi &tm; or newer fully supported devices.
+ ECC events are available only on ECC-enabled devices (see \ref nvmlDeviceGetTotalEccErrors)
+ Power capping events are available only on Power Management enabled devices (see \ref nvmlDeviceGetPowerManagementMode)
+
+ For Linux only.
+
+ \b IMPORTANT: Operations on \a set are not thread safe
+
+ This call starts recording of events on the specified device.
+ Events that occurred before this call are not recorded.
+ Checking whether an event occurred can be done with \ref nvmlEventSetWait_v2
+
+ If the function reports NVML_ERROR_UNKNOWN, the event set is in an undefined state and should be freed.
+ If the function reports NVML_ERROR_NOT_SUPPORTED, the event set can still be used. None of the requested eventTypes
+ are registered in that case.
+
+ @param device The identifier of the target device
+ @param eventTypes Bitmask of \ref nvmlEventType to record
+ @param set Set to which add new event types
+
+ @return
+ - \ref NVML_SUCCESS if the events have been registered
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a eventTypes is invalid or \a set is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the platform does not support this feature or some of requested event types
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlEventType
+ @see nvmlDeviceGetSupportedEventTypes
+ @see nvmlEventSetWait
+ @see nvmlEventSetFree*/
+ fn nvmlDeviceRegisterEvents(
+ device: cuda_types::nvml::nvmlDevice_t,
+ eventTypes: ::core::ffi::c_ulonglong,
+ set: cuda_types::nvml::nvmlEventSet_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Returns information about events supported on device
+
+ For Fermi &tm; or newer fully supported devices.
+
+ Events are not supported on Windows, so this function returns an empty mask in \a eventTypes on Windows.
+
+ @param device The identifier of the target device
+ @param eventTypes Reference in which to return bitmask of supported events
+
+ @return
+ - \ref NVML_SUCCESS if \a eventTypes has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a eventTypes is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlEventType
+ @see nvmlDeviceRegisterEvents*/
+ fn nvmlDeviceGetSupportedEventTypes(
+ device: cuda_types::nvml::nvmlDevice_t,
+ eventTypes: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Waits on events and delivers events
+
+ For Fermi &tm; or newer fully supported devices.
+
+ If some events are ready to be delivered at the time of the call, the function returns immediately.
+ If there are no events ready to be delivered, the function sleeps until an event arrives,
+ but no longer than the specified timeout. Under certain conditions this function can return before
+ the specified timeout passes (e.g. when an interrupt arrives).
+
+ On Windows, in case of xid error, the function returns the most recent xid error type seen by the system.
+ If there are multiple xid errors generated before nvmlEventSetWait is invoked then the last seen xid error
+ type is returned for all xid error events.
+
+ On Linux, every xid error event would return the associated event data and other information if applicable.
+
+ In MIG mode, if a device handle is provided, the API reports all the events for the available instances,
+ but only if the caller has appropriate privileges. In the absence of the required privileges, only the events
+ which affect all the instances (i.e. the whole device) are reported.
+
+ This API does not currently support per-instance event reporting using MIG device handles.
+
+ @param set Reference to set of events to wait on
+ @param data Reference in which to return event data
+ @param timeoutms Maximum amount of wait time in milliseconds for registered event
+
+ @return
+ - \ref NVML_SUCCESS if the data has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a data is NULL
+ - \ref NVML_ERROR_TIMEOUT if no event arrived in specified timeout or interrupt arrived
+ - \ref NVML_ERROR_GPU_IS_LOST if a GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlEventType
+ @see nvmlDeviceRegisterEvents*/
+ fn nvmlEventSetWait_v2(
+ set: cuda_types::nvml::nvmlEventSet_t,
+ data: *mut cuda_types::nvml::nvmlEventData_t,
+ timeoutms: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Releases events in the set
+
+ For Fermi &tm; or newer fully supported devices.
+
+ @param set Reference to events to be released
+
+ @return
+ - \ref NVML_SUCCESS if the event set has been successfully released
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlDeviceRegisterEvents*/
+ fn nvmlEventSetFree(
+ set: cuda_types::nvml::nvmlEventSet_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
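+ // Illustrative workflow sketch for the event APIs above (not part of the generated
+ // bindings): create a set, register a bitmask of nvmlEventType values for a device,
+ // wait with a timeout, then free the set. `event_mask` is a placeholder for whichever
+ // nvmlEventType* constants the caller cares about; nvmlEventSet_t is a pointer typedef
+ // and nvmlEventData_t a plain C struct in the NVML headers, so null/zero initialization
+ // is assumed to be fine:
+ //
+ //     let mut set: cuda_types::nvml::nvmlEventSet_t = std::ptr::null_mut();
+ //     unsafe {
+ //         nvmlEventSetCreate(&mut set);
+ //         nvmlDeviceRegisterEvents(device, event_mask, set);
+ //         let mut data: cuda_types::nvml::nvmlEventData_t = std::mem::zeroed();
+ //         // block for up to one second; NVML_ERROR_TIMEOUT means nothing arrived
+ //         let _status = nvmlEventSetWait_v2(set, &mut data, 1000);
+ //         nvmlEventSetFree(set);
+ //     }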
+ #[must_use]
+ /** Modify the drain state of a GPU. This method forces a GPU to no longer accept new incoming requests.
+ Any new NVML process will no longer see this GPU. Persistence mode for this GPU must be turned off before
+ this call is made.
+ Must be called as administrator.
+ For Linux only.
+
+ For Pascal &tm; or newer fully supported devices.
+ Some Kepler devices supported.
+
+ @param pciInfo The PCI address of the GPU drain state to be modified
+ @param newState The drain state that should be entered, see \ref nvmlEnableState_t
+
+ @return
+ - \ref NVML_SUCCESS if the drain state has been successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciInfo or \a newState is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_NO_PERMISSION if the calling process has insufficient permissions to perform operation
+ - \ref NVML_ERROR_IN_USE if the device has persistence mode turned on
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceModifyDrainState(
+ pciInfo: *mut cuda_types::nvml::nvmlPciInfo_t,
+ newState: cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Query the drain state of a GPU. This method is used to check whether a GPU is currently in a draining
+ state.
+ For Linux only.
+
+ For Pascal &tm; or newer fully supported devices.
+ Some Kepler devices supported.
+
+ @param pciInfo The PCI address of the GPU drain state to be queried
+ @param currentState The current drain state for this GPU, see \ref nvmlEnableState_t
+
+ @return
+ - \ref NVML_SUCCESS if the drain state has been successfully queried
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciInfo or \a currentState is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceQueryDrainState(
+ pciInfo: *mut cuda_types::nvml::nvmlPciInfo_t,
+ currentState: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** This method will remove the specified GPU from the view of both NVML and the NVIDIA kernel driver
+ as long as no other processes are attached. If other processes are attached, this call will return
+ NVML_ERROR_IN_USE and the GPU will be returned to its original "draining" state. Note: the
+ only situation where a process can still be attached after nvmlDeviceModifyDrainState() is called
+ to initiate the draining state is if that process was using, and is still using, a GPU before the
+ call was made. Also note, persistence mode counts as an attachment to the GPU thus it must be disabled
+ prior to this call.
+
+ For long-running NVML processes please note that this will change the enumeration of current GPUs.
+ For example, if there are four GPUs present and GPU1 is removed, the new enumeration will be 0-2.
+ Also, device handles after the removed GPU will not be valid and must be re-established.
+ Must be run as administrator.
+ For Linux only.
+
+ For Pascal &tm; or newer fully supported devices.
+ Some Kepler devices supported.
+
+ @param pciInfo The PCI address of the GPU to be removed
+ @param gpuState Whether the GPU is to be removed from the OS,
+ see \ref nvmlDetachGpuState_t
+ @param linkState Requested upstream PCIe link state, see \ref nvmlPcieLinkState_t
+
+ @return
+ - \ref NVML_SUCCESS if the GPU was successfully removed
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciInfo is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ - \ref NVML_ERROR_IN_USE if the device is still in use and cannot be removed*/
+ fn nvmlDeviceRemoveGpu_v2(
+ pciInfo: *mut cuda_types::nvml::nvmlPciInfo_t,
+ gpuState: cuda_types::nvml::nvmlDetachGpuState_t,
+ linkState: cuda_types::nvml::nvmlPcieLinkState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Request the OS and the NVIDIA kernel driver to rediscover a portion of the PCI subsystem looking for GPUs that
+ were previously removed. The portion of the PCI tree can be narrowed by specifying a domain, bus, and device.
+ If all are zeroes then the entire PCI tree will be searched. Please note that for long-running NVML processes
+ the enumeration will change based on how many GPUs are discovered and where they are inserted in bus order.
+
+ In addition, all newly discovered GPUs will be initialized and their ECC scrubbed which may take several seconds
+ per GPU. Also, all device handles are no longer guaranteed to be valid post discovery.
+
+ Must be run as administrator.
+ For Linux only.
+
+ For Pascal &tm; or newer fully supported devices.
+ Some Kepler devices supported.
+
+ @param pciInfo The PCI tree to be searched. Only the domain, bus, and device
+ fields are used in this call.
+
+ @return
+ - \ref NVML_SUCCESS if the rediscovery request completed successfully
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciInfo is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if the operating system does not support this feature
+ - \ref NVML_ERROR_OPERATING_SYSTEM if the operating system is denying this feature
+ - \ref NVML_ERROR_NO_PERMISSION if the calling process has insufficient permissions to perform operation
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceDiscoverGpus(
+ pciInfo: *mut cuda_types::nvml::nvmlPciInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Request values for a list of fields for a device. This API allows multiple fields to be queried at once.
+ If any of the underlying fieldIds are populated by the same driver call, the results for those field IDs
+ will be populated from a single call rather than making a driver call for each fieldId.
+
+ @param device The device handle of the GPU to request field values for
+ @param valuesCount Number of entries in values that should be retrieved
+ @param values Array of \a valuesCount structures to hold field values.
+ Each value's fieldId must be populated prior to this call
+
+ @return
+ - \ref NVML_SUCCESS if any values in \a values were populated. Note that you must
+ check the nvmlReturn field of each value for each individual
+ status
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a values is NULL*/
+ fn nvmlDeviceGetFieldValues(
+ device: cuda_types::nvml::nvmlDevice_t,
+ valuesCount: ::core::ffi::c_int,
+ values: *mut cuda_types::nvml::nvmlFieldValue_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
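+ // Illustrative usage sketch (not part of the generated bindings): batch-query two field
+ // IDs in one call. Per the comment above, each entry's fieldId must be filled in before
+ // the call and each entry's nvmlReturn must be checked afterwards. FIELD_ID_A/B stand in
+ // for NVML_FI_* constants, and nvmlFieldValue_t is assumed to be zero-initializable (it
+ // is a plain C struct in the NVML headers):
+ //
+ //     let mut values: [cuda_types::nvml::nvmlFieldValue_t; 2] = unsafe { std::mem::zeroed() };
+ //     values[0].fieldId = FIELD_ID_A;
+ //     values[1].fieldId = FIELD_ID_B;
+ //     let _overall = unsafe {
+ //         nvmlDeviceGetFieldValues(device, values.len() as ::core::ffi::c_int, values.as_mut_ptr())
+ //     };
+ //     // NVML_SUCCESS overall only means at least one entry was populated;
+ //     // inspect values[i].nvmlReturn for each field individually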
+ #[must_use]
+ /** Clear values for a list of fields for a device. This API allows multiple fields to be cleared at once.
+
+ @param device The device handle of the GPU to request field values for
+ @param valuesCount Number of entries in values that should be cleared
+ @param values Array of \a valuesCount structures to hold field values.
+ Each value's fieldId must be populated prior to this call
+
+ @return
+ - \ref NVML_SUCCESS if any values in \a values were cleared. Note that you must
+ check the nvmlReturn field of each value for each individual
+ status
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a values is NULL*/
+ fn nvmlDeviceClearFieldValues(
+ device: cuda_types::nvml::nvmlDevice_t,
+ valuesCount: ::core::ffi::c_int,
+ values: *mut cuda_types::nvml::nvmlFieldValue_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** This method is used to get the virtualization mode corresponding to the GPU.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device Identifier of the target device
+ @param pVirtualMode Reference in which to return the virtualization mode, one of the \ref nvmlGpuVirtualizationMode_t values
+
+ @return
+ - \ref NVML_SUCCESS if \a pVirtualMode is fetched
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pVirtualMode is NULL
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVirtualizationMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pVirtualMode: *mut cuda_types::nvml::nvmlGpuVirtualizationMode_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Queries if SR-IOV host operation is supported on a vGPU supported device.
+
+ Checks whether SR-IOV host capability is supported by the device and the
+ driver, and indicates device is in SR-IOV mode if both of these conditions
+ are true.
+
+ @param device The identifier of the target device
+ @param pHostVgpuMode Reference in which to return the current vGPU mode
+
+ @return
+ - \ref NVML_SUCCESS if device's vGPU mode has been successfully retrieved
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device handle is 0 or \a pHostVgpuMode is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if \a device doesn't support this feature.
+ - \ref NVML_ERROR_UNKNOWN if any unexpected error occurred*/
+ fn nvmlDeviceGetHostVgpuMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pHostVgpuMode: *mut cuda_types::nvml::nvmlHostVgpuMode_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** This method is used to set the virtualization mode corresponding to the GPU.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device Identifier of the target device
+ @param virtualMode Virtualization mode to set, one of the \ref nvmlGpuVirtualizationMode_t values
+
+ @return
+ - \ref NVML_SUCCESS if \a virtualMode is set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a virtualMode is invalid
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_NOT_SUPPORTED if setting of virtualization mode is not supported.
+ - \ref NVML_ERROR_NO_PERMISSION if setting of virtualization mode is not allowed for this client.*/
+ fn nvmlDeviceSetVirtualizationMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ virtualMode: cuda_types::nvml::nvmlGpuVirtualizationMode_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get the vGPU heterogeneous mode for the device.
+
+ When in heterogeneous mode, a GPU can concurrently host time-sliced vGPUs with differing framebuffer sizes.
+
+ On successful return, the function returns \a pHeterogeneousMode->mode with the current vGPU heterogeneous mode.
+ \a pHeterogeneousMode->version is the version number of the structure nvmlVgpuHeterogeneousMode_t, the caller should
+ set the correct version number to retrieve the vGPU heterogeneous mode.
+ \a pHeterogeneousMode->mode can either be \ref NVML_FEATURE_ENABLED or \ref NVML_FEATURE_DISABLED.
+
+ @param device The identifier of the target device
+ @param pHeterogeneousMode Pointer to the caller-provided structure of nvmlVgpuHeterogeneousMode_t
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device is invalid or \a pHeterogeneousMode is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support this feature
+ - \ref NVML_ERROR_VERSION_MISMATCH If the version of \a pHeterogeneousMode is invalid
+ - \ref NVML_ERROR_UNKNOWN On any unexpected error*/
+ fn nvmlDeviceGetVgpuHeterogeneousMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pHeterogeneousMode: *mut cuda_types::nvml::nvmlVgpuHeterogeneousMode_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Enable or disable vGPU heterogeneous mode for the device.
+
+ When in heterogeneous mode, a GPU can concurrently host time-sliced vGPUs with differing framebuffer sizes.
+
+ API would return an appropriate error code upon unsuccessful activation. For example, the heterogeneous mode
+ set will fail with error \ref NVML_ERROR_IN_USE if any vGPU instance is active on the device. The caller of this API
+ is expected to shutdown the vGPU VMs and retry setting the \a mode.
+ On successful return, the function updates the vGPU heterogeneous mode with the user provided \a pHeterogeneousMode->mode.
+ \a pHeterogeneousMode->version is the version number of the structure nvmlVgpuHeterogeneousMode_t, the caller should
+ set the correct version number to set the vGPU heterogeneous mode.
+
+ @param device Identifier of the target device
+ @param pHeterogeneousMode Pointer to the caller-provided structure of nvmlVgpuHeterogeneousMode_t
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device is invalid, \a pHeterogeneousMode is NULL, or \a pHeterogeneousMode->mode is invalid
+ - \ref NVML_ERROR_IN_USE If the \a device is in use
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_NOT_SUPPORTED If MIG is enabled or \a device doesn't support this feature
+ - \ref NVML_ERROR_VERSION_MISMATCH If the version of \a pHeterogeneousMode is invalid
+ - \ref NVML_ERROR_UNKNOWN On any unexpected error*/
+ fn nvmlDeviceSetVgpuHeterogeneousMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pHeterogeneousMode: *const cuda_types::nvml::nvmlVgpuHeterogeneousMode_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Query the placement ID of active vGPU instance.
+
+ When in vGPU heterogeneous mode, this function returns a valid placement ID as \a pPlacement->placementId
+ else NVML_INVALID_VGPU_PLACEMENT_ID is returned.
+ \a pPlacement->version is the version number of the structure nvmlVgpuPlacementId_t, the caller should
+ set the correct version number to get placement id of the vGPU instance \a vgpuInstance.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param pPlacement Pointer to vGPU placement ID structure \a nvmlVgpuPlacementId_t
+
+ @return
+ - \ref NVML_SUCCESS If information is successfully retrieved
+ - \ref NVML_ERROR_NOT_FOUND If \a vgpuInstance does not match a valid active vGPU instance
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a vgpuInstance is invalid or \a pPlacement is NULL
+ - \ref NVML_ERROR_VERSION_MISMATCH If the version of \a pPlacement is invalid
+ - \ref NVML_ERROR_UNKNOWN On any unexpected error*/
+ fn nvmlVgpuInstanceGetPlacementId(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ pPlacement: *mut cuda_types::nvml::nvmlVgpuPlacementId_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Query the supported vGPU placement ID of the vGPU type.
+
+ An array of supported vGPU placement IDs for the vGPU type ID indicated by \a vgpuTypeId is returned in the
+ caller-supplied buffer of \a pPlacementList->placementIds. Memory needed for the placementIds array should be
+ allocated based on maximum instances of a vGPU type which can be queried via \ref nvmlVgpuTypeGetMaxInstances().
+
+ This function will return supported placement IDs even if GPU is not in vGPU heterogeneous mode.
+
+ @param device Identifier of the target device
+ @param vgpuTypeId Handle to vGPU type. The vGPU type ID
+ @param pPlacementList Pointer to the vGPU placement structure \a nvmlVgpuPlacementList_t
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device or \a vgpuTypeId is invalid or \a pPlacementList is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device or \a vgpuTypeId isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_VERSION_MISMATCH If the version of \a pPlacementList is invalid
+ - \ref NVML_ERROR_UNKNOWN On any unexpected error*/
+ fn nvmlDeviceGetVgpuTypeSupportedPlacements(
+ device: cuda_types::nvml::nvmlDevice_t,
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ pPlacementList: *mut cuda_types::nvml::nvmlVgpuPlacementList_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Query the creatable vGPU placement ID of the vGPU type.
+
+ An array of creatable vGPU placement IDs for the vGPU type ID indicated by \a vgpuTypeId is returned in the
+ caller-supplied buffer of \a pPlacementList->placementIds. Memory needed for the placementIds array should be
+ allocated based on maximum instances of a vGPU type which can be queried via \ref nvmlVgpuTypeGetMaxInstances().
+ The creatable vGPU placement IDs may differ over time, as there may be restrictions on what vGPU types
+ can concurrently run on the device.
+
+ The function will return \ref NVML_ERROR_NOT_SUPPORTED if the \a device is not in vGPU heterogeneous mode.
+
+ @param device The identifier of the target device
+ @param vgpuTypeId Handle to vGPU type. The vGPU type ID
+ @param pPlacementList Pointer to the list of vGPU placement structure \a nvmlVgpuPlacementList_t
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device or \a vgpuTypeId is invalid or \a pPlacementList is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device or \a vgpuTypeId isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_VERSION_MISMATCH If the version of \a pPlacementList is invalid
+ - \ref NVML_ERROR_UNKNOWN On any unexpected error*/
+ fn nvmlDeviceGetVgpuTypeCreatablePlacements(
+ device: cuda_types::nvml::nvmlDevice_t,
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ pPlacementList: *mut cuda_types::nvml::nvmlVgpuPlacementList_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the static GSP heap size of the vGPU type in bytes
+
+ @param vgpuTypeId Handle to vGPU type
+ @param gspHeapSize Reference to return the GSP heap size value
+ @return
+ - \ref NVML_SUCCESS Successful completion
+ - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a vgpuTypeId is invalid, or \a gspHeapSize is NULL
+ - \ref NVML_ERROR_UNKNOWN On any unexpected error*/
+ fn nvmlVgpuTypeGetGspHeapSize(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ gspHeapSize: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the static framebuffer reservation of the vGPU type in bytes
+
+ @param vgpuTypeId Handle to vGPU type
+ @param fbReservation Reference to return the framebuffer reservation
+ @return
+ - \ref NVML_SUCCESS Successful completion
+ - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a vgpuTypeId is invalid, or \a fbReservation is NULL
+ - \ref NVML_ERROR_UNKNOWN On any unexpected error*/
+ fn nvmlVgpuTypeGetFbReservation(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ fbReservation: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set the desirable vGPU capability of a device
+
+ Refer to the \a nvmlDeviceVgpuCapability_t structure for the specific capabilities that can be set.
+ See \ref nvmlEnableState_t for available state.
+
+ @param device The identifier of the target device
+ @param capability Specifies the \a nvmlDeviceVgpuCapability_t to be set
+ @param state The target capability mode
+
+ @return
+ - \ref NVML_SUCCESS Successful completion
+ - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device is invalid, or \a capability is invalid, or \a state is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state, or \a device not in vGPU mode
+ - \ref NVML_ERROR_UNKNOWN On any unexpected error*/
+ fn nvmlDeviceSetVgpuCapabilities(
+ device: cuda_types::nvml::nvmlDevice_t,
+ capability: cuda_types::nvml::nvmlDeviceVgpuCapability_t,
+ state: cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the vGPU Software licensable features.
+
+ Identifies whether the system supports vGPU Software Licensing. If it does, return the list of licensable feature(s)
+ and their current license status.
+
+ @param device Identifier of the target device
+ @param pGridLicensableFeatures Pointer to structure in which vGPU software licensable features are returned
+
+ @return
+ - \ref NVML_SUCCESS if licensable features are successfully retrieved
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pGridLicensableFeatures is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetGridLicensableFeatures_v4(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pGridLicensableFeatures: *mut cuda_types::nvml::nvmlGridLicensableFeatures_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the requested vGPU driver capability.
+
+ Refer to the \a nvmlVgpuDriverCapability_t structure for the specific capabilities that can be queried.
+ The return value in \a capResult should be treated as a boolean, with a non-zero value indicating that the capability
+ is supported.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param capability Specifies the \a nvmlVgpuDriverCapability_t to be queried
+ @param capResult A boolean for the queried capability indicating that feature is supported
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a capability is invalid, or \a capResult is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED the API is not supported in the current state or the device is not in vGPU mode
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlGetVgpuDriverCapabilities(
+ capability: cuda_types::nvml::nvmlVgpuDriverCapability_t,
+ capResult: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the requested vGPU capability for GPU.
+
+ Refer to the \a nvmlDeviceVgpuCapability_t structure for the specific capabilities that can be queried.
+ The return value in \a capResult reports a non-zero value indicating that the capability
+ is supported, and also reports the capability's data based on the queried capability.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param capability Specifies the \a nvmlDeviceVgpuCapability_t to be queried
+ @param capResult Returns non-zero if the queried capability is supported, along with the capability's data
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a capability is invalid, or \a capResult is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED the API is not supported in current state or \a device not in vGPU mode
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVgpuCapabilities(
+ device: cuda_types::nvml::nvmlDevice_t,
+ capability: cuda_types::nvml::nvmlDeviceVgpuCapability_t,
+ capResult: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the supported vGPU types on a physical GPU (device).
+
+ An array of supported vGPU types for the physical GPU indicated by \a device is returned in the caller-supplied buffer
+ pointed at by \a vgpuTypeIds. The element count of nvmlVgpuTypeId_t array is passed in \a vgpuCount, and \a vgpuCount
+ is used to return the number of vGPU types written to the buffer.
+
+ If the supplied buffer is not large enough to accommodate the vGPU type array, the function returns
+ NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuTypeId_t array required in \a vgpuCount.
+ To query the number of vGPU types supported for the GPU, call this function with *vgpuCount = 0.
+ The code will return NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU types are supported.
+
+ @param device The identifier of the target device
+ @param vgpuCount Pointer to caller-supplied array size, and returns number of vGPU types
+ @param vgpuTypeIds Pointer to caller-supplied array in which to return list of vGPU types
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE \a vgpuTypeIds buffer is too small, array element count is returned in \a vgpuCount
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuCount is NULL or \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetSupportedVgpus(
+ device: cuda_types::nvml::nvmlDevice_t,
+ vgpuCount: *mut ::core::ffi::c_uint,
+ vgpuTypeIds: *mut cuda_types::nvml::nvmlVgpuTypeId_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
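+ // Illustrative sketch of the two-call size-query pattern described above (not part of
+ // the generated bindings): ask for the required element count with *vgpuCount = 0, then
+ // call again with an adequately sized buffer. The size-query call is assumed to tolerate
+ // a NULL buffer since only *vgpuCount is inspected, and nvmlVgpuTypeId_t is an unsigned
+ // int typedef in the NVML headers:
+ //
+ //     let mut count: ::core::ffi::c_uint = 0;
+ //     unsafe {
+ //         // expect NVML_ERROR_INSUFFICIENT_SIZE here (or NVML_SUCCESS if nothing is supported)
+ //         nvmlDeviceGetSupportedVgpus(device, &mut count, std::ptr::null_mut());
+ //         let mut ids = vec![0 as cuda_types::nvml::nvmlVgpuTypeId_t; count as usize];
+ //         nvmlDeviceGetSupportedVgpus(device, &mut count, ids.as_mut_ptr());
+ //         ids.truncate(count as usize);
+ //     }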
+ #[must_use]
+ /** Retrieve the currently creatable vGPU types on a physical GPU (device).
+
+ An array of creatable vGPU types for the physical GPU indicated by \a device is returned in the caller-supplied buffer
+ pointed at by \a vgpuTypeIds. The element count of nvmlVgpuTypeId_t array is passed in \a vgpuCount, and \a vgpuCount
+ is used to return the number of vGPU types written to the buffer.
+
+ The creatable vGPU types for a device may differ over time, as there may be restrictions on what type of vGPU types
+ can concurrently run on a device. For example, if only one vGPU type is allowed at a time on a device, then the creatable
+ list will be restricted to whatever vGPU type is already running on the device.
+
+ If the supplied buffer is not large enough to accommodate the vGPU type array, the function returns
+ NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuTypeId_t array required in \a vgpuCount.
+ To query the number of vGPU types that can be created for the GPU, call this function with *vgpuCount = 0.
+ The code will return NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU types are creatable.
+
+ @param device The identifier of the target device
+ @param vgpuCount Pointer to caller-supplied array size, and returns number of vGPU types
+ @param vgpuTypeIds Pointer to caller-supplied array in which to return list of vGPU types
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE \a vgpuTypeIds buffer is too small, array element count is returned in \a vgpuCount
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuCount is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetCreatableVgpus(
+ device: cuda_types::nvml::nvmlDevice_t,
+ vgpuCount: *mut ::core::ffi::c_uint,
+ vgpuTypeIds: *mut cuda_types::nvml::nvmlVgpuTypeId_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the class of a vGPU type. It will not exceed 64 characters in length (including the NUL terminator).
+ See \ref nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param vgpuTypeClass Pointer to string array to return class in
+ @param size Size of string
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuTypeClass is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetClass(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ vgpuTypeClass: *mut ::core::ffi::c_char,
+ size: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the vGPU type name.
+
+ The name is an alphanumeric string that denotes a particular vGPU, e.g. GRID M60-2Q. It will not
+ exceed 64 characters in length (including the NUL terminator). See \ref
+ nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param vgpuTypeName Pointer to buffer to return name
+ @param size Size of buffer
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuTypeName is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetName(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ vgpuTypeName: *mut ::core::ffi::c_char,
+ size: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
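A fixed-size string getter such as nvmlVgpuTypeGetName can be wrapped as below. Illustrative sketch only: the 64-byte bound mirrors the NVML_DEVICE_NAME_BUFFER_SIZE constant referenced above and is an assumption here, as is the helper name.

    use std::ffi::CStr;
    use cuda_types::nvml::{nvmlReturn_t, nvmlVgpuTypeId_t};

    const NAME_BUF_LEN: usize = 64; // assumed: NVML_DEVICE_NAME_BUFFER_SIZE

    fn vgpu_type_name(type_id: nvmlVgpuTypeId_t) -> (nvmlReturn_t, String) {
        let mut buf = [0 as ::core::ffi::c_char; NAME_BUF_LEN];
        let mut size = NAME_BUF_LEN as ::core::ffi::c_uint;
        let ret = unsafe { nvmlVgpuTypeGetName(type_id, buf.as_mut_ptr(), &mut size) };
        // NVML writes a NUL-terminated string into the buffer.
        let name = unsafe { CStr::from_ptr(buf.as_ptr()) }.to_string_lossy().into_owned();
        (ret, name)
    }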
+ #[must_use]
+ /** Retrieve the GPU Instance Profile ID for the given vGPU type ID.
+ The API will return a valid GPU Instance Profile ID for the MIG capable vGPU types, else INVALID_GPU_INSTANCE_PROFILE_ID is
+ returned.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param gpuInstanceProfileId GPU Instance Profile ID
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_NOT_SUPPORTED if \a device is not in vGPU Host virtualization mode
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a gpuInstanceProfileId is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetGpuInstanceProfileId(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ gpuInstanceProfileId: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the device ID of a vGPU type.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param deviceID Device ID and vendor ID of the device contained in single 32 bit value
+ @param subsystemID Subsystem ID and subsystem vendor ID of the device contained in single 32 bit value
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a deviceId or \a subsystemID are NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetDeviceID(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ deviceID: *mut ::core::ffi::c_ulonglong,
+ subsystemID: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the vGPU framebuffer size in bytes.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param fbSize Pointer to framebuffer size in bytes
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a fbSize is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetFramebufferSize(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ fbSize: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve count of vGPU's supported display heads.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param numDisplayHeads Pointer to number of display heads
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a numDisplayHeads is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetNumDisplayHeads(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ numDisplayHeads: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve vGPU display head's maximum supported resolution.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param displayIndex Zero-based index of display head
+ @param xdim Pointer to maximum number of pixels in X dimension
+ @param ydim Pointer to maximum number of pixels in Y dimension
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a xdim or \a ydim are NULL, or \a displayIndex
+ is out of range.
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetResolution(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ displayIndex: ::core::ffi::c_uint,
+ xdim: *mut ::core::ffi::c_uint,
+ ydim: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve license requirements for a vGPU type
+
+ The license type and version required to run the specified vGPU type is returned as an alphanumeric string, in the form
+ "<license name>,<version>", for example "GRID-Virtual-PC,2.0". If a vGPU is runnable with* more than one type of license,
+ the licenses are delimited by a semicolon, for example "GRID-Virtual-PC,2.0;GRID-Virtual-WS,2.0;GRID-Virtual-WS-Ext,2.0".
+
+ The total length of the returned string will not exceed 128 characters, including the NUL terminator.
+ See \ref nvmlVgpuConstants::NVML_GRID_LICENSE_BUFFER_SIZE.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param vgpuTypeLicenseString Pointer to buffer to return license info
+ @param size Size of \a vgpuTypeLicenseString buffer
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuTypeLicenseString is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetLicense(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ vgpuTypeLicenseString: *mut ::core::ffi::c_char,
+ size: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
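The semicolon-delimited "<license name>,<version>" format described above can be split into pairs once the string has been fetched. A sketch: the 128-byte bound mirrors the NVML_GRID_LICENSE_BUFFER_SIZE limit quoted above and is assumed here, as is the helper name; error handling is omitted.

    use std::ffi::CStr;
    use cuda_types::nvml::nvmlVgpuTypeId_t;

    fn vgpu_type_licenses(type_id: nvmlVgpuTypeId_t) -> Vec<(String, String)> {
        const LICENSE_BUF_LEN: usize = 128; // assumed: NVML_GRID_LICENSE_BUFFER_SIZE
        let mut buf = [0 as ::core::ffi::c_char; LICENSE_BUF_LEN];
        let _ = unsafe {
            nvmlVgpuTypeGetLicense(type_id, buf.as_mut_ptr(), LICENSE_BUF_LEN as ::core::ffi::c_uint)
        };
        let joined = unsafe { CStr::from_ptr(buf.as_ptr()) }.to_string_lossy().into_owned();
        // "GRID-Virtual-PC,2.0;GRID-Virtual-WS,2.0" -> [("GRID-Virtual-PC", "2.0"), ...]
        joined
            .split(';')
            .filter_map(|entry| entry.split_once(','))
            .map(|(name, version)| (name.to_string(), version.to_string()))
            .collect()
    }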
+ #[must_use]
+ /** Retrieve the static frame rate limit value of the vGPU type
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param frameRateLimit Reference to return the frame rate limit value
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_NOT_SUPPORTED if frame rate limiter is turned off for the vGPU type
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a frameRateLimit is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetFrameRateLimit(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ frameRateLimit: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the maximum number of vGPU instances creatable on a device for given vGPU type
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param vgpuTypeId Handle to vGPU type
+ @param vgpuInstanceCount Pointer to get the max number of vGPU instances
+                                             that can be created on a device for given vgpuTypeId
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid or is not supported on target device,
+ or \a vgpuInstanceCount is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetMaxInstances(
+ device: cuda_types::nvml::nvmlDevice_t,
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ vgpuInstanceCount: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the maximum number of vGPU instances supported per VM for given vGPU type
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param vgpuInstanceCountPerVm Pointer to get the max number of vGPU instances supported per VM for given \a vgpuTypeId
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuInstanceCountPerVm is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetMaxInstancesPerVm(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ vgpuInstanceCountPerVm: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the active vGPU instances on a device.
+
+ An array of active vGPU instances is returned in the caller-supplied buffer pointed at by \a vgpuInstances. The
+ array element count is passed in \a vgpuCount, and \a vgpuCount is used to return the number of vGPU instances
+ written to the buffer.
+
+ If the supplied buffer is not large enough to accommodate the vGPU instance array, the function returns
+ NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuInstance_t array required in \a vgpuCount.
+ To query the number of active vGPU instances, call this function with *vgpuCount = 0. The code will return
+ NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU instances are currently active.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param vgpuCount Pointer which passes in the array size as well as get
+ back the number of types
+ @param vgpuInstances Pointer to array in which to return list of vGPU instances
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a vgpuCount is NULL
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a vgpuCount is too small
+ - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetActiveVgpus(
+ device: cuda_types::nvml::nvmlDevice_t,
+ vgpuCount: *mut ::core::ffi::c_uint,
+ vgpuInstances: *mut cuda_types::nvml::nvmlVgpuInstance_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
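Active-instance enumeration follows the same two-call pattern; a sketch under the same assumptions (names hypothetical, nvmlReturn_t checking left to the caller):

    use cuda_types::nvml::{nvmlDevice_t, nvmlReturn_t, nvmlVgpuInstance_t};

    fn active_vgpu_instances(device: nvmlDevice_t) -> (nvmlReturn_t, Vec<nvmlVgpuInstance_t>) {
        let mut count: ::core::ffi::c_uint = 0;
        // Size query: with *vgpuCount == 0, the number of active instances is reported back.
        let _ = unsafe { nvmlDeviceGetActiveVgpus(device, &mut count, std::ptr::null_mut()) };
        let mut instances: Vec<nvmlVgpuInstance_t> = Vec::with_capacity(count as usize);
        let ret = unsafe { nvmlDeviceGetActiveVgpus(device, &mut count, instances.as_mut_ptr()) };
        unsafe { instances.set_len(count as usize) };
        (ret, instances)
    }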
+ #[must_use]
+ /** Retrieve the VM ID associated with a vGPU instance.
+
+ The VM ID is returned as a string, not exceeding 80 characters in length (including the NUL terminator).
+ See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE.
+
+ The format of the VM ID varies by platform, and is indicated by the type identifier returned in \a vmIdType.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param vmId Pointer to caller-supplied buffer to hold VM ID
+ @param size Size of buffer in bytes
+ @param vmIdType Pointer to hold VM ID type
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vmId or \a vmIdType is NULL, or \a vgpuInstance is 0
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetVmID(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ vmId: *mut ::core::ffi::c_char,
+ size: ::core::ffi::c_uint,
+ vmIdType: *mut cuda_types::nvml::nvmlVgpuVmIdType_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the UUID of a vGPU instance.
+
+ The UUID is a globally unique identifier associated with the vGPU, and is returned as a 5-part hexadecimal string,
+ not exceeding 80 characters in length (including the NULL terminator).
+ See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param uuid Pointer to caller-supplied buffer to hold vGPU UUID
+ @param size Size of buffer in bytes
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a uuid is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetUUID(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ uuid: *mut ::core::ffi::c_char,
+ size: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the NVIDIA driver version installed in the VM associated with a vGPU.
+
+ The version is returned as an alphanumeric string in the caller-supplied buffer \a version. The length of the version
+ string will not exceed 80 characters in length (including the NUL terminator).
+ See \ref nvmlConstants::NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE.
+
+ nvmlVgpuInstanceGetVmDriverVersion() may be called at any time for a vGPU instance. The guest VM driver version is
+ returned as "Not Available" if no NVIDIA driver is installed in the VM, or the VM has not yet booted to the point where the
+ NVIDIA driver is loaded and initialized.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param version Caller-supplied buffer to return driver version string
+ @param length Size of \a version buffer
+
+ @return
+ - \ref NVML_SUCCESS if \a version has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetVmDriverVersion(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ version: *mut ::core::ffi::c_char,
+ length: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the framebuffer usage in bytes.
+
+ Framebuffer usage is the amount of vGPU framebuffer memory that is currently in use by the VM.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuInstance The identifier of the target instance
+ @param fbUsage Pointer to framebuffer usage in bytes
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a fbUsage is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetFbUsage(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ fbUsage: *mut ::core::ffi::c_ulonglong,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** @deprecated Use \ref nvmlVgpuInstanceGetLicenseInfo_v2.
+
+ Retrieve the current licensing state of the vGPU instance.
+
+ If the vGPU is currently licensed, \a licensed is set to 1, otherwise it is set to 0.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param licensed Reference to return the licensing status
+
+ @return
+ - \ref NVML_SUCCESS if \a licensed has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a licensed is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetLicenseStatus(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ licensed: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the vGPU type of a vGPU instance.
+
+ Returns the vGPU type ID of vgpu assigned to the vGPU instance.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param vgpuTypeId Reference to return the vgpuTypeId
+
+ @return
+ - \ref NVML_SUCCESS if \a vgpuTypeId has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a vgpuTypeId is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetType(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ vgpuTypeId: *mut cuda_types::nvml::nvmlVgpuTypeId_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the frame rate limit set for the vGPU instance.
+
+ Returns the value of the frame rate limit set for the vGPU instance
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param frameRateLimit Reference to return the frame rate limit
+
+ @return
+ - \ref NVML_SUCCESS if \a frameRateLimit has been set
+ - \ref NVML_ERROR_NOT_SUPPORTED if frame rate limiter is turned off for the vGPU type
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a frameRateLimit is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetFrameRateLimit(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ frameRateLimit: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the current ECC mode of vGPU instance.
+
+ @param vgpuInstance The identifier of the target vGPU instance
+ @param eccMode Reference in which to return the current ECC mode
+
+ @return
+ - \ref NVML_SUCCESS if the vgpuInstance's ECC mode has been successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a eccMode is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetEccMode(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ eccMode: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the encoder capacity of a vGPU instance, as a percentage of maximum encoder capacity with valid values in the range 0-100.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param encoderCapacity Reference to an unsigned int for the encoder capacity
+
+ @return
+ - \ref NVML_SUCCESS if \a encoderCapacity has been retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a encoderCapacity is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetEncoderCapacity(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ encoderCapacity: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set the encoder capacity of a vGPU instance, as a percentage of maximum encoder capacity with valid values in the range 0-100.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param encoderCapacity Unsigned int for the encoder capacity value
+
+ @return
+ - \ref NVML_SUCCESS if \a encoderCapacity has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a encoderCapacity is out of range of 0-100.
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceSetEncoderCapacity(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ encoderCapacity: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the current encoder statistics of a vGPU Instance
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param sessionCount Reference to an unsigned int for count of active encoder sessions
+ @param averageFps Reference to an unsigned int for trailing average FPS of all active sessions
+ @param averageLatency Reference to an unsigned int for encode latency in microseconds
+
+ @return
+ - \ref NVML_SUCCESS if \a sessionCount, \a averageFps and \a averageLatency is fetched
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount, \a averageFps or \a averageLatency is NULL,
+ or \a vgpuInstance is 0.
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetEncoderStats(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ sessionCount: *mut ::core::ffi::c_uint,
+ averageFps: *mut ::core::ffi::c_uint,
+ averageLatency: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
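For contrast with the buffer-sized queries, this one is a plain out-parameter call; illustrative only, with the helper name assumed:

    use cuda_types::nvml::{nvmlReturn_t, nvmlVgpuInstance_t};

    fn vgpu_encoder_stats(
        vgpu: nvmlVgpuInstance_t,
    ) -> (nvmlReturn_t, ::core::ffi::c_uint, ::core::ffi::c_uint, ::core::ffi::c_uint) {
        let mut sessions: ::core::ffi::c_uint = 0;
        let mut avg_fps: ::core::ffi::c_uint = 0;
        let mut avg_latency_us: ::core::ffi::c_uint = 0; // encode latency in microseconds
        let ret = unsafe {
            nvmlVgpuInstanceGetEncoderStats(vgpu, &mut sessions, &mut avg_fps, &mut avg_latency_us)
        };
        (ret, sessions, avg_fps, avg_latency_us)
    }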
+ #[must_use]
+ /** Retrieves information about all active encoder sessions on a vGPU Instance.
+
+ An array of active encoder sessions is returned in the caller-supplied buffer pointed at by \a sessionInfo. The
+ array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions
+ written to the buffer.
+
+ If the supplied buffer is not large enough to accommodate the active session array, the function returns
+ NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlEncoderSessionInfo_t array required in \a sessionCount.
+ To query the number of active encoder sessions, call this function with *sessionCount = 0. The code will return
+ NVML_SUCCESS with number of active encoder sessions updated in *sessionCount.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param sessionCount Reference to caller supplied array size, and returns
+ the number of sessions.
+ @param sessionInfo Reference to caller supplied array in which the list
+                                       of session information is returned.
+
+ @return
+ - \ref NVML_SUCCESS if \a sessionInfo is fetched
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is
+returned in \a sessionCount
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL, or \a vgpuInstance is 0.
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetEncoderSessions(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ sessionCount: *mut ::core::ffi::c_uint,
+ sessionInfo: *mut cuda_types::nvml::nvmlEncoderSessionInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the active frame buffer capture sessions statistics of a vGPU Instance
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param fbcStats Reference to nvmlFBCStats_t structure containing NvFBC stats
+
+ @return
+ - \ref NVML_SUCCESS if \a fbcStats is fetched
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a fbcStats is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetFBCStats(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ fbcStats: *mut cuda_types::nvml::nvmlFBCStats_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves information about active frame buffer capture sessions on a vGPU Instance.
+
+ An array of active FBC sessions is returned in the caller-supplied buffer pointed at by \a sessionInfo. The
+ array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions
+ written to the buffer.
+
+ If the supplied buffer is not large enough to accommodate the active session array, the function returns
+ NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlFBCSessionInfo_t array required in \a sessionCount.
+ To query the number of active FBC sessions, call this function with *sessionCount = 0. The code will return
+ NVML_SUCCESS with number of active FBC sessions updated in *sessionCount.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @note hResolution, vResolution, averageFPS and averageLatency data for a FBC session returned in \a sessionInfo may
+ be zero if there are no new frames captured since the session started.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param sessionCount Reference to caller supplied array size, and returns the number of sessions.
+ @param sessionInfo Reference in which to return the session information
+
+ @return
+ - \ref NVML_SUCCESS if \a sessionInfo is fetched
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a sessionCount is NULL.
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is returned in \a sessionCount
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetFBCSessions(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ sessionCount: *mut ::core::ffi::c_uint,
+ sessionInfo: *mut cuda_types::nvml::nvmlFBCSessionInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the GPU Instance ID for the given vGPU Instance.
+ The API will return a valid GPU Instance ID for MIG backed vGPU Instance, else INVALID_GPU_INSTANCE_ID is returned.
+
+ For Kepler &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param gpuInstanceId GPU Instance ID
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a gpuInstanceId is NULL.
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetGpuInstanceId(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ gpuInstanceId: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+    /** Retrieves the PCI Id of the given vGPU Instance, i.e. the PCI Id of the GPU as seen inside the VM.
+
+ The vGPU PCI id is returned as "00000000:00:00.0" if the NVIDIA driver is not installed on the vGPU instance.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param vgpuPciId Caller-supplied buffer to return vGPU PCI Id string
+ @param length Size of the vgpuPciId buffer
+
+ @return
+ - \ref NVML_SUCCESS if vGPU PCI Id is successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a vgpuPciId is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running on the vGPU instance
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small, \a length is set to required length
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetGpuPciId(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ vgpuPciId: *mut ::core::ffi::c_char,
+ length: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
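Because \a length is an in/out pointer here, an undersized buffer can be retried once with the length the driver reports. Sketch only: the helper name and the initial 32-byte guess are assumptions, and the retry condition stands in for an explicit NVML_ERROR_INSUFFICIENT_SIZE check.

    use std::ffi::CStr;
    use cuda_types::nvml::{nvmlReturn_t, nvmlVgpuInstance_t};

    fn vgpu_pci_id(vgpu: nvmlVgpuInstance_t) -> (nvmlReturn_t, String) {
        let mut length: ::core::ffi::c_uint = 32; // initial guess
        let mut buf = vec![0 as ::core::ffi::c_char; length as usize];
        let mut ret = unsafe { nvmlVgpuInstanceGetGpuPciId(vgpu, buf.as_mut_ptr(), &mut length) };
        if length as usize > buf.len() {
            // The driver wrote the required length back; retry with an exactly sized buffer.
            buf = vec![0 as ::core::ffi::c_char; length as usize];
            ret = unsafe { nvmlVgpuInstanceGetGpuPciId(vgpu, buf.as_mut_ptr(), &mut length) };
        }
        let pci_id = unsafe { CStr::from_ptr(buf.as_ptr()) }.to_string_lossy().into_owned();
        (ret, pci_id)
    }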
+ #[must_use]
+ /** Retrieve the requested capability for a given vGPU type. Refer to the \a nvmlVgpuCapability_t structure
+ for the specific capabilities that can be queried. The return value in \a capResult should be treated as
+ a boolean, with a non-zero value indicating that the capability is supported.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param vgpuTypeId Handle to vGPU type
+ @param capability Specifies the \a nvmlVgpuCapability_t to be queried
+ @param capResult A boolean for the queried capability indicating that feature is supported
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a capability is invalid, or \a capResult is NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuTypeGetCapabilities(
+ vgpuTypeId: cuda_types::nvml::nvmlVgpuTypeId_t,
+ capability: cuda_types::nvml::nvmlVgpuCapability_t,
+ capResult: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieve the MDEV UUID of a vGPU instance.
+
+ The MDEV UUID is a globally unique identifier of the mdev device assigned to the VM, and is returned as a 5-part hexadecimal string,
+ not exceeding 80 characters in length (including the NULL terminator).
+ MDEV UUID is displayed only on KVM platform.
+ See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param mdevUuid Pointer to caller-supplied buffer to hold MDEV UUID
+ @param size Size of buffer in bytes
+
+ @return
+ - \ref NVML_SUCCESS successful completion
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_NOT_SUPPORTED on any hypervisor other than KVM
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a mdevUuid is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetMdevUUID(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ mdevUuid: *mut ::core::ffi::c_char,
+ size: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Returns vGPU metadata structure for a running vGPU. The structure contains information about the vGPU and its associated VM
+ such as the currently installed NVIDIA guest driver version, together with host driver version and an opaque data section
+ containing internal state.
+
+ nvmlVgpuInstanceGetMetadata() may be called at any time for a vGPU instance. Some fields in the returned structure are
+ dependent on information obtained from the guest VM, which may not yet have reached a state where that information
+ is available. The current state of these dependent fields is reflected in the info structure's \ref nvmlVgpuGuestInfoState_t field.
+
+ The VMM may choose to read and save the vGPU's VM info as persistent metadata associated with the VM, and provide
+ it to Virtual GPU Manager when creating a vGPU for subsequent instances of the VM.
+
+ The caller passes in a buffer via \a vgpuMetadata, with the size of the buffer in \a bufferSize. If the vGPU Metadata structure
+ is too large to fit in the supplied buffer, the function returns NVML_ERROR_INSUFFICIENT_SIZE with the size needed
+ in \a bufferSize.
+
+ @param vgpuInstance vGPU instance handle
+ @param vgpuMetadata Pointer to caller-supplied buffer into which vGPU metadata is written
+ @param bufferSize Size of vgpuMetadata buffer
+
+ @return
+ - \ref NVML_SUCCESS vGPU metadata structure was successfully returned
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE vgpuMetadata buffer is too small, required size is returned in \a bufferSize
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a bufferSize is NULL or \a vgpuInstance is 0; if \a vgpuMetadata is NULL and the value of \a bufferSize is not 0.
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetMetadata(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ vgpuMetadata: *mut cuda_types::nvml::nvmlVgpuMetadata_t,
+ bufferSize: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
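Because the metadata structure is variable-sized, it is typically fetched into a byte buffer sized by the query described above; a sketch under the same assumptions (helper name hypothetical, return-code checking left to the caller):

    use cuda_types::nvml::{nvmlReturn_t, nvmlVgpuInstance_t, nvmlVgpuMetadata_t};

    fn vgpu_metadata_blob(vgpu: nvmlVgpuInstance_t) -> (nvmlReturn_t, Vec<u8>) {
        let mut size: ::core::ffi::c_uint = 0;
        // Size query: NULL buffer with *bufferSize == 0 reports the required byte count.
        let _ = unsafe { nvmlVgpuInstanceGetMetadata(vgpu, std::ptr::null_mut(), &mut size) };
        let mut blob = vec![0u8; size as usize];
        let ret = unsafe {
            nvmlVgpuInstanceGetMetadata(vgpu, blob.as_mut_ptr() as *mut nvmlVgpuMetadata_t, &mut size)
        };
        blob.truncate(size as usize);
        (ret, blob)
    }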
+ #[must_use]
+ /** Returns a vGPU metadata structure for the physical GPU indicated by \a device. The structure contains information about
+ the GPU and the currently installed NVIDIA host driver version that's controlling it, together with an opaque data section
+ containing internal state.
+
+ The caller passes in a buffer via \a pgpuMetadata, with the size of the buffer in \a bufferSize. If the \a pgpuMetadata
+ structure is too large to fit in the supplied buffer, the function returns NVML_ERROR_INSUFFICIENT_SIZE with the size needed
+ in \a bufferSize.
+
+ @param device The identifier of the target device
+ @param pgpuMetadata Pointer to caller-supplied buffer into which \a pgpuMetadata is written
+ @param bufferSize Pointer to size of \a pgpuMetadata buffer
+
+ @return
+ - \ref NVML_SUCCESS GPU metadata structure was successfully returned
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE pgpuMetadata buffer is too small, required size is returned in \a bufferSize
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a bufferSize is NULL or \a device is invalid; if \a pgpuMetadata is NULL and the value of \a bufferSize is not 0.
+ - \ref NVML_ERROR_NOT_SUPPORTED vGPU is not supported by the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVgpuMetadata(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pgpuMetadata: *mut cuda_types::nvml::nvmlVgpuPgpuMetadata_t,
+ bufferSize: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Takes a vGPU instance metadata structure read from \ref nvmlVgpuInstanceGetMetadata(), and a vGPU metadata structure for a
+ physical GPU read from \ref nvmlDeviceGetVgpuMetadata(), and returns compatibility information of the vGPU instance and the
+ physical GPU.
+
+ The caller passes in a buffer via \a compatibilityInfo, into which a compatibility information structure is written. The
+ structure defines the states in which the vGPU / VM may be booted on the physical GPU. If the vGPU / VM compatibility
+ with the physical GPU is limited, a limit code indicates the factor limiting compatibility.
+ (see \ref nvmlVgpuPgpuCompatibilityLimitCode_t for details).
+
+ Note: vGPU compatibility does not take into account dynamic capacity conditions that may limit a system's ability to
+ boot a given vGPU or associated VM.
+
+ @param vgpuMetadata Pointer to caller-supplied vGPU metadata structure
+ @param pgpuMetadata Pointer to caller-supplied GPU metadata structure
+ @param compatibilityInfo Pointer to caller-supplied buffer to hold compatibility info
+
+ @return
+ - \ref NVML_SUCCESS vGPU metadata structure was successfully returned
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuMetadata or \a pgpuMetadata or \a bufferSize are NULL
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlGetVgpuCompatibility(
+ vgpuMetadata: *mut cuda_types::nvml::nvmlVgpuMetadata_t,
+ pgpuMetadata: *mut cuda_types::nvml::nvmlVgpuPgpuMetadata_t,
+ compatibilityInfo: *mut cuda_types::nvml::nvmlVgpuPgpuCompatibility_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
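Given metadata blobs previously fetched via nvmlVgpuInstanceGetMetadata and nvmlDeviceGetVgpuMetadata (each through its own size query), the compatibility check itself is a single call. Illustrative sketch: it assumes zero-initialisation of nvmlVgpuPgpuCompatibility_t is an acceptable starting value, and the helper name is hypothetical.

    use cuda_types::nvml::{
        nvmlReturn_t, nvmlVgpuMetadata_t, nvmlVgpuPgpuCompatibility_t, nvmlVgpuPgpuMetadata_t,
    };

    fn check_vgpu_compatibility(
        vgpu_blob: &mut [u8], // bytes returned by nvmlVgpuInstanceGetMetadata
        pgpu_blob: &mut [u8], // bytes returned by nvmlDeviceGetVgpuMetadata
    ) -> (nvmlReturn_t, nvmlVgpuPgpuCompatibility_t) {
        let mut compat: nvmlVgpuPgpuCompatibility_t = unsafe { std::mem::zeroed() };
        let ret = unsafe {
            nvmlGetVgpuCompatibility(
                vgpu_blob.as_mut_ptr() as *mut nvmlVgpuMetadata_t,
                pgpu_blob.as_mut_ptr() as *mut nvmlVgpuPgpuMetadata_t,
                &mut compat,
            )
        };
        (ret, compat)
    }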
+ #[must_use]
+ /** Returns the properties of the physical GPU indicated by the device in an ascii-encoded string format.
+
+ The caller passes in a buffer via \a pgpuMetadata, with the size of the buffer in \a bufferSize. If the
+ string is too large to fit in the supplied buffer, the function returns NVML_ERROR_INSUFFICIENT_SIZE with the size needed
+ in \a bufferSize.
+
+ @param device The identifier of the target device
+ @param pgpuMetadata Pointer to caller-supplied buffer into which \a pgpuMetadata is written
+ @param bufferSize Pointer to size of \a pgpuMetadata buffer
+
+ @return
+ - \ref NVML_SUCCESS GPU metadata structure was successfully returned
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE \a pgpuMetadata buffer is too small, required size is returned in \a bufferSize
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a bufferSize is NULL or \a device is invalid; if \a pgpuMetadata is NULL and the value of \a bufferSize is not 0.
+ - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the system
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetPgpuMetadataString(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pgpuMetadata: *mut ::core::ffi::c_char,
+ bufferSize: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Returns the vGPU Software scheduler logs.
+ \a pSchedulerLog points to a caller-allocated structure to contain the logs. The number of elements returned will
+ never exceed \a NVML_SCHEDULER_SW_MAX_LOG_ENTRIES.
+
+ To get the complete logs, call the function at least 5 times a second.
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target \a device
+ @param pSchedulerLog Reference in which \a pSchedulerLog is written
+
+ @return
+ - \ref NVML_SUCCESS vGPU scheduler logs were successfully obtained
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pSchedulerLog is NULL or \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state or \a device not in vGPU host mode
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVgpuSchedulerLog(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pSchedulerLog: *mut cuda_types::nvml::nvmlVgpuSchedulerLog_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Returns the vGPU scheduler state.
+ The information returned in \a nvmlVgpuSchedulerGetState_t is not relevant if the BEST EFFORT policy is set.
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target \a device
+ @param pSchedulerState Reference in which \a pSchedulerState is returned
+
+ @return
+ - \ref NVML_SUCCESS vGPU scheduler state is successfully obtained
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pSchedulerState is NULL or \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state or \a device not in vGPU host mode
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVgpuSchedulerState(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pSchedulerState: *mut cuda_types::nvml::nvmlVgpuSchedulerGetState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Returns the vGPU scheduler capabilities.
+ The list of supported vGPU schedulers returned in \a nvmlVgpuSchedulerCapabilities_t is from
+ the NVML_VGPU_SCHEDULER_POLICY_*. This list enumerates the supported scheduler policies
+ if the engine is Graphics type.
+ The other values in \a nvmlVgpuSchedulerCapabilities_t are also applicable if the engine is
+ Graphics type. For other engine types, it is BEST EFFORT policy.
+ If ARR is supported and enabled, scheduling frequency and averaging factor are applicable;
+ otherwise, timeSlice is applicable.
+
+ For Pascal &tm; or newer fully supported devices.
+
+ @param device The identifier of the target \a device
+ @param pCapabilities Reference in which \a pCapabilities is written
+
+ @return
+ - \ref NVML_SUCCESS vGPU scheduler capabilities were successfully obtained
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pCapabilities is NULL or \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state or \a device not in vGPU host mode
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVgpuSchedulerCapabilities(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pCapabilities: *mut cuda_types::nvml::nvmlVgpuSchedulerCapabilities_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Sets the vGPU scheduler state.
+
+ For Pascal &tm; or newer fully supported devices.
+
+ The scheduler state change won't persist across module load/unload.
+ Scheduler state and params will be allowed to set only when no VM is running.
+ In \a nvmlVgpuSchedulerSetState_t, if enableARRMode is enabled, then
+ provide avgFactorForARR and frequency as input; if enableARRMode is disabled,
+ then provide timeslice as input.
+
+ @param device The identifier of the target \a device
+ @param pSchedulerState vGPU \a pSchedulerState to set
+
+ @return
+ - \ref NVML_SUCCESS vGPU scheduler state has been successfully set
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a pSchedulerState is NULL or \a device is invalid
+ - \ref NVML_ERROR_RESET_REQUIRED if setting \a pSchedulerState failed with a fatal error;
+                                     a reboot is required to recover from this error.
+ - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state or \a device not in vGPU host mode
+ or if any vGPU instance currently exists on the \a device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceSetVgpuSchedulerState(
+ device: cuda_types::nvml::nvmlDevice_t,
+ pSchedulerState: *mut cuda_types::nvml::nvmlVgpuSchedulerSetState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Query the ranges of supported vGPU versions.
+
+ This function gets the linear range of supported vGPU versions that is preset for the NVIDIA vGPU Manager and the range set by an administrator.
+ If the preset range has not been overridden by \ref nvmlSetVgpuVersion, both ranges are the same.
+
+ The caller passes pointers to the following \ref nvmlVgpuVersion_t structures, into which the NVIDIA vGPU Manager writes the ranges:
+ 1. \a supported structure that represents the preset range of vGPU versions supported by the NVIDIA vGPU Manager.
+ 2. \a current structure that represents the range of supported vGPU versions set by an administrator. By default, this range is the same as the preset range.
+
+ @param supported Pointer to the structure in which the preset range of vGPU versions supported by the NVIDIA vGPU Manager is written
+ @param current Pointer to the structure in which the range of supported vGPU versions set by an administrator is written
+
+ @return
+ - \ref NVML_SUCCESS The vGPU version range structures were successfully obtained.
+ - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported.
+ - \ref NVML_ERROR_INVALID_ARGUMENT The \a supported parameter or the \a current parameter is NULL.
+ - \ref NVML_ERROR_UNKNOWN An error occurred while the data was being fetched.*/
+ fn nvmlGetVgpuVersion(
+ supported: *mut cuda_types::nvml::nvmlVgpuVersion_t,
+ current: *mut cuda_types::nvml::nvmlVgpuVersion_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Override the preset range of vGPU versions supported by the NVIDIA vGPU Manager with a range set by an administrator.
+
+ This function configures the NVIDIA vGPU Manager with a range of supported vGPU versions set by an administrator. This range must be a subset of the
+ preset range that the NVIDIA vGPU Manager supports. The custom range set by an administrator takes precedence over the preset range and is advertised to
+ the guest VM for negotiating the vGPU version. See \ref nvmlGetVgpuVersion for details of how to query the preset range of versions supported.
+
+ This function takes a pointer to vGPU version range structure \ref nvmlVgpuVersion_t as input to override the preset vGPU version range that the NVIDIA vGPU Manager supports.
+
+ After host system reboot or driver reload, the range of supported versions reverts to the range that is preset for the NVIDIA vGPU Manager.
+
+ @note 1. The range set by the administrator must be a subset of the preset range that the NVIDIA vGPU Manager supports. Otherwise, an error is returned.
+ 2. If the range of supported guest driver versions does not overlap the range set by the administrator, the guest driver fails to load.
+ 3. If the range of supported guest driver versions overlaps the range set by the administrator, the guest driver will load with a negotiated
+ vGPU version that is the maximum value in the overlapping range.
+ 4. No VMs must be running on the host when this function is called. If a VM is running on the host, the call to this function fails.
+
+ @param vgpuVersion Pointer to a caller-supplied range of supported vGPU versions.
+
+ @return
+ - \ref NVML_SUCCESS The preset range of supported vGPU versions was successfully overridden.
+ - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported.
+ - \ref NVML_ERROR_IN_USE The range was not overridden because a VM is running on the host.
+ - \ref NVML_ERROR_INVALID_ARGUMENT The \a vgpuVersion parameter specifies a range that is outside the range supported by the NVIDIA vGPU Manager or if \a vgpuVersion is NULL.*/
+ fn nvmlSetVgpuVersion(
+ vgpuVersion: *mut cuda_types::nvml::nvmlVgpuVersion_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
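The get/override pairing described above can be sketched as follows. The field layout of nvmlVgpuVersion_t (upstream: minimum/maximum version numbers) is defined in cuda_types::nvml rather than in this hunk, so the sketch only applies the supported range back unchanged; a real tool would narrow it first.

    use cuda_types::nvml::{nvmlReturn_t, nvmlVgpuVersion_t};

    fn narrow_vgpu_version_range() -> nvmlReturn_t {
        let mut supported: nvmlVgpuVersion_t = unsafe { std::mem::zeroed() };
        let mut current: nvmlVgpuVersion_t = unsafe { std::mem::zeroed() };
        let _ = unsafe { nvmlGetVgpuVersion(&mut supported, &mut current) };
        // An administrator would shrink this to a sub-range of `supported` before applying
        // it; passing the full supported range back is effectively a no-op override.
        let mut requested = supported;
        unsafe { nvmlSetVgpuVersion(&mut requested) }
    }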
+ #[must_use]
+ /** Retrieves current utilization for vGPUs on a physical GPU (device).
+
+ For Kepler &tm; or newer fully supported devices.
+
+ Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for vGPU instances running
+ on a device. Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer
+ pointed at by \a utilizationSamples. One utilization sample structure is returned per vGPU instance, and includes the
+ CPU timestamp at which the samples were recorded. Individual utilization values are returned as "unsigned int" values
+ in nvmlValue_t unions. The function sets the caller-supplied \a sampleValType to NVML_VALUE_TYPE_UNSIGNED_INT to
+ indicate the returned value type.
+
+ To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with
+ \a utilizationSamples set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current vGPU instance
+ count in \a vgpuInstanceSamplesCount, or NVML_SUCCESS if the current vGPU instance count is zero. The caller should allocate
+ a buffer of size vgpuInstanceSamplesCount * sizeof(nvmlVgpuInstanceUtilizationSample_t). Invoke the function again with
+ the allocated buffer passed in \a utilizationSamples, and \a vgpuInstanceSamplesCount set to the number of entries the
+ buffer is sized for.
+
+ On successful return, the function updates \a vgpuInstanceSampleCount with the number of vGPU utilization sample
+ structures that were actually written. This may differ from a previously read value as vGPU instances are created or
+ destroyed.
+
+ lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0
+ to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp
+ to a timeStamp retrieved from a previous query to read utilization since the previous query.
+
+ @param device The identifier for the target device
+ @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp.
+ @param sampleValType Pointer to caller-supplied buffer to hold the type of returned sample values
+ @param vgpuInstanceSamplesCount Pointer to caller-supplied array size, and returns number of vGPU instances
+ @param utilizationSamples Pointer to caller-supplied buffer in which vGPU utilization samples are returned
+
+ @return
+ - \ref NVML_SUCCESS if utilization samples are successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a vgpuInstanceSamplesCount or \a sampleValType is
+ NULL, or a sample count of 0 is passed with a non-NULL \a utilizationSamples
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if supplied \a vgpuInstanceSamplesCount is too small to return samples for all
+ vGPU instances currently executing on the device
+ - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_NOT_FOUND if sample entries are not found
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVgpuUtilization(
+ device: cuda_types::nvml::nvmlDevice_t,
+ lastSeenTimeStamp: ::core::ffi::c_ulonglong,
+ sampleValType: *mut cuda_types::nvml::nvmlValueType_t,
+ vgpuInstanceSamplesCount: *mut ::core::ffi::c_uint,
+ utilizationSamples: *mut cuda_types::nvml::nvmlVgpuInstanceUtilizationSample_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
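The sample-buffer protocol above (size query, then a sized read, with lastSeenTimeStamp bounding the window) can be sketched as below; illustrative only, with the helper name assumed and return codes left for the caller to inspect.

    use cuda_types::nvml::{
        nvmlDevice_t, nvmlReturn_t, nvmlValueType_t, nvmlVgpuInstanceUtilizationSample_t,
    };

    fn vgpu_utilization_since(
        device: nvmlDevice_t,
        last_seen_ts: ::core::ffi::c_ulonglong, // 0 = everything in the driver's sample buffer
    ) -> (nvmlReturn_t, Vec<nvmlVgpuInstanceUtilizationSample_t>) {
        let mut val_type: nvmlValueType_t = unsafe { std::mem::zeroed() };
        let mut count: ::core::ffi::c_uint = 0;
        // Size query: a NULL sample buffer reports the current vGPU instance count.
        let _ = unsafe {
            nvmlDeviceGetVgpuUtilization(device, last_seen_ts, &mut val_type, &mut count, std::ptr::null_mut())
        };
        let mut samples = Vec::with_capacity(count as usize);
        let ret = unsafe {
            nvmlDeviceGetVgpuUtilization(device, last_seen_ts, &mut val_type, &mut count, samples.as_mut_ptr())
        };
        unsafe { samples.set_len(count as usize) };
        (ret, samples)
    }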
+ #[must_use]
+ /** Retrieves recent utilization for vGPU instances running on a physical GPU (device).
+
+ For Kepler &tm; or newer fully supported devices.
+
+ Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, video decoder, jpeg decoder, and OFA for vGPU
+ instances running on a device. Utilization values are returned as an array of utilization sample structures in the caller-supplied
+ buffer pointed at by \a vgpuUtilInfo->vgpuUtilArray. One utilization sample structure is returned per vGPU instance, and includes the
+ CPU timestamp at which the samples were recorded. Individual utilization values are returned as "unsigned int" values
+ in nvmlValue_t unions. The function sets the caller-supplied \a vgpuUtilInfo->sampleValType to NVML_VALUE_TYPE_UNSIGNED_INT to
+ indicate the returned value type.
+
+ To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with
+ \a vgpuUtilInfo->vgpuUtilArray set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current vGPU instance
+ count in \a vgpuUtilInfo->vgpuInstanceCount, or NVML_SUCCESS if the current vGPU instance count is zero. The caller should allocate
+ a buffer of size vgpuUtilInfo->vgpuInstanceCount * sizeof(nvmlVgpuInstanceUtilizationInfo_t). Invoke the function again with
+ the allocated buffer passed in \a vgpuUtilInfo->vgpuUtilArray, and \a vgpuUtilInfo->vgpuInstanceCount set to the number of entries the
+ buffer is sized for.
+
+ On successful return, the function updates \a vgpuUtilInfo->vgpuInstanceCount with the number of vGPU utilization sample
+ structures that were actually written. This may differ from a previously read value as vGPU instances are created or
+ destroyed.
+
+ \a vgpuUtilInfo->lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0
+ to read utilization based on all the samples maintained by the driver's internal sample buffer. Set \a vgpuUtilInfo->lastSeenTimeStamp
+ to a timeStamp retrieved from a previous query to read utilization since the previous query.
+
+ @param device The identifier for the target device
+ @param vgpuUtilInfo Pointer to the caller-provided structure of nvmlVgpuInstancesUtilizationInfo_t
+
+ @return
+ - \ref NVML_SUCCESS if utilization samples are successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a vgpuUtilInfo is NULL, or \a vgpuUtilInfo->vgpuInstanceCount is 0
+ - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_VERSION_MISMATCH if the version of \a vgpuUtilInfo is invalid
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a vgpuUtilInfo->vgpuUtilArray is NULL, or the buffer size of vgpuUtilInfo->vgpuInstanceCount is too small.
+ The caller should check the current vGPU instance count from the returned vgpuUtilInfo->vgpuInstanceCount, and call
+ the function again with a buffer of size vgpuUtilInfo->vgpuInstanceCount * sizeof(nvmlVgpuInstanceUtilizationInfo_t)
+ - \ref NVML_ERROR_NOT_FOUND if sample entries are not found
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVgpuInstancesUtilizationInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ vgpuUtilInfo: *mut cuda_types::nvml::nvmlVgpuInstancesUtilizationInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves current utilization for processes running on vGPUs on a physical GPU (device).
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for processes running on
+ vGPU instances active on a device. Utilization values are returned as an array of utilization sample structures in the
+ caller-supplied buffer pointed at by \a utilizationSamples. One utilization sample structure is returned per process running
+ on vGPU instances that had some non-zero utilization during the last sample period. It includes the CPU timestamp at which
+ the samples were recorded. Individual utilization values are returned as "unsigned int" values.
+
+ To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with
+ \a utilizationSamples set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current vGPU instance
+ count in \a vgpuProcessSamplesCount. The caller should allocate a buffer of size
+ vgpuProcessSamplesCount * sizeof(nvmlVgpuProcessUtilizationSample_t). Invoke the function again with
+ the allocated buffer passed in \a utilizationSamples, and \a vgpuProcessSamplesCount set to the number of entries the
+ buffer is sized for.
+
+ On successful return, the function updates \a vgpuSubProcessSampleCount with the number of vGPU sub process utilization sample
+ structures that were actually written. This may differ from a previously read value depending on the number of processes that are active
+ in any given sample period.
+
+ lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0
+ to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp
+ to a timeStamp retrieved from a previous query to read utilization since the previous query.
+
+ @param device The identifier for the target device
+ @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp.
+ @param vgpuProcessSamplesCount Pointer to caller-supplied array size, and returns number of processes running on vGPU instances
+ @param utilizationSamples Pointer to caller-supplied buffer in which vGPU sub process utilization samples are returned
+
+ @return
+ - \ref NVML_SUCCESS if utilization samples are successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a vgpuProcessSamplesCount is NULL, or a sample count of 0 is
+ passed with a non-NULL \a utilizationSamples
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if supplied \a vgpuProcessSamplesCount is too small to return samples for all
+ vGPU instances currently executing on the device
+ - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_NOT_FOUND if sample entries are not found
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVgpuProcessUtilization(
+ device: cuda_types::nvml::nvmlDevice_t,
+ lastSeenTimeStamp: ::core::ffi::c_ulonglong,
+ vgpuProcessSamplesCount: *mut ::core::ffi::c_uint,
+ utilizationSamples: *mut cuda_types::nvml::nvmlVgpuProcessUtilizationSample_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves recent utilization for processes running on vGPU instances on a physical GPU (device).
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, video decoder, jpeg decoder, and OFA for processes running
+ on vGPU instances active on a device. Utilization values are returned as an array of utilization sample structures in the caller-supplied
+ buffer pointed at by \a vgpuProcUtilInfo->vgpuProcUtilArray. One utilization sample structure is returned per process running
+ on vGPU instances that had some non-zero utilization during the last sample period. It includes the CPU timestamp at which
+ the samples were recorded. Individual utilization values are returned as "unsigned int" values.
+
+ To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with
+ \a vgpuProcUtilInfo->vgpuProcUtilArray set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current processes' count
+ running on vGPU instances in \a vgpuProcUtilInfo->vgpuProcessCount. The caller should allocate a buffer of size
+ vgpuProcUtilInfo->vgpuProcessCount * sizeof(nvmlVgpuProcessUtilizationSample_t). Invoke the function again with the allocated buffer passed
+ in \a vgpuProcUtilInfo->vgpuProcUtilArray, and \a vgpuProcUtilInfo->vgpuProcessCount set to the number of entries the buffer is sized for.
+
+ On successful return, the function updates \a vgpuProcUtilInfo->vgpuProcessCount with the number of vGPU sub process utilization sample
+ structures that were actually written. This may differ from a previously read value depending on the number of processes that are active
+ in any given sample period.
+
+ vgpuProcUtilInfo->lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0
+ to read utilization based on all the samples maintained by the driver's internal sample buffer. Set vgpuProcUtilInfo->lastSeenTimeStamp
+ to a timeStamp retrieved from a previous query to read utilization since the previous query.
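+
+ A hedged sketch of the same pattern through the versioned info struct; the version constant name is an assumption
+ and should be checked against nvml.h, while the field names follow the description above:
+ \code
+ nvmlVgpuProcessesUtilizationInfo_t info = { 0 };
+ info.version = nvmlVgpuProcessesUtilizationInfo_v1; // version constant from nvml.h (assumed name)
+ info.lastSeenTimeStamp = 0;                         // 0 = all samples in the driver's buffer
+ info.vgpuProcUtilArray = NULL;                      // first pass: discover vgpuProcessCount
+ nvmlReturn_t r = nvmlDeviceGetVgpuProcessesUtilizationInfo(device, &info);
+ if (r == NVML_ERROR_INSUFFICIENT_SIZE && info.vgpuProcessCount > 0) {
+     info.vgpuProcUtilArray =
+         malloc(info.vgpuProcessCount * sizeof(nvmlVgpuProcessUtilizationSample_t));
+     r = nvmlDeviceGetVgpuProcessesUtilizationInfo(device, &info);
+     free(info.vgpuProcUtilArray);
+ }
+ \endcode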
+
+ @param device The identifier for the target device
+ @param vgpuProcUtilInfo Pointer to the caller-provided structure of nvmlVgpuProcessesUtilizationInfo_t
+
+ @return
+ - \ref NVML_SUCCESS if utilization samples are successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a vgpuProcUtilInfo is null
+ - \ref NVML_ERROR_VERSION_MISMATCH if the version of \a vgpuProcUtilInfo is invalid
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a vgpuProcUtilInfo->vgpuProcUtilArray is null, or supplied \a vgpuProcUtilInfo->vgpuProcessCount
+ is too small to return samples for all processes on vGPU instances currently executing on the device.
+ The caller should check the current processes count from the returned \a vgpuProcUtilInfo->vgpuProcessCount,
+ and call the function again with a buffer of size
+ vgpuProcUtilInfo->vgpuProcessCount * sizeof(nvmlVgpuProcessUtilizationSample_t)
+ - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_NOT_FOUND if sample entries are not found
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetVgpuProcessesUtilizationInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ vgpuProcUtilInfo: *mut cuda_types::nvml::nvmlVgpuProcessesUtilizationInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Queries the state of per process accounting mode on vGPU.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param vgpuInstance The identifier of the target vGPU instance
+ @param mode Reference in which to return the current accounting mode
+
+ @return
+ - \ref NVML_SUCCESS if the mode has been successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a mode is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature
+ - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running on the vGPU instance
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetAccountingMode(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ mode: *mut cuda_types::nvml::nvmlEnableState_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Queries list of processes running on vGPU that can be queried for accounting stats. The list of processes
+ returned can be in running or terminated state.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ To just query the maximum number of processes that can be queried, call this function with *count = 0 and
+ \a pids = NULL. The return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if the list is empty.
+
+ For more details see \ref nvmlVgpuInstanceGetAccountingStats.
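+
+ A minimal sketch of the size-then-fetch pattern (error handling omitted; \a vgpuInstance is assumed to be a valid
+ active vGPU instance):
+ \code
+ unsigned int count = 0;
+ nvmlReturn_t r = nvmlVgpuInstanceGetAccountingPids(vgpuInstance, &count, NULL);
+ if (r == NVML_ERROR_INSUFFICIENT_SIZE && count > 0) {
+     unsigned int *pids = malloc(count * sizeof(unsigned int));
+     r = nvmlVgpuInstanceGetAccountingPids(vgpuInstance, &count, pids);
+     // count now holds the number of entries actually returned
+     free(pids);
+ }
+ \endcode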
+
+ @note In case of PID collision some processes might not be accessible before the circular buffer is full.
+
+ @param vgpuInstance The identifier of the target vGPU instance
+ @param count Reference in which to provide the \a pids array size, and
+ to return the number of elements ready to be queried
+ @param pids Reference in which to return list of process ids
+
+ @return
+ - \ref NVML_SUCCESS if pids were successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a count is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature or accounting mode is disabled
+ - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to expected value)
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see nvmlVgpuInstanceGetAccountingStats
+ fn nvmlVgpuInstanceGetAccountingPids(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ count: *mut ::core::ffi::c_uint,
+ pids: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Queries process's accounting stats.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ Accounting stats capture GPU utilization and other statistics across the lifetime of a process, and
+ can be queried during life time of the process or after its termination.
+ The time field in \ref nvmlAccountingStats_t is reported as 0 during the lifetime of the process and
+ updated to actual running time after its termination.
+ Accounting stats are kept in a circular buffer, newly created processes overwrite information about old
+ processes.
+
+ See \ref nvmlAccountingStats_t for description of each returned metric.
+ The list of processes that can be queried can be retrieved from \ref nvmlVgpuInstanceGetAccountingPids.
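+
+ A sketch of the usual workflow, assuming \a pids and \a count were just retrieved with
+ \ref nvmlVgpuInstanceGetAccountingPids; the \ref nvmlAccountingStats_t field names in the comment are indicative
+ only:
+ \code
+ for (unsigned int i = 0; i < count; ++i) {
+     nvmlAccountingStats_t stats;
+     if (nvmlVgpuInstanceGetAccountingStats(vgpuInstance, pids[i], &stats) == NVML_SUCCESS) {
+         // e.g. stats.gpuUtilization, stats.maxMemoryUsage, stats.time
+     }
+ }
+ \endcode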
+
+ @note Accounting Mode needs to be on. See \ref nvmlVgpuInstanceGetAccountingMode.
+ @note Only compute and graphics applications stats can be queried. Monitoring applications stats can't be
+ queried since they don't contribute to GPU utilization.
+ @note In case of pid collision stats of only the latest process (that terminated last) will be reported
+
+ @param vgpuInstance The identifier of the target vGPU instance
+ @param pid Process Id of the target process to query stats for
+ @param stats Reference in which to return the process's accounting stats
+
+ @return
+ - \ref NVML_SUCCESS if stats have been successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a stats is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ or \a stats is not found
+ - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature or accounting mode is disabled
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetAccountingStats(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ pid: ::core::ffi::c_uint,
+ stats: *mut cuda_types::nvml::nvmlAccountingStats_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Clears accounting information of the vGPU instance that have already terminated.
+
+ For Maxwell &tm; or newer fully supported devices.
+ Requires root/admin permissions.
+
+ @note Accounting Mode needs to be on. See \ref nvmlVgpuInstanceGetAccountingMode.
+ @note Only compute and graphics applications stats are reported and can be cleared since monitoring applications
+ stats don't contribute to GPU utilization.
+
+ @param vgpuInstance The identifier of the target vGPU instance
+
+ @return
+ - \ref NVML_SUCCESS if accounting information has been cleared
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid
+ - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature or accounting mode is disabled
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceClearAccountingPids(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Query the license information of the vGPU instance.
+
+ For Maxwell &tm; or newer fully supported devices.
+
+ @param vgpuInstance Identifier of the target vGPU instance
+ @param licenseInfo Pointer to vGPU license information structure
+
+ @return
+ - \ref NVML_SUCCESS if information is successfully retrieved
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a licenseInfo is NULL
+ - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running on the vGPU instance
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlVgpuInstanceGetLicenseInfo_v2(
+ vgpuInstance: cuda_types::nvml::nvmlVgpuInstance_t,
+ licenseInfo: *mut cuda_types::nvml::nvmlVgpuLicenseInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Retrieves the number of excluded GPU devices in the system.
+
+ For all products.
+
+ @param deviceCount Reference in which to return the number of excluded devices
+
+ @return
+ - \ref NVML_SUCCESS if \a deviceCount has been set
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a deviceCount is NULL*/
+ fn nvmlGetExcludedDeviceCount(
+ deviceCount: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Acquire the device information for an excluded GPU device, based on its index.
+
+ For all products.
+
+ Valid indices are derived from the \a deviceCount returned by
+ \ref nvmlGetExcludedDeviceCount(). For example, if \a deviceCount is 2 the valid indices
+ are 0 and 1, corresponding to GPU 0 and GPU 1.
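+
+ A short enumeration sketch (error handling trimmed; the \ref nvmlExcludedDeviceInfo_t field names in the comment
+ are indicative only):
+ \code
+ unsigned int excluded = 0;
+ if (nvmlGetExcludedDeviceCount(&excluded) == NVML_SUCCESS) {
+     for (unsigned int i = 0; i < excluded; ++i) {
+         nvmlExcludedDeviceInfo_t info;
+         if (nvmlGetExcludedDeviceInfoByIndex(i, &info) == NVML_SUCCESS) {
+             // e.g. info.pciInfo and info.uuid identify the excluded GPU
+         }
+     }
+ }
+ \endcode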
+
+ @param index The index of the target GPU, >= 0 and < \a deviceCount
+ @param info Reference in which to return the device information
+
+ @return
+ - \ref NVML_SUCCESS if \a info has been set
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a info is NULL
+
+ @see nvmlGetExcludedDeviceCount*/
+ fn nvmlGetExcludedDeviceInfoByIndex(
+ index: ::core::ffi::c_uint,
+ info: *mut cuda_types::nvml::nvmlExcludedDeviceInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set MIG mode for the device.
+
+ For Ampere &tm; or newer fully supported devices.
+ Requires root user.
+
+ This mode determines whether a GPU instance can be created.
+
+ This API may unbind or reset the device to activate the requested mode. Thus, the attributes associated with the
+ device, such as minor number, might change. The caller of this API is expected to query such attributes again.
+
+ On certain platforms like pass-through virtualization, where reset functionality may not be exposed directly, VM
+ reboot is required. \a activationStatus would return \ref NVML_ERROR_RESET_REQUIRED for such cases.
+
+ \a activationStatus would return the appropriate error code upon unsuccessful activation. For example, if device
+ unbind fails because the device isn't idle, \ref NVML_ERROR_IN_USE would be returned. The caller of this API
+ is expected to idle the device and retry setting the \a mode.
+
+ @note On Windows, only disabling MIG mode is supported. \a activationStatus would return \ref
+ NVML_ERROR_NOT_SUPPORTED as GPU reset is not supported on Windows through this API.
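+
+ A hedged sketch of enabling MIG and reacting to the activation status (illustrative only; requires root):
+ \code
+ nvmlReturn_t activationStatus = NVML_SUCCESS;
+ nvmlReturn_t r = nvmlDeviceSetMigMode(device, NVML_DEVICE_MIG_ENABLE, &activationStatus);
+ if (r == NVML_SUCCESS && activationStatus == NVML_ERROR_IN_USE) {
+     // the device is not idle: stop clients using the GPU, then retry
+ } else if (r == NVML_SUCCESS && activationStatus == NVML_ERROR_RESET_REQUIRED) {
+     // e.g. pass-through virtualization: a VM reboot is needed to activate the mode
+ }
+ \endcode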
+
+ @param device The identifier of the target device
+ @param mode The mode to be set, \ref NVML_DEVICE_MIG_DISABLE or
+ \ref NVML_DEVICE_MIG_ENABLE
+ @param activationStatus Returns the status of the MIG mode activation attempt
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a mode or \a activationStatus are invalid
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support MIG mode*/
+ fn nvmlDeviceSetMigMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ mode: ::core::ffi::c_uint,
+ activationStatus: *mut cuda_types::nvml::nvmlReturn_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get MIG mode for the device.
+
+ For Ampere &tm; or newer fully supported devices.
+
+ Changing MIG modes may require device unbind or reset. The "pending" MIG mode refers to the target mode following the
+ next activation trigger.
+
+ @param device The identifier of the target device
+ @param currentMode Returns the current mode, \ref NVML_DEVICE_MIG_DISABLE or
+ \ref NVML_DEVICE_MIG_ENABLE
+ @param pendingMode Returns the pending mode, \ref NVML_DEVICE_MIG_DISABLE or
+ \ref NVML_DEVICE_MIG_ENABLE
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a currentMode or \a pendingMode are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support MIG mode*/
+ fn nvmlDeviceGetMigMode(
+ device: cuda_types::nvml::nvmlDevice_t,
+ currentMode: *mut ::core::ffi::c_uint,
+ pendingMode: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get GPU instance profile information
+
+ Information provided by this API is immutable throughout the lifetime of a MIG mode.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device The identifier of the target device
+ @param profile One of the NVML_GPU_INSTANCE_PROFILE_*
+ @param info Returns detailed profile information
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profile or \a info are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support MIG or \a profile isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlDeviceGetGpuInstanceProfileInfo(
+ device: cuda_types::nvml::nvmlDevice_t,
+ profile: ::core::ffi::c_uint,
+ info: *mut cuda_types::nvml::nvmlGpuInstanceProfileInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Versioned wrapper around \ref nvmlDeviceGetGpuInstanceProfileInfo that accepts a versioned
+ \ref nvmlGpuInstanceProfileInfo_v2_t or later output structure.
+
+ @note The caller must set the \ref nvmlGpuInstanceProfileInfo_v2_t.version field to the
+ appropriate version prior to calling this function. For example:
+ \code
+ nvmlGpuInstanceProfileInfo_v2_t profileInfo =
+ { .version = nvmlGpuInstanceProfileInfo_v2 };
+ nvmlReturn_t result = nvmlDeviceGetGpuInstanceProfileInfoV(device,
+ profile,
+ &profileInfo);
+ \endcode
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device The identifier of the target device
+ @param profile One of the NVML_GPU_INSTANCE_PROFILE_*
+ @param info Returns detailed profile information
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profile, \a info, or \a info->version are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or \a profile isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlDeviceGetGpuInstanceProfileInfoV(
+ device: cuda_types::nvml::nvmlDevice_t,
+ profile: ::core::ffi::c_uint,
+ info: *mut cuda_types::nvml::nvmlGpuInstanceProfileInfo_v2_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get GPU instance placements.
+
+ A placement represents the location of a GPU instance within a device. This API returns all possible
+ placements for the given profile, regardless of whether MIG is enabled or not.
+ A created GPU instance occupies the memory slices described by its placement. Creation of a new GPU instance
+ fails if its placement overlaps with already occupied memory slices.
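+
+ A minimal sketch of discovering and reading the placements (error handling omitted; \a profileId is assumed to be
+ the \a id field of a profile returned by \ref nvmlDeviceGetGpuInstanceProfileInfo):
+ \code
+ unsigned int count = 0;
+ nvmlDeviceGetGpuInstancePossiblePlacements_v2(device, profileId, NULL, &count); // discover count
+ nvmlGpuInstancePlacement_t *placements = malloc(count * sizeof(*placements));
+ nvmlDeviceGetGpuInstancePossiblePlacements_v2(device, profileId, placements, &count);
+ // placements[i].start and placements[i].size describe each candidate memory-slice range
+ free(placements);
+ \endcode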
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ @param device The identifier of the target device
+ @param profileId The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo
+ @param placements Returns placements allowed for the profile. Can be NULL to discover number
+ of allowed placements for this profile. If non-NULL must be large enough
+ to accommodate the placements supported by the profile.
+ @param count Returns the number of allowed placements for the profile.
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profileId or \a count are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support MIG or \a profileId isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlDeviceGetGpuInstancePossiblePlacements_v2(
+ device: cuda_types::nvml::nvmlDevice_t,
+ profileId: ::core::ffi::c_uint,
+ placements: *mut cuda_types::nvml::nvmlGpuInstancePlacement_t,
+ count: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get GPU instance profile capacity.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ @param device The identifier of the target device
+ @param profileId The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo
+ @param count Returns remaining instance count for the profile ID
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profileId or \a count are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or \a profileId isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlDeviceGetGpuInstanceRemainingCapacity(
+ device: cuda_types::nvml::nvmlDevice_t,
+ profileId: ::core::ffi::c_uint,
+ count: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Create GPU instance.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ If the parent device is unbound, reset or the GPU instance is destroyed explicitly, the GPU instance handle would
+ become invalid. The GPU instance must be recreated to acquire a valid handle.
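+
+ A hedged sketch of creating (and later destroying) a GPU instance from a profile (illustrative only; requires a
+ privileged user and MIG mode enabled):
+ \code
+ nvmlGpuInstanceProfileInfo_t profileInfo;
+ if (nvmlDeviceGetGpuInstanceProfileInfo(device, NVML_GPU_INSTANCE_PROFILE_1_SLICE,
+                                         &profileInfo) == NVML_SUCCESS) {
+     nvmlGpuInstance_t gpuInstance;
+     if (nvmlDeviceCreateGpuInstance(device, profileInfo.id, &gpuInstance) == NVML_SUCCESS) {
+         // ... create compute instances, run work ...
+         nvmlGpuInstanceDestroy(gpuInstance);
+     }
+ }
+ \endcode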
+
+ @param device The identifier of the target device
+ @param profileId The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo
+ @param gpuInstance Returns the GPU instance handle
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profile, \a profileId or \a gpuInstance are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or in vGPU guest
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_INSUFFICIENT_RESOURCES If the requested GPU instance could not be created*/
+ fn nvmlDeviceCreateGpuInstance(
+ device: cuda_types::nvml::nvmlDevice_t,
+ profileId: ::core::ffi::c_uint,
+ gpuInstance: *mut cuda_types::nvml::nvmlGpuInstance_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Create GPU instance with the specified placement.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ If the parent device is unbound, reset or the GPU instance is destroyed explicitly, the GPU instance handle would
+ become invalid. The GPU instance must be recreated to acquire a valid handle.
+
+ @param device The identifier of the target device
+ @param profileId The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo
+ @param placement The requested placement. See \ref nvmlDeviceGetGpuInstancePossiblePlacements_v2
+ @param gpuInstance Returns the GPU instance handle
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profile, \a profileId, \a placement or \a gpuInstance
+ are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or in vGPU guest
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_INSUFFICIENT_RESOURCES If the requested GPU instance could not be created*/
+ fn nvmlDeviceCreateGpuInstanceWithPlacement(
+ device: cuda_types::nvml::nvmlDevice_t,
+ profileId: ::core::ffi::c_uint,
+ placement: *const cuda_types::nvml::nvmlGpuInstancePlacement_t,
+ gpuInstance: *mut cuda_types::nvml::nvmlGpuInstance_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Destroy GPU instance.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ @param gpuInstance The GPU instance handle
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or in vGPU guest
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_IN_USE If the GPU instance is in use. This error would be returned if processes
+ (e.g. CUDA application) or compute instances are active on the
+ GPU instance.*/
+ fn nvmlGpuInstanceDestroy(
+ gpuInstance: cuda_types::nvml::nvmlGpuInstance_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get GPU instances for given profile ID.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ @param device The identifier of the target device
+ @param profileId The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo
+ @param gpuInstances Returns pre-existing GPU instances; the buffer must be large enough to
+ accommodate the instances supported by the profile.
+ See \ref nvmlDeviceGetGpuInstanceProfileInfo
+ @param count The count of returned GPU instances
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profileId, \a gpuInstances or \a count are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlDeviceGetGpuInstances(
+ device: cuda_types::nvml::nvmlDevice_t,
+ profileId: ::core::ffi::c_uint,
+ gpuInstances: *mut cuda_types::nvml::nvmlGpuInstance_t,
+ count: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get GPU instances for given instance ID.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ @param device The identifier of the target device
+ @param id The GPU instance ID
+ @param gpuInstance Returns GPU instance
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a id or \a gpuInstance are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_NOT_FOUND If the GPU instance is not found.*/
+ fn nvmlDeviceGetGpuInstanceById(
+ device: cuda_types::nvml::nvmlDevice_t,
+ id: ::core::ffi::c_uint,
+ gpuInstance: *mut cuda_types::nvml::nvmlGpuInstance_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get GPU instance information.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param gpuInstance The GPU instance handle
+ @param info Return GPU instance information
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance or \a info are invalid
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlGpuInstanceGetInfo(
+ gpuInstance: cuda_types::nvml::nvmlGpuInstance_t,
+ info: *mut cuda_types::nvml::nvmlGpuInstanceInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get compute instance profile information.
+
+ Information provided by this API is immutable throughout the lifetime of a MIG mode.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param gpuInstance The identifier of the target GPU instance
+ @param profile One of the NVML_COMPUTE_INSTANCE_PROFILE_*
+ @param engProfile One of the NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_*
+ @param info Returns detailed profile information
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profile, \a engProfile or \a info are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a profile isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlGpuInstanceGetComputeInstanceProfileInfo(
+ gpuInstance: cuda_types::nvml::nvmlGpuInstance_t,
+ profile: ::core::ffi::c_uint,
+ engProfile: ::core::ffi::c_uint,
+ info: *mut cuda_types::nvml::nvmlComputeInstanceProfileInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Versioned wrapper around \ref nvmlGpuInstanceGetComputeInstanceProfileInfo that accepts a versioned
+ \ref nvmlComputeInstanceProfileInfo_v2_t or later output structure.
+
+ @note The caller must set the \ref nvmlGpuInstanceProfileInfo_v2_t.version field to the
+ appropriate version prior to calling this function. For example:
+ \code
+ nvmlComputeInstanceProfileInfo_v2_t profileInfo =
+ { .version = nvmlComputeInstanceProfileInfo_v2 };
+ nvmlReturn_t result = nvmlGpuInstanceGetComputeInstanceProfileInfoV(gpuInstance,
+ profile,
+ engProfile,
+ &profileInfo);
+ \endcode
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param gpuInstance The identifier of the target GPU instance
+ @param profile One of the NVML_COMPUTE_INSTANCE_PROFILE_*
+ @param engProfile One of the NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_*
+ @param info Returns detailed profile information
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profile, \a engProfile, \a info, or \a info->version are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a profile isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlGpuInstanceGetComputeInstanceProfileInfoV(
+ gpuInstance: cuda_types::nvml::nvmlGpuInstance_t,
+ profile: ::core::ffi::c_uint,
+ engProfile: ::core::ffi::c_uint,
+ info: *mut cuda_types::nvml::nvmlComputeInstanceProfileInfo_v2_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get compute instance profile capacity.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ @param gpuInstance The identifier of the target GPU instance
+ @param profileId The compute instance profile ID.
+ See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo
+ @param count Returns remaining instance count for the profile ID
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profileId or \a count are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a profileId isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlGpuInstanceGetComputeInstanceRemainingCapacity(
+ gpuInstance: cuda_types::nvml::nvmlGpuInstance_t,
+ profileId: ::core::ffi::c_uint,
+ count: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get compute instance placements.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ A placement represents the location of a compute instance within a GPU instance. This API returns all possible
+ placements for the given profile.
+ A created compute instance occupies the compute slices described by its placement. Creation of a new compute
+ instance fails if its placement overlaps with already occupied compute slices.
+
+ @param gpuInstance The identifier of the target GPU instance
+ @param profileId The compute instance profile ID. See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo
+ @param placements Returns placements allowed for the profile. Can be NULL to discover number
+ of allowed placements for this profile. If non-NULL must be large enough
+ to accommodate the placements supported by the profile.
+ @param count Returns the number of allowed placements for the profile.
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profileId or \a count are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or \a profileId isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlGpuInstanceGetComputeInstancePossiblePlacements(
+ gpuInstance: cuda_types::nvml::nvmlGpuInstance_t,
+ profileId: ::core::ffi::c_uint,
+ placements: *mut cuda_types::nvml::nvmlComputeInstancePlacement_t,
+ count: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Create compute instance.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ If the parent device is unbound, reset or the parent GPU instance is destroyed or the compute instance is destroyed
+ explicitly, the compute instance handle would become invalid. The compute instance must be recreated to acquire
+ a valid handle.
+
+ @param gpuInstance The identifier of the target GPU instance
+ @param profileId The compute instance profile ID.
+ See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo
+ @param computeInstance Returns the compute instance handle
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profile, \a profileId or \a computeInstance
+ are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a profileId isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_INSUFFICIENT_RESOURCES If the requested compute instance could not be created*/
+ fn nvmlGpuInstanceCreateComputeInstance(
+ gpuInstance: cuda_types::nvml::nvmlGpuInstance_t,
+ profileId: ::core::ffi::c_uint,
+ computeInstance: *mut cuda_types::nvml::nvmlComputeInstance_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Create compute instance with the specified placement.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ If the parent device is unbound, reset or the parent GPU instance is destroyed or the compute instance is destroyed
+ explicitly, the compute instance handle would become invalid. The compute instance must be recreated to acquire
+ a valid handle.
+
+ @param gpuInstance The identifier of the target GPU instance
+ @param profileId The compute instance profile ID.
+ See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo
+ @param placement The requested placement. See \ref nvmlGpuInstanceGetComputeInstancePossiblePlacements
+ @param computeInstance Returns the compute instance handle
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profile, \a profileId or \a computeInstance
+ are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a profileId isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_INSUFFICIENT_RESOURCES If the requested compute instance could not be created*/
+ fn nvmlGpuInstanceCreateComputeInstanceWithPlacement(
+ gpuInstance: cuda_types::nvml::nvmlGpuInstance_t,
+ profileId: ::core::ffi::c_uint,
+ placement: *const cuda_types::nvml::nvmlComputeInstancePlacement_t,
+ computeInstance: *mut cuda_types::nvml::nvmlComputeInstance_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Destroy compute instance.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ @param computeInstance The compute instance handle
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a computeInstance is invalid
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_IN_USE If the compute instance is in use. This error would be returned if
+ processes (e.g. CUDA application) are active on the compute instance.*/
+ fn nvmlComputeInstanceDestroy(
+ computeInstance: cuda_types::nvml::nvmlComputeInstance_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get compute instances for given profile ID.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ @param gpuInstance The identifier of the target GPU instance
+ @param profileId The compute instance profile ID.
+ See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo
+ @param computeInstances Returns pre-existing compute instances; the buffer must be large enough to
+ accommodate the instances supported by the profile.
+ See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo
+ @param count The count of returned compute instances
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profileId, \a computeInstances or \a count
+ are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a profileId isn't supported
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlGpuInstanceGetComputeInstances(
+ gpuInstance: cuda_types::nvml::nvmlGpuInstance_t,
+ profileId: ::core::ffi::c_uint,
+ computeInstances: *mut cuda_types::nvml::nvmlComputeInstance_t,
+ count: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get compute instance for given instance ID.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+ Requires privileged user.
+
+ @param gpuInstance The identifier of the target GPU instance
+ @param id The compute instance ID
+ @param computeInstance Returns compute instance
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a id or \a computeInstance are invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation
+ - \ref NVML_ERROR_NOT_FOUND If the compute instance is not found.*/
+ fn nvmlGpuInstanceGetComputeInstanceById(
+ gpuInstance: cuda_types::nvml::nvmlGpuInstance_t,
+ id: ::core::ffi::c_uint,
+ computeInstance: *mut cuda_types::nvml::nvmlComputeInstance_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get compute instance information.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param computeInstance The compute instance handle
+ @param info Return compute instance information
+
+ @return
+ - \ref NVML_SUCCESS Upon success
+ - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT If \a computeInstance or \a info are invalid
+ - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation*/
+ fn nvmlComputeInstanceGetInfo_v2(
+ computeInstance: cuda_types::nvml::nvmlComputeInstance_t,
+ info: *mut cuda_types::nvml::nvmlComputeInstanceInfo_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Test if the given handle refers to a MIG device.
+
+ A MIG device handle is an NVML abstraction which maps to a MIG compute instance.
+ These overloaded references can be used (with some restrictions) interchangeably
+ with a GPU device handle to execute queries at a per-compute instance granularity.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device NVML handle to test
+ @param isMigDevice True when handle refers to a MIG device
+
+ @return
+ - \ref NVML_SUCCESS if \a device status was successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device handle or \a isMigDevice reference is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this check is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceIsMigDeviceHandle(
+ device: cuda_types::nvml::nvmlDevice_t,
+ isMigDevice: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get GPU instance ID for the given MIG device handle.
+
+ GPU instance IDs are unique per device and remain valid until the GPU instance is destroyed.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device Target MIG device handle
+ @param id GPU instance ID
+
+ @return
+ - \ref NVML_SUCCESS if instance ID was successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a id reference is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetGpuInstanceId(
+ device: cuda_types::nvml::nvmlDevice_t,
+ id: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get compute instance ID for the given MIG device handle.
+
+ Compute instance IDs are unique per GPU instance and remain valid until the compute instance
+ is destroyed.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device Target MIG device handle
+ @param id Compute instance ID
+
+ @return
+ - \ref NVML_SUCCESS if instance ID was successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a id reference is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetComputeInstanceId(
+ device: cuda_types::nvml::nvmlDevice_t,
+ id: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get the maximum number of MIG devices that can exist under a given parent NVML device.
+
+ Returns zero if MIG is not supported or enabled.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device Target device handle
+ @param count Count of MIG devices
+
+ @return
+ - \ref NVML_SUCCESS if \a count was successfully retrieved
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a count reference is invalid
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMaxMigDeviceCount(
+ device: cuda_types::nvml::nvmlDevice_t,
+ count: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get MIG device handle for the given index under its parent NVML device.
+
+ If the compute instance is destroyed, either explicitly or by destroying,
+ resetting or unbinding the parent GPU instance or the GPU device itself,
+ the MIG device handle becomes invalid and must be requested again
+ using this API. Handles may be reused and their properties can change in
+ the process.
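+
+ A minimal enumeration sketch (error handling trimmed; indices without an active MIG device return
+ NVML_ERROR_NOT_FOUND and are simply skipped):
+ \code
+ unsigned int maxMig = 0;
+ nvmlDeviceGetMaxMigDeviceCount(device, &maxMig);
+ for (unsigned int i = 0; i < maxMig; ++i) {
+     nvmlDevice_t migDevice;
+     if (nvmlDeviceGetMigDeviceHandleByIndex(device, i, &migDevice) != NVML_SUCCESS)
+         continue; // e.g. NVML_ERROR_NOT_FOUND: no MIG device at this index
+     unsigned int giId = 0, ciId = 0;
+     nvmlDeviceGetGpuInstanceId(migDevice, &giId);
+     nvmlDeviceGetComputeInstanceId(migDevice, &ciId);
+ }
+ \endcode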
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param device Reference to the parent GPU device handle
+ @param index Index of the MIG device
+ @param migDevice Reference to the MIG device handle
+
+ @return
+ - \ref NVML_SUCCESS if \a migDevice handle was successfully created
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a index or \a migDevice reference is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_NOT_FOUND if no valid MIG device was found at \a index
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetMigDeviceHandleByIndex(
+ device: cuda_types::nvml::nvmlDevice_t,
+ index: ::core::ffi::c_uint,
+ migDevice: *mut cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get parent device handle from a MIG device handle.
+
+ For Ampere &tm; or newer fully supported devices.
+ Supported on Linux only.
+
+ @param migDevice MIG device handle
+ @param device Device handle
+
+ @return
+ - \ref NVML_SUCCESS if \a device handle was successfully created
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a migDevice or \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetDeviceHandleFromMigDeviceHandle(
+ migDevice: cuda_types::nvml::nvmlDevice_t,
+ device: *mut cuda_types::nvml::nvmlDevice_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Calculate GPM metrics from two samples.
+
+ For Hopper &tm; or newer fully supported devices.
+
+ @param metricsGet IN/OUT: populated \a nvmlGpmMetricsGet_t struct
+
+ @return
+ - \ref NVML_SUCCESS on success
+ - Nonzero NVML_ERROR_? enum on error*/
+ fn nvmlGpmMetricsGet(
+ metricsGet: *mut cuda_types::nvml::nvmlGpmMetricsGet_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Free an allocated sample buffer that was allocated with \ref nvmlGpmSampleAlloc()
+
+ For Hopper &tm; or newer fully supported devices.
+
+ @param gpmSample Sample to free
+
+ @return
+ - \ref NVML_SUCCESS on success
+ - \ref NVML_ERROR_INVALID_ARGUMENT if an invalid pointer is provided*/
+ fn nvmlGpmSampleFree(
+ gpmSample: cuda_types::nvml::nvmlGpmSample_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Allocate a sample buffer to be used with NVML GPM. You will need to allocate
+ at least two of these buffers to use the NVML GPM feature.
+
+ For Hopper &tm; or newer fully supported devices.
+
+ @param gpmSample Where the allocated sample will be stored
+
+ @return
+ - \ref NVML_SUCCESS on success
+ - \ref NVML_ERROR_INVALID_ARGUMENT if an invalid pointer is provided
+ - \ref NVML_ERROR_MEMORY if system memory is insufficient*/
+ fn nvmlGpmSampleAlloc(
+ gpmSample: *mut cuda_types::nvml::nvmlGpmSample_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Read a sample of GPM metrics into the provided \a gpmSample buffer. After
+ two samples are gathered, you can call nvmlGpmMetricsGet on those samples to
+ retrieve metrics.
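+
+ A hedged sketch of the full GPM flow; the \a nvmlGpmMetricsGet_t field names and the metric ID constant follow
+ NVML's GPM API and should be checked against your nvml.h, and error handling plus the sampling wait are only
+ hinted at:
+ \code
+ nvmlGpmSample_t s1, s2;
+ nvmlGpmSampleAlloc(&s1);
+ nvmlGpmSampleAlloc(&s2);
+ nvmlGpmSampleGet(device, s1);
+ // wait for the measurement interval (e.g. about one second) before the second sample
+ nvmlGpmSampleGet(device, s2);
+ nvmlGpmMetricsGet_t mg = { 0 };
+ mg.version = NVML_GPM_METRICS_GET_VERSION;
+ mg.numMetrics = 1;
+ mg.metrics[0].metricId = NVML_GPM_METRIC_SM_UTIL; // any nvmlGpmMetricId_t value
+ mg.sample1 = s1;
+ mg.sample2 = s2;
+ if (nvmlGpmMetricsGet(&mg) == NVML_SUCCESS && mg.metrics[0].nvmlReturn == NVML_SUCCESS) {
+     double smUtil = mg.metrics[0].value; // metric computed from the two samples
+ }
+ nvmlGpmSampleFree(s1);
+ nvmlGpmSampleFree(s2);
+ \endcode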
+
+ For Hopper &tm; or newer fully supported devices.
+
+ @param device Device to get samples for
+ @param gpmSample Buffer to read samples into
+
+ @return
+ - \ref NVML_SUCCESS on success
+ - Nonzero NVML_ERROR_? enum on error*/
+ fn nvmlGpmSampleGet(
+ device: cuda_types::nvml::nvmlDevice_t,
+ gpmSample: cuda_types::nvml::nvmlGpmSample_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Read a sample of GPM metrics into the provided \a gpmSample buffer for a MIG GPU Instance.
+
+ After two samples are gathered, you can call nvmlGpmMetricsGet on those
+ samples to retrieve metrics.
+
+ For Hopper &tm; or newer fully supported devices.
+
+ @param device Device to get samples for
+ @param gpuInstanceId MIG GPU Instance ID
+ @param gpmSample Buffer to read samples into
+
+ @return
+ - \ref NVML_SUCCESS on success
+ - Nonzero NVML_ERROR_? enum on error*/
+ fn nvmlGpmMigSampleGet(
+ device: cuda_types::nvml::nvmlDevice_t,
+ gpuInstanceId: ::core::ffi::c_uint,
+ gpmSample: cuda_types::nvml::nvmlGpmSample_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Indicate whether the supplied device supports GPM
+
+ @param device NVML device to query for
+ @param gpmSupport Structure to indicate GPM support \a nvmlGpmSupport_t. Indicates
+ GPM support per system for the supplied device
+
+ @return
+ - NVML_SUCCESS on success
+ - Nonzero NVML_ERROR_? enum if there is an error in processing the query*/
+ fn nvmlGpmQueryDeviceSupport(
+ device: cuda_types::nvml::nvmlDevice_t,
+ gpmSupport: *mut cuda_types::nvml::nvmlGpmSupport_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get GPM stream state.
+
+ For Hopper &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ @param device The identifier of the target device
+ @param state Returns GPM stream state
+ NVML_FEATURE_DISABLED or NVML_FEATURE_ENABLED
+
+ @return
+ - \ref NVML_SUCCESS if the current GPM stream state was successfully queried
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a state is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device*/
+ fn nvmlGpmQueryIfStreamingEnabled(
+ device: cuda_types::nvml::nvmlDevice_t,
+ state: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set GPM stream state.
+
+ For Hopper &tm; or newer fully supported devices.
+ Supported on Linux, Windows TCC.
+
+ @param device The identifier of the target device
+ @param state GPM stream state,
+ NVML_FEATURE_DISABLED or NVML_FEATURE_ENABLED
+
+ @return
+ - \ref NVML_SUCCESS if the GPM stream state was successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device*/
+ fn nvmlGpmSetStreamingEnabled(
+ device: cuda_types::nvml::nvmlDevice_t,
+ state: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set NvLink Low Power Threshold for device.
+
+ For Hopper &tm; or newer fully supported devices.
+
+ @param device The identifier of the target device
+ @param info Reference to \a nvmlNvLinkPowerThres_t struct
+ input parameters
+
+ @return
+ - \ref NVML_SUCCESS if the \a Threshold is successfully set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a Threshold is not within range
+ - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+*/
+ fn nvmlDeviceSetNvLinkDeviceLowPowerThreshold(
+ device: cuda_types::nvml::nvmlDevice_t,
+ info: *mut cuda_types::nvml::nvmlNvLinkPowerThres_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set the global NvLink bandwidth mode.
+
+ @param nvlinkBwMode nvlink bandwidth mode
+ @return
+ - \ref NVML_SUCCESS on success
+ - \ref NVML_ERROR_INVALID_ARGUMENT if an invalid argument is provided
+ - \ref NVML_ERROR_IN_USE if P2P object exists
+ - \ref NVML_ERROR_NOT_SUPPORTED if GPU is not Hopper or newer architecture.
+ - \ref NVML_ERROR_NO_PERMISSION if not root user*/
+ fn nvmlSystemSetNvlinkBwMode(
+ nvlinkBwMode: ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get the global NvLink bandwidth mode.
+
+ @param nvlinkBwMode Reference in which to return the NvLink bandwidth mode
+ @return
+ - \ref NVML_SUCCESS on success
+ - \ref NVML_ERROR_INVALID_ARGUMENT if an invalid pointer is provided
+ - \ref NVML_ERROR_NOT_SUPPORTED if GPU is not Hopper or newer architecture.
+ - \ref NVML_ERROR_NO_PERMISSION if not root user*/
+ fn nvmlSystemGetNvlinkBwMode(
+ nvlinkBwMode: *mut ::core::ffi::c_uint,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Set new power limit of this device.
+
+ For Kepler &tm; or newer fully supported devices.
+ Requires root/admin permissions.
+
+ See \ref nvmlDeviceGetPowerManagementLimitConstraints to check the allowed ranges of values.
+
+ See \ref nvmlPowerValue_v2_t for more information on the struct.
+
+ \note Limit is not persistent across reboots or driver unloads.
+ Enable persistent mode to prevent driver from unloading when no application is using the device.
+
+ This API replaces nvmlDeviceSetPowerManagementLimit. It can be used as a drop-in replacement for the older version.
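+
+ A hedged sketch of a call through the versioned struct; the field and constant names follow nvml.h and should be
+ treated as assumptions, and the 250 W value is purely illustrative and must lie within the limits reported by
+ \ref nvmlDeviceGetPowerManagementLimitConstraints:
+ \code
+ nvmlPowerValue_v2_t powerValue = { 0 };
+ powerValue.version = nvmlPowerValue_v2;       // version constant from nvml.h (assumed name)
+ powerValue.powerScope = NVML_POWER_SCOPE_GPU; // per-GPU limit (assumed enum name)
+ powerValue.powerValueMw = 250000;             // 250 W expressed in milliwatts
+ nvmlReturn_t r = nvmlDeviceSetPowerManagementLimit_v2(device, &powerValue);
+ \endcode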
+
+ @param device The identifier of the target device
+ @param powerValue Power management limit in milliwatts to set
+
+ @return
+ - \ref NVML_SUCCESS if \a limit has been set
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a powerValue is NULL or contains invalid values
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error
+
+ @see NVML_FI_DEV_POWER_AVERAGE
+ @see NVML_FI_DEV_POWER_INSTANT
+ @see NVML_FI_DEV_POWER_MIN_LIMIT
+ @see NVML_FI_DEV_POWER_MAX_LIMIT
+ @see NVML_FI_DEV_POWER_CURRENT_LIMIT*/
+ fn nvmlDeviceSetPowerManagementLimit_v2(
+ device: cuda_types::nvml::nvmlDevice_t,
+ powerValue: *mut cuda_types::nvml::nvmlPowerValue_v2_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+ #[must_use]
+ /** Get SRAM ECC error status of this device.
+
+ For Ampere &tm; or newer fully supported devices.
+ Requires root/admin permissions.
+
+ See \ref nvmlEccSramErrorStatus_v1_t for more information on the struct.
+
+ @param device The identifier of the target device
+ @param status Returns SRAM ECC error status
+
+ @return
+ - \ref NVML_SUCCESS if \a status has been populated
+ - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a status is NULL
+ - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ - \ref NVML_ERROR_VERSION_MISMATCH if the version of \a nvmlEccSramErrorStatus_t is invalid
+ - \ref NVML_ERROR_UNKNOWN on any unexpected error*/
+ fn nvmlDeviceGetSramEccErrorStatus(
+ device: cuda_types::nvml::nvmlDevice_t,
+ status: *mut cuda_types::nvml::nvmlEccSramErrorStatus_t,
+ ) -> cuda_types::nvml::nvmlReturn_t;
+}