Diffstat (limited to 'include/vulkan/vulkan_beta.h')
-rw-r--r--  include/vulkan/vulkan_beta.h  113
1 file changed, 113 insertions, 0 deletions
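
The hunk below adds the provisional VK_AMDX_shader_enqueue (shader work graph) extension to the beta header. Before using any of the declarations it introduces, an application would normally confirm that the physical device reports the shaderEnqueue feature. The following is a minimal sketch of that check, not part of the patch: it assumes a VkPhysicalDevice obtained elsewhere, and it relies on vkGetPhysicalDeviceFeatures2 and the VK_STRUCTURE_TYPE_* constants from vulkan_core.h rather than on anything defined in this file.

/* Sketch only: VK_ENABLE_BETA_EXTENSIONS must be defined before including
 * vulkan.h so that vulkan_beta.h (this file) is pulled in. */
#define VK_ENABLE_BETA_EXTENSIONS
#include <vulkan/vulkan.h>
#include <stdbool.h>

static bool supports_shader_enqueue(VkPhysicalDevice physicalDevice)
{
    /* Chain the AMDX feature struct into the core features2 query. */
    VkPhysicalDeviceShaderEnqueueFeaturesAMDX enqueueFeatures = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ENQUEUE_FEATURES_AMDX,
    };
    VkPhysicalDeviceFeatures2 features2 = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
        .pNext = &enqueueFeatures,
    };
    vkGetPhysicalDeviceFeatures2(physicalDevice, &features2);
    return enqueueFeatures.shaderEnqueue == VK_TRUE;
}

The same feature struct, with shaderEnqueue set to VK_TRUE, would then be chained into VkDeviceCreateInfo::pNext, and VK_AMDX_SHADER_ENQUEUE_EXTENSION_NAME added to ppEnabledExtensionNames, when creating the device.
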
diff --git a/include/vulkan/vulkan_beta.h b/include/vulkan/vulkan_beta.h
index 75fabd4..f00ed3f 100644
--- a/include/vulkan/vulkan_beta.h
+++ b/include/vulkan/vulkan_beta.h
@@ -643,6 +643,119 @@ typedef struct VkVideoEncodeH265GopRemainingFrameInfoEXT {
+// VK_AMDX_shader_enqueue is a preprocessor guard. Do not pass it to API calls.
+#define VK_AMDX_shader_enqueue 1
+#define VK_AMDX_SHADER_ENQUEUE_SPEC_VERSION 1
+#define VK_AMDX_SHADER_ENQUEUE_EXTENSION_NAME "VK_AMDX_shader_enqueue"
+#define VK_SHADER_INDEX_UNUSED_AMDX (~0U)
+typedef struct VkPhysicalDeviceShaderEnqueueFeaturesAMDX {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 shaderEnqueue;
+} VkPhysicalDeviceShaderEnqueueFeaturesAMDX;
+
+typedef struct VkPhysicalDeviceShaderEnqueuePropertiesAMDX {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t maxExecutionGraphDepth;
+ uint32_t maxExecutionGraphShaderOutputNodes;
+ uint32_t maxExecutionGraphShaderPayloadSize;
+ uint32_t maxExecutionGraphShaderPayloadCount;
+ uint32_t executionGraphDispatchAddressAlignment;
+} VkPhysicalDeviceShaderEnqueuePropertiesAMDX;
+
+typedef struct VkExecutionGraphPipelineScratchSizeAMDX {
+ VkStructureType sType;
+ void* pNext;
+ VkDeviceSize size;
+} VkExecutionGraphPipelineScratchSizeAMDX;
+
+typedef struct VkExecutionGraphPipelineCreateInfoAMDX {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineCreateFlags flags;
+ uint32_t stageCount;
+ const VkPipelineShaderStageCreateInfo* pStages;
+ const VkPipelineLibraryCreateInfoKHR* pLibraryInfo;
+ VkPipelineLayout layout;
+ VkPipeline basePipelineHandle;
+ int32_t basePipelineIndex;
+} VkExecutionGraphPipelineCreateInfoAMDX;
+
+typedef union VkDeviceOrHostAddressConstAMDX {
+ VkDeviceAddress deviceAddress;
+ const void* hostAddress;
+} VkDeviceOrHostAddressConstAMDX;
+
+typedef struct VkDispatchGraphInfoAMDX {
+ uint32_t nodeIndex;
+ uint32_t payloadCount;
+ VkDeviceOrHostAddressConstAMDX payloads;
+ uint64_t payloadStride;
+} VkDispatchGraphInfoAMDX;
+
+typedef struct VkDispatchGraphCountInfoAMDX {
+ uint32_t count;
+ VkDeviceOrHostAddressConstAMDX infos;
+ uint64_t stride;
+} VkDispatchGraphCountInfoAMDX;
+
+typedef struct VkPipelineShaderStageNodeCreateInfoAMDX {
+ VkStructureType sType;
+ const void* pNext;
+ const char* pName;
+ uint32_t index;
+} VkPipelineShaderStageNodeCreateInfoAMDX;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateExecutionGraphPipelinesAMDX)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkExecutionGraphPipelineCreateInfoAMDX* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);
+typedef VkResult (VKAPI_PTR *PFN_vkGetExecutionGraphPipelineScratchSizeAMDX)(VkDevice device, VkPipeline executionGraph, VkExecutionGraphPipelineScratchSizeAMDX* pSizeInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkGetExecutionGraphPipelineNodeIndexAMDX)(VkDevice device, VkPipeline executionGraph, const VkPipelineShaderStageNodeCreateInfoAMDX* pNodeInfo, uint32_t* pNodeIndex);
+typedef void (VKAPI_PTR *PFN_vkCmdInitializeGraphScratchMemoryAMDX)(VkCommandBuffer commandBuffer, VkDeviceAddress scratch);
+typedef void (VKAPI_PTR *PFN_vkCmdDispatchGraphAMDX)(VkCommandBuffer commandBuffer, VkDeviceAddress scratch, const VkDispatchGraphCountInfoAMDX* pCountInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdDispatchGraphIndirectAMDX)(VkCommandBuffer commandBuffer, VkDeviceAddress scratch, const VkDispatchGraphCountInfoAMDX* pCountInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdDispatchGraphIndirectCountAMDX)(VkCommandBuffer commandBuffer, VkDeviceAddress scratch, VkDeviceAddress countInfo);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateExecutionGraphPipelinesAMDX(
+ VkDevice device,
+ VkPipelineCache pipelineCache,
+ uint32_t createInfoCount,
+ const VkExecutionGraphPipelineCreateInfoAMDX* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator,
+ VkPipeline* pPipelines);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetExecutionGraphPipelineScratchSizeAMDX(
+ VkDevice device,
+ VkPipeline executionGraph,
+ VkExecutionGraphPipelineScratchSizeAMDX* pSizeInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetExecutionGraphPipelineNodeIndexAMDX(
+ VkDevice device,
+ VkPipeline executionGraph,
+ const VkPipelineShaderStageNodeCreateInfoAMDX* pNodeInfo,
+ uint32_t* pNodeIndex);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdInitializeGraphScratchMemoryAMDX(
+ VkCommandBuffer commandBuffer,
+ VkDeviceAddress scratch);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDispatchGraphAMDX(
+ VkCommandBuffer commandBuffer,
+ VkDeviceAddress scratch,
+ const VkDispatchGraphCountInfoAMDX* pCountInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDispatchGraphIndirectAMDX(
+ VkCommandBuffer commandBuffer,
+ VkDeviceAddress scratch,
+ const VkDispatchGraphCountInfoAMDX* pCountInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDispatchGraphIndirectCountAMDX(
+ VkCommandBuffer commandBuffer,
+ VkDeviceAddress scratch,
+ VkDeviceAddress countInfo);
+#endif
+
+
// VK_NV_displacement_micromap is a preprocessor guard. Do not pass it to API calls.
#define VK_NV_displacement_micromap 1
#define VK_NV_DISPLACEMENT_MICROMAP_SPEC_VERSION 2
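
To show how the newly added declarations fit together, here is a minimal usage sketch, not part of the patch and not a complete program: the VkDevice, command buffer, node shader stages, pipeline layout, and the device address of a suitably sized scratch buffer are assumed to exist; the entry-point name "main" and the three-word payload layout are placeholder assumptions; and the bind point and VK_STRUCTURE_TYPE_* constants come from vulkan_core.h rather than from this hunk.

#define VK_ENABLE_BETA_EXTENSIONS
#include <vulkan/vulkan.h>

static VkResult run_small_graph(VkDevice device,
                                VkCommandBuffer cmd,
                                uint32_t stageCount,
                                const VkPipelineShaderStageCreateInfo* pStages,
                                VkPipelineLayout layout,
                                VkDeviceAddress scratch)  /* assumed: address of a large-enough scratch buffer */
{
    /* 1. Build an execution graph pipeline from the supplied node shaders. */
    VkExecutionGraphPipelineCreateInfoAMDX createInfo = {
        .sType             = VK_STRUCTURE_TYPE_EXECUTION_GRAPH_PIPELINE_CREATE_INFO_AMDX,
        .stageCount        = stageCount,
        .pStages           = pStages,
        .layout            = layout,
        .basePipelineIndex = -1,
    };
    VkPipeline graph = VK_NULL_HANDLE;
    VkResult res = vkCreateExecutionGraphPipelinesAMDX(device, VK_NULL_HANDLE, 1,
                                                       &createInfo, NULL, &graph);
    if (res != VK_SUCCESS)
        return res;

    /* 2. Query the scratch requirement; the buffer behind `scratch` must be at
     *    least this large and respect executionGraphDispatchAddressAlignment. */
    VkExecutionGraphPipelineScratchSizeAMDX scratchSize = {
        .sType = VK_STRUCTURE_TYPE_EXECUTION_GRAPH_PIPELINE_SCRATCH_SIZE_AMDX,
    };
    res = vkGetExecutionGraphPipelineScratchSizeAMDX(device, graph, &scratchSize);
    if (res != VK_SUCCESS)
        return res;
    (void)scratchSize.size; /* in real code: allocate or validate the scratch buffer here */

    /* 3. Resolve the index of the entry node we want to launch by name. */
    VkPipelineShaderStageNodeCreateInfoAMDX nodeInfo = {
        .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_NODE_CREATE_INFO_AMDX,
        .pName = "main",                        /* assumed entry-point name */
        .index = VK_SHADER_INDEX_UNUSED_AMDX,
    };
    uint32_t nodeIndex = 0;
    res = vkGetExecutionGraphPipelineNodeIndexAMDX(device, graph, &nodeInfo, &nodeIndex);
    if (res != VK_SUCCESS)
        return res;

    /* 4. Record the dispatch: bind the graph, initialize its scratch memory once,
     *    then launch a single payload at the chosen node. The non-indirect
     *    vkCmdDispatchGraphAMDX reads the count info and payloads through host
     *    addresses. */
    uint32_t payload[3] = { 1, 1, 1 };          /* assumed payload layout of the node */
    VkDispatchGraphInfoAMDX dispatchInfo = {
        .nodeIndex     = nodeIndex,
        .payloadCount  = 1,
        .payloads      = { .hostAddress = payload },
        .payloadStride = sizeof(payload),
    };
    VkDispatchGraphCountInfoAMDX countInfo = {
        .count  = 1,
        .infos  = { .hostAddress = &dispatchInfo },
        .stride = sizeof(dispatchInfo),
    };
    vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_EXECUTION_GRAPH_AMDX, graph);
    vkCmdInitializeGraphScratchMemoryAMDX(cmd, scratch);
    vkCmdDispatchGraphAMDX(cmd, scratch, &countInfo);
    return VK_SUCCESS;
}

Beta extension commands are not exported by the loader, so a real application would fetch these entry points with vkGetDeviceProcAddr after creating a device with VK_AMDX_shader_enqueue enabled (the prototypes above are only declared when VK_NO_PROTOTYPES is not defined), and would destroy the pipeline with vkDestroyPipeline once the dispatched work has completed.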