add missing Filament headers for Windows

Nick Fisher
2025-04-02 22:15:11 +08:00
parent a8cf071f2f
commit 508c184f1a
204 changed files with 53591 additions and 20074 deletions

@@ -39,7 +39,7 @@ add_definitions(-DUNICODE -D_UNICODE)
# of modifying this function.
function(APPLY_STANDARD_SETTINGS TARGET)
target_compile_features(${TARGET} PUBLIC cxx_std_17)
-  target_compile_options(${TARGET} PRIVATE /W4 /WX /wd"4100")
+  target_compile_options(${TARGET} PRIVATE /W4 /wd"4100")
target_compile_options(${TARGET} PRIVATE /EHsc)
target_compile_definitions(${TARGET} PRIVATE "_HAS_EXCEPTIONS=0")
target_compile_definitions(${TARGET} PRIVATE "$<$<CONFIG:Debug>:_DEBUG>")

@@ -0,0 +1,994 @@
/*
* Copyright (C) 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**********************************************************************************************
* Generated by bluevk/bluevk-gen.py
* DO NOT EDIT
**********************************************************************************************/
#ifndef TNT_FILAMENT_BLUEVK_H
#define TNT_FILAMENT_BLUEVK_H
#define VK_ENABLE_BETA_EXTENSIONS
// BlueVK dynamically loads all function pointers, so it cannot allow function prototypes, which
// would assume static linking for Vulkan entry points.
#if defined(VULKAN_H_) && !defined(VK_NO_PROTOTYPES)
#error Please do not include vulkan.h when using BlueVK
#endif
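// Illustrative note (not part of the generated interface; createInfo and
// instance are placeholder variables): with prototypes disabled, every Vulkan
// entry point declared below is an ordinary function pointer owned by this
// namespace, so a call such as
//     bluevk::vkCreateInstance(&createInfo, nullptr, &instance);
// dispatches through the pointer loaded at runtime rather than through a
// statically linked symbol.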
// Even though we don't use function prototypes, we still need to include vulkan.h for all Vulkan
// types, including the PFN_ types.
#ifndef VULKAN_H_
#ifndef VK_NO_PROTOTYPES
#define VK_NO_PROTOTYPES
#endif
#if defined(__ANDROID__)
#define VK_USE_PLATFORM_ANDROID_KHR 1
#elif defined(FILAMENT_IOS)
#define VK_USE_PLATFORM_IOS_MVK 1
#elif defined(__linux__)
#if defined(FILAMENT_SUPPORTS_XCB)
#define VK_USE_PLATFORM_XCB_KHR 1
#endif
#if defined(FILAMENT_SUPPORTS_XLIB)
#define VK_USE_PLATFORM_XLIB_KHR 1
#endif
#if defined(FILAMENT_SUPPORTS_WAYLAND)
#define VK_USE_PLATFORM_WAYLAND_KHR 1
#endif
#elif defined(__APPLE__)
#define VK_USE_PLATFORM_MACOS_MVK 1
#elif defined(WIN32)
#define VK_USE_PLATFORM_WIN32_KHR 1
#endif
#include <vulkan/vulkan.h>
#endif
#include <utils/unwindows.h>
namespace bluevk {
// Returns false if BlueVK could not find the Vulkan shared library.
bool initialize();
void bindInstance(VkInstance instance);
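// A minimal usage sketch (illustrative only; createInfo is a placeholder and
// error handling is omitted). It assumes initialize() loads the library and
// the global entry points such as vkCreateInstance, and that bindInstance()
// then resolves the remaining instance- and device-level pointers:
//
//     if (!bluevk::initialize()) { /* Vulkan shared library not found */ }
//     VkInstance instance = VK_NULL_HANDLE;
//     bluevk::vkCreateInstance(&createInfo, nullptr, &instance);
//     bluevk::bindInstance(instance);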
#if defined(VK_VERSION_1_0)
extern PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers;
extern PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets;
extern PFN_vkAllocateMemory vkAllocateMemory;
extern PFN_vkBeginCommandBuffer vkBeginCommandBuffer;
extern PFN_vkBindBufferMemory vkBindBufferMemory;
extern PFN_vkBindImageMemory vkBindImageMemory;
extern PFN_vkCmdBeginQuery vkCmdBeginQuery;
extern PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass;
extern PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets;
extern PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer;
extern PFN_vkCmdBindPipeline vkCmdBindPipeline;
extern PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers;
extern PFN_vkCmdBlitImage vkCmdBlitImage;
extern PFN_vkCmdClearAttachments vkCmdClearAttachments;
extern PFN_vkCmdClearColorImage vkCmdClearColorImage;
extern PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage;
extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
extern PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage;
extern PFN_vkCmdCopyImage vkCmdCopyImage;
extern PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer;
extern PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults;
extern PFN_vkCmdDispatch vkCmdDispatch;
extern PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect;
extern PFN_vkCmdDraw vkCmdDraw;
extern PFN_vkCmdDrawIndexed vkCmdDrawIndexed;
extern PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect;
extern PFN_vkCmdDrawIndirect vkCmdDrawIndirect;
extern PFN_vkCmdEndQuery vkCmdEndQuery;
extern PFN_vkCmdEndRenderPass vkCmdEndRenderPass;
extern PFN_vkCmdExecuteCommands vkCmdExecuteCommands;
extern PFN_vkCmdFillBuffer vkCmdFillBuffer;
extern PFN_vkCmdNextSubpass vkCmdNextSubpass;
extern PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;
extern PFN_vkCmdPushConstants vkCmdPushConstants;
extern PFN_vkCmdResetEvent vkCmdResetEvent;
extern PFN_vkCmdResetQueryPool vkCmdResetQueryPool;
extern PFN_vkCmdResolveImage vkCmdResolveImage;
extern PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants;
extern PFN_vkCmdSetDepthBias vkCmdSetDepthBias;
extern PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds;
extern PFN_vkCmdSetEvent vkCmdSetEvent;
extern PFN_vkCmdSetLineWidth vkCmdSetLineWidth;
extern PFN_vkCmdSetScissor vkCmdSetScissor;
extern PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask;
extern PFN_vkCmdSetStencilReference vkCmdSetStencilReference;
extern PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask;
extern PFN_vkCmdSetViewport vkCmdSetViewport;
extern PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer;
extern PFN_vkCmdWaitEvents vkCmdWaitEvents;
extern PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp;
extern PFN_vkCreateBuffer vkCreateBuffer;
extern PFN_vkCreateBufferView vkCreateBufferView;
extern PFN_vkCreateCommandPool vkCreateCommandPool;
extern PFN_vkCreateComputePipelines vkCreateComputePipelines;
extern PFN_vkCreateDescriptorPool vkCreateDescriptorPool;
extern PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout;
extern PFN_vkCreateDevice vkCreateDevice;
extern PFN_vkCreateEvent vkCreateEvent;
extern PFN_vkCreateFence vkCreateFence;
extern PFN_vkCreateFramebuffer vkCreateFramebuffer;
extern PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines;
extern PFN_vkCreateImage vkCreateImage;
extern PFN_vkCreateImageView vkCreateImageView;
extern PFN_vkCreateInstance vkCreateInstance;
extern PFN_vkCreatePipelineCache vkCreatePipelineCache;
extern PFN_vkCreatePipelineLayout vkCreatePipelineLayout;
extern PFN_vkCreateQueryPool vkCreateQueryPool;
extern PFN_vkCreateRenderPass vkCreateRenderPass;
extern PFN_vkCreateSampler vkCreateSampler;
extern PFN_vkCreateSemaphore vkCreateSemaphore;
extern PFN_vkCreateShaderModule vkCreateShaderModule;
extern PFN_vkDestroyBuffer vkDestroyBuffer;
extern PFN_vkDestroyBufferView vkDestroyBufferView;
extern PFN_vkDestroyCommandPool vkDestroyCommandPool;
extern PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool;
extern PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout;
extern PFN_vkDestroyDevice vkDestroyDevice;
extern PFN_vkDestroyEvent vkDestroyEvent;
extern PFN_vkDestroyFence vkDestroyFence;
extern PFN_vkDestroyFramebuffer vkDestroyFramebuffer;
extern PFN_vkDestroyImage vkDestroyImage;
extern PFN_vkDestroyImageView vkDestroyImageView;
extern PFN_vkDestroyInstance vkDestroyInstance;
extern PFN_vkDestroyPipeline vkDestroyPipeline;
extern PFN_vkDestroyPipelineCache vkDestroyPipelineCache;
extern PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout;
extern PFN_vkDestroyQueryPool vkDestroyQueryPool;
extern PFN_vkDestroyRenderPass vkDestroyRenderPass;
extern PFN_vkDestroySampler vkDestroySampler;
extern PFN_vkDestroySemaphore vkDestroySemaphore;
extern PFN_vkDestroyShaderModule vkDestroyShaderModule;
extern PFN_vkDeviceWaitIdle vkDeviceWaitIdle;
extern PFN_vkEndCommandBuffer vkEndCommandBuffer;
extern PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties;
extern PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties;
extern PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties;
extern PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties;
extern PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices;
extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
extern PFN_vkFreeCommandBuffers vkFreeCommandBuffers;
extern PFN_vkFreeDescriptorSets vkFreeDescriptorSets;
extern PFN_vkFreeMemory vkFreeMemory;
extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
extern PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment;
extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
extern PFN_vkGetDeviceQueue vkGetDeviceQueue;
extern PFN_vkGetEventStatus vkGetEventStatus;
extern PFN_vkGetFenceStatus vkGetFenceStatus;
extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
extern PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements;
extern PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout;
extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
extern PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures;
extern PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties;
extern PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties;
extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
extern PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties;
extern PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties;
extern PFN_vkGetPipelineCacheData vkGetPipelineCacheData;
extern PFN_vkGetQueryPoolResults vkGetQueryPoolResults;
extern PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity;
extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
extern PFN_vkMapMemory vkMapMemory;
extern PFN_vkMergePipelineCaches vkMergePipelineCaches;
extern PFN_vkQueueBindSparse vkQueueBindSparse;
extern PFN_vkQueueSubmit vkQueueSubmit;
extern PFN_vkQueueWaitIdle vkQueueWaitIdle;
extern PFN_vkResetCommandBuffer vkResetCommandBuffer;
extern PFN_vkResetCommandPool vkResetCommandPool;
extern PFN_vkResetDescriptorPool vkResetDescriptorPool;
extern PFN_vkResetEvent vkResetEvent;
extern PFN_vkResetFences vkResetFences;
extern PFN_vkSetEvent vkSetEvent;
extern PFN_vkUnmapMemory vkUnmapMemory;
extern PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets;
extern PFN_vkWaitForFences vkWaitForFences;
#endif // defined(VK_VERSION_1_0)
#if defined(VK_VERSION_1_1)
extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
extern PFN_vkBindImageMemory2 vkBindImageMemory2;
extern PFN_vkCmdDispatchBase vkCmdDispatchBase;
extern PFN_vkCmdSetDeviceMask vkCmdSetDeviceMask;
extern PFN_vkCreateDescriptorUpdateTemplate vkCreateDescriptorUpdateTemplate;
extern PFN_vkCreateSamplerYcbcrConversion vkCreateSamplerYcbcrConversion;
extern PFN_vkDestroyDescriptorUpdateTemplate vkDestroyDescriptorUpdateTemplate;
extern PFN_vkDestroySamplerYcbcrConversion vkDestroySamplerYcbcrConversion;
extern PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion;
extern PFN_vkEnumeratePhysicalDeviceGroups vkEnumeratePhysicalDeviceGroups;
extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
extern PFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport;
extern PFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures;
extern PFN_vkGetDeviceQueue2 vkGetDeviceQueue2;
extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
extern PFN_vkGetImageSparseMemoryRequirements2 vkGetImageSparseMemoryRequirements2;
extern PFN_vkGetPhysicalDeviceExternalBufferProperties vkGetPhysicalDeviceExternalBufferProperties;
extern PFN_vkGetPhysicalDeviceExternalFenceProperties vkGetPhysicalDeviceExternalFenceProperties;
extern PFN_vkGetPhysicalDeviceExternalSemaphoreProperties vkGetPhysicalDeviceExternalSemaphoreProperties;
extern PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2;
extern PFN_vkGetPhysicalDeviceFormatProperties2 vkGetPhysicalDeviceFormatProperties2;
extern PFN_vkGetPhysicalDeviceImageFormatProperties2 vkGetPhysicalDeviceImageFormatProperties2;
extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
extern PFN_vkGetPhysicalDeviceProperties2 vkGetPhysicalDeviceProperties2;
extern PFN_vkGetPhysicalDeviceQueueFamilyProperties2 vkGetPhysicalDeviceQueueFamilyProperties2;
extern PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 vkGetPhysicalDeviceSparseImageFormatProperties2;
extern PFN_vkTrimCommandPool vkTrimCommandPool;
extern PFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate;
#endif // defined(VK_VERSION_1_1)
#if defined(VK_VERSION_1_2)
extern PFN_vkCmdBeginRenderPass2 vkCmdBeginRenderPass2;
extern PFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount;
extern PFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount;
extern PFN_vkCmdEndRenderPass2 vkCmdEndRenderPass2;
extern PFN_vkCmdNextSubpass2 vkCmdNextSubpass2;
extern PFN_vkCreateRenderPass2 vkCreateRenderPass2;
extern PFN_vkGetBufferDeviceAddress vkGetBufferDeviceAddress;
extern PFN_vkGetBufferOpaqueCaptureAddress vkGetBufferOpaqueCaptureAddress;
extern PFN_vkGetDeviceMemoryOpaqueCaptureAddress vkGetDeviceMemoryOpaqueCaptureAddress;
extern PFN_vkGetSemaphoreCounterValue vkGetSemaphoreCounterValue;
extern PFN_vkResetQueryPool vkResetQueryPool;
extern PFN_vkSignalSemaphore vkSignalSemaphore;
extern PFN_vkWaitSemaphores vkWaitSemaphores;
#endif // defined(VK_VERSION_1_2)
#if defined(VK_VERSION_1_3)
extern PFN_vkCmdBeginRendering vkCmdBeginRendering;
extern PFN_vkCmdBindVertexBuffers2 vkCmdBindVertexBuffers2;
extern PFN_vkCmdBlitImage2 vkCmdBlitImage2;
extern PFN_vkCmdCopyBuffer2 vkCmdCopyBuffer2;
extern PFN_vkCmdCopyBufferToImage2 vkCmdCopyBufferToImage2;
extern PFN_vkCmdCopyImage2 vkCmdCopyImage2;
extern PFN_vkCmdCopyImageToBuffer2 vkCmdCopyImageToBuffer2;
extern PFN_vkCmdEndRendering vkCmdEndRendering;
extern PFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2;
extern PFN_vkCmdResetEvent2 vkCmdResetEvent2;
extern PFN_vkCmdResolveImage2 vkCmdResolveImage2;
extern PFN_vkCmdSetCullMode vkCmdSetCullMode;
extern PFN_vkCmdSetDepthBiasEnable vkCmdSetDepthBiasEnable;
extern PFN_vkCmdSetDepthBoundsTestEnable vkCmdSetDepthBoundsTestEnable;
extern PFN_vkCmdSetDepthCompareOp vkCmdSetDepthCompareOp;
extern PFN_vkCmdSetDepthTestEnable vkCmdSetDepthTestEnable;
extern PFN_vkCmdSetDepthWriteEnable vkCmdSetDepthWriteEnable;
extern PFN_vkCmdSetEvent2 vkCmdSetEvent2;
extern PFN_vkCmdSetFrontFace vkCmdSetFrontFace;
extern PFN_vkCmdSetPrimitiveRestartEnable vkCmdSetPrimitiveRestartEnable;
extern PFN_vkCmdSetPrimitiveTopology vkCmdSetPrimitiveTopology;
extern PFN_vkCmdSetRasterizerDiscardEnable vkCmdSetRasterizerDiscardEnable;
extern PFN_vkCmdSetScissorWithCount vkCmdSetScissorWithCount;
extern PFN_vkCmdSetStencilOp vkCmdSetStencilOp;
extern PFN_vkCmdSetStencilTestEnable vkCmdSetStencilTestEnable;
extern PFN_vkCmdSetViewportWithCount vkCmdSetViewportWithCount;
extern PFN_vkCmdWaitEvents2 vkCmdWaitEvents2;
extern PFN_vkCmdWriteTimestamp2 vkCmdWriteTimestamp2;
extern PFN_vkCreatePrivateDataSlot vkCreatePrivateDataSlot;
extern PFN_vkDestroyPrivateDataSlot vkDestroyPrivateDataSlot;
extern PFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements;
extern PFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements;
extern PFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements;
extern PFN_vkGetPhysicalDeviceToolProperties vkGetPhysicalDeviceToolProperties;
extern PFN_vkGetPrivateData vkGetPrivateData;
extern PFN_vkQueueSubmit2 vkQueueSubmit2;
extern PFN_vkSetPrivateData vkSetPrivateData;
#endif // defined(VK_VERSION_1_3)
#if defined(VK_AMD_buffer_marker)
extern PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD;
#endif // defined(VK_AMD_buffer_marker)
#if defined(VK_AMD_display_native_hdr)
extern PFN_vkSetLocalDimmingAMD vkSetLocalDimmingAMD;
#endif // defined(VK_AMD_display_native_hdr)
#if defined(VK_AMD_draw_indirect_count)
extern PFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD;
extern PFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD;
#endif // defined(VK_AMD_draw_indirect_count)
#if defined(VK_AMD_shader_info)
extern PFN_vkGetShaderInfoAMD vkGetShaderInfoAMD;
#endif // defined(VK_AMD_shader_info)
#if defined(VK_ANDROID_external_memory_android_hardware_buffer)
extern PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID;
extern PFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID;
#endif // defined(VK_ANDROID_external_memory_android_hardware_buffer)
#if defined(VK_ANDROID_native_buffer)
extern PFN_vkAcquireImageANDROID vkAcquireImageANDROID;
extern PFN_vkGetSwapchainGrallocUsage2ANDROID vkGetSwapchainGrallocUsage2ANDROID;
extern PFN_vkGetSwapchainGrallocUsageANDROID vkGetSwapchainGrallocUsageANDROID;
extern PFN_vkQueueSignalReleaseImageANDROID vkQueueSignalReleaseImageANDROID;
#endif // defined(VK_ANDROID_native_buffer)
#if defined(VK_EXT_acquire_drm_display)
extern PFN_vkAcquireDrmDisplayEXT vkAcquireDrmDisplayEXT;
extern PFN_vkGetDrmDisplayEXT vkGetDrmDisplayEXT;
#endif // defined(VK_EXT_acquire_drm_display)
#if defined(VK_EXT_acquire_xlib_display)
extern PFN_vkAcquireXlibDisplayEXT vkAcquireXlibDisplayEXT;
extern PFN_vkGetRandROutputDisplayEXT vkGetRandROutputDisplayEXT;
#endif // defined(VK_EXT_acquire_xlib_display)
#if defined(VK_EXT_buffer_device_address)
extern PFN_vkGetBufferDeviceAddressEXT vkGetBufferDeviceAddressEXT;
#endif // defined(VK_EXT_buffer_device_address)
#if defined(VK_EXT_calibrated_timestamps)
extern PFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT;
extern PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT vkGetPhysicalDeviceCalibrateableTimeDomainsEXT;
#endif // defined(VK_EXT_calibrated_timestamps)
#if defined(VK_EXT_color_write_enable)
extern PFN_vkCmdSetColorWriteEnableEXT vkCmdSetColorWriteEnableEXT;
#endif // defined(VK_EXT_color_write_enable)
#if defined(VK_EXT_conditional_rendering)
extern PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT;
extern PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT;
#endif // defined(VK_EXT_conditional_rendering)
#if defined(VK_EXT_debug_marker)
extern PFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT;
extern PFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT;
extern PFN_vkCmdDebugMarkerInsertEXT vkCmdDebugMarkerInsertEXT;
extern PFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT;
extern PFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT;
#endif // defined(VK_EXT_debug_marker)
#if defined(VK_EXT_debug_report)
extern PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT;
extern PFN_vkDebugReportMessageEXT vkDebugReportMessageEXT;
extern PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT;
#endif // defined(VK_EXT_debug_report)
#if defined(VK_EXT_debug_utils)
extern PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT;
extern PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT;
extern PFN_vkCmdInsertDebugUtilsLabelEXT vkCmdInsertDebugUtilsLabelEXT;
extern PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT;
extern PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT;
extern PFN_vkQueueBeginDebugUtilsLabelEXT vkQueueBeginDebugUtilsLabelEXT;
extern PFN_vkQueueEndDebugUtilsLabelEXT vkQueueEndDebugUtilsLabelEXT;
extern PFN_vkQueueInsertDebugUtilsLabelEXT vkQueueInsertDebugUtilsLabelEXT;
extern PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectNameEXT;
extern PFN_vkSetDebugUtilsObjectTagEXT vkSetDebugUtilsObjectTagEXT;
extern PFN_vkSubmitDebugUtilsMessageEXT vkSubmitDebugUtilsMessageEXT;
#endif // defined(VK_EXT_debug_utils)
#if defined(VK_EXT_direct_mode_display)
extern PFN_vkReleaseDisplayEXT vkReleaseDisplayEXT;
#endif // defined(VK_EXT_direct_mode_display)
#if defined(VK_EXT_directfb_surface)
extern PFN_vkCreateDirectFBSurfaceEXT vkCreateDirectFBSurfaceEXT;
extern PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT vkGetPhysicalDeviceDirectFBPresentationSupportEXT;
#endif // defined(VK_EXT_directfb_surface)
#if defined(VK_EXT_discard_rectangles)
extern PFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT;
#endif // defined(VK_EXT_discard_rectangles)
#if defined(VK_EXT_display_control)
extern PFN_vkDisplayPowerControlEXT vkDisplayPowerControlEXT;
extern PFN_vkGetSwapchainCounterEXT vkGetSwapchainCounterEXT;
extern PFN_vkRegisterDeviceEventEXT vkRegisterDeviceEventEXT;
extern PFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT;
#endif // defined(VK_EXT_display_control)
#if defined(VK_EXT_display_surface_counter)
extern PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT vkGetPhysicalDeviceSurfaceCapabilities2EXT;
#endif // defined(VK_EXT_display_surface_counter)
#if defined(VK_EXT_extended_dynamic_state)
extern PFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT;
extern PFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT;
extern PFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT;
extern PFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT;
extern PFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT;
extern PFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT;
extern PFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT;
extern PFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT;
extern PFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT;
extern PFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT;
extern PFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT;
extern PFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT;
#endif // defined(VK_EXT_extended_dynamic_state)
#if defined(VK_EXT_extended_dynamic_state2)
extern PFN_vkCmdSetDepthBiasEnableEXT vkCmdSetDepthBiasEnableEXT;
extern PFN_vkCmdSetLogicOpEXT vkCmdSetLogicOpEXT;
extern PFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT;
extern PFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT;
extern PFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT;
#endif // defined(VK_EXT_extended_dynamic_state2)
#if defined(VK_EXT_external_memory_host)
extern PFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT;
#endif // defined(VK_EXT_external_memory_host)
#if defined(VK_EXT_full_screen_exclusive)
extern PFN_vkAcquireFullScreenExclusiveModeEXT vkAcquireFullScreenExclusiveModeEXT;
extern PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT vkGetPhysicalDeviceSurfacePresentModes2EXT;
extern PFN_vkReleaseFullScreenExclusiveModeEXT vkReleaseFullScreenExclusiveModeEXT;
#endif // defined(VK_EXT_full_screen_exclusive)
#if defined(VK_EXT_hdr_metadata)
extern PFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT;
#endif // defined(VK_EXT_hdr_metadata)
#if defined(VK_EXT_headless_surface)
extern PFN_vkCreateHeadlessSurfaceEXT vkCreateHeadlessSurfaceEXT;
#endif // defined(VK_EXT_headless_surface)
#if defined(VK_EXT_host_query_reset)
extern PFN_vkResetQueryPoolEXT vkResetQueryPoolEXT;
#endif // defined(VK_EXT_host_query_reset)
#if defined(VK_EXT_image_compression_control)
extern PFN_vkGetImageSubresourceLayout2EXT vkGetImageSubresourceLayout2EXT;
#endif // defined(VK_EXT_image_compression_control)
#if defined(VK_EXT_image_drm_format_modifier)
extern PFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT;
#endif // defined(VK_EXT_image_drm_format_modifier)
#if defined(VK_EXT_line_rasterization)
extern PFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT;
#endif // defined(VK_EXT_line_rasterization)
#if defined(VK_EXT_metal_objects)
extern PFN_vkExportMetalObjectsEXT vkExportMetalObjectsEXT;
#endif // defined(VK_EXT_metal_objects)
#if defined(VK_EXT_metal_surface)
extern PFN_vkCreateMetalSurfaceEXT vkCreateMetalSurfaceEXT;
#endif // defined(VK_EXT_metal_surface)
#if defined(VK_EXT_multi_draw)
extern PFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT;
extern PFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT;
#endif // defined(VK_EXT_multi_draw)
#if defined(VK_EXT_pageable_device_local_memory)
extern PFN_vkSetDeviceMemoryPriorityEXT vkSetDeviceMemoryPriorityEXT;
#endif // defined(VK_EXT_pageable_device_local_memory)
#if defined(VK_EXT_pipeline_properties)
extern PFN_vkGetPipelinePropertiesEXT vkGetPipelinePropertiesEXT;
#endif // defined(VK_EXT_pipeline_properties)
#if defined(VK_EXT_private_data)
extern PFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT;
extern PFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT;
extern PFN_vkGetPrivateDataEXT vkGetPrivateDataEXT;
extern PFN_vkSetPrivateDataEXT vkSetPrivateDataEXT;
#endif // defined(VK_EXT_private_data)
#if defined(VK_EXT_sample_locations)
extern PFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT;
extern PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT vkGetPhysicalDeviceMultisamplePropertiesEXT;
#endif // defined(VK_EXT_sample_locations)
#if defined(VK_EXT_tooling_info)
extern PFN_vkGetPhysicalDeviceToolPropertiesEXT vkGetPhysicalDeviceToolPropertiesEXT;
#endif // defined(VK_EXT_tooling_info)
#if defined(VK_EXT_transform_feedback)
extern PFN_vkCmdBeginQueryIndexedEXT vkCmdBeginQueryIndexedEXT;
extern PFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT;
extern PFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT;
extern PFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT;
extern PFN_vkCmdEndQueryIndexedEXT vkCmdEndQueryIndexedEXT;
extern PFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT;
#endif // defined(VK_EXT_transform_feedback)
#if defined(VK_EXT_validation_cache)
extern PFN_vkCreateValidationCacheEXT vkCreateValidationCacheEXT;
extern PFN_vkDestroyValidationCacheEXT vkDestroyValidationCacheEXT;
extern PFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT;
extern PFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT;
#endif // defined(VK_EXT_validation_cache)
#if defined(VK_EXT_vertex_input_dynamic_state)
extern PFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT;
#endif // defined(VK_EXT_vertex_input_dynamic_state)
#if defined(VK_FUCHSIA_buffer_collection)
extern PFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIA;
extern PFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIA;
extern PFN_vkGetBufferCollectionPropertiesFUCHSIA vkGetBufferCollectionPropertiesFUCHSIA;
extern PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA vkSetBufferCollectionBufferConstraintsFUCHSIA;
extern PFN_vkSetBufferCollectionImageConstraintsFUCHSIA vkSetBufferCollectionImageConstraintsFUCHSIA;
#endif // defined(VK_FUCHSIA_buffer_collection)
#if defined(VK_FUCHSIA_external_memory)
extern PFN_vkGetMemoryZirconHandleFUCHSIA vkGetMemoryZirconHandleFUCHSIA;
extern PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA;
#endif // defined(VK_FUCHSIA_external_memory)
#if defined(VK_FUCHSIA_external_semaphore)
extern PFN_vkGetSemaphoreZirconHandleFUCHSIA vkGetSemaphoreZirconHandleFUCHSIA;
extern PFN_vkImportSemaphoreZirconHandleFUCHSIA vkImportSemaphoreZirconHandleFUCHSIA;
#endif // defined(VK_FUCHSIA_external_semaphore)
#if defined(VK_FUCHSIA_imagepipe_surface)
extern PFN_vkCreateImagePipeSurfaceFUCHSIA vkCreateImagePipeSurfaceFUCHSIA;
#endif // defined(VK_FUCHSIA_imagepipe_surface)
#if defined(VK_GGP_stream_descriptor_surface)
extern PFN_vkCreateStreamDescriptorSurfaceGGP vkCreateStreamDescriptorSurfaceGGP;
#endif // defined(VK_GGP_stream_descriptor_surface)
#if defined(VK_GOOGLE_display_timing)
extern PFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE;
extern PFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE;
#endif // defined(VK_GOOGLE_display_timing)
#if defined(VK_HUAWEI_invocation_mask)
extern PFN_vkCmdBindInvocationMaskHUAWEI vkCmdBindInvocationMaskHUAWEI;
#endif // defined(VK_HUAWEI_invocation_mask)
#if defined(VK_HUAWEI_subpass_shading)
extern PFN_vkCmdSubpassShadingHUAWEI vkCmdSubpassShadingHUAWEI;
extern PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI;
#endif // defined(VK_HUAWEI_subpass_shading)
#if defined(VK_INTEL_performance_query)
extern PFN_vkAcquirePerformanceConfigurationINTEL vkAcquirePerformanceConfigurationINTEL;
extern PFN_vkCmdSetPerformanceMarkerINTEL vkCmdSetPerformanceMarkerINTEL;
extern PFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL;
extern PFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL;
extern PFN_vkGetPerformanceParameterINTEL vkGetPerformanceParameterINTEL;
extern PFN_vkInitializePerformanceApiINTEL vkInitializePerformanceApiINTEL;
extern PFN_vkQueueSetPerformanceConfigurationINTEL vkQueueSetPerformanceConfigurationINTEL;
extern PFN_vkReleasePerformanceConfigurationINTEL vkReleasePerformanceConfigurationINTEL;
extern PFN_vkUninitializePerformanceApiINTEL vkUninitializePerformanceApiINTEL;
#endif // defined(VK_INTEL_performance_query)
#if defined(VK_KHR_acceleration_structure)
extern PFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR;
extern PFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR;
extern PFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR;
extern PFN_vkCmdCopyAccelerationStructureKHR vkCmdCopyAccelerationStructureKHR;
extern PFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR;
extern PFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR;
extern PFN_vkCmdWriteAccelerationStructuresPropertiesKHR vkCmdWriteAccelerationStructuresPropertiesKHR;
extern PFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR;
extern PFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR;
extern PFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR;
extern PFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR;
extern PFN_vkDestroyAccelerationStructureKHR vkDestroyAccelerationStructureKHR;
extern PFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR;
extern PFN_vkGetAccelerationStructureDeviceAddressKHR vkGetAccelerationStructureDeviceAddressKHR;
extern PFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR;
extern PFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR;
#endif // defined(VK_KHR_acceleration_structure)
#if defined(VK_KHR_android_surface)
extern PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR;
#endif // defined(VK_KHR_android_surface)
#if defined(VK_KHR_bind_memory2)
extern PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
extern PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
#endif // defined(VK_KHR_bind_memory2)
#if defined(VK_KHR_buffer_device_address)
extern PFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR;
extern PFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR;
extern PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR;
#endif // defined(VK_KHR_buffer_device_address)
#if defined(VK_KHR_copy_commands2)
extern PFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR;
extern PFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR;
extern PFN_vkCmdCopyBufferToImage2KHR vkCmdCopyBufferToImage2KHR;
extern PFN_vkCmdCopyImage2KHR vkCmdCopyImage2KHR;
extern PFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR;
extern PFN_vkCmdResolveImage2KHR vkCmdResolveImage2KHR;
#endif // defined(VK_KHR_copy_commands2)
#if defined(VK_KHR_create_renderpass2)
extern PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR;
extern PFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR;
extern PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR;
extern PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR;
#endif // defined(VK_KHR_create_renderpass2)
#if defined(VK_KHR_deferred_host_operations)
extern PFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR;
extern PFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR;
extern PFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR;
extern PFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR;
extern PFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR;
#endif // defined(VK_KHR_deferred_host_operations)
#if defined(VK_KHR_descriptor_update_template)
extern PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR;
extern PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR;
extern PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR;
#endif // defined(VK_KHR_descriptor_update_template)
#if defined(VK_KHR_device_group)
extern PFN_vkCmdDispatchBaseKHR vkCmdDispatchBaseKHR;
extern PFN_vkCmdSetDeviceMaskKHR vkCmdSetDeviceMaskKHR;
extern PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR;
#endif // defined(VK_KHR_device_group)
#if defined(VK_KHR_device_group_creation)
extern PFN_vkEnumeratePhysicalDeviceGroupsKHR vkEnumeratePhysicalDeviceGroupsKHR;
#endif // defined(VK_KHR_device_group_creation)
#if defined(VK_KHR_display)
extern PFN_vkCreateDisplayModeKHR vkCreateDisplayModeKHR;
extern PFN_vkCreateDisplayPlaneSurfaceKHR vkCreateDisplayPlaneSurfaceKHR;
extern PFN_vkGetDisplayModePropertiesKHR vkGetDisplayModePropertiesKHR;
extern PFN_vkGetDisplayPlaneCapabilitiesKHR vkGetDisplayPlaneCapabilitiesKHR;
extern PFN_vkGetDisplayPlaneSupportedDisplaysKHR vkGetDisplayPlaneSupportedDisplaysKHR;
extern PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR vkGetPhysicalDeviceDisplayPlanePropertiesKHR;
extern PFN_vkGetPhysicalDeviceDisplayPropertiesKHR vkGetPhysicalDeviceDisplayPropertiesKHR;
#endif // defined(VK_KHR_display)
#if defined(VK_KHR_display_swapchain)
extern PFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR;
#endif // defined(VK_KHR_display_swapchain)
#if defined(VK_KHR_draw_indirect_count)
extern PFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR;
extern PFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR;
#endif // defined(VK_KHR_draw_indirect_count)
#if defined(VK_KHR_dynamic_rendering)
extern PFN_vkCmdBeginRenderingKHR vkCmdBeginRenderingKHR;
extern PFN_vkCmdEndRenderingKHR vkCmdEndRenderingKHR;
#endif // defined(VK_KHR_dynamic_rendering)
#if defined(VK_KHR_external_fence_capabilities)
extern PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR vkGetPhysicalDeviceExternalFencePropertiesKHR;
#endif // defined(VK_KHR_external_fence_capabilities)
#if defined(VK_KHR_external_fence_fd)
extern PFN_vkGetFenceFdKHR vkGetFenceFdKHR;
extern PFN_vkImportFenceFdKHR vkImportFenceFdKHR;
#endif // defined(VK_KHR_external_fence_fd)
#if defined(VK_KHR_external_fence_win32)
extern PFN_vkGetFenceWin32HandleKHR vkGetFenceWin32HandleKHR;
extern PFN_vkImportFenceWin32HandleKHR vkImportFenceWin32HandleKHR;
#endif // defined(VK_KHR_external_fence_win32)
#if defined(VK_KHR_external_memory_capabilities)
extern PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR vkGetPhysicalDeviceExternalBufferPropertiesKHR;
#endif // defined(VK_KHR_external_memory_capabilities)
#if defined(VK_KHR_external_memory_fd)
extern PFN_vkGetMemoryFdKHR vkGetMemoryFdKHR;
extern PFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR;
#endif // defined(VK_KHR_external_memory_fd)
#if defined(VK_KHR_external_memory_win32)
extern PFN_vkGetMemoryWin32HandleKHR vkGetMemoryWin32HandleKHR;
extern PFN_vkGetMemoryWin32HandlePropertiesKHR vkGetMemoryWin32HandlePropertiesKHR;
#endif // defined(VK_KHR_external_memory_win32)
#if defined(VK_KHR_external_semaphore_capabilities)
extern PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR vkGetPhysicalDeviceExternalSemaphorePropertiesKHR;
#endif // defined(VK_KHR_external_semaphore_capabilities)
#if defined(VK_KHR_external_semaphore_fd)
extern PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR;
extern PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR;
#endif // defined(VK_KHR_external_semaphore_fd)
#if defined(VK_KHR_external_semaphore_win32)
extern PFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR;
extern PFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR;
#endif // defined(VK_KHR_external_semaphore_win32)
#if defined(VK_KHR_fragment_shading_rate)
extern PFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR;
extern PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR vkGetPhysicalDeviceFragmentShadingRatesKHR;
#endif // defined(VK_KHR_fragment_shading_rate)
#if defined(VK_KHR_get_display_properties2)
extern PFN_vkGetDisplayModeProperties2KHR vkGetDisplayModeProperties2KHR;
extern PFN_vkGetDisplayPlaneCapabilities2KHR vkGetDisplayPlaneCapabilities2KHR;
extern PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR vkGetPhysicalDeviceDisplayPlaneProperties2KHR;
extern PFN_vkGetPhysicalDeviceDisplayProperties2KHR vkGetPhysicalDeviceDisplayProperties2KHR;
#endif // defined(VK_KHR_get_display_properties2)
#if defined(VK_KHR_get_memory_requirements2)
extern PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
extern PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
extern PFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR;
#endif // defined(VK_KHR_get_memory_requirements2)
#if defined(VK_KHR_get_physical_device_properties2)
extern PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR;
extern PFN_vkGetPhysicalDeviceFormatProperties2KHR vkGetPhysicalDeviceFormatProperties2KHR;
extern PFN_vkGetPhysicalDeviceImageFormatProperties2KHR vkGetPhysicalDeviceImageFormatProperties2KHR;
extern PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;
extern PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR;
extern PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR vkGetPhysicalDeviceQueueFamilyProperties2KHR;
extern PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR vkGetPhysicalDeviceSparseImageFormatProperties2KHR;
#endif // defined(VK_KHR_get_physical_device_properties2)
#if defined(VK_KHR_get_surface_capabilities2)
extern PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR vkGetPhysicalDeviceSurfaceCapabilities2KHR;
extern PFN_vkGetPhysicalDeviceSurfaceFormats2KHR vkGetPhysicalDeviceSurfaceFormats2KHR;
#endif // defined(VK_KHR_get_surface_capabilities2)
#if defined(VK_KHR_maintenance1)
extern PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR;
#endif // defined(VK_KHR_maintenance1)
#if defined(VK_KHR_maintenance3)
extern PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR;
#endif // defined(VK_KHR_maintenance3)
#if defined(VK_KHR_maintenance4)
extern PFN_vkGetDeviceBufferMemoryRequirementsKHR vkGetDeviceBufferMemoryRequirementsKHR;
extern PFN_vkGetDeviceImageMemoryRequirementsKHR vkGetDeviceImageMemoryRequirementsKHR;
extern PFN_vkGetDeviceImageSparseMemoryRequirementsKHR vkGetDeviceImageSparseMemoryRequirementsKHR;
#endif // defined(VK_KHR_maintenance4)
#if defined(VK_KHR_performance_query)
extern PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR;
extern PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR;
extern PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR;
extern PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR;
#endif // defined(VK_KHR_performance_query)
#if defined(VK_KHR_pipeline_executable_properties)
extern PFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR;
extern PFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR;
extern PFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR;
#endif // defined(VK_KHR_pipeline_executable_properties)
#if defined(VK_KHR_present_wait)
extern PFN_vkWaitForPresentKHR vkWaitForPresentKHR;
#endif // defined(VK_KHR_present_wait)
#if defined(VK_KHR_push_descriptor)
extern PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR;
#endif // defined(VK_KHR_push_descriptor)
#if (defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline))
extern PFN_vkCmdTraceRaysIndirect2KHR vkCmdTraceRaysIndirect2KHR;
#endif // (defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline))
#if defined(VK_KHR_ray_tracing_pipeline)
extern PFN_vkCmdSetRayTracingPipelineStackSizeKHR vkCmdSetRayTracingPipelineStackSizeKHR;
extern PFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR;
extern PFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR;
extern PFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR;
extern PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR;
extern PFN_vkGetRayTracingShaderGroupHandlesKHR vkGetRayTracingShaderGroupHandlesKHR;
extern PFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR;
#endif // defined(VK_KHR_ray_tracing_pipeline)
#if defined(VK_KHR_sampler_ycbcr_conversion)
extern PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR;
extern PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR;
#endif // defined(VK_KHR_sampler_ycbcr_conversion)
#if defined(VK_KHR_shared_presentable_image)
extern PFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR;
#endif // defined(VK_KHR_shared_presentable_image)
#if defined(VK_KHR_surface)
extern PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR;
extern PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR;
extern PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR;
extern PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR;
extern PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR;
#endif // defined(VK_KHR_surface)
#if defined(VK_KHR_swapchain)
extern PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR;
extern PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR;
extern PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR;
extern PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;
extern PFN_vkQueuePresentKHR vkQueuePresentKHR;
#endif // defined(VK_KHR_swapchain)
#if defined(VK_KHR_synchronization2)
extern PFN_vkCmdPipelineBarrier2KHR vkCmdPipelineBarrier2KHR;
extern PFN_vkCmdResetEvent2KHR vkCmdResetEvent2KHR;
extern PFN_vkCmdSetEvent2KHR vkCmdSetEvent2KHR;
extern PFN_vkCmdWaitEvents2KHR vkCmdWaitEvents2KHR;
extern PFN_vkCmdWriteTimestamp2KHR vkCmdWriteTimestamp2KHR;
extern PFN_vkQueueSubmit2KHR vkQueueSubmit2KHR;
#endif // defined(VK_KHR_synchronization2)
#if (defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker))
extern PFN_vkCmdWriteBufferMarker2AMD vkCmdWriteBufferMarker2AMD;
#endif // (defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker))
#if (defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints))
extern PFN_vkGetQueueCheckpointData2NV vkGetQueueCheckpointData2NV;
#endif // (defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints))
#if defined(VK_KHR_timeline_semaphore)
extern PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR;
extern PFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR;
extern PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR;
#endif // defined(VK_KHR_timeline_semaphore)
#if defined(VK_KHR_video_decode_queue)
extern PFN_vkCmdDecodeVideoKHR vkCmdDecodeVideoKHR;
#endif // defined(VK_KHR_video_decode_queue)
#if defined(VK_KHR_video_encode_queue)
extern PFN_vkCmdEncodeVideoKHR vkCmdEncodeVideoKHR;
#endif // defined(VK_KHR_video_encode_queue)
#if defined(VK_KHR_video_queue)
extern PFN_vkBindVideoSessionMemoryKHR vkBindVideoSessionMemoryKHR;
extern PFN_vkCmdBeginVideoCodingKHR vkCmdBeginVideoCodingKHR;
extern PFN_vkCmdControlVideoCodingKHR vkCmdControlVideoCodingKHR;
extern PFN_vkCmdEndVideoCodingKHR vkCmdEndVideoCodingKHR;
extern PFN_vkCreateVideoSessionKHR vkCreateVideoSessionKHR;
extern PFN_vkCreateVideoSessionParametersKHR vkCreateVideoSessionParametersKHR;
extern PFN_vkDestroyVideoSessionKHR vkDestroyVideoSessionKHR;
extern PFN_vkDestroyVideoSessionParametersKHR vkDestroyVideoSessionParametersKHR;
extern PFN_vkGetPhysicalDeviceVideoCapabilitiesKHR vkGetPhysicalDeviceVideoCapabilitiesKHR;
extern PFN_vkGetPhysicalDeviceVideoFormatPropertiesKHR vkGetPhysicalDeviceVideoFormatPropertiesKHR;
extern PFN_vkGetVideoSessionMemoryRequirementsKHR vkGetVideoSessionMemoryRequirementsKHR;
extern PFN_vkUpdateVideoSessionParametersKHR vkUpdateVideoSessionParametersKHR;
#endif // defined(VK_KHR_video_queue)
#if defined(VK_KHR_wayland_surface)
extern PFN_vkCreateWaylandSurfaceKHR vkCreateWaylandSurfaceKHR;
extern PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR vkGetPhysicalDeviceWaylandPresentationSupportKHR;
#endif // defined(VK_KHR_wayland_surface)
#if defined(VK_KHR_win32_surface)
extern PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR;
extern PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR vkGetPhysicalDeviceWin32PresentationSupportKHR;
#endif // defined(VK_KHR_win32_surface)
#if defined(VK_KHR_xcb_surface)
extern PFN_vkCreateXcbSurfaceKHR vkCreateXcbSurfaceKHR;
extern PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR vkGetPhysicalDeviceXcbPresentationSupportKHR;
#endif // defined(VK_KHR_xcb_surface)
#if defined(VK_KHR_xlib_surface)
extern PFN_vkCreateXlibSurfaceKHR vkCreateXlibSurfaceKHR;
extern PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR vkGetPhysicalDeviceXlibPresentationSupportKHR;
#endif // defined(VK_KHR_xlib_surface)
#if defined(VK_MVK_ios_surface)
extern PFN_vkCreateIOSSurfaceMVK vkCreateIOSSurfaceMVK;
#endif // defined(VK_MVK_ios_surface)
#if defined(VK_MVK_macos_surface)
extern PFN_vkCreateMacOSSurfaceMVK vkCreateMacOSSurfaceMVK;
#endif // defined(VK_MVK_macos_surface)
#if defined(VK_NN_vi_surface)
extern PFN_vkCreateViSurfaceNN vkCreateViSurfaceNN;
#endif // defined(VK_NN_vi_surface)
#if defined(VK_NVX_binary_import)
extern PFN_vkCmdCuLaunchKernelNVX vkCmdCuLaunchKernelNVX;
extern PFN_vkCreateCuFunctionNVX vkCreateCuFunctionNVX;
extern PFN_vkCreateCuModuleNVX vkCreateCuModuleNVX;
extern PFN_vkDestroyCuFunctionNVX vkDestroyCuFunctionNVX;
extern PFN_vkDestroyCuModuleNVX vkDestroyCuModuleNVX;
#endif // defined(VK_NVX_binary_import)
#if defined(VK_NVX_image_view_handle)
extern PFN_vkGetImageViewAddressNVX vkGetImageViewAddressNVX;
extern PFN_vkGetImageViewHandleNVX vkGetImageViewHandleNVX;
#endif // defined(VK_NVX_image_view_handle)
#if defined(VK_NV_acquire_winrt_display)
extern PFN_vkAcquireWinrtDisplayNV vkAcquireWinrtDisplayNV;
extern PFN_vkGetWinrtDisplayNV vkGetWinrtDisplayNV;
#endif // defined(VK_NV_acquire_winrt_display)
#if defined(VK_NV_clip_space_w_scaling)
extern PFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV;
#endif // defined(VK_NV_clip_space_w_scaling)
#if defined(VK_NV_cooperative_matrix)
extern PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV vkGetPhysicalDeviceCooperativeMatrixPropertiesNV;
#endif // defined(VK_NV_cooperative_matrix)
#if defined(VK_NV_coverage_reduction_mode)
extern PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV;
#endif // defined(VK_NV_coverage_reduction_mode)
#if defined(VK_NV_device_diagnostic_checkpoints)
extern PFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV;
extern PFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV;
#endif // defined(VK_NV_device_diagnostic_checkpoints)
#if defined(VK_NV_device_generated_commands)
extern PFN_vkCmdBindPipelineShaderGroupNV vkCmdBindPipelineShaderGroupNV;
extern PFN_vkCmdExecuteGeneratedCommandsNV vkCmdExecuteGeneratedCommandsNV;
extern PFN_vkCmdPreprocessGeneratedCommandsNV vkCmdPreprocessGeneratedCommandsNV;
extern PFN_vkCreateIndirectCommandsLayoutNV vkCreateIndirectCommandsLayoutNV;
extern PFN_vkDestroyIndirectCommandsLayoutNV vkDestroyIndirectCommandsLayoutNV;
extern PFN_vkGetGeneratedCommandsMemoryRequirementsNV vkGetGeneratedCommandsMemoryRequirementsNV;
#endif // defined(VK_NV_device_generated_commands)
#if defined(VK_NV_external_memory_capabilities)
extern PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV vkGetPhysicalDeviceExternalImageFormatPropertiesNV;
#endif // defined(VK_NV_external_memory_capabilities)
#if defined(VK_NV_external_memory_rdma)
extern PFN_vkGetMemoryRemoteAddressNV vkGetMemoryRemoteAddressNV;
#endif // defined(VK_NV_external_memory_rdma)
#if defined(VK_NV_external_memory_win32)
extern PFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV;
#endif // defined(VK_NV_external_memory_win32)
#if defined(VK_NV_fragment_shading_rate_enums)
extern PFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV;
#endif // defined(VK_NV_fragment_shading_rate_enums)
#if defined(VK_NV_mesh_shader)
extern PFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV;
extern PFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV;
extern PFN_vkCmdDrawMeshTasksNV vkCmdDrawMeshTasksNV;
#endif // defined(VK_NV_mesh_shader)
#if defined(VK_NV_ray_tracing)
extern PFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV;
extern PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV;
extern PFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV;
extern PFN_vkCmdTraceRaysNV vkCmdTraceRaysNV;
extern PFN_vkCmdWriteAccelerationStructuresPropertiesNV vkCmdWriteAccelerationStructuresPropertiesNV;
extern PFN_vkCompileDeferredNV vkCompileDeferredNV;
extern PFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV;
extern PFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV;
extern PFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV;
extern PFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV;
extern PFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV;
extern PFN_vkGetRayTracingShaderGroupHandlesNV vkGetRayTracingShaderGroupHandlesNV;
#endif // defined(VK_NV_ray_tracing)
#if defined(VK_NV_scissor_exclusive)
extern PFN_vkCmdSetExclusiveScissorNV vkCmdSetExclusiveScissorNV;
#endif // defined(VK_NV_scissor_exclusive)
#if defined(VK_NV_shading_rate_image)
extern PFN_vkCmdBindShadingRateImageNV vkCmdBindShadingRateImageNV;
extern PFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV;
extern PFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV;
#endif // defined(VK_NV_shading_rate_image)
#if defined(VK_QNX_screen_surface)
extern PFN_vkCreateScreenSurfaceQNX vkCreateScreenSurfaceQNX;
extern PFN_vkGetPhysicalDeviceScreenPresentationSupportQNX vkGetPhysicalDeviceScreenPresentationSupportQNX;
#endif // defined(VK_QNX_screen_surface)
#if defined(VK_VALVE_descriptor_set_host_mapping)
extern PFN_vkGetDescriptorSetHostMappingVALVE vkGetDescriptorSetHostMappingVALVE;
extern PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE vkGetDescriptorSetLayoutHostMappingInfoVALVE;
#endif // defined(VK_VALVE_descriptor_set_host_mapping)
#if (defined(VK_EXT_full_screen_exclusive) && defined(VK_KHR_device_group)) || (defined(VK_EXT_full_screen_exclusive) && defined(VK_VERSION_1_1))
extern PFN_vkGetDeviceGroupSurfacePresentModes2EXT vkGetDeviceGroupSurfacePresentModes2EXT;
#endif // (defined(VK_EXT_full_screen_exclusive) && defined(VK_KHR_device_group)) || (defined(VK_EXT_full_screen_exclusive) && defined(VK_VERSION_1_1))
#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && defined(VK_VERSION_1_1)) || (defined(VK_KHR_push_descriptor) && defined(VK_KHR_descriptor_update_template))
extern PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR;
#endif // (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && defined(VK_VERSION_1_1)) || (defined(VK_KHR_push_descriptor) && defined(VK_KHR_descriptor_update_template))
#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))
extern PFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR;
extern PFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR;
extern PFN_vkGetPhysicalDevicePresentRectanglesKHR vkGetPhysicalDevicePresentRectanglesKHR;
#endif // (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))
#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))
extern PFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR;
#endif // (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))
} // namespace bluevk
#if !defined(NDEBUG)
#include <utils/Log.h>
utils::io::ostream& operator<<(utils::io::ostream& out, const VkImageLayout& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkAttachmentLoadOp& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkAttachmentStoreOp& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkImageType& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkImageTiling& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkImageViewType& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkCommandBufferLevel& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkComponentSwizzle& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkDescriptorType& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkQueryType& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkBorderColor& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkPipelineBindPoint& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkPipelineCacheHeaderVersion& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkPrimitiveTopology& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkSharingMode& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkIndexType& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkFilter& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkSamplerMipmapMode& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkSamplerAddressMode& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkCompareOp& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkPolygonMode& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkFrontFace& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkBlendFactor& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkBlendOp& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkStencilOp& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkLogicOp& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkInternalAllocationType& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkSystemAllocationScope& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkPhysicalDeviceType& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkVertexInputRate& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkFormat& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkStructureType& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkSubpassContents& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkResult& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkDynamicState& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkDescriptorUpdateTemplateType& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkObjectType& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkSemaphoreType& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkPresentModeKHR& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkColorSpaceKHR& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkTimeDomainEXT& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkDebugReportObjectTypeEXT& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkDeviceMemoryReportEventTypeEXT& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkRasterizationOrderAMD& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkValidationCheckEXT& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkValidationFeatureEnableEXT& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkValidationFeatureDisableEXT& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkIndirectCommandsTokenTypeNV& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkDisplayPowerStateEXT& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkDeviceEventTypeEXT& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkDisplayEventTypeEXT& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkViewportCoordinateSwizzleNV& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkDiscardRectangleModeEXT& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkPointClippingBehavior& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkSamplerReductionMode& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkTessellationDomainOrigin& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkSamplerYcbcrModelConversion& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkSamplerYcbcrRange& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkChromaLocation& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkBlendOverlapEXT& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkCoverageModulationModeNV& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkCoverageReductionModeNV& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkValidationCacheHeaderVersionEXT& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkShaderInfoTypeAMD& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkQueueGlobalPriorityKHR& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkConservativeRasterizationModeEXT& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkVendorId& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkDriverId& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkShadingRatePaletteEntryNV& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkCoarseSampleOrderTypeNV& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkCopyAccelerationStructureModeKHR& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkBuildAccelerationStructureModeKHR& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkAccelerationStructureTypeKHR& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkGeometryTypeKHR& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkAccelerationStructureMemoryRequirementsTypeNV& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkAccelerationStructureBuildTypeKHR& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkRayTracingShaderGroupTypeKHR& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkAccelerationStructureCompatibilityKHR& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkShaderGroupShaderKHR& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkMemoryOverallocationBehaviorAMD& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkScopeNV& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkComponentTypeNV& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkPerformanceCounterScopeKHR& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkPerformanceCounterUnitKHR& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkPerformanceCounterStorageKHR& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkPerformanceConfigurationTypeINTEL& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkQueryPoolSamplingModeINTEL& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkPerformanceOverrideTypeINTEL& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkPerformanceParameterTypeINTEL& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkPerformanceValueTypeINTEL& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkShaderFloatControlsIndependence& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkPipelineExecutableStatisticFormatKHR& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkLineRasterizationModeEXT& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkFragmentShadingRateCombinerOpKHR& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkFragmentShadingRateNV& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkFragmentShadingRateTypeNV& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkSubpassMergeStatusEXT& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkProvokingVertexModeEXT& value);
utils::io::ostream& operator<<(utils::io::ostream& out, const VkAccelerationStructureMotionInstanceTypeNV& value);
#endif
#endif // TNT_FILAMENT_BLUEVK_H

View File

@@ -0,0 +1,116 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_BINARYTREEARRAY_H
#define TNT_UTILS_BINARYTREEARRAY_H
#include <utils/compiler.h>
#include <type_traits>
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
namespace utils {
class BinaryTreeArray {
// Simple fixed capacity stack
template<typename TYPE, size_t CAPACITY,
typename = std::enable_if_t<std::is_trivial_v<TYPE>>>
class stack {
TYPE mElements[CAPACITY];
size_t mSize = 0;
public:
bool empty() const noexcept { return mSize == 0; }
void push(TYPE const& v) noexcept {
assert(mSize < CAPACITY);
mElements[mSize++] = v;
}
void pop() noexcept {
assert(mSize > 0);
--mSize;
}
const TYPE& back() const noexcept {
return mElements[mSize - 1];
}
};
public:
static size_t count(size_t height) noexcept { return (1u << height) - 1; }
static size_t left(size_t i, size_t /*height*/) noexcept { return i + 1; }
static size_t right(size_t i, size_t height) noexcept {
return i + (size_t(1) << (height - 1));
}
// this builds the depth-first binary tree array top down (post-order)
template<typename Leaf, typename Node>
static void traverse(size_t height, Leaf leaf, Node node) noexcept {
struct TNode {
uint32_t index;
uint32_t col;
uint32_t height;
uint32_t next;
bool isLeaf() const noexcept { return height == 1; }
size_t left() const noexcept { return BinaryTreeArray::left(index, height); }
size_t right() const noexcept { return BinaryTreeArray::right(index, height); }
};
stack<TNode, 16> stack;
stack.push(TNode{ 0, 0, (uint32_t)height, (uint32_t)count(height) });
uint32_t prevLeft = 0;
uint32_t prevRight = 0;
uint32_t prevIndex = 0;
while (!stack.empty()) {
TNode const* const UTILS_RESTRICT curr = &stack.back();
const bool isLeaf = curr->isLeaf();
const uint32_t index = curr->index;
const uint32_t l = (uint32_t)curr->left();
const uint32_t r = (uint32_t)curr->right();
if (prevLeft == index || prevRight == index) {
if (!isLeaf) {
// the 'next' node of our left node's right descendants is our right child
stack.push({ l, 2 * curr->col, curr->height - 1, r });
}
} else if (l == prevIndex) {
if (!isLeaf) {
// the 'next' node of our right child is our own 'next' sibling
stack.push({ r, 2 * curr->col + 1, curr->height - 1, curr->next });
}
} else {
if (!isLeaf) {
node(index, l, r, curr->next);
} else {
leaf(index, curr->col, curr->next);
}
stack.pop();
}
prevLeft = l;
prevRight = r;
prevIndex = index;
}
}
};
} // namespace utils
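/*
 * Usage sketch (illustrative only, not part of the original header), assuming a
 * height-3 tree and printf output: traverse() invokes the `node` closure for
 * interior nodes and the `leaf` closure for leaves, in post-order.
 *
 *   utils::BinaryTreeArray::traverse(3,
 *           [](size_t index, size_t col, size_t next) {
 *               printf("leaf index=%zu col=%zu next=%zu\n", index, col, next);
 *           },
 *           [](size_t index, size_t l, size_t r, size_t next) {
 *               printf("node index=%zu left=%zu right=%zu next=%zu\n", index, l, r, next);
 *           });
 */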
#endif //TNT_UTILS_BINARYTREEARRAY_H

View File

@@ -0,0 +1,26 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_CONDITION_H
#define TNT_UTILS_CONDITION_H
#if defined(__ANDROID__)
#include <utils/linux/Condition.h>
#else
#include <utils/generic/Condition.h>
#endif
#endif // TNT_UTILS_CONDITION_H

View File

@@ -0,0 +1,92 @@
/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_COUNTDOWNLATCH_H
#define TNT_UTILS_COUNTDOWNLATCH_H
// note: we use our version of mutex/condition to keep this public header STL free
#include <utils/Condition.h>
#include <utils/Mutex.h>
#include <stdint.h>
#include <stddef.h>
namespace utils {
/**
* A count down latch is used to block one or several threads until the latch is signaled
* a certain number of times.
*
* Threads entering the latch are blocked until the latch is signaled enough times.
*
* @see CyclicBarrier
*/
class CountDownLatch {
public:
/**
* Creates a count down latch with a specified count. The minimum useful value is 1.
* @param count the latch counter initial value
*/
explicit CountDownLatch(size_t count) noexcept;
~CountDownLatch() = default;
/**
* Blocks until latch() is called \p count times.
* @see CountDownLatch(size_t count)
*/
void await() noexcept;
/**
* Releases threads blocked in await() when called \p count times. Calling latch() more than
* \p count times has no effect.
* @see reset()
*/
void latch() noexcept;
/**
* Resets the count-down latch to the given value.
*
* @param new_count New latch count. A value of zero will immediately unblock all waiting
* threads.
*
* @warning Use with caution. It's only safe to reset the latch count when you're sure
* that no threads are waiting in await(). This can be guaranteed in various ways, for
* instance, if you have a single thread calling await(), you could call reset() from that
* thread, or you could use a CyclicBarrier to make sure all threads using the CountDownLatch
* are at a known place (i.e.: not in await()) when reset() is called.
*/
void reset(size_t new_count) noexcept;
/**
* @return the number of times latch() has been called since construction or reset.
* @see reset(), CountDownLatch(size_t count)
*/
size_t getCount() const noexcept;
CountDownLatch() = delete;
CountDownLatch(const CountDownLatch&) = delete;
CountDownLatch& operator=(const CountDownLatch&) = delete;
private:
uint32_t m_initial_count;
uint32_t m_remaining_count;
mutable Mutex m_lock;
mutable Condition m_cv;
};
} // namespace utils
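/*
 * Usage sketch (illustrative only, not part of the original header): the worker
 * count and thread bodies below are assumptions for the example (<thread> assumed).
 *
 *   utils::CountDownLatch latch(3);
 *   for (int i = 0; i < 3; i++) {
 *       std::thread([&latch] {
 *           // ... produce something ...
 *           latch.latch();          // signal completion
 *       }).detach();
 *   }
 *   latch.await();                  // blocks until latch() has been called 3 times
 */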
#endif // TNT_UTILS_COUNTDOWNLATCH_H

View File

@@ -0,0 +1,84 @@
/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_CYCLIC_BARRIER_H
#define TNT_UTILS_CYCLIC_BARRIER_H
#include <stddef.h>
// note: we use our version of mutex/condition to keep this public header STL free
#include <utils/Condition.h>
#include <utils/Mutex.h>
namespace utils {
/**
* A cyclic barrier is used to synchronize several threads to a particular execution point.
*
* Threads entering the barrier are halted until all threads reach the barrier.
*
* @see CountDownLatch
*/
class CyclicBarrier {
public:
/**
* Creates a cyclic barrier with a specified number of threads to synchronize. The minimum
* useful value is 2. A value of 0 is invalid and is silently changed to 1.
* @param num_threads Number of threads to synchronize.
*/
explicit CyclicBarrier(size_t num_threads) noexcept;
/**
* @return The number of threads that are synchronized.
*/
size_t getThreadCount() const noexcept;
/**
* @return Number of threads currently waiting on the barrier.
*/
size_t getWaitingThreadCount() const noexcept;
/**
* Blocks until getThreadCount()-1 other threads reach await().
*/
void await() noexcept;
/**
* Resets the cyclic barrier to its original state and releases all waiting threads.
*/
void reset() noexcept;
CyclicBarrier() = delete;
CyclicBarrier(const CyclicBarrier&) = delete;
CyclicBarrier& operator=(const CyclicBarrier&) = delete;
private:
enum class State {
TRAP, RELEASE
};
const size_t m_num_threads;
mutable Mutex m_lock;
mutable Condition m_cv;
State m_state = State::TRAP;
size_t m_trapped_threads = 0;
size_t m_released_threads = 0;
};
} // namespace utils
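/*
 * Usage sketch (illustrative only, not part of the original header): each of
 * `workerCount` threads (a hypothetical value) runs the loop below; no thread
 * starts an iteration before all of them have reached the barrier.
 *
 *   utils::CyclicBarrier barrier(workerCount);
 *   // per worker thread:
 *   for (;;) {
 *       barrier.await();            // released once all workerCount threads arrive
 *       doFrameSlice();             // hypothetical per-thread work
 *   }
 */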
#endif // TNT_UTILS_CYCLIC_BARRIER_H

View File

@@ -0,0 +1,78 @@
/*
* Copyright (C) 2023 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef TNT_UTILS_FIXEDCIRCULARBUFFER_H
#define TNT_UTILS_FIXEDCIRCULARBUFFER_H
#include <utils/debug.h>
#include <memory>
#include <optional>
#include <type_traits>
#include <stddef.h>
namespace utils {
template<typename T>
class FixedCircularBuffer {
public:
explicit FixedCircularBuffer(size_t capacity)
: mData(std::make_unique<T[]>(capacity)), mCapacity(capacity) {}
size_t size() const noexcept { return mSize; }
size_t capacity() const noexcept { return mCapacity; }
bool full() const noexcept { return mCapacity > 0 && mSize == mCapacity; }
bool empty() const noexcept { return mSize == 0; }
/**
* Push v into the buffer. If the buffer is already full, removes the oldest item and returns
* it. If this buffer has no capacity, simply returns v.
* @param v the new value to push into the buffer
* @return if the buffer was full, the oldest value which was displaced
*/
std::optional<T> push(T v) noexcept {
if (mCapacity == 0) {
return v;
}
std::optional<T> displaced = full() ? pop() : std::optional<T>{};
mData[mEnd] = v;
mEnd = (mEnd + 1) % mCapacity;
mSize++;
return displaced;
}
T pop() noexcept {
assert_invariant(mSize > 0);
T result = mData[mBegin];
mBegin = (mBegin + 1) % mCapacity;
mSize--;
return result;
}
private:
std::unique_ptr<T[]> mData;
size_t mBegin = 0;
size_t mEnd = 0;
size_t mSize = 0;
size_t mCapacity;
};
} // namespace utils
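/*
 * Usage sketch (illustrative only, not part of the original header): a 3-entry
 * buffer of frame times; the values are made up for the example.
 *
 *   utils::FixedCircularBuffer<int> frames(3);
 *   frames.push(16); frames.push(17); frames.push(15);   // buffer is now full
 *   std::optional<int> displaced = frames.push(18);      // displaced == 16 (oldest)
 *   int oldest = frames.pop();                           // oldest == 17
 */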
#endif // TNT_UTILS_FIXEDCIRCULARBUFFER_H

View File

@@ -0,0 +1,110 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_HASH_H
#define TNT_UTILS_HASH_H
#include <functional> // for std::hash
#include <string_view>
#include <utility>
#include <stdint.h>
#include <stddef.h>
namespace utils::hash {
inline size_t combine(size_t lhs, size_t rhs) noexcept {
std::pair<size_t, size_t> const p{ lhs, rhs };
return std::hash<std::string_view>{}({ (char*)&p, sizeof(p) });
}
// Hash function that takes an arbitrary swath of word-aligned data.
inline uint32_t murmur3(const uint32_t* key, size_t wordCount, uint32_t seed) noexcept {
uint32_t h = seed;
size_t i = wordCount;
do {
uint32_t k = *key++;
k *= 0xcc9e2d51u;
k = (k << 15u) | (k >> 17u);
k *= 0x1b873593u;
h ^= k;
h = (h << 13u) | (h >> 19u);
h = (h * 5u) + 0xe6546b64u;
} while (--i);
h ^= wordCount;
h ^= h >> 16u;
h *= 0x85ebca6bu;
h ^= h >> 13u;
h *= 0xc2b2ae35u;
h ^= h >> 16u;
return h;
}
// The hash yields the same result for a given byte sequence regardless of alignment.
inline uint32_t murmurSlow(const uint8_t* key, size_t byteCount, uint32_t seed) noexcept {
const size_t wordCount = (byteCount + 3) / 4;
const uint8_t* const last = key + byteCount;
// The remainder is identical to murmur3(), except that an inner loop safely "reads" an entire word.
uint32_t h = seed;
size_t wc = wordCount;
do {
uint32_t k = 0;
for (int i = 0; i < 4 && key < last; ++i, ++key) {
k >>= 8;
k |= uint32_t(*key) << 24;
}
k *= 0xcc9e2d51u;
k = (k << 15u) | (k >> 17u);
k *= 0x1b873593u;
h ^= k;
h = (h << 13u) | (h >> 19u);
h = (h * 5u) + 0xe6546b64u;
} while (--wc);
h ^= wordCount;
h ^= h >> 16u;
h *= 0x85ebca6bu;
h ^= h >> 13u;
h *= 0xc2b2ae35u;
h ^= h >> 16u;
return h;
}
template<typename T>
struct MurmurHashFn {
uint32_t operator()(const T& key) const noexcept {
static_assert(0 == (sizeof(key) & 3u), "Hashing requires a size that is a multiple of 4.");
return murmur3((const uint32_t*) &key, sizeof(key) / 4, 0);
}
};
// combines two hashes together
template<class T>
inline void combine(size_t& seed, const T& v) noexcept {
std::hash<T> hasher;
seed ^= hasher(v) + 0x9e3779b9u + (seed << 6u) + (seed >> 2u);
}
// combines two hashes together, faster but less good
template<class T>
inline void combine_fast(size_t& seed, const T& v) noexcept {
std::hash<T> hasher;
seed ^= hasher(v) << 1u;
}
} // namespace utils::hash
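/*
 * Usage sketch (illustrative only, not part of the original header): the Key
 * struct and the values are assumptions for the example.
 *
 *   struct Key { uint32_t a; uint32_t b; };               // size is a multiple of 4
 *   uint32_t h = utils::hash::MurmurHashFn<Key>{}(Key{ 1u, 2u });
 *
 *   size_t seed = std::hash<int>{}(42);
 *   utils::hash::combine(seed, std::string_view{ "filament" });  // mixes a second hash into seed
 */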
#endif // TNT_UTILS_HASH_H

View File

@@ -0,0 +1,615 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_JOBSYSTEM_H
#define TNT_UTILS_JOBSYSTEM_H
#include <utils/Allocator.h>
#include <utils/architecture.h>
#include <utils/compiler.h>
#include <utils/Condition.h>
#include <utils/debug.h>
#include <utils/memalign.h>
#include <utils/Mutex.h>
#include <utils/Slice.h>
#include <utils/ostream.h>
#include <utils/WorkStealingDequeue.h>
#include <tsl/robin_map.h>
#include <atomic>
#include <functional>
#include <mutex>
#include <type_traits>
#include <thread>
#include <vector>
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
namespace utils {
class JobSystem {
static constexpr size_t MAX_JOB_COUNT = 1 << 14; // 16384
static constexpr uint32_t JOB_COUNT_MASK = MAX_JOB_COUNT - 1;
static constexpr uint32_t WAITER_COUNT_SHIFT = 24;
static_assert(MAX_JOB_COUNT <= 0x7FFE, "MAX_JOB_COUNT must be <= 0x7FFE");
using WorkQueue = WorkStealingDequeue<uint16_t, MAX_JOB_COUNT>;
using Mutex = utils::Mutex;
using Condition = utils::Condition;
public:
class Job;
using ThreadId = uint8_t;
using JobFunc = void(*)(void*, JobSystem&, Job*);
static constexpr ThreadId invalidThreadId = 0xff;
class alignas(CACHELINE_SIZE) Job {
public:
Job() noexcept {} /* = default; */ /* clang bug */ // NOLINT(modernize-use-equals-default,cppcoreguidelines-pro-type-member-init)
Job(const Job&) = delete;
Job(Job&&) = delete;
private:
friend class JobSystem;
// Size is chosen so that we can store at least std::function<>
// the alignas() qualifier ensures we're a multiple of a cache-line.
static constexpr size_t JOB_STORAGE_SIZE_BYTES =
sizeof(std::function<void()>) > 48 ? sizeof(std::function<void()>) : 48;
static constexpr size_t JOB_STORAGE_SIZE_WORDS =
(JOB_STORAGE_SIZE_BYTES + sizeof(void*) - 1) / sizeof(void*);
// keep it first, so it's correctly aligned with all architectures
// this is where we store the job's data, typically a std::function<>
// v7 | v8
void* storage[JOB_STORAGE_SIZE_WORDS]; // 48 | 48
JobFunc function; // 4 | 8
uint16_t parent; // 2 | 2
mutable ThreadId id = invalidThreadId; // 1 | 1
mutable std::atomic<uint8_t> refCount = { 1 }; // 1 | 1
std::atomic<uint32_t> runningJobCount = { 1 }; // 4 | 4
// 4 | 0 (padding)
// 64 | 64
};
#ifndef WIN32
// on windows std::function<void()> is bigger and forces the whole structure to be larger
static_assert(sizeof(Job) == 64);
#endif
explicit JobSystem(size_t threadCount = 0, size_t adoptableThreadsCount = 1) noexcept;
~JobSystem();
// Make the current thread part of the thread pool.
void adopt();
// Remove this adopted thread from the parent. This is intended to be used for
// shutting down a JobSystem. In particular, this doesn't allow the parent to
// adopt more threads.
void emancipate();
// If a parent is not specified when creating a job, that job will automatically take the
// root job as a parent.
// The root job is reset when waited on.
Job* setRootJob(Job* job) noexcept { return mRootJob = job; }
// use setRootJob() instead
UTILS_DEPRECATED
Job* setMasterJob(Job* job) noexcept { return setRootJob(job); }
Job* create(Job* parent, JobFunc func) noexcept;
// NOTE: All methods below must be called from the same thread and that thread must be
// owned by JobSystem's thread pool.
/*
* Job creation examples:
* ----------------------
*
* struct Functor {
* uintptr_t storage[6];
* void operator()(JobSystem&, Jobsystem::Job*);
* } functor;
*
* struct Foo {
* uintptr_t storage[6];
* void method(JobSystem&, Jobsystem::Job*);
* } foo;
*
* Functor and Foo size must be <= uintptr_t[6]
*
* createJob()
* createJob(parent)
* createJob<Foo, &Foo::method>(parent, &foo)
* createJob<Foo, &Foo::method>(parent, foo)
* createJob<Foo, &Foo::method>(parent, std::ref(foo))
* createJob(parent, functor)
* createJob(parent, std::ref(functor))
* createJob(parent, [ up-to 6 uintptr_t ](JobSystem*, Jobsystem::Job*){ })
*
* Utility functions:
* ------------------
* These are less efficient, but handle any size objects using the heap if needed.
* (internally uses std::function<>), and don't require the callee to take
* a (JobSystem&, Jobsystem::Job*) as parameter.
*
* struct BigFoo {
* uintptr_t large[16];
* void operator()();
* void method(int answerToEverything);
* static void exec(BigFoo&) { }
* } bigFoo;
*
* jobs::createJob(js, parent, [ any-capture ](int answerToEverything){}, 42);
* jobs::createJob(js, parent, &BigFoo::method, &bigFoo, 42);
* jobs::createJob(js, parent, &BigFoo::exec, std::ref(bigFoo));
* jobs::createJob(js, parent, bigFoo);
* jobs::createJob(js, parent, std::ref(bigFoo));
* etc...
*
* struct SmallFunctor {
* uintptr_t storage[3];
* void operator()(T* data, size_t count);
* } smallFunctor;
*
* jobs::parallel_for(js, data, count, [ up-to 3 uintptr_t ](T* data, size_t count) { });
* jobs::parallel_for(js, data, count, smallFunctor);
* jobs::parallel_for(js, data, count, std::ref(smallFunctor));
*
*/
// creates an empty (no-op) job with an optional parent
Job* createJob(Job* parent = nullptr) noexcept {
return create(parent, nullptr);
}
// creates a job from a KNOWN method pointer w/ object passed by pointer
// the caller must ensure the object will outlive the Job
template<typename T, void(T::*method)(JobSystem&, Job*)>
Job* createJob(Job* parent, T* data) noexcept {
Job* job = create(parent, +[](void* storage, JobSystem& js, Job* job) {
T* const that = static_cast<T*>(reinterpret_cast<void**>(storage)[0]);
(that->*method)(js, job);
});
if (job) {
job->storage[0] = data;
}
return job;
}
// creates a job from a KNOWN method pointer w/ object passed by value
template<typename T, void(T::*method)(JobSystem&, Job*)>
Job* createJob(Job* parent, T data) noexcept {
static_assert(sizeof(data) <= sizeof(Job::storage), "user data too large");
Job* job = create(parent, [](void* storage, JobSystem& js, Job* job) {
T* const that = static_cast<T*>(storage);
(that->*method)(js, job);
that->~T();
});
if (job) {
new(job->storage) T(std::move(data));
}
return job;
}
// creates a job from a KNOWN method pointer w/ object constructed in place from args
template<typename T, void(T::*method)(JobSystem&, Job*), typename ... ARGS>
Job* emplaceJob(Job* parent, ARGS&& ... args) noexcept {
static_assert(sizeof(T) <= sizeof(Job::storage), "user data too large");
Job* job = create(parent, [](void* storage, JobSystem& js, Job* job) {
T* const that = static_cast<T*>(storage);
(that->*method)(js, job);
that->~T();
});
if (job) {
new(job->storage) T(std::forward<ARGS>(args)...);
}
return job;
}
// creates a job from a functor passed by value
template<typename T>
Job* createJob(Job* parent, T functor) noexcept {
static_assert(sizeof(functor) <= sizeof(Job::storage), "functor too large");
Job* job = create(parent, [](void* storage, JobSystem& js, Job* job){
T* const that = static_cast<T*>(storage);
that->operator()(js, job);
that->~T();
});
if (job) {
new(job->storage) T(std::move(functor));
}
return job;
}
// creates a job from a functor constructed in place from args
template<typename T, typename ... ARGS>
Job* emplaceJob(Job* parent, ARGS&& ... args) noexcept {
static_assert(sizeof(T) <= sizeof(Job::storage), "functor too large");
Job* job = create(parent, [](void* storage, JobSystem& js, Job* job){
T* const that = static_cast<T*>(storage);
that->operator()(js, job);
that->~T();
});
if (job) {
new(job->storage) T(std::forward<ARGS>(args)...);
}
return job;
}
/*
* Jobs are normally finished automatically; this can be used to cancel a job before it is run.
*
* Never use this once a flavor of run() has been called.
*/
void cancel(Job*& job) noexcept;
/*
* Adds a reference to a Job.
*
* This allows the caller to waitAndRelease() on this job from multiple threads.
* Use runAndWait() if waiting from multiple threads is not needed.
*
* This job MUST BE waited on with waitAndRelease(), or released with release().
*/
static Job* retain(Job* job) noexcept;
/*
* Releases a reference from a Job obtained with runAndRetain() or a call to retain().
*
* The job can't be used after this call.
*/
void release(Job*& job) noexcept;
void release(Job*&& job) noexcept {
Job* p = job;
release(p);
}
/*
* Add job to this thread's execution queue. Its reference will drop automatically.
* The current thread must be owned by JobSystem's thread pool. See adopt().
*
* The job can't be used after this call.
*/
void run(Job*& job) noexcept;
void run(Job*&& job) noexcept { // allows run(createJob(...));
Job* p = job;
run(p);
}
/*
* Add job to this thread's execution queue. Its reference will drop automatically.
* The current thread must be owned by JobSystem's thread pool. See adopt().
* id must be the current thread id obtained with JobSystem::getThreadId(Job*). This
* API is more efficient than the methods above.
*
* The job can't be used after this call.
*/
void run(Job*& job, ThreadId id) noexcept;
void run(Job*&& job, ThreadId id) noexcept { // allows run(createJob(...));
Job* p = job;
run(p, id);
}
/*
* Add job to this thread's execution queue and keep a reference to it.
* The current thread must be owned by JobSystem's thread pool. See adopt().
*
* This job MUST BE waited on with waitAndRelease(), or released with release().
*/
Job* runAndRetain(Job* job) noexcept;
/*
* Waits on a job and destroys it.
* The current thread must be owned by JobSystem's thread pool. See adopt().
*
* The job must first be obtained from runAndRetain() or retain().
* The job can't be used after this call.
*/
void waitAndRelease(Job*& job) noexcept;
/*
* Runs and waits for a job. This is equivalent to calling
* runAndRetain(job);
* waitAndRelease(job);
*
* The job can't be used after this call.
*/
void runAndWait(Job*& job) noexcept;
void runAndWait(Job*&& job) noexcept { // allows runAndWait(createJob(...));
Job* p = job;
runAndWait(p);
}
// for debugging
friend io::ostream& operator << (io::ostream& out, JobSystem const& js);
// utility functions...
// set the name of the current thread (on OSes that support it)
static void setThreadName(const char* threadName) noexcept;
enum class Priority {
NORMAL,
DISPLAY,
URGENT_DISPLAY,
BACKGROUND
};
static void setThreadPriority(Priority priority) noexcept;
static void setThreadAffinityById(size_t id) noexcept;
size_t getParallelSplitCount() const noexcept {
return mParallelSplitCount;
}
size_t getThreadCount() const { return mThreadCount; }
// returns the current ThreadId, which can be used with run(). This method can only be
// called from a job's function.
static ThreadId getThreadId(Job const* job) noexcept {
assert_invariant(job->id != invalidThreadId);
return job->id;
}
private:
// this is just to avoid using std::default_random_engine, since we're in a public header.
class default_random_engine {
static constexpr uint32_t m = 0x7fffffffu;
uint32_t mState; // must be 0 < seed < 0x7fffffff
public:
using result_type = uint32_t;
static constexpr result_type min() noexcept {
return 1;
}
static constexpr result_type max() noexcept {
return m - 1;
}
inline constexpr explicit default_random_engine(uint32_t seed = 1u) noexcept
: mState(((seed % m) == 0u) ? 1u : seed % m) {
}
inline uint32_t operator()() noexcept {
return mState = uint32_t((uint64_t(mState) * 48271u) % m);
}
};
struct alignas(CACHELINE_SIZE) ThreadState { // this causes 40-bytes padding
// make sure storage is cache-line aligned
WorkQueue workQueue;
// these are not accessed by the worker threads
alignas(CACHELINE_SIZE) // this causes 56-bytes padding
JobSystem* js; // this is in fact const and always initialized
std::thread thread; // unused for adopted threads
default_random_engine rndGen;
};
static_assert(sizeof(ThreadState) % CACHELINE_SIZE == 0,
"ThreadState doesn't align to a cache line");
ThreadState& getState() noexcept;
static void incRef(Job const* job) noexcept;
void decRef(Job const* job) noexcept;
Job* allocateJob() noexcept;
ThreadState* getStateToStealFrom(ThreadState& state) noexcept;
static bool hasJobCompleted(Job const* job) noexcept;
void requestExit() noexcept;
bool exitRequested() const noexcept;
bool hasActiveJobs() const noexcept;
void loop(ThreadState* state) noexcept;
bool execute(ThreadState& state) noexcept;
Job* steal(ThreadState& state) noexcept;
void finish(Job* job) noexcept;
void put(WorkQueue& workQueue, Job* job) noexcept;
Job* pop(WorkQueue& workQueue) noexcept;
Job* steal(WorkQueue& workQueue) noexcept;
[[nodiscard]]
uint32_t wait(std::unique_lock<Mutex>& lock, Job* job) noexcept;
void wait(std::unique_lock<Mutex>& lock) noexcept;
void wakeAll() noexcept;
void wakeOne() noexcept;
// these have thread contention, keep them together
Mutex mWaiterLock;
Condition mWaiterCondition;
std::atomic<int32_t> mActiveJobs = { 0 };
Arena<ObjectPoolAllocator<Job>, LockingPolicy::Mutex> mJobPool;
template <typename T>
using aligned_vector = std::vector<T, STLAlignedAllocator<T>>;
// These are essentially const, make sure they're on different cache-lines than the
// read-write atomics.
// We can't use "alignas(CACHELINE_SIZE)" because the standard allocator can't make this
// guarantee.
char padding[CACHELINE_SIZE];
alignas(16) // at least we align to half (or quarter) cache-line
aligned_vector<ThreadState> mThreadStates; // actual data is stored offline
std::atomic<bool> mExitRequested = { false }; // this one is almost never written
std::atomic<uint16_t> mAdoptedThreads = { 0 }; // this one is almost never written
Job* const mJobStorageBase; // Base for conversion to indices
uint16_t mThreadCount = 0; // total # of threads in the pool
uint8_t mParallelSplitCount = 0; // # of splits allowable in parallel_for
Job* mRootJob = nullptr;
Mutex mThreadMapLock; // this should have very little contention
tsl::robin_map<std::thread::id, ThreadState *> mThreadMap;
};
// -------------------------------------------------------------------------------------------------
// Utility functions built on top of JobSystem
namespace jobs {
// These are convenience C++11 style job creation methods that support lambdas
//
// IMPORTANT: these are less efficient to call and may perform heap allocation
// depending on the capture and parameters
//
template<typename CALLABLE, typename ... ARGS>
JobSystem::Job* createJob(JobSystem& js, JobSystem::Job* parent,
CALLABLE&& func, ARGS&&... args) noexcept {
struct Data {
explicit Data(std::function<void()> f) noexcept: f(std::move(f)) {}
std::function<void()> f;
// Renaming the method below could cause an Arrested Development.
void gob(JobSystem&, JobSystem::Job*) noexcept { f(); }
};
return js.emplaceJob<Data, &Data::gob>(parent,
std::bind(std::forward<CALLABLE>(func), std::forward<ARGS>(args)...));
}
template<typename CALLABLE, typename T, typename ... ARGS,
typename = std::enable_if_t<
std::is_member_function_pointer_v<std::remove_reference_t<CALLABLE>>
>
>
JobSystem::Job* createJob(JobSystem& js, JobSystem::Job* parent,
CALLABLE&& func, T&& o, ARGS&&... args) noexcept {
struct Data {
explicit Data(std::function<void()> f) noexcept: f(std::move(f)) {}
std::function<void()> f;
// Renaming the method below could cause an Arrested Development.
void gob(JobSystem&, JobSystem::Job*) noexcept { f(); }
};
return js.emplaceJob<Data, &Data::gob>(parent,
std::bind(std::forward<CALLABLE>(func), std::forward<T>(o), std::forward<ARGS>(args)...));
}
namespace details {
template<typename S, typename F>
struct ParallelForJobData {
using SplitterType = S;
using Functor = F;
using JobData = ParallelForJobData;
using size_type = uint32_t;
ParallelForJobData(size_type start, size_type count, uint8_t splits,
Functor functor,
const SplitterType& splitter) noexcept
: start(start), count(count),
functor(std::move(functor)),
splits(splits),
splitter(splitter) {
}
void parallelWithJobs(JobSystem& js, JobSystem::Job* parent) noexcept {
assert(parent);
// this branch is often mispredicted (both sides happen about 50% of the time)
right_side:
if (splitter.split(splits, count)) {
const size_type lc = count / 2;
JobSystem::Job* l = js.emplaceJob<JobData, &JobData::parallelWithJobs>(parent,
start, lc, splits + uint8_t(1), functor, splitter);
if (UTILS_UNLIKELY(l == nullptr)) {
// couldn't create a job, just pretend we're done splitting
goto execute;
}
// start the left side before attempting the right side, so we parallelize in case
// of job creation failure -- rare, but still.
js.run(l, JobSystem::getThreadId(parent));
// don't spawn a job for the right side, just reuse us -- spawning jobs is more
// costly than we'd like.
start += lc;
count -= lc;
++splits;
goto right_side;
} else {
execute:
// we're done splitting, do the real work here!
functor(start, count);
}
}
private:
size_type start; // 4
size_type count; // 4
Functor functor; // ?
uint8_t splits; // 1
SplitterType splitter; // 1
};
} // namespace details
// parallel jobs with start/count indices
template<typename S, typename F>
JobSystem::Job* parallel_for(JobSystem& js, JobSystem::Job* parent,
uint32_t start, uint32_t count, F functor, const S& splitter) noexcept {
using JobData = details::ParallelForJobData<S, F>;
return js.emplaceJob<JobData, &JobData::parallelWithJobs>(parent,
start, count, 0, std::move(functor), splitter);
}
// parallel jobs with pointer/count
template<typename T, typename S, typename F>
JobSystem::Job* parallel_for(JobSystem& js, JobSystem::Job* parent,
T* data, uint32_t count, F functor, const S& splitter) noexcept {
auto user = [data, f = std::move(functor)](uint32_t s, uint32_t c) {
f(data + s, c);
};
using JobData = details::ParallelForJobData<S, decltype(user)>;
return js.emplaceJob<JobData, &JobData::parallelWithJobs>(parent,
0, count, 0, std::move(user), splitter);
}
// parallel jobs on a Slice<>
template<typename T, typename S, typename F>
JobSystem::Job* parallel_for(JobSystem& js, JobSystem::Job* parent,
Slice<T> slice, F functor, const S& splitter) noexcept {
return parallel_for(js, parent, slice.data(), slice.size(), functor, splitter);
}
template<size_t COUNT, size_t MAX_SPLITS = 12>
class CountSplitter {
public:
bool split(size_t splits, size_t count) const noexcept {
return (splits < MAX_SPLITS && count >= COUNT * 2);
}
};
} // namespace jobs
} // namespace utils
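/*
 * Usage sketch (illustrative only, not part of the original header): doubling a
 * vector in parallel from a thread that has adopted the JobSystem. The data,
 * splitter size and lambda body are assumptions for the example.
 *
 *   utils::JobSystem js;
 *   js.adopt();                                   // the calling thread joins the pool
 *   std::vector<float> data(4096, 1.0f);
 *   auto* job = utils::jobs::parallel_for(js, nullptr,
 *           data.data(), uint32_t(data.size()),
 *           [](float* p, uint32_t n) { for (uint32_t i = 0; i < n; i++) p[i] *= 2.0f; },
 *           utils::jobs::CountSplitter<64>());
 *   js.runAndWait(job);                           // blocks until all splits complete
 *   js.emancipate();
 */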
#endif // TNT_UTILS_JOBSYSTEM_H

View File

@@ -0,0 +1,215 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_PROFILER_H
#define TNT_UTILS_PROFILER_H
#include <ratio>
#include <chrono> // note: This is safe (only used inline)
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#if defined(__linux__)
# include <unistd.h>
# include <sys/ioctl.h>
# include <linux/perf_event.h>
#endif
#include <utils/compiler.h>
namespace utils {
class Profiler {
public:
enum {
INSTRUCTIONS = 0, // must be zero
CPU_CYCLES = 1,
DCACHE_REFS = 2,
DCACHE_MISSES = 3,
BRANCHES = 4,
BRANCH_MISSES = 5,
ICACHE_REFS = 6,
ICACHE_MISSES = 7,
// Must be last one
EVENT_COUNT
};
enum {
EV_CPU_CYCLES = 1u << CPU_CYCLES,
EV_L1D_REFS = 1u << DCACHE_REFS,
EV_L1D_MISSES = 1u << DCACHE_MISSES,
EV_BPU_REFS = 1u << BRANCHES,
EV_BPU_MISSES = 1u << BRANCH_MISSES,
EV_L1I_REFS = 1u << ICACHE_REFS,
EV_L1I_MISSES = 1u << ICACHE_MISSES,
// helpers
EV_L1D_RATES = EV_L1D_REFS | EV_L1D_MISSES,
EV_L1I_RATES = EV_L1I_REFS | EV_L1I_MISSES,
EV_BPU_RATES = EV_BPU_REFS | EV_BPU_MISSES,
};
Profiler() noexcept; // must call resetEvents()
explicit Profiler(uint32_t eventMask) noexcept;
~Profiler() noexcept;
Profiler(const Profiler& rhs) = delete;
Profiler(Profiler&& rhs) = delete;
Profiler& operator=(const Profiler& rhs) = delete;
Profiler& operator=(Profiler&& rhs) = delete;
// selects which events are enabled.
uint32_t resetEvents(uint32_t eventMask) noexcept;
uint32_t getEnabledEvents() const noexcept { return mEnabledEvents; }
// could return false if performance counters are not supported/enabled
bool isValid() const { return mCountersFd[0] >= 0; }
class Counters {
friend class Profiler;
uint64_t nr;
uint64_t time_enabled;
uint64_t time_running;
struct {
uint64_t value;
uint64_t id;
} counters[EVENT_COUNT];
friend Counters operator-(Counters lhs, const Counters& rhs) noexcept {
lhs.nr -= rhs.nr;
lhs.time_enabled -= rhs.time_enabled;
lhs.time_running -= rhs.time_running;
for (size_t i = 0; i < EVENT_COUNT; ++i) {
lhs.counters[i].value -= rhs.counters[i].value;
}
return lhs;
}
public:
uint64_t getInstructions() const { return counters[INSTRUCTIONS].value; }
uint64_t getCpuCycles() const { return counters[CPU_CYCLES].value; }
uint64_t getL1DReferences() const { return counters[DCACHE_REFS].value; }
uint64_t getL1DMisses() const { return counters[DCACHE_MISSES].value; }
uint64_t getL1IReferences() const { return counters[ICACHE_REFS].value; }
uint64_t getL1IMisses() const { return counters[ICACHE_MISSES].value; }
uint64_t getBranchInstructions() const { return counters[BRANCHES].value; }
uint64_t getBranchMisses() const { return counters[BRANCH_MISSES].value; }
std::chrono::duration<uint64_t, std::nano> getWallTime() const {
return std::chrono::duration<uint64_t, std::nano>(time_enabled);
}
std::chrono::duration<uint64_t, std::nano> getRunningTime() const {
return std::chrono::duration<uint64_t, std::nano>(time_running);
}
double getIPC() const noexcept {
uint64_t cpuCycles = getCpuCycles();
uint64_t instructions = getInstructions();
return double(instructions) / double(cpuCycles);
}
double getCPI() const noexcept {
uint64_t cpuCycles = getCpuCycles();
uint64_t instructions = getInstructions();
return double(cpuCycles) / double(instructions);
}
double getL1DMissRate() const noexcept {
uint64_t cacheReferences = getL1DReferences();
uint64_t cacheMisses = getL1DMisses();
return double(cacheMisses) / double(cacheReferences);
}
double getL1DHitRate() const noexcept {
return 1.0 - getL1DMissRate();
}
double getL1IMissRate() const noexcept {
uint64_t cacheReferences = getL1IReferences();
uint64_t cacheMisses = getL1IMisses();
return double(cacheMisses) / double(cacheReferences);
}
double getL1IHitRate() const noexcept {
return 1.0 - getL1IMissRate();
}
double getBranchMissRate() const noexcept {
uint64_t branchReferences = getBranchInstructions();
uint64_t branchMisses = getBranchMisses();
return double(branchMisses) / double(branchReferences);
}
double getBranchHitRate() const noexcept {
return 1.0 - getBranchMissRate();
}
double getMPKI(uint64_t misses) const noexcept {
return (misses * 1000.0) / getInstructions();
}
};
#if defined(__linux__)
void reset() noexcept {
int fd = mCountersFd[0];
ioctl(fd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
}
void start() noexcept {
int fd = mCountersFd[0];
ioctl(fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
}
void stop() noexcept {
int fd = mCountersFd[0];
ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
}
Counters readCounters() noexcept;
#else // !__linux__
void reset() noexcept { }
void start() noexcept { }
void stop() noexcept { }
Counters readCounters() noexcept { return {}; }
#endif // __linux__
bool hasBranchRates() const noexcept {
return (mCountersFd[BRANCHES] >= 0) && (mCountersFd[BRANCH_MISSES] >= 0);
}
bool hasICacheRates() const noexcept {
return (mCountersFd[ICACHE_REFS] >= 0) && (mCountersFd[ICACHE_MISSES] >= 0);
}
private:
UTILS_UNUSED uint8_t mIds[EVENT_COUNT] = {};
int mCountersFd[EVENT_COUNT];
uint32_t mEnabledEvents = 0;
};
} // namespace utils
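/*
 * Usage sketch (illustrative only, not part of the original header): measuring a
 * hypothetical workload() on Linux; on other platforms the counters read back as zero.
 *
 *   utils::Profiler profiler(utils::Profiler::EV_CPU_CYCLES | utils::Profiler::EV_L1D_RATES);
 *   if (profiler.isValid()) {
 *       profiler.reset();
 *       profiler.start();
 *       workload();                               // hypothetical function under test
 *       profiler.stop();
 *       auto const c = profiler.readCounters();
 *       double ipc = c.getIPC();                  // instructions per cycle
 *       double l1dMissRate = c.getL1DMissRate();
 *   }
 */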
#endif // TNT_UTILS_PROFILER_H

View File

@@ -0,0 +1,185 @@
/*
* Copyright (C) 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_QUADTREE_H
#define TNT_UTILS_QUADTREE_H
#include <utils/debug.h>
#include <array>
#include <type_traits>
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
namespace utils {
namespace QuadTreeUtils {
/**
* 16-bit morton encoding
* @param x 8-bit horizontal coordinate
* @param y 8-bit vertical coordinate
* @return morton encoding of (x,y)
*/
static inline constexpr uint16_t morton(uint8_t x, uint8_t y) noexcept {
uint32_t r = x | (uint32_t(y) << 16);
r = (r | (r << 4u)) & 0x0f0f0f0fu;
r = (r | (r << 2u)) & 0x33333333u;
r = (r | (r << 1u)) & 0x55555555u;
return uint16_t(r | (r >> 15u));
}
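/*
 * Worked example (illustrative): morton(3, 5) interleaves x = 0b011 and y = 0b101
 * so that bit i of x lands at bit 2i and bit i of y at bit 2i+1, giving
 * 0b100111 = 39.
 */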
/**
* size of a quad-tree of height `height`
* @param height height of the Quad-tree
* @return the number of elements in the tree
*/
static inline constexpr size_t size(size_t height) noexcept {
return morton(uint8_t((1u << height) - 1u), 0u);
}
/**
* Index in the QuadTreeArray of a Quad-tree node referenced by its height and code
* @param l height of the node
* @param code morton code of the node
* @return index in the QuadTreeArray of this node
*/
static inline constexpr size_t index(size_t l, size_t code) noexcept {
return size(l) + code;
}
/**
* Index in the QuadTreeArray of the parent of the specified node
* @param l height of the node
* @param code morton code of the node
* @return index in the QuadTreeArray of this node's parent
*/
static inline constexpr size_t parent(size_t l, size_t code) noexcept {
assert_invariant(l > 0);
return index(l - 1u, code >> 2u);
}
// integrated unit-tests!
static_assert(size(0) == 0);
static_assert(size(1) == 1);
static_assert(size(2) == 5);
static_assert(size(3) == 21);
static_assert(size(4) == 85);
static_assert(size(5) == 341);
static_assert(size(6) == 1365);
static_assert(size(7) == 5461);
} // namespace QuadTreeUtils
/**
* A Quad-tree encoded as an array.
* @tparam T Type of the quad-tree nodes
* @tparam HEIGHT Height of the quad-tree
*/
template<typename T, size_t HEIGHT>
class QuadTreeArray : public std::array<T, QuadTreeUtils::size(HEIGHT)> {
static_assert(HEIGHT <= 7, "QuadTreeArray height must be <= 7 (16-bits morton)");
// Simple fixed capacity stack
template<typename TYPE, size_t CAPACITY,
typename = std::enable_if_t<std::is_trivial_v<TYPE>>>
class stack {
TYPE mElements[CAPACITY];
size_t mSize = 0;
public:
bool empty() const noexcept { return mSize == 0; }
void push(TYPE const& v) noexcept {
assert_invariant(mSize < CAPACITY);
mElements[mSize++] = v;
}
void pop() noexcept {
assert_invariant(mSize > 0);
--mSize;
}
const TYPE& back() const noexcept {
return mElements[mSize - 1];
}
};
public:
// code_t needs to be able to encode the # of entries of the largest level supported
// by the tree. With 7 levels, the largest level has 4096 entries (0 to 4095), so 12 bits suffice.
using code_t = uint16_t;
struct NodeId {
int8_t l : 4; // height of the node or -1 if invalid
code_t code : 12; // morton code of the node
static_assert(12 >= (HEIGHT - 1) * 2);
};
enum class TraversalResult {
EXIT, // end traversal
RECURSE, // proceed with the children
SKIP_CHILDREN // skip children
};
static inline constexpr size_t height() noexcept {
return HEIGHT;
}
/**
* non-recursive depth-first traversal
*
* @tparam Process closure to process each visited node
* @param l height of the node to start with
* @param code code of the node to start with
* @param maxHeight maximum height to visit, must be < height()
* @param process closure to process each visited node
*/
template<typename Process,
typename = std::enable_if_t<
std::is_invocable_r_v<TraversalResult, Process, NodeId>>>
static void traverse(int8_t l, code_t code, size_t maxHeight, Process&& process) noexcept {
assert_invariant(maxHeight < height());
const int8_t h = int8_t(maxHeight);
stack<NodeId, 4 * height()> stack;
stack.push({ l, code });
while (!stack.empty()) {
NodeId curr = stack.back();
stack.pop();
TraversalResult r = process(curr);
if (r == TraversalResult::EXIT) {
break;
}
if (r == TraversalResult::RECURSE && curr.l < h) {
for (size_t c = 0; c < 4; c++) {
stack.push({
int8_t(curr.l + 1u),
code_t((curr.code << 2u) | (3u - c))
});
}
}
}
}
template<typename Node,
typename = std::enable_if_t<
std::is_invocable_r_v<TraversalResult, Node, NodeId>>>
static void traverse(int8_t l, code_t code, Node&& process) noexcept {
traverse(l, code, height() - 1, std::forward<Node>(process));
}
};
} // namespace utils
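/*
 * Usage sketch (illustrative only, not part of the original header): visiting
 * every node of a height-3 tree starting at the root and storing each node's
 * height; the payload type is an assumption for the example.
 *
 *   using Tree = utils::QuadTreeArray<int8_t, 3>;
 *   Tree tree{};
 *   Tree::traverse(0, 0, [&tree](Tree::NodeId node) -> Tree::TraversalResult {
 *       tree[utils::QuadTreeUtils::index(node.l, node.code)] = node.l;
 *       return Tree::TraversalResult::RECURSE;    // keep descending to the leaves
 *   });
 */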
#endif //TNT_UTILS_QUADTREE_H

View File

@@ -0,0 +1,88 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_RANGE_H
#define TNT_UTILS_RANGE_H
#include <iterator>
#include <stddef.h>
namespace utils {
template<typename T>
struct Range {
using value_type = T;
T first = 0;
T last = 0; // this actually refers to one past the last
size_t size() const noexcept { return last - first; }
bool empty() const noexcept { return !size(); }
bool contains(const T& t) const noexcept { return first <= t && t < last; }
bool overlaps(const Range<T>& that) const noexcept {
return (that.first < this->last) && (that.last > this->first);
}
class const_iterator {
friend struct Range;
T value = {};
public:
const_iterator() noexcept = default;
explicit const_iterator(T value) noexcept : value(value) {}
using value_type = T;
using pointer = value_type*;
using difference_type = ptrdiff_t;
using iterator_category = std::random_access_iterator_tag;
const value_type operator*() const { return value; }
const value_type operator[](size_t n) const { return value + n; }
const_iterator& operator++() { ++value; return *this; }
const_iterator& operator--() { --value; return *this; }
const const_iterator operator++(int) { const_iterator t(value); value++; return t; }
const const_iterator operator--(int) { const_iterator t(value); value--; return t; }
const_iterator operator+(size_t rhs) const { return { value + rhs }; }
const_iterator operator+(size_t rhs) { return { value + rhs }; }
const_iterator operator-(size_t rhs) const { return { value - rhs }; }
difference_type operator-(const_iterator const& rhs) const { return value - rhs.value; }
bool operator==(const_iterator const& rhs) const { return (value == rhs.value); }
bool operator!=(const_iterator const& rhs) const { return (value != rhs.value); }
bool operator>=(const_iterator const& rhs) const { return (value >= rhs.value); }
bool operator> (const_iterator const& rhs) const { return (value > rhs.value); }
bool operator<=(const_iterator const& rhs) const { return (value <= rhs.value); }
bool operator< (const_iterator const& rhs) const { return (value < rhs.value); }
};
const_iterator begin() noexcept { return const_iterator{ first }; }
const_iterator end() noexcept { return const_iterator{ last }; }
const_iterator begin() const noexcept { return const_iterator{ first }; }
const_iterator end() const noexcept { return const_iterator{ last }; }
const_iterator front() const noexcept { return const_iterator{ first }; }
const_iterator back() const noexcept { return const_iterator{ last - 1 }; }
};
} // namespace utils
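/*
 * Usage sketch (illustrative only, not part of the original header): a half-open
 * integer range; the bounds are made up for the example.
 *
 *   utils::Range<uint32_t> r{ 2, 6 };             // covers 2, 3, 4, 5
 *   size_t n = r.size();                          // n == 4
 *   bool in = r.contains(5);                      // true; r.contains(6) is false
 *   uint32_t sum = 0;
 *   for (uint32_t v : r) { sum += v; }            // sum == 14
 */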
#endif // TNT_UTILS_RANGE_H

View File

@@ -0,0 +1,312 @@
/*
* Copyright (C) 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_RANGEMAP_H
#define TNT_UTILS_RANGEMAP_H
#include <utils/Panic.h>
#include <utils/Range.h>
#include <utils/debug.h>
#include <map>
#include <utility>
#include <stddef.h>
namespace utils {
/**
* Sparse container for a series of ordered non-overlapping intervals.
*
* RangeMap has a low memory footprint if it contains fairly homogeneous data. Internally, the
* intervals are automatically split and merged as elements are added or removed.
*
* Each interval maps to an instance of ValueType, which should support cheap equality checks
* and copy assignment. (simple concrete types are ideal)
*
* KeyType should support operator< because intervals are internally sorted using std::map.
*/
template<typename KeyType, typename ValueType>
class RangeMap {
public:
/**
* Replaces all slots between first (inclusive) and last (exclusive).
*/
void add(KeyType first, KeyType last, const ValueType& value) noexcept {
// First check if an existing range contains "first".
Iterator iter = findRange(first);
if (iter != end()) {
const Range<KeyType> existing = getRange(iter);
// Check if the existing range can be extended.
if (getValue(iter) == value) {
if (existing.last < last) {
wipe(existing.last, last);
iter = shrink(iter, existing.first, last);
mergeRight(iter);
}
return;
}
// Split the existing range into two ranges.
if (last < existing.last && first > existing.first) {
iter = shrink(iter, existing.first, first);
insert(first, last, value);
insert(last, existing.last, getValue(iter));
return;
}
clear(first, last);
insert(first, last, value);
return;
}
// Check if an existing range contains the end of the new range.
KeyType back = last;
iter = findRange(--back);
if (iter == end()) {
wipe(first, last);
insert(first, last, value);
return;
}
const Range<KeyType> existing = getRange(iter);
// Check if the existing range can be extended.
if (getValue(iter) == value) {
if (existing.first > first) {
wipe(first, existing.first);
iter = shrink(iter, first, existing.last);
mergeLeft(iter);
}
return;
}
// Clip the beginning of the existing range and potentially remove it.
if (last < existing.last) {
shrink(iter, last, existing.last);
}
wipe(first, last);
insert(first, last, value);
}
/**
* Shorthand for the "add" method that inserts a single element.
*/
void set(KeyType key, const ValueType& value) noexcept {
KeyType begin = key;
add(begin, ++key, value);
}
/**
* Checks if a range exists that encompasses the given key.
*/
bool has(KeyType key) const noexcept {
return findRange(key) != mMap.end();
}
/**
* Retrieves the element at the given location, panics if no element exists.
*/
const ValueType& get(KeyType key) const {
ConstIterator iter = findRange(key);
FILAMENT_CHECK_PRECONDITION(iter != end())
<< "RangeMap: No element exists at the given key.";
return getValue(iter);
}
/**
* Removes all elements between first (inclusive) and last (exclusive).
*/
void clear(KeyType first, KeyType last) noexcept {
// Check if an existing range contains "first".
Iterator iter = findRange(first);
if (iter != end()) {
const Range<KeyType> existing = getRange(iter);
// Split the existing range into two ranges.
if (last < existing.last && first > existing.first) {
iter = shrink(iter, existing.first, first);
insert(last, existing.last, getValue(iter));
return;
}
// Clip one of the ends of the existing range or remove it.
if (first > existing.first) {
shrink(iter, existing.first, first);
} else if (last < existing.last) {
shrink(iter, last, existing.last);
} else {
wipe(first, last);
}
// There might be another range that intersects the cleared range, so try again.
clear(first, last);
return;
}
// Check if an existing range contains the end of the new range.
KeyType back = last;
iter = findRange(--back);
if (iter == end()) {
wipe(first, last);
return;
}
const Range<KeyType> existing = getRange(iter);
// Clip the beginning of the existing range and potentially remove it.
if (last < existing.last) {
shrink(iter, last, existing.last);
}
wipe(first, last);
}
/**
* Shorthand for the "clear" method that clears a single element.
*/
void reset(KeyType key) noexcept {
KeyType begin = key;
clear(begin, ++key);
}
/**
* Returns the number of internal interval objects (rarely used).
*/
size_t rangeCount() const noexcept { return mMap.size(); }
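/*
 * Usage sketch (illustrative only, not part of the original header): the key and
 * value types and the literal intervals are assumptions for the example.
 *
 *   utils::RangeMap<int, char> map;
 *   map.add(0, 8, 'a');           // [0,8) -> 'a'
 *   map.add(2, 4, 'b');           // splits into [0,2)='a', [2,4)='b', [4,8)='a'
 *   char c = map.get(3);          // c == 'b'
 *   size_t n = map.rangeCount();  // n == 3
 *   map.clear(0, 2);              // removes [0,2); rangeCount() becomes 2
 */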
private:
using Map = std::map<KeyType, std::pair<Range<KeyType>, ValueType>>;
using Iterator = typename Map::iterator;
using ConstIterator = typename Map::const_iterator;
ConstIterator begin() const noexcept { return mMap.begin(); }
ConstIterator end() const noexcept { return mMap.end(); }
Iterator begin() noexcept { return mMap.begin(); }
Iterator end() noexcept { return mMap.end(); }
Range<KeyType>& getRange(Iterator iter) const { return iter->second.first; }
ValueType& getValue(Iterator iter) const { return iter->second.second; }
const Range<KeyType>& getRange(ConstIterator iter) const { return iter->second.first; }
const ValueType& getValue(ConstIterator iter) const { return iter->second.second; }
// Private helper that assumes there is no existing range that overlaps the given range.
void insert(KeyType first, KeyType last, const ValueType& value) noexcept {
assert_invariant(!has(first));
assert_invariant(!has(last - 1));
// Check if there is an adjacent range to the left that can be extended.
KeyType previous = first;
if (Iterator iter = findRange(--previous); iter != end() && getValue(iter) == value) {
getRange(iter).last = last;
mergeRight(iter);
return;
}
// Check if there is an adjacent range to the right that can be extended.
if (Iterator iter = findRange(last); iter != end() && getValue(iter) == value) {
getRange(iter).first = first;
return;
}
mMap[first] = {Range<KeyType> { first, last }, value};
}
// Private helper that erases all intervals that are wholly contained within the given range.
// Note that this is quite different from the public "clear" method.
void wipe(KeyType first, KeyType last) noexcept {
// Find the first range whose beginning is greater than or equal to "first".
Iterator iter = mMap.lower_bound(first);
while (iter != end() && getRange(iter).first < last) {
KeyType existing_last = getRange(iter).last;
if (existing_last > last) {
break;
}
iter = mMap.erase(iter);
}
}
// Checks if there is a range to the right that touches the given range.
// If so, erases it, extends the given range rightwards, and returns true.
bool mergeRight(Iterator iter) {
Iterator next = iter;
if (++next == end() || getValue(next) != getValue(iter)) {
return false;
}
if (getRange(next).first != getRange(iter).last) {
return false;
}
getRange(iter).last = getRange(next).last;
mMap.erase(next);
return true;
}
// Checks if there is a range to the left that touches the given range.
// If so, erases it, extends the given range leftwards, and returns true.
bool mergeLeft(Iterator iter) {
Iterator prev = iter;
if (--prev == end() || getValue(prev) != getValue(iter)) {
return false;
}
if (getRange(prev).last != getRange(iter).first) {
return false;
}
getRange(iter).first = getRange(prev).first;
mMap.erase(prev);
return true;
}
// Private helper that clips one end of an existing range.
Iterator shrink(Iterator iter, KeyType first, KeyType last) {
assert_invariant(first < last);
assert_invariant(getRange(iter).first == first || getRange(iter).last == last);
std::pair<Range<KeyType>, ValueType> value = {{first, last}, iter->second.second};
mMap.erase(iter);
return mMap.insert({first, value}).first;
}
// If the given key is encompassed by an existing range, returns an iterator for that range.
// If no encompassing range exists, returns end().
ConstIterator findRange(KeyType key) const noexcept {
return findRangeT<ConstIterator>(*this, key);
}
// If the given key is encompassed by an existing range, returns an iterator for that range.
// If no encompassing range exists, returns end().
Iterator findRange(KeyType key) noexcept {
return findRangeT<Iterator>(*this, key);
}
// This template method allows us to avoid code duplication for const and non-const variants of
// findRange. C++17 has "std::as_const()" but that would not be helpful here, as we would still
// need to convert a const iterator to a non-const iterator.
template<typename IteratorType, typename SelfType>
static IteratorType findRangeT(SelfType& instance, KeyType key) noexcept {
// Find the first range whose beginning is greater than or equal to the given key.
IteratorType iter = instance.mMap.lower_bound(key);
if (iter != instance.end() && instance.getRange(iter).contains(key)) {
return iter;
}
// If that was the first range, or if the map is empty, return end().
if (iter == instance.begin()) {
return instance.end();
}
// Check the range immediately previous to the one that was found.
return instance.getRange(--iter).contains(key) ? iter : instance.end();
}
// This maps from the start value of each range to the range itself.
Map mMap;
};
} // namespace utils
#endif // TNT_UTILS_RANGEMAP_H
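For readers unfamiliar with the coalescing behavior above, here is a minimal usage sketch. It assumes the class is declared earlier in this header as utils::RangeMap<KeyType, ValueType> with half-open [first, last) ranges, which is what the implementation suggests; the include path follows the header guard.

#include <utils/RangeMap.h>
#include <cassert>

int main() {
    utils::RangeMap<int, char> map;
    map.add(0, 4, 'a');            // covers keys 0..3
    map.add(4, 8, 'a');            // adjacent range with an equal value: coalesced
    assert(map.rangeCount() == 1);
    map.set(2, 'b');               // splits [0,8) into [0,2)='a', [2,3)='b', [3,8)='a'
    assert(map.rangeCount() == 3);
    map.reset(2);                  // clears the single element at key 2
    assert(!map.has(2) && map.has(1) && map.get(3) == 'a');
    return 0;
}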

View File

@@ -0,0 +1,105 @@
/*
* Copyright (C) 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_STOPWATCH_H
#define TNT_UTILS_STOPWATCH_H
#include <utils/Log.h>
#include <utils/ostream.h>
#include <algorithm>
#include <chrono>
#include <limits>
#include <ratio>
#include <stddef.h>
namespace utils {
/*
* A very basic Stopwatch class
*/
template<typename Clock = std::chrono::steady_clock>
class Stopwatch {
public:
using duration = typename Clock::duration;
using time_point = typename Clock::time_point;
private:
time_point mStart;
duration mMinLap = std::numeric_limits<duration>::max();
duration mMaxLap{};
std::chrono::duration<double, std::nano> mAvgLap{};
size_t mCount = 0;
const char* mName = nullptr;
public:
// Create a Stopwatch with the given name (the clock is chosen via the Clock template parameter)
explicit Stopwatch(const char* name) noexcept: mName(name) {}
// Logs min/avg/max lap time
~Stopwatch() noexcept;
// start the stopwatch
inline void start() noexcept {
mStart = Clock::now();
}
// stop the stopwatch
inline void stop() noexcept {
auto d = Clock::now() - mStart;
mMinLap = std::min(mMinLap, d);
mMaxLap = std::max(mMaxLap, d);
mAvgLap = (mAvgLap * mCount + d) / (mCount + 1);
mCount++;
}
// get the minimum lap time recorded
duration getMinLapTime() const noexcept { return mMinLap; }
// get the maximum lap time recorded
duration getMaxLapTime() const noexcept { return mMaxLap; }
// get the average lap time
duration getAverageLapTime() const noexcept { return std::chrono::duration_cast<duration>(mAvgLap); }
};
template<typename Clock>
Stopwatch<Clock>::~Stopwatch() noexcept {
slog.d << "Stopwatch \"" << mName << "\" : ["
<< mMinLap.count() << ", "
<< std::chrono::duration_cast<duration>(mAvgLap).count() << ", "
<< mMaxLap.count() << "] ns" << io::endl;
}
/*
* AutoStopwatch can be used to start and stop a Stopwatch automatically
* when entering and exiting a scope.
*/
template<typename Stopwatch>
class AutoStopwatch {
Stopwatch& stopwatch;
public:
inline explicit AutoStopwatch(Stopwatch& stopwatch) noexcept: stopwatch(stopwatch) {
stopwatch.start();
}
inline ~AutoStopwatch() noexcept { stopwatch.stop(); }
};
} // namespace utils
#endif // TNT_UTILS_STOPWATCH_H
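A small usage sketch of the two classes above; the include path and the simulated workload are illustrative:

#include <utils/Stopwatch.h>
#include <chrono>
#include <thread>

void measureLoop() {
    utils::Stopwatch<> frameTimer("frame");   // logs min/avg/max lap times when destroyed
    for (int i = 0; i < 3; i++) {
        // start()/stop() are driven by the RAII helper
        utils::AutoStopwatch<utils::Stopwatch<>> lap(frameTimer);
        std::this_thread::sleep_for(std::chrono::milliseconds(1));  // simulated work
    }
}   // ~Stopwatch() prints something like: Stopwatch "frame" : [min, avg, max] ns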

View File

@@ -0,0 +1,26 @@
/*
* Copyright (C) 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_THERMALMANAGER_H
#define TNT_UTILS_THERMALMANAGER_H
#if defined(__ANDROID__)
#include <utils/android/ThermalManager.h>
#else
#include <utils/generic/ThermalManager.h>
#endif
#endif // TNT_UTILS_THERMALMANAGER_H

View File

@@ -0,0 +1,32 @@
/*
* Copyright (C) 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_THREADUTILS_H
#define TNT_UTILS_THREADUTILS_H
#include <thread>
namespace utils {
class ThreadUtils {
public:
static std::thread::id getThreadId() noexcept;
static bool isThisThread(std::thread::id id) noexcept;
};
} // namespace utils
#endif // TNT_UTILS_THREADUTILS_H

View File

@@ -0,0 +1,216 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_WORKSTEALINGDEQUEUE_H
#define TNT_UTILS_WORKSTEALINGDEQUEUE_H
#include <atomic>
#include <type_traits>
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
namespace utils {
/*
* A templated, lockless, fixed-size work-stealing dequeue
*
*
* top bottom
* v v
* |----|----|----|----|----|----|
* steal() push(), pop()
* any thread main thread
*
* References:
* - This code is largely inspired from
* https://blog.molecular-matters.com/2015/09/25/job-system-2-0-lock-free-work-stealing-part-3-going-lock-free/
* - other implementations
* https://github.com/ConorWilliams/ConcurrentDeque/blob/main/include/riften/deque.hpp
* https://github.com/ssbl/concurrent-deque/blob/master/include/deque.hpp
* https://github.com/taskflow/work-stealing-queue/blob/master/wsq.hpp
*/
template <typename TYPE, size_t COUNT>
class WorkStealingDequeue {
static_assert(!(COUNT & (COUNT - 1)), "COUNT must be a power of two");
static constexpr size_t MASK = COUNT - 1;
// mTop and mBottom must be signed integers. We use 64-bit atomics so we don't have
// to worry about wrapping around.
using index_t = int64_t;
std::atomic<index_t> mTop = { 0 }; // written/read in pop()/steal()
std::atomic<index_t> mBottom = { 0 }; // written only in pop(), read in push(), steal()
TYPE mItems[COUNT];
// NOTE: it's not safe to return a reference because getItemAt() can be called
// concurrently and the caller could std::move() the item unsafely.
TYPE getItemAt(index_t index) noexcept { return mItems[index & MASK]; }
void setItemAt(index_t index, TYPE item) noexcept { mItems[index & MASK] = item; }
public:
using value_type = TYPE;
inline void push(TYPE item) noexcept;
inline TYPE pop() noexcept;
inline TYPE steal() noexcept;
size_t getSize() const noexcept { return COUNT; }
// for debugging only...
size_t getCount() const noexcept {
index_t bottom = mBottom.load(std::memory_order_relaxed);
index_t top = mTop.load(std::memory_order_relaxed);
return bottom - top;
}
};
/*
* Adds an item at the BOTTOM of the queue.
*
* Must be called from the main thread.
*/
template <typename TYPE, size_t COUNT>
void WorkStealingDequeue<TYPE, COUNT>::push(TYPE item) noexcept {
// std::memory_order_relaxed is sufficient because this load doesn't acquire anything from
// another thread. mBottom is only written in pop() which cannot be concurrent with push()
index_t bottom = mBottom.load(std::memory_order_relaxed);
setItemAt(bottom, item);
// Here we would need std::memory_order_release because we are releasing the item we just
// pushed to other threads that call steal().
// However, seq_cst generally cannot be mixed with other memory orders, so we must use seq_cst.
// see: https://plv.mpi-sws.org/scfix/paper.pdf
mBottom.store(bottom + 1, std::memory_order_seq_cst);
}
/*
* Removes an item from the BOTTOM of the queue.
*
* Must be called from the main thread.
*/
template <typename TYPE, size_t COUNT>
TYPE WorkStealingDequeue<TYPE, COUNT>::pop() noexcept {
// std::memory_order_seq_cst is needed to guarantee ordering in steal()
// Note however that this is not a typical acquire/release operation:
// - not acquire because mBottom is only written in push() which is not concurrent
// - not release because we're not publishing anything to steal() here
//
// QUESTION: does this prevent the mTop load below from being reordered before the "store" part
// of fetch_sub()? Hopefully it does. If not we'd need a full memory barrier.
//
index_t bottom = mBottom.fetch_sub(1, std::memory_order_seq_cst) - 1;
// bottom could be -1 if we tried to pop() from an empty queue. This will be corrected below.
assert( bottom >= -1 );
// std::memory_order_seq_cst is needed to guarantee ordering in steal()
// Note however that this is not a typical acquire operation
// (i.e. other thread's writes of mTop don't publish data)
index_t top = mTop.load(std::memory_order_seq_cst);
if (top < bottom) {
// The queue isn't empty and this isn't the last item; just return it. This is the common case.
return getItemAt(bottom);
}
TYPE item{};
if (top == bottom) {
// we just took the last item
item = getItemAt(bottom);
// Because we know we took the last item, we could be racing with steal() -- the last
// item being both at the top and bottom of the queue.
// We resolve this potential race by also stealing that item from ourselves.
if (mTop.compare_exchange_strong(top, top + 1,
std::memory_order_seq_cst,
std::memory_order_relaxed)) {
// Success: we stole our last item from ourselves, meaning that a concurrent steal()
// would have failed.
// mTop now equals top + 1, we adjust top to make the queue empty.
top++;
} else {
// Failure: mTop was not equal to top, which means the item was stolen under our feet.
// `top` now equals mTop. Simply discard the item we just popped.
// The queue is now empty.
item = TYPE();
}
} else {
// We can get here if the item was stolen just before we read mTop; we'll adjust
// mBottom below.
assert(top - bottom == 1);
}
// Here, we only need std::memory_order_relaxed because we're not publishing any data.
// No concurrent writes to mBottom are possible, so it's always safe to write mBottom.
// However, seq_cst generally cannot be mixed with other memory orders, so we must use seq_cst.
// see: https://plv.mpi-sws.org/scfix/paper.pdf
mBottom.store(top, std::memory_order_seq_cst);
return item;
}
/*
* Steals an item from the TOP of another thread's queue.
*
* This can be called concurrently with steal(), push() or pop()
*
* steal() never fails: either there is an item and it atomically takes it, or there isn't and
* it returns an empty item.
*/
template <typename TYPE, size_t COUNT>
TYPE WorkStealingDequeue<TYPE, COUNT>::steal() noexcept {
while (true) {
/*
* Note: a key component of this algorithm is that mTop is read before mBottom here
* (and observed as such in other threads)
*/
// std::memory_order_seq_cst is needed to guarantee ordering in pop()
// Note however that this is not a typical acquire operation
// (i.e. other thread's writes of mTop don't publish data)
index_t top = mTop.load(std::memory_order_seq_cst);
// std::memory_order_acquire is needed because we're acquiring items published in push().
// std::memory_order_seq_cst is needed to guarantee ordering in pop()
index_t bottom = mBottom.load(std::memory_order_seq_cst);
if (top >= bottom) {
// queue is empty
return TYPE();
}
// The queue isn't empty
TYPE item(getItemAt(top));
if (mTop.compare_exchange_strong(top, top + 1,
std::memory_order_seq_cst,
std::memory_order_relaxed)) {
// success: we stole an item, just return it.
return item;
}
// failure: the item we just tried to steal was pop()'ed under our feet,
// simply discard it; nothing to do -- it's okay to try again.
// However, item might be corrupted, so it must be trivially destructible
static_assert(std::is_trivially_destructible_v<TYPE>);
}
}
} // namespace utils
#endif // TNT_UTILS_WORKSTEALINGDEQUEUE_H
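To make the push/pop/steal contract concrete, here is a minimal sketch; the Job type, the include path and the 256-slot capacity are illustrative assumptions:

#include <utils/WorkStealingDequeue.h>

// A trivially destructible work item, as required by steal().
struct Job {
    void (*run)(void*) = nullptr;
    void* arg = nullptr;
};

utils::WorkStealingDequeue<Job, 256> queue;   // COUNT must be a power of two

// Owner (main) thread only: push() and pop() work LIFO at the bottom.
void ownerThread(Job j) {
    queue.push(j);
    Job mine = queue.pop();          // may come back empty if another thread stole it first
    if (mine.run) mine.run(mine.arg);
}

// Any other thread: steal() takes work FIFO from the top, or returns an empty Job.
void workerThread() {
    Job stolen = queue.steal();
    if (stolen.run) stolen.run(stolen.arg);
}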

View File

@@ -0,0 +1,123 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_ZIP2ITERATOR_H
#define TNT_UTILS_ZIP2ITERATOR_H
#include <iterator>
#include <type_traits>
#include <utility>
#include <stddef.h>
namespace utils {
/*
* A random access iterator that wraps two other random access iterators.
* This mostly exists so that one can sort an array using values from another.
*/
template<typename It1, typename It2>
class Zip2Iterator {
std::pair<It1, It2> mIt;
using Ref1 = typename std::iterator_traits<It1>::reference;
using Ref2 = typename std::iterator_traits<It2>::reference;
using Val1 = typename std::iterator_traits<It1>::value_type;
using Val2 = typename std::iterator_traits<It2>::value_type;
public:
struct Ref : public std::pair<Ref1, Ref2> {
using std::pair<Ref1, Ref2>::pair;
using std::pair<Ref1, Ref2>::operator=;
private:
friend void swap(Ref lhs, Ref rhs) {
using std::swap;
swap(lhs.first, rhs.first);
swap(lhs.second, rhs.second);
}
};
using value_type = std::pair<Val1, Val2>;
using reference = Ref;
using pointer = value_type*;
using difference_type = ptrdiff_t;
using iterator_category = std::random_access_iterator_tag;
Zip2Iterator() = default;
Zip2Iterator(It1 first, It2 second) : mIt({first, second}) {}
Zip2Iterator(Zip2Iterator const& rhs) noexcept = default;
Zip2Iterator& operator=(Zip2Iterator const& rhs) = default;
reference operator*() const { return { *mIt.first, *mIt.second }; }
reference operator[](size_t n) const { return *(*this + n); }
Zip2Iterator& operator++() {
++mIt.first;
++mIt.second;
return *this;
}
Zip2Iterator& operator--() {
--mIt.first;
--mIt.second;
return *this;
}
// Postfix operator needed by Microsoft C++
const Zip2Iterator operator++(int) {
Zip2Iterator t(*this);
mIt.first++;
mIt.second++;
return t;
}
const Zip2Iterator operator--(int) {
Zip2Iterator t(*this);
mIt.first--;
mIt.second--;
return t;
}
Zip2Iterator& operator+=(size_t v) {
mIt.first += v;
mIt.second += v;
return *this;
}
Zip2Iterator& operator-=(size_t v) {
mIt.first -= v;
mIt.second -= v;
return *this;
}
Zip2Iterator operator+(size_t rhs) const { return { mIt.first + rhs, mIt.second + rhs }; }
Zip2Iterator operator+(size_t rhs) { return { mIt.first + rhs, mIt.second + rhs }; }
Zip2Iterator operator-(size_t rhs) const { return { mIt.first - rhs, mIt.second - rhs }; }
difference_type operator-(Zip2Iterator const& rhs) const { return mIt.first - rhs.mIt.first; }
bool operator==(Zip2Iterator const& rhs) const { return (mIt.first == rhs.mIt.first); }
bool operator!=(Zip2Iterator const& rhs) const { return (mIt.first != rhs.mIt.first); }
bool operator>=(Zip2Iterator const& rhs) const { return (mIt.first >= rhs.mIt.first); }
bool operator> (Zip2Iterator const& rhs) const { return (mIt.first > rhs.mIt.first); }
bool operator<=(Zip2Iterator const& rhs) const { return (mIt.first <= rhs.mIt.first); }
bool operator< (Zip2Iterator const& rhs) const { return (mIt.first < rhs.mIt.first); }
};
} // namespace utils
#endif // TNT_UTILS_ZIP2ITERATOR_H
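A sketch of the stated use case, sorting one array using values from another; the include path is assumed from the header guard:

#include <utils/Zip2Iterator.h>
#include <algorithm>
#include <vector>

// Sorts "keys" ascending while applying the same permutation to "payload".
// Both vectors are expected to have the same size.
void sortByKey(std::vector<int>& keys, std::vector<float>& payload) {
    utils::Zip2Iterator<int*, float*> first(keys.data(), payload.data());
    std::sort(first, first + keys.size(),
            [](auto const& a, auto const& b) { return a.first < b.first; });
}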

View File

@@ -0,0 +1,74 @@
/*
* Copyright (C) 2024 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_ANDROID_PERFORMANCEHINTMANAGER_H
#define TNT_UTILS_ANDROID_PERFORMANCEHINTMANAGER_H
#include <utils/compiler.h>
#include <utils/PrivateImplementation.h>
#include <stddef.h>
#include <stdint.h>
namespace utils {
namespace details {
struct PerformanceHintManager;
} // namespace details
class UTILS_PUBLIC PerformanceHintManager :
private PrivateImplementation<details::PerformanceHintManager> {
friend struct details::PerformanceHintManager;
struct SessionDetails;
public:
class UTILS_PUBLIC Session : PrivateImplementation<SessionDetails> {
friend class PerformanceHintManager;
friend struct SessionDetails;
public:
Session() noexcept;
Session(PerformanceHintManager& manager,
int32_t const* threadIds, size_t size,
int64_t initialTargetWorkDurationNanos) noexcept;
~Session() noexcept;
Session(Session&& rhs) noexcept;
Session& operator=(Session&& rhs) noexcept;
Session(Session const& rhs) = delete;
Session& operator=(Session const& rhs) = delete;
bool isValid() const;
int updateTargetWorkDuration(int64_t targetDurationNanos) noexcept;
int reportActualWorkDuration(int64_t actualDurationNanos) noexcept;
};
// caveat: This must be called on a Java thread
PerformanceHintManager() noexcept;
~PerformanceHintManager() noexcept;
// Whether the PerformanceHintManager APIs are supported by the platform (this doesn't mean this
// particular PerformanceHintManager instance is usable; use isValid() for that).
static bool isSupported() noexcept;
// Whether PerformanceHintManager can be used.
bool isValid() const;
int64_t getPreferredUpdateRateNanos() const noexcept;
};
} // namespace utils
#endif //TNT_UTILS_ANDROID_PERFORMANCEHINTMANAGER_H
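A hedged usage sketch of the API above; the include path, the thread-id array and the nanosecond targets are illustrative only:

#include <utils/android/PerformanceHintManager.h>

void hintExample(int32_t const* renderThreadIds, size_t count) {
    if (!utils::PerformanceHintManager::isSupported()) return;
    utils::PerformanceHintManager manager;      // caveat: must be created on a Java thread
    if (!manager.isValid()) return;
    utils::PerformanceHintManager::Session session(manager, renderThreadIds, count,
            16'666'666 /* ~16.6 ms, i.e. a 60 fps frame budget */);
    if (session.isValid()) {
        session.reportActualWorkDuration(12'000'000);   // last frame took ~12 ms
        session.updateTargetWorkDuration(8'333'333);    // new target: ~120 fps
    }
}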

View File

@@ -0,0 +1,242 @@
/*
* Copyright (C) 2023 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_ANDROID_SYSTRACE_H
#define TNT_UTILS_ANDROID_SYSTRACE_H
#include <atomic>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <utils/compiler.h>
// enable tracing
#define SYSTRACE_ENABLE() ::utils::details::Systrace::enable(SYSTRACE_TAG)
// disable tracing
#define SYSTRACE_DISABLE() ::utils::details::Systrace::disable(SYSTRACE_TAG)
/**
* Creates a Systrace context in the current scope. Needed for calling all the other systrace
* commands below.
*/
#define SYSTRACE_CONTEXT() ::utils::details::Systrace ___trctx(SYSTRACE_TAG)
// SYSTRACE_NAME traces the beginning and end of the current scope. To trace
// the correct start and end times this macro should be declared first in the
// scope body.
// It also automatically creates a Systrace context
#define SYSTRACE_NAME(name) ::utils::details::ScopedTrace ___tracer(SYSTRACE_TAG, name)
// Denotes that a new frame has started processing.
#define SYSTRACE_FRAME_ID(frame) \
{ /* scope for frame id trace */ \
char buf[64]; \
snprintf(buf, 64, "frame %u", frame); \
SYSTRACE_NAME(buf); \
}
// SYSTRACE_CALL is a SYSTRACE_NAME that uses the current function name.
#define SYSTRACE_CALL() SYSTRACE_NAME(__FUNCTION__)
#define SYSTRACE_NAME_BEGIN(name) \
___trctx.traceBegin(SYSTRACE_TAG, name)
#define SYSTRACE_NAME_END() \
___trctx.traceEnd(SYSTRACE_TAG)
/**
* Trace the beginning of an asynchronous event. Unlike ATRACE_BEGIN/ATRACE_END
* contexts, asynchronous events do not need to be nested. The name describes
* the event, and the cookie provides a unique identifier for distinguishing
* simultaneous events. The name and cookie used to begin an event must be
* used to end it.
*/
#define SYSTRACE_ASYNC_BEGIN(name, cookie) \
___trctx.asyncBegin(SYSTRACE_TAG, name, cookie)
/**
* Trace the end of an asynchronous event.
* This should have a corresponding SYSTRACE_ASYNC_BEGIN.
*/
#define SYSTRACE_ASYNC_END(name, cookie) \
___trctx.asyncEnd(SYSTRACE_TAG, name, cookie)
/**
* Traces an integer counter value. name is used to identify the counter.
* This can be used to track how a value changes over time.
*/
#define SYSTRACE_VALUE32(name, val) \
___trctx.value(SYSTRACE_TAG, name, int32_t(val))
#define SYSTRACE_VALUE64(name, val) \
___trctx.value(SYSTRACE_TAG, name, int64_t(val))
// ------------------------------------------------------------------------------------------------
// No user serviceable code below...
// ------------------------------------------------------------------------------------------------
namespace utils {
namespace details {
class UTILS_PUBLIC Systrace {
public:
enum tags {
NEVER = SYSTRACE_TAG_NEVER,
ALWAYS = SYSTRACE_TAG_ALWAYS,
FILAMENT = SYSTRACE_TAG_FILAMENT,
JOBSYSTEM = SYSTRACE_TAG_JOBSYSTEM
// we could define more TAGS here, as we need them.
};
explicit Systrace(uint32_t tag) noexcept {
if (tag) init(tag);
}
static void enable(uint32_t tags) noexcept;
static void disable(uint32_t tags) noexcept;
inline void traceBegin(uint32_t tag, const char* name) noexcept {
if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
beginSection(this, name);
}
}
inline void traceEnd(uint32_t tag) noexcept {
if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
endSection(this);
}
}
inline void asyncBegin(uint32_t tag, const char* name, int32_t cookie) noexcept {
if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
beginAsyncSection(this, name, cookie);
}
}
inline void asyncEnd(uint32_t tag, const char* name, int32_t cookie) noexcept {
if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
endAsyncSection(this, name, cookie);
}
}
inline void value(uint32_t tag, const char* name, int32_t value) noexcept {
if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
setCounter(this, name, value);
}
}
inline void value(uint32_t tag, const char* name, int64_t value) noexcept {
if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
setCounter(this, name, value);
}
}
private:
friend class ScopedTrace;
// whether tracing is supported at all by the platform
using ATrace_isEnabled_t = bool (*)();
using ATrace_beginSection_t = void (*)(const char* sectionName);
using ATrace_endSection_t = void (*)();
using ATrace_beginAsyncSection_t = void (*)(const char* sectionName, int32_t cookie);
using ATrace_endAsyncSection_t = void (*)(const char* sectionName, int32_t cookie);
using ATrace_setCounter_t = void (*)(const char* counterName, int64_t counterValue);
struct GlobalState {
bool isTracingAvailable;
std::atomic<uint32_t> isTracingEnabled;
int markerFd;
ATrace_isEnabled_t ATrace_isEnabled;
ATrace_beginSection_t ATrace_beginSection;
ATrace_endSection_t ATrace_endSection;
ATrace_beginAsyncSection_t ATrace_beginAsyncSection;
ATrace_endAsyncSection_t ATrace_endAsyncSection;
ATrace_setCounter_t ATrace_setCounter;
void (*beginSection)(Systrace* that, const char* name);
void (*endSection)(Systrace* that);
void (*beginAsyncSection)(Systrace* that, const char* name, int32_t cookie);
void (*endAsyncSection)(Systrace* that, const char* name, int32_t cookie);
void (*setCounter)(Systrace* that, const char* name, int64_t value);
};
static GlobalState sGlobalState;
// per-instance versions for better performance
ATrace_isEnabled_t ATrace_isEnabled;
ATrace_beginSection_t ATrace_beginSection;
ATrace_endSection_t ATrace_endSection;
ATrace_beginAsyncSection_t ATrace_beginAsyncSection;
ATrace_endAsyncSection_t ATrace_endAsyncSection;
ATrace_setCounter_t ATrace_setCounter;
void (*beginSection)(Systrace* that, const char* name);
void (*endSection)(Systrace* that);
void (*beginAsyncSection)(Systrace* that, const char* name, int32_t cookie);
void (*endAsyncSection)(Systrace* that, const char* name, int32_t cookie);
void (*setCounter)(Systrace* that, const char* name, int64_t value);
void init(uint32_t tag) noexcept;
// cached values for faster access, no need to be initialized
bool mIsTracingEnabled;
int mMarkerFd = -1;
pid_t mPid;
static void setup() noexcept;
static void init_once() noexcept;
static bool isTracingEnabled(uint32_t tag) noexcept;
static void begin_body(int fd, int pid, const char* name) noexcept;
static void end_body(int fd, int pid) noexcept;
static void async_begin_body(int fd, int pid, const char* name, int32_t cookie) noexcept;
static void async_end_body(int fd, int pid, const char* name, int32_t cookie) noexcept;
static void int64_body(int fd, int pid, const char* name, int64_t value) noexcept;
};
// ------------------------------------------------------------------------------------------------
class UTILS_PUBLIC ScopedTrace {
public:
// we don't inline this because it's relatively heavy due to a global check
ScopedTrace(uint32_t tag, const char* name) noexcept: mTrace(tag), mTag(tag) {
mTrace.traceBegin(tag, name);
}
inline ~ScopedTrace() noexcept {
mTrace.traceEnd(mTag);
}
private:
Systrace mTrace;
const uint32_t mTag;
};
} // namespace details
} // namespace utils
#endif // TNT_UTILS_ANDROID_SYSTRACE_H
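A sketch of how the macros above are typically used. It assumes an umbrella utils/Systrace.h that selects this file on Android and that SYSTRACE_TAG_FILAMENT is one of the available tag macros; the counter name and values are made up:

#define SYSTRACE_TAG SYSTRACE_TAG_FILAMENT    // the includer picks the tag before including
#include <utils/Systrace.h>

void beginFrame(uint32_t frame) {
    SYSTRACE_CONTEXT();                       // required by the value/async commands below
    SYSTRACE_NAME("beginFrame");              // traces this whole scope
    SYSTRACE_FRAME_ID(frame);
    SYSTRACE_VALUE32("drawCalls", 42);
    SYSTRACE_ASYNC_BEGIN("gpuWork", frame);   // later paired with SYSTRACE_ASYNC_END("gpuWork", frame)
}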

View File

@@ -0,0 +1,58 @@
/*
* Copyright (C) 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_ANDROID_THERMALMANAGER_H
#define TNT_UTILS_ANDROID_THERMALMANAGER_H
#include <stdint.h>
struct AThermalManager;
namespace utils {
class ThermalManager {
public:
enum class ThermalStatus : int8_t {
ERROR = -1,
NONE,
LIGHT,
MODERATE,
SEVERE,
CRITICAL,
EMERGENCY,
SHUTDOWN
};
ThermalManager();
~ThermalManager();
// Movable
ThermalManager(ThermalManager&& rhs) noexcept;
ThermalManager& operator=(ThermalManager&& rhs) noexcept;
// not copiable
ThermalManager(ThermalManager const& rhs) = delete;
ThermalManager& operator=(ThermalManager const& rhs) = delete;
ThermalStatus getCurrentThermalStatus() const noexcept;
private:
AThermalManager* mThermalManager = nullptr;
};
} // namespace utils
#endif // TNT_UTILS_ANDROID_THERMALMANAGER_H
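A short usage sketch; the threshold and the reaction are illustrative, and it relies on the umbrella utils/ThermalManager.h shown earlier picking this implementation on Android:

#include <utils/ThermalManager.h>

void adaptToThermals() {
    utils::ThermalManager thermal;
    using Status = utils::ThermalManager::ThermalStatus;
    Status const status = thermal.getCurrentThermalStatus();
    if (status >= Status::SEVERE) {
        // e.g. lower the rendering resolution or cap the frame rate
    }
}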

View File

@@ -0,0 +1,34 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_APILEVEL_H
#define TNT_UTILS_APILEVEL_H
#include <utils/compiler.h>
namespace utils {
/**
* Returns this platform's API level. On Android this function will return
* the API level as defined by the SDK API level version. If a platform does
* not have an API level, this function returns 0.
*/
UTILS_PUBLIC
int api_level();
} // namespace utils
#endif // TNT_UTILS_APILEVEL_H

View File

@@ -0,0 +1,33 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_ARCHITECTURE_H
#define TNT_UTILS_ARCHITECTURE_H
#include <stddef.h>
namespace utils {
constexpr size_t CACHELINE_SIZE = 64;
namespace arch {
size_t getPageSize() noexcept;
} // namespace arch
} // namespace utils
#endif // TNT_UTILS_ARCHITECTURE_H

View File

@@ -0,0 +1,28 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_ASHMEM_H
#define TNT_UTILS_ASHMEM_H
#include <stddef.h>
namespace utils {
int ashmem_create_region(const char *name, size_t size);
} // namespace utils
#endif // TNT_UTILS_ASHMEM_H

View File

@@ -0,0 +1,243 @@
/*
* Copyright (C) 2023 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_DARWIN_SYSTRACE_H
#define TNT_UTILS_DARWIN_SYSTRACE_H
#include <atomic>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <os/log.h>
#include <os/signpost.h>
#include <utils/compiler.h>
#include <stack>
// enable tracing
#define SYSTRACE_ENABLE() ::utils::details::Systrace::enable(SYSTRACE_TAG)
// disable tracing
#define SYSTRACE_DISABLE() ::utils::details::Systrace::disable(SYSTRACE_TAG)
/**
* Creates a Systrace context in the current scope. Needed for calling all the other systrace
* commands below.
*/
#define SYSTRACE_CONTEXT() ::utils::details::Systrace ___tracer(SYSTRACE_TAG)
// SYSTRACE_NAME traces the beginning and end of the current scope. To trace
// the correct start and end times this macro should be declared first in the
// scope body.
// It also automatically creates a Systrace context
#define SYSTRACE_NAME(name) ::utils::details::ScopedTrace ___tracer(SYSTRACE_TAG, name)
// Denotes that a new frame has started processing.
#define SYSTRACE_FRAME_ID(frame) \
::utils::details::Systrace(SYSTRACE_TAG).frameId(SYSTRACE_TAG, frame)
extern thread_local std::stack<const char*> ___tracerSections;
// SYSTRACE_CALL is a SYSTRACE_NAME that uses the current function name.
#define SYSTRACE_CALL() SYSTRACE_NAME(__PRETTY_FUNCTION__)
#define SYSTRACE_NAME_BEGIN(name) \
___tracerSections.push(name) , \
___tracer.traceBegin(SYSTRACE_TAG, name)
#define SYSTRACE_NAME_END() \
___tracer.traceEnd(SYSTRACE_TAG, ___tracerSections.top()) , \
___tracerSections.pop()
/**
* Trace the beginning of an asynchronous event. Unlike ATRACE_BEGIN/ATRACE_END
* contexts, asynchronous events do not need to be nested. The name describes
* the event, and the cookie provides a unique identifier for distinguishing
* simultaneous events. The name and cookie used to begin an event must be
* used to end it.
*/
#define SYSTRACE_ASYNC_BEGIN(name, cookie) \
___tracer.asyncBegin(SYSTRACE_TAG, name, cookie)
/**
* Trace the end of an asynchronous event.
* This should have a corresponding SYSTRACE_ASYNC_BEGIN.
*/
#define SYSTRACE_ASYNC_END(name, cookie) \
___tracer.asyncEnd(SYSTRACE_TAG, name, cookie)
/**
* Traces an integer counter value. name is used to identify the counter.
* This can be used to track how a value changes over time.
*/
#define SYSTRACE_VALUE32(name, val) \
___tracer.value(SYSTRACE_TAG, name, int32_t(val))
#define SYSTRACE_VALUE64(name, val) \
___tracer.value(SYSTRACE_TAG, name, int64_t(val))
// ------------------------------------------------------------------------------------------------
// No user serviceable code below...
// ------------------------------------------------------------------------------------------------
// This is an alternative to os_signpost_emit_with_type that allows non-compile time strings (namely
// for us, __FUNCTION__).
// The trade-off is that this doesn't allow messages to have printf-style formatting.
// It's fine to pass an empty string to __builtin_os_log_format_buffer_size and
// __builtin_os_log_format, because they return the same value for strings without any format
// specifiers.
// This is fragile, so should only be used to assist debugging and never in production.
#define APPLE_SIGNPOST_EMIT(log, type, spid, name, message) \
if (os_signpost_enabled(log)) { \
uint8_t _os_fmt_buf[__builtin_os_log_format_buffer_size("")]; \
_os_signpost_emit_with_name_impl( \
&__dso_handle, log, type, spid, \
name, message, \
(uint8_t *)__builtin_os_log_format(_os_fmt_buf, ""), \
(uint32_t)sizeof(_os_fmt_buf)); \
}
namespace utils {
namespace details {
class Systrace {
public:
enum tags {
NEVER = SYSTRACE_TAG_NEVER,
ALWAYS = SYSTRACE_TAG_ALWAYS,
FILAMENT = SYSTRACE_TAG_FILAMENT,
JOBSYSTEM = SYSTRACE_TAG_JOBSYSTEM
// we could define more TAGS here, as we need them.
};
explicit Systrace(uint32_t tag) noexcept {
if (tag) init(tag);
}
static void enable(uint32_t tags) noexcept;
static void disable(uint32_t tags) noexcept;
inline void traceBegin(uint32_t tag, const char* name) noexcept {
if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
APPLE_SIGNPOST_EMIT(sGlobalState.systraceLog, OS_SIGNPOST_INTERVAL_BEGIN,
OS_SIGNPOST_ID_EXCLUSIVE, name, name)
}
}
inline void traceEnd(uint32_t tag, const char* name) noexcept {
if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
APPLE_SIGNPOST_EMIT(sGlobalState.systraceLog, OS_SIGNPOST_INTERVAL_END,
OS_SIGNPOST_ID_EXCLUSIVE, name, "")
}
}
inline void asyncBegin(uint32_t tag, const char* name, int32_t cookie) noexcept {
if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
// TODO
}
}
inline void asyncEnd(uint32_t tag, const char* name, int32_t cookie) noexcept {
if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
// TODO
}
}
inline void value(uint32_t tag, const char* name, int32_t value) noexcept {
if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
char buf[64];
snprintf(buf, 64, "%s - %d", name, value);
APPLE_SIGNPOST_EMIT(sGlobalState.systraceLog, OS_SIGNPOST_EVENT,
OS_SIGNPOST_ID_EXCLUSIVE, name, buf)
}
}
inline void value(uint32_t tag, const char* name, int64_t value) noexcept {
if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
char buf[64];
snprintf(buf, 64, "%s - %lld", name, value);
APPLE_SIGNPOST_EMIT(sGlobalState.systraceLog, OS_SIGNPOST_EVENT,
OS_SIGNPOST_ID_EXCLUSIVE, name, buf)
}
}
inline void frameId(uint32_t tag, uint32_t frame) noexcept {
if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
char buf[64];
snprintf(buf, 64, "frame %u", frame);
APPLE_SIGNPOST_EMIT(sGlobalState.frameIdLog, OS_SIGNPOST_EVENT,
OS_SIGNPOST_ID_EXCLUSIVE, "frame", buf)
}
}
private:
friend class ScopedTrace;
struct GlobalState {
std::atomic<uint32_t> isTracingEnabled;
os_log_t systraceLog;
os_log_t frameIdLog;
};
static GlobalState sGlobalState;
void init(uint32_t tag) noexcept;
// cached values for faster access, no need to be initialized
bool mIsTracingEnabled;
static void setup() noexcept;
static void init_once() noexcept;
static bool isTracingEnabled(uint32_t tag) noexcept;
};
// ------------------------------------------------------------------------------------------------
class ScopedTrace {
public:
// we don't inline this because it's relatively heavy due to a global check
ScopedTrace(uint32_t tag, const char* name) noexcept : mTrace(tag), mName(name), mTag(tag) {
mTrace.traceBegin(tag, name);
}
inline ~ScopedTrace() noexcept {
mTrace.traceEnd(mTag, mName);
}
inline void value(uint32_t tag, const char* name, int32_t v) noexcept {
mTrace.value(tag, name, v);
}
inline void value(uint32_t tag, const char* name, int64_t v) noexcept {
mTrace.value(tag, name, v);
}
private:
Systrace mTrace;
const char* mName;
const uint32_t mTag;
};
} // namespace details
} // namespace utils
#endif // TNT_UTILS_DARWIN_SYSTRACE_H

View File

@@ -0,0 +1,41 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_GENERIC_CONDITION_H
#define TNT_UTILS_GENERIC_CONDITION_H
#include <condition_variable>
#include <stddef.h>
namespace utils {
class Condition : public std::condition_variable {
public:
using std::condition_variable::condition_variable;
inline void notify_n(size_t n) noexcept {
if (n == 1) {
notify_one();
} else if (n > 1) {
notify_all();
}
}
};
} // namespace utils
#endif // TNT_UTILS_GENERIC_CONDITION_H

View File

@@ -0,0 +1,54 @@
/*
* Copyright (C) 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_GENERIC_THERMALMANAGER_H
#define TNT_UTILS_GENERIC_THERMALMANAGER_H
#include <stdint.h>
namespace utils {
class ThermalManager {
public:
enum class ThermalStatus : int8_t {
ERROR = -1,
NONE,
LIGHT,
MODERATE,
SEVERE,
CRITICAL,
EMERGENCY,
SHUTDOWN
};
ThermalManager() = default;
// Movable
ThermalManager(ThermalManager&& rhs) noexcept = default;
ThermalManager& operator=(ThermalManager&& rhs) noexcept = default;
// not copiable
ThermalManager(ThermalManager const& rhs) = delete;
ThermalManager& operator=(ThermalManager const& rhs) = delete;
ThermalStatus getCurrentThermalStatus() const noexcept {
return ThermalStatus::NONE;
}
};
} // namespace utils
#endif // TNT_UTILS_GENERIC_THERMALMANAGER_H

View File

@@ -0,0 +1,123 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_LINUX_CONDITION_H
#define TNT_UTILS_LINUX_CONDITION_H
#include <atomic>
#include <chrono>
#include <condition_variable> // for cv_status
#include <limits>
#include <mutex> // for unique_lock
#include <utils/linux/Mutex.h>
#include <time.h>
namespace utils {
/*
* A very simple condition variable class that can be used as an (almost) drop-in replacement
* for std::condition_variable, including the timed wait_until()/wait_for() overloads.
* It is very low overhead as most of it is inlined.
*/
class Condition {
public:
Condition() noexcept = default;
Condition(const Condition&) = delete;
Condition& operator=(const Condition&) = delete;
void notify_all() noexcept {
pulse(std::numeric_limits<int>::max());
}
void notify_one() noexcept {
pulse(1);
}
void notify_n(size_t n) noexcept {
if (n > 0) pulse(n);
}
void wait(std::unique_lock<Mutex>& lock) noexcept {
wait_until(lock.mutex(), false, nullptr);
}
template <class P>
void wait(std::unique_lock<Mutex>& lock, P predicate) {
while (!predicate()) {
wait(lock);
}
}
template<typename D>
std::cv_status wait_until(std::unique_lock<Mutex>& lock,
const std::chrono::time_point<std::chrono::steady_clock, D>& timeout_time) noexcept {
// convert to nanoseconds
uint64_t ns = std::chrono::duration<uint64_t, std::nano>(timeout_time.time_since_epoch()).count();
using sec_t = decltype(timespec::tv_sec);
using nsec_t = decltype(timespec::tv_nsec);
timespec ts{ sec_t(ns / 1000000000), nsec_t(ns % 1000000000) };
return wait_until(lock.mutex(), false, &ts);
}
template<typename D>
std::cv_status wait_until(std::unique_lock<Mutex>& lock,
const std::chrono::time_point<std::chrono::system_clock, D>& timeout_time) noexcept {
// convert to nanoseconds
uint64_t ns = std::chrono::duration<uint64_t, std::nano>(timeout_time.time_since_epoch()).count();
using sec_t = decltype(timespec::tv_sec);
using nsec_t = decltype(timespec::tv_nsec);
timespec ts{ sec_t(ns / 1000000000), nsec_t(ns % 1000000000) };
return wait_until(lock.mutex(), true, &ts);
}
template<typename C, typename D, typename P>
bool wait_until(std::unique_lock<Mutex>& lock,
const std::chrono::time_point<C, D>& timeout_time, P predicate) noexcept {
while (!predicate()) {
if (wait_until(lock, timeout_time) == std::cv_status::timeout) {
return predicate();
}
}
return true;
}
template<typename R, typename Period>
std::cv_status wait_for(std::unique_lock<Mutex>& lock,
const std::chrono::duration<R, Period>& rel_time) noexcept {
return wait_until(lock, std::chrono::steady_clock::now() + rel_time);
}
template<typename R, typename Period, typename P>
bool wait_for(std::unique_lock<Mutex>& lock,
const std::chrono::duration<R, Period>& rel_time, P pred) noexcept {
return wait_until(lock, std::chrono::steady_clock::now() + rel_time, std::move(pred));
}
private:
std::atomic<uint32_t> mState = { 0 };
void pulse(int threadCount) noexcept;
std::cv_status wait_until(Mutex* lock,
bool realtimeClock, timespec* ts) noexcept;
};
} // namespace utils
#endif // TNT_UTILS_LINUX_CONDITION_H
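A usage sketch mirroring std::condition_variable; it assumes utils::Mutex (from utils/linux/Mutex.h, included above) is a standard BasicLockable with lock()/unlock():

#include <utils/linux/Condition.h>
#include <utils/linux/Mutex.h>
#include <chrono>
#include <mutex>

utils::Mutex gLock;
utils::Condition gCondition;
bool gReady = false;

void consumer() {
    std::unique_lock<utils::Mutex> guard(gLock);
    // Wait up to 10 ms for gReady, exactly like std::condition_variable::wait_for().
    gCondition.wait_for(guard, std::chrono::milliseconds(10), [] { return gReady; });
}

void producer() {
    { std::lock_guard<utils::Mutex> guard(gLock); gReady = true; }
    gCondition.notify_one();
}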

View File

@@ -0,0 +1,26 @@
/*
* Copyright (C) 2021 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_STRING_H
#define TNT_UTILS_STRING_H
namespace utils {
float strtof_c(const char* start, char** end);
} // namespace utils
#endif // TNT_UTILS_STRING_H

View File

@@ -0,0 +1,40 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_TRAP_H
#define TNT_UTILS_TRAP_H
#include <csignal>
#if defined(WIN32)
#include <Windows.h>
#include <utils/unwindows.h>
#endif
namespace utils {
// This can be used as a programmatic breakpoint.
inline void debug_trap() noexcept {
#if defined(WIN32)
DebugBreak();
#else
std::raise(SIGINT);
#endif
}
} // namespace utils
#endif // TNT_UTILS_TRAP_H

View File

@@ -0,0 +1,61 @@
/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_VECTOR_H
#define TNT_UTILS_VECTOR_H
#include <algorithm>
#include <vector>
#include <utils/compiler.h>
namespace utils {
/**
* Inserts the specified item in the vector at its sorted position.
*/
template <typename T>
static inline void insert_sorted(std::vector<T>& v, T item) {
auto pos = std::lower_bound(v.begin(), v.end(), item);
v.insert(pos, std::move(item));
}
/**
* Inserts the specified item in the vector at its sorted position.
* The item type must implement the < operator. If the specified
* item is already present in the vector, this method returns without
* inserting the item again.
*
* @return True if the item was inserted at its sorted position, false
* if the item already exists in the vector.
*/
template <typename T>
static inline bool insert_sorted_unique(std::vector<T>& v, T item) {
if (UTILS_LIKELY(v.size() == 0 || v.back() < item)) {
v.push_back(item);
return true;
}
auto pos = std::lower_bound(v.begin(), v.end(), item);
if (UTILS_LIKELY(pos == v.end() || item < *pos)) {
v.insert(pos, std::move(item));
return true;
}
return false;
}
} // end utils namespace
#endif // TNT_UTILS_VECTOR_H
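A quick sketch of both helpers; the include path follows the header guard:

#include <utils/Vector.h>
#include <vector>

void sortedInsertExample() {
    std::vector<int> v;
    utils::insert_sorted(v, 3);
    utils::insert_sorted(v, 1);
    utils::insert_sorted(v, 2);                            // v is now {1, 2, 3}
    bool inserted = utils::insert_sorted_unique(v, 2);     // false: 2 is already present
    (void) inserted;
}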

View File

@@ -0,0 +1,33 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_WIN32_STDTYPES_H
#define TNT_UTILS_WIN32_STDTYPES_H
#if defined(WIN32)
#include <windows.h>
// Copied from linux libc sys/stat.h:
#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
#define PATH_MAX (MAX_PATH)
// For getcwd
#include <direct.h>
#define getcwd _getcwd
#endif
#endif // TNT_UTILS_WIN32_STDTYPES_H

View File

@@ -0,0 +1,310 @@
#ifndef VULKAN_VIDEO_CODEC_H264STD_H_
#define VULKAN_VIDEO_CODEC_H264STD_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define vulkan_video_codec_h264std 1
#include <stdint.h>
#define STD_VIDEO_H264_CPB_CNT_LIST_SIZE 32
#define STD_VIDEO_H264_SCALING_LIST_4X4_NUM_LISTS 6
#define STD_VIDEO_H264_SCALING_LIST_4X4_NUM_ELEMENTS 16
#define STD_VIDEO_H264_SCALING_LIST_8X8_NUM_LISTS 6
#define STD_VIDEO_H264_SCALING_LIST_8X8_NUM_ELEMENTS 64
#define STD_VIDEO_H264_MAX_NUM_LIST_REF 32
#define STD_VIDEO_H264_MAX_CHROMA_PLANES 2
typedef enum StdVideoH264ChromaFormatIdc {
STD_VIDEO_H264_CHROMA_FORMAT_IDC_MONOCHROME = 0,
STD_VIDEO_H264_CHROMA_FORMAT_IDC_420 = 1,
STD_VIDEO_H264_CHROMA_FORMAT_IDC_422 = 2,
STD_VIDEO_H264_CHROMA_FORMAT_IDC_444 = 3,
STD_VIDEO_H264_CHROMA_FORMAT_IDC_INVALID = 0x7FFFFFFF,
STD_VIDEO_H264_CHROMA_FORMAT_IDC_MAX_ENUM = 0x7FFFFFFF
} StdVideoH264ChromaFormatIdc;
typedef enum StdVideoH264ProfileIdc {
STD_VIDEO_H264_PROFILE_IDC_BASELINE = 66,
STD_VIDEO_H264_PROFILE_IDC_MAIN = 77,
STD_VIDEO_H264_PROFILE_IDC_HIGH = 100,
STD_VIDEO_H264_PROFILE_IDC_HIGH_444_PREDICTIVE = 244,
STD_VIDEO_H264_PROFILE_IDC_INVALID = 0x7FFFFFFF,
STD_VIDEO_H264_PROFILE_IDC_MAX_ENUM = 0x7FFFFFFF
} StdVideoH264ProfileIdc;
typedef enum StdVideoH264LevelIdc {
STD_VIDEO_H264_LEVEL_IDC_1_0 = 0,
STD_VIDEO_H264_LEVEL_IDC_1_1 = 1,
STD_VIDEO_H264_LEVEL_IDC_1_2 = 2,
STD_VIDEO_H264_LEVEL_IDC_1_3 = 3,
STD_VIDEO_H264_LEVEL_IDC_2_0 = 4,
STD_VIDEO_H264_LEVEL_IDC_2_1 = 5,
STD_VIDEO_H264_LEVEL_IDC_2_2 = 6,
STD_VIDEO_H264_LEVEL_IDC_3_0 = 7,
STD_VIDEO_H264_LEVEL_IDC_3_1 = 8,
STD_VIDEO_H264_LEVEL_IDC_3_2 = 9,
STD_VIDEO_H264_LEVEL_IDC_4_0 = 10,
STD_VIDEO_H264_LEVEL_IDC_4_1 = 11,
STD_VIDEO_H264_LEVEL_IDC_4_2 = 12,
STD_VIDEO_H264_LEVEL_IDC_5_0 = 13,
STD_VIDEO_H264_LEVEL_IDC_5_1 = 14,
STD_VIDEO_H264_LEVEL_IDC_5_2 = 15,
STD_VIDEO_H264_LEVEL_IDC_6_0 = 16,
STD_VIDEO_H264_LEVEL_IDC_6_1 = 17,
STD_VIDEO_H264_LEVEL_IDC_6_2 = 18,
STD_VIDEO_H264_LEVEL_IDC_INVALID = 0x7FFFFFFF,
STD_VIDEO_H264_LEVEL_IDC_MAX_ENUM = 0x7FFFFFFF
} StdVideoH264LevelIdc;
typedef enum StdVideoH264PocType {
STD_VIDEO_H264_POC_TYPE_0 = 0,
STD_VIDEO_H264_POC_TYPE_1 = 1,
STD_VIDEO_H264_POC_TYPE_2 = 2,
STD_VIDEO_H264_POC_TYPE_INVALID = 0x7FFFFFFF,
STD_VIDEO_H264_POC_TYPE_MAX_ENUM = 0x7FFFFFFF
} StdVideoH264PocType;
typedef enum StdVideoH264AspectRatioIdc {
STD_VIDEO_H264_ASPECT_RATIO_IDC_UNSPECIFIED = 0,
STD_VIDEO_H264_ASPECT_RATIO_IDC_SQUARE = 1,
STD_VIDEO_H264_ASPECT_RATIO_IDC_12_11 = 2,
STD_VIDEO_H264_ASPECT_RATIO_IDC_10_11 = 3,
STD_VIDEO_H264_ASPECT_RATIO_IDC_16_11 = 4,
STD_VIDEO_H264_ASPECT_RATIO_IDC_40_33 = 5,
STD_VIDEO_H264_ASPECT_RATIO_IDC_24_11 = 6,
STD_VIDEO_H264_ASPECT_RATIO_IDC_20_11 = 7,
STD_VIDEO_H264_ASPECT_RATIO_IDC_32_11 = 8,
STD_VIDEO_H264_ASPECT_RATIO_IDC_80_33 = 9,
STD_VIDEO_H264_ASPECT_RATIO_IDC_18_11 = 10,
STD_VIDEO_H264_ASPECT_RATIO_IDC_15_11 = 11,
STD_VIDEO_H264_ASPECT_RATIO_IDC_64_33 = 12,
STD_VIDEO_H264_ASPECT_RATIO_IDC_160_99 = 13,
STD_VIDEO_H264_ASPECT_RATIO_IDC_4_3 = 14,
STD_VIDEO_H264_ASPECT_RATIO_IDC_3_2 = 15,
STD_VIDEO_H264_ASPECT_RATIO_IDC_2_1 = 16,
STD_VIDEO_H264_ASPECT_RATIO_IDC_EXTENDED_SAR = 255,
STD_VIDEO_H264_ASPECT_RATIO_IDC_INVALID = 0x7FFFFFFF,
STD_VIDEO_H264_ASPECT_RATIO_IDC_MAX_ENUM = 0x7FFFFFFF
} StdVideoH264AspectRatioIdc;
typedef enum StdVideoH264WeightedBipredIdc {
STD_VIDEO_H264_WEIGHTED_BIPRED_IDC_DEFAULT = 0,
STD_VIDEO_H264_WEIGHTED_BIPRED_IDC_EXPLICIT = 1,
STD_VIDEO_H264_WEIGHTED_BIPRED_IDC_IMPLICIT = 2,
STD_VIDEO_H264_WEIGHTED_BIPRED_IDC_INVALID = 0x7FFFFFFF,
STD_VIDEO_H264_WEIGHTED_BIPRED_IDC_MAX_ENUM = 0x7FFFFFFF
} StdVideoH264WeightedBipredIdc;
typedef enum StdVideoH264ModificationOfPicNumsIdc {
STD_VIDEO_H264_MODIFICATION_OF_PIC_NUMS_IDC_SHORT_TERM_SUBTRACT = 0,
STD_VIDEO_H264_MODIFICATION_OF_PIC_NUMS_IDC_SHORT_TERM_ADD = 1,
STD_VIDEO_H264_MODIFICATION_OF_PIC_NUMS_IDC_LONG_TERM = 2,
STD_VIDEO_H264_MODIFICATION_OF_PIC_NUMS_IDC_END = 3,
STD_VIDEO_H264_MODIFICATION_OF_PIC_NUMS_IDC_INVALID = 0x7FFFFFFF,
STD_VIDEO_H264_MODIFICATION_OF_PIC_NUMS_IDC_MAX_ENUM = 0x7FFFFFFF
} StdVideoH264ModificationOfPicNumsIdc;
typedef enum StdVideoH264MemMgmtControlOp {
STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_END = 0,
STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_UNMARK_SHORT_TERM = 1,
STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_UNMARK_LONG_TERM = 2,
STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_MARK_LONG_TERM = 3,
STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_SET_MAX_LONG_TERM_INDEX = 4,
STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_UNMARK_ALL = 5,
STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_MARK_CURRENT_AS_LONG_TERM = 6,
STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_INVALID = 0x7FFFFFFF,
STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_MAX_ENUM = 0x7FFFFFFF
} StdVideoH264MemMgmtControlOp;
typedef enum StdVideoH264CabacInitIdc {
STD_VIDEO_H264_CABAC_INIT_IDC_0 = 0,
STD_VIDEO_H264_CABAC_INIT_IDC_1 = 1,
STD_VIDEO_H264_CABAC_INIT_IDC_2 = 2,
STD_VIDEO_H264_CABAC_INIT_IDC_INVALID = 0x7FFFFFFF,
STD_VIDEO_H264_CABAC_INIT_IDC_MAX_ENUM = 0x7FFFFFFF
} StdVideoH264CabacInitIdc;
typedef enum StdVideoH264DisableDeblockingFilterIdc {
STD_VIDEO_H264_DISABLE_DEBLOCKING_FILTER_IDC_DISABLED = 0,
STD_VIDEO_H264_DISABLE_DEBLOCKING_FILTER_IDC_ENABLED = 1,
STD_VIDEO_H264_DISABLE_DEBLOCKING_FILTER_IDC_PARTIAL = 2,
STD_VIDEO_H264_DISABLE_DEBLOCKING_FILTER_IDC_INVALID = 0x7FFFFFFF,
STD_VIDEO_H264_DISABLE_DEBLOCKING_FILTER_IDC_MAX_ENUM = 0x7FFFFFFF
} StdVideoH264DisableDeblockingFilterIdc;
typedef enum StdVideoH264SliceType {
STD_VIDEO_H264_SLICE_TYPE_P = 0,
STD_VIDEO_H264_SLICE_TYPE_B = 1,
STD_VIDEO_H264_SLICE_TYPE_I = 2,
STD_VIDEO_H264_SLICE_TYPE_INVALID = 0x7FFFFFFF,
STD_VIDEO_H264_SLICE_TYPE_MAX_ENUM = 0x7FFFFFFF
} StdVideoH264SliceType;
typedef enum StdVideoH264PictureType {
STD_VIDEO_H264_PICTURE_TYPE_P = 0,
STD_VIDEO_H264_PICTURE_TYPE_B = 1,
STD_VIDEO_H264_PICTURE_TYPE_I = 2,
STD_VIDEO_H264_PICTURE_TYPE_IDR = 5,
STD_VIDEO_H264_PICTURE_TYPE_INVALID = 0x7FFFFFFF,
STD_VIDEO_H264_PICTURE_TYPE_MAX_ENUM = 0x7FFFFFFF
} StdVideoH264PictureType;
typedef enum StdVideoH264NonVclNaluType {
STD_VIDEO_H264_NON_VCL_NALU_TYPE_SPS = 0,
STD_VIDEO_H264_NON_VCL_NALU_TYPE_PPS = 1,
STD_VIDEO_H264_NON_VCL_NALU_TYPE_AUD = 2,
STD_VIDEO_H264_NON_VCL_NALU_TYPE_PREFIX = 3,
STD_VIDEO_H264_NON_VCL_NALU_TYPE_END_OF_SEQUENCE = 4,
STD_VIDEO_H264_NON_VCL_NALU_TYPE_END_OF_STREAM = 5,
STD_VIDEO_H264_NON_VCL_NALU_TYPE_PRECODED = 6,
STD_VIDEO_H264_NON_VCL_NALU_TYPE_INVALID = 0x7FFFFFFF,
STD_VIDEO_H264_NON_VCL_NALU_TYPE_MAX_ENUM = 0x7FFFFFFF
} StdVideoH264NonVclNaluType;
typedef struct StdVideoH264SpsVuiFlags {
uint32_t aspect_ratio_info_present_flag : 1;
uint32_t overscan_info_present_flag : 1;
uint32_t overscan_appropriate_flag : 1;
uint32_t video_signal_type_present_flag : 1;
uint32_t video_full_range_flag : 1;
uint32_t color_description_present_flag : 1;
uint32_t chroma_loc_info_present_flag : 1;
uint32_t timing_info_present_flag : 1;
uint32_t fixed_frame_rate_flag : 1;
uint32_t bitstream_restriction_flag : 1;
uint32_t nal_hrd_parameters_present_flag : 1;
uint32_t vcl_hrd_parameters_present_flag : 1;
} StdVideoH264SpsVuiFlags;
typedef struct StdVideoH264HrdParameters {
uint8_t cpb_cnt_minus1;
uint8_t bit_rate_scale;
uint8_t cpb_size_scale;
uint8_t reserved1;
uint32_t bit_rate_value_minus1[STD_VIDEO_H264_CPB_CNT_LIST_SIZE];
uint32_t cpb_size_value_minus1[STD_VIDEO_H264_CPB_CNT_LIST_SIZE];
uint8_t cbr_flag[STD_VIDEO_H264_CPB_CNT_LIST_SIZE];
uint32_t initial_cpb_removal_delay_length_minus1;
uint32_t cpb_removal_delay_length_minus1;
uint32_t dpb_output_delay_length_minus1;
uint32_t time_offset_length;
} StdVideoH264HrdParameters;
typedef struct StdVideoH264SequenceParameterSetVui {
StdVideoH264SpsVuiFlags flags;
StdVideoH264AspectRatioIdc aspect_ratio_idc;
uint16_t sar_width;
uint16_t sar_height;
uint8_t video_format;
uint8_t colour_primaries;
uint8_t transfer_characteristics;
uint8_t matrix_coefficients;
uint32_t num_units_in_tick;
uint32_t time_scale;
uint8_t max_num_reorder_frames;
uint8_t max_dec_frame_buffering;
uint8_t chroma_sample_loc_type_top_field;
uint8_t chroma_sample_loc_type_bottom_field;
uint32_t reserved1;
const StdVideoH264HrdParameters* pHrdParameters;
} StdVideoH264SequenceParameterSetVui;
typedef struct StdVideoH264SpsFlags {
uint32_t constraint_set0_flag : 1;
uint32_t constraint_set1_flag : 1;
uint32_t constraint_set2_flag : 1;
uint32_t constraint_set3_flag : 1;
uint32_t constraint_set4_flag : 1;
uint32_t constraint_set5_flag : 1;
uint32_t direct_8x8_inference_flag : 1;
uint32_t mb_adaptive_frame_field_flag : 1;
uint32_t frame_mbs_only_flag : 1;
uint32_t delta_pic_order_always_zero_flag : 1;
uint32_t separate_colour_plane_flag : 1;
uint32_t gaps_in_frame_num_value_allowed_flag : 1;
uint32_t qpprime_y_zero_transform_bypass_flag : 1;
uint32_t frame_cropping_flag : 1;
uint32_t seq_scaling_matrix_present_flag : 1;
uint32_t vui_parameters_present_flag : 1;
} StdVideoH264SpsFlags;
typedef struct StdVideoH264ScalingLists {
uint16_t scaling_list_present_mask;
uint16_t use_default_scaling_matrix_mask;
uint8_t ScalingList4x4[STD_VIDEO_H264_SCALING_LIST_4X4_NUM_LISTS][STD_VIDEO_H264_SCALING_LIST_4X4_NUM_ELEMENTS];
uint8_t ScalingList8x8[STD_VIDEO_H264_SCALING_LIST_8X8_NUM_LISTS][STD_VIDEO_H264_SCALING_LIST_8X8_NUM_ELEMENTS];
} StdVideoH264ScalingLists;
typedef struct StdVideoH264SequenceParameterSet {
StdVideoH264SpsFlags flags;
StdVideoH264ProfileIdc profile_idc;
StdVideoH264LevelIdc level_idc;
StdVideoH264ChromaFormatIdc chroma_format_idc;
uint8_t seq_parameter_set_id;
uint8_t bit_depth_luma_minus8;
uint8_t bit_depth_chroma_minus8;
uint8_t log2_max_frame_num_minus4;
StdVideoH264PocType pic_order_cnt_type;
int32_t offset_for_non_ref_pic;
int32_t offset_for_top_to_bottom_field;
uint8_t log2_max_pic_order_cnt_lsb_minus4;
uint8_t num_ref_frames_in_pic_order_cnt_cycle;
uint8_t max_num_ref_frames;
uint8_t reserved1;
uint32_t pic_width_in_mbs_minus1;
uint32_t pic_height_in_map_units_minus1;
uint32_t frame_crop_left_offset;
uint32_t frame_crop_right_offset;
uint32_t frame_crop_top_offset;
uint32_t frame_crop_bottom_offset;
uint32_t reserved2;
const int32_t* pOffsetForRefFrame;
const StdVideoH264ScalingLists* pScalingLists;
const StdVideoH264SequenceParameterSetVui* pSequenceParameterSetVui;
} StdVideoH264SequenceParameterSet;
typedef struct StdVideoH264PpsFlags {
uint32_t transform_8x8_mode_flag : 1;
uint32_t redundant_pic_cnt_present_flag : 1;
uint32_t constrained_intra_pred_flag : 1;
uint32_t deblocking_filter_control_present_flag : 1;
uint32_t weighted_pred_flag : 1;
uint32_t bottom_field_pic_order_in_frame_present_flag : 1;
uint32_t entropy_coding_mode_flag : 1;
uint32_t pic_scaling_matrix_present_flag : 1;
} StdVideoH264PpsFlags;
typedef struct StdVideoH264PictureParameterSet {
StdVideoH264PpsFlags flags;
uint8_t seq_parameter_set_id;
uint8_t pic_parameter_set_id;
uint8_t num_ref_idx_l0_default_active_minus1;
uint8_t num_ref_idx_l1_default_active_minus1;
StdVideoH264WeightedBipredIdc weighted_bipred_idc;
int8_t pic_init_qp_minus26;
int8_t pic_init_qs_minus26;
int8_t chroma_qp_index_offset;
int8_t second_chroma_qp_index_offset;
const StdVideoH264ScalingLists* pScalingLists;
} StdVideoH264PictureParameterSet;
#ifdef __cplusplus
}
#endif
#endif
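As a quick illustration of how these parameter-set structures are meant to be filled in, the sketch below populates a StdVideoH264SequenceParameterSet for a hypothetical 1920x1080, 8-bit 4:2:0 stream. All field values and the vk_video/ include path are illustrative assumptions, not anything taken from this change.
// Minimal sketch (illustrative values only): translating a parsed H.264 SPS
// into the Std struct above, for a hypothetical 1920x1080 4:2:0 8-bit stream.
#include <stdint.h>
#include <string.h>
#include <vk_video/vulkan_video_codec_h264std.h> // assumed install path for this header

static StdVideoH264SequenceParameterSet makeExampleSps(void) {
    StdVideoH264SequenceParameterSet sps;
    memset(&sps, 0, sizeof(sps)); // also nulls pScalingLists / pSequenceParameterSetVui
    sps.profile_idc = STD_VIDEO_H264_PROFILE_IDC_HIGH;
    sps.level_idc = STD_VIDEO_H264_LEVEL_IDC_4_1;
    sps.chroma_format_idc = STD_VIDEO_H264_CHROMA_FORMAT_IDC_420;
    sps.pic_order_cnt_type = STD_VIDEO_H264_POC_TYPE_0;
    sps.log2_max_frame_num_minus4 = 4;
    sps.log2_max_pic_order_cnt_lsb_minus4 = 4;
    sps.max_num_ref_frames = 4;
    sps.flags.frame_mbs_only_flag = 1;
    sps.flags.direct_8x8_inference_flag = 1;
    // 1920x1088 in 16x16 macroblocks, minus one as the syntax requires.
    sps.pic_width_in_mbs_minus1 = 119;
    sps.pic_height_in_map_units_minus1 = 67;
    // Crop the 8 padding rows back to 1080 (crop units are 2 luma rows for 4:2:0 frames).
    sps.flags.frame_cropping_flag = 1;
    sps.frame_crop_bottom_offset = 4;
    return sps;
}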

View File

@@ -0,0 +1,75 @@
#ifndef VULKAN_VIDEO_CODEC_H264STD_DECODE_H_
#define VULKAN_VIDEO_CODEC_H264STD_DECODE_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define vulkan_video_codec_h264std_decode 1
// Vulkan 0.9 provisional Vulkan video H.264 decode std specification version number
#define VK_STD_VULKAN_VIDEO_CODEC_H264_DECODE_API_VERSION_0_9_8 VK_MAKE_VIDEO_STD_VERSION(0, 9, 8) // Patch version should always be set to 0
#define STD_VIDEO_DECODE_H264_FIELD_ORDER_COUNT_LIST_SIZE 2
#define VK_STD_VULKAN_VIDEO_CODEC_H264_DECODE_SPEC_VERSION VK_STD_VULKAN_VIDEO_CODEC_H264_DECODE_API_VERSION_0_9_8
#define VK_STD_VULKAN_VIDEO_CODEC_H264_DECODE_EXTENSION_NAME "VK_STD_vulkan_video_codec_h264_decode"
typedef enum StdVideoDecodeH264FieldOrderCount {
STD_VIDEO_DECODE_H264_FIELD_ORDER_COUNT_TOP = 0,
STD_VIDEO_DECODE_H264_FIELD_ORDER_COUNT_BOTTOM = 1,
STD_VIDEO_DECODE_H264_FIELD_ORDER_COUNT_INVALID = 0x7FFFFFFF,
STD_VIDEO_DECODE_H264_FIELD_ORDER_COUNT_MAX_ENUM = 0x7FFFFFFF
} StdVideoDecodeH264FieldOrderCount;
typedef struct StdVideoDecodeH264PictureInfoFlags {
uint32_t field_pic_flag : 1;
uint32_t is_intra : 1;
uint32_t IdrPicFlag : 1;
uint32_t bottom_field_flag : 1;
uint32_t is_reference : 1;
uint32_t complementary_field_pair : 1;
} StdVideoDecodeH264PictureInfoFlags;
typedef struct StdVideoDecodeH264PictureInfo {
StdVideoDecodeH264PictureInfoFlags flags;
uint8_t seq_parameter_set_id;
uint8_t pic_parameter_set_id;
uint8_t reserved1;
uint8_t reserved2;
uint16_t frame_num;
uint16_t idr_pic_id;
int32_t PicOrderCnt[STD_VIDEO_DECODE_H264_FIELD_ORDER_COUNT_LIST_SIZE];
} StdVideoDecodeH264PictureInfo;
typedef struct StdVideoDecodeH264ReferenceInfoFlags {
uint32_t top_field_flag : 1;
uint32_t bottom_field_flag : 1;
uint32_t used_for_long_term_reference : 1;
uint32_t is_non_existing : 1;
} StdVideoDecodeH264ReferenceInfoFlags;
typedef struct StdVideoDecodeH264ReferenceInfo {
StdVideoDecodeH264ReferenceInfoFlags flags;
uint16_t FrameNum;
uint16_t reserved;
int32_t PicOrderCnt[STD_VIDEO_DECODE_H264_FIELD_ORDER_COUNT_LIST_SIZE];
} StdVideoDecodeH264ReferenceInfo;
#ifdef __cplusplus
}
#endif
#endif
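In the same spirit, a hedged sketch of the per-picture decode structure, here describing a hypothetical IDR frame (every value below is a placeholder, and the include paths are assumptions):
// Illustrative only: an IDR picture with both field order counts at zero.
#include <string.h>
#include <vk_video/vulkan_video_codec_h264std.h>        // assumed install paths for the
#include <vk_video/vulkan_video_codec_h264std_decode.h> // headers in this change

static StdVideoDecodeH264PictureInfo makeIdrPictureInfo(void) {
    StdVideoDecodeH264PictureInfo info;
    memset(&info, 0, sizeof(info));
    info.flags.IdrPicFlag = 1;   // instantaneous decoder refresh
    info.flags.is_reference = 1; // IDR pictures are always reference pictures
    info.seq_parameter_set_id = 0;
    info.pic_parameter_set_id = 0;
    info.frame_num = 0;          // frame_num resets at an IDR
    info.idr_pic_id = 0;
    info.PicOrderCnt[0] = 0;     // top field
    info.PicOrderCnt[1] = 0;     // bottom field
    return info;
}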

View File

@@ -0,0 +1,132 @@
#ifndef VULKAN_VIDEO_CODEC_H264STD_ENCODE_H_
#define VULKAN_VIDEO_CODEC_H264STD_ENCODE_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define vulkan_video_codec_h264std_encode 1
// Vulkan 0.9 provisional Vulkan video H.264 encode std specification version number
#define VK_STD_VULKAN_VIDEO_CODEC_H264_ENCODE_API_VERSION_0_9_8 VK_MAKE_VIDEO_STD_VERSION(0, 9, 8) // Patch version should always be set to 0
#define VK_STD_VULKAN_VIDEO_CODEC_H264_ENCODE_SPEC_VERSION VK_STD_VULKAN_VIDEO_CODEC_H264_ENCODE_API_VERSION_0_9_8
#define VK_STD_VULKAN_VIDEO_CODEC_H264_ENCODE_EXTENSION_NAME "VK_STD_vulkan_video_codec_h264_encode"
typedef struct StdVideoEncodeH264WeightTableFlags {
uint32_t luma_weight_l0_flag;
uint32_t chroma_weight_l0_flag;
uint32_t luma_weight_l1_flag;
uint32_t chroma_weight_l1_flag;
} StdVideoEncodeH264WeightTableFlags;
typedef struct StdVideoEncodeH264WeightTable {
StdVideoEncodeH264WeightTableFlags flags;
uint8_t luma_log2_weight_denom;
uint8_t chroma_log2_weight_denom;
int8_t luma_weight_l0[STD_VIDEO_H264_MAX_NUM_LIST_REF];
int8_t luma_offset_l0[STD_VIDEO_H264_MAX_NUM_LIST_REF];
int8_t chroma_weight_l0[STD_VIDEO_H264_MAX_NUM_LIST_REF][STD_VIDEO_H264_MAX_CHROMA_PLANES];
int8_t chroma_offset_l0[STD_VIDEO_H264_MAX_NUM_LIST_REF][STD_VIDEO_H264_MAX_CHROMA_PLANES];
int8_t luma_weight_l1[STD_VIDEO_H264_MAX_NUM_LIST_REF];
int8_t luma_offset_l1[STD_VIDEO_H264_MAX_NUM_LIST_REF];
int8_t chroma_weight_l1[STD_VIDEO_H264_MAX_NUM_LIST_REF][STD_VIDEO_H264_MAX_CHROMA_PLANES];
int8_t chroma_offset_l1[STD_VIDEO_H264_MAX_NUM_LIST_REF][STD_VIDEO_H264_MAX_CHROMA_PLANES];
} StdVideoEncodeH264WeightTable;
typedef struct StdVideoEncodeH264SliceHeaderFlags {
uint32_t direct_spatial_mv_pred_flag : 1;
uint32_t num_ref_idx_active_override_flag : 1;
uint32_t no_output_of_prior_pics_flag : 1;
uint32_t adaptive_ref_pic_marking_mode_flag : 1;
uint32_t no_prior_references_available_flag : 1;
} StdVideoEncodeH264SliceHeaderFlags;
typedef struct StdVideoEncodeH264PictureInfoFlags {
uint32_t idr_flag : 1;
uint32_t is_reference_flag : 1;
uint32_t used_for_long_term_reference : 1;
} StdVideoEncodeH264PictureInfoFlags;
typedef struct StdVideoEncodeH264ReferenceInfoFlags {
uint32_t used_for_long_term_reference : 1;
} StdVideoEncodeH264ReferenceInfoFlags;
typedef struct StdVideoEncodeH264RefMgmtFlags {
uint32_t ref_pic_list_modification_l0_flag : 1;
uint32_t ref_pic_list_modification_l1_flag : 1;
} StdVideoEncodeH264RefMgmtFlags;
typedef struct StdVideoEncodeH264RefListModEntry {
StdVideoH264ModificationOfPicNumsIdc modification_of_pic_nums_idc;
uint16_t abs_diff_pic_num_minus1;
uint16_t long_term_pic_num;
} StdVideoEncodeH264RefListModEntry;
typedef struct StdVideoEncodeH264RefPicMarkingEntry {
StdVideoH264MemMgmtControlOp operation;
uint16_t difference_of_pic_nums_minus1;
uint16_t long_term_pic_num;
uint16_t long_term_frame_idx;
uint16_t max_long_term_frame_idx_plus1;
} StdVideoEncodeH264RefPicMarkingEntry;
typedef struct StdVideoEncodeH264RefMemMgmtCtrlOperations {
StdVideoEncodeH264RefMgmtFlags flags;
uint8_t refList0ModOpCount;
const StdVideoEncodeH264RefListModEntry* pRefList0ModOperations;
uint8_t refList1ModOpCount;
const StdVideoEncodeH264RefListModEntry* pRefList1ModOperations;
uint8_t refPicMarkingOpCount;
const StdVideoEncodeH264RefPicMarkingEntry* pRefPicMarkingOperations;
} StdVideoEncodeH264RefMemMgmtCtrlOperations;
typedef struct StdVideoEncodeH264PictureInfo {
StdVideoEncodeH264PictureInfoFlags flags;
uint8_t seq_parameter_set_id;
uint8_t pic_parameter_set_id;
StdVideoH264PictureType pictureType;
uint32_t frame_num;
int32_t PicOrderCnt;
} StdVideoEncodeH264PictureInfo;
typedef struct StdVideoEncodeH264ReferenceInfo {
StdVideoEncodeH264ReferenceInfoFlags flags;
uint32_t FrameNum;
int32_t PicOrderCnt;
uint16_t long_term_pic_num;
uint16_t long_term_frame_idx;
} StdVideoEncodeH264ReferenceInfo;
typedef struct StdVideoEncodeH264SliceHeader {
StdVideoEncodeH264SliceHeaderFlags flags;
uint32_t first_mb_in_slice;
StdVideoH264SliceType slice_type;
uint16_t idr_pic_id;
uint8_t num_ref_idx_l0_active_minus1;
uint8_t num_ref_idx_l1_active_minus1;
StdVideoH264CabacInitIdc cabac_init_idc;
StdVideoH264DisableDeblockingFilterIdc disable_deblocking_filter_idc;
int8_t slice_alpha_c0_offset_div2;
int8_t slice_beta_offset_div2;
const StdVideoEncodeH264WeightTable* pWeightTable;
} StdVideoEncodeH264SliceHeader;
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,443 @@
#ifndef VULKAN_VIDEO_CODEC_H265STD_H_
#define VULKAN_VIDEO_CODEC_H265STD_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define vulkan_video_codec_h265std 1
#define STD_VIDEO_H265_SUBLAYERS_LIST_SIZE 7
#define STD_VIDEO_H265_CPB_CNT_LIST_SIZE 32
#define STD_VIDEO_H265_SCALING_LIST_4X4_NUM_LISTS 6
#define STD_VIDEO_H265_SCALING_LIST_4X4_NUM_ELEMENTS 16
#define STD_VIDEO_H265_SCALING_LIST_8X8_NUM_LISTS 6
#define STD_VIDEO_H265_SCALING_LIST_8X8_NUM_ELEMENTS 64
#define STD_VIDEO_H265_SCALING_LIST_16X16_NUM_LISTS 6
#define STD_VIDEO_H265_SCALING_LIST_16X16_NUM_ELEMENTS 64
#define STD_VIDEO_H265_SCALING_LIST_32X32_NUM_LISTS 2
#define STD_VIDEO_H265_SCALING_LIST_32X32_NUM_ELEMENTS 64
#define STD_VIDEO_H265_PREDICTOR_PALETTE_COMPONENTS_LIST_SIZE 3
#define STD_VIDEO_H265_PREDICTOR_PALETTE_COMP_ENTRIES_LIST_SIZE 128
#define STD_VIDEO_H265_MAX_DPB_SIZE 16
#define STD_VIDEO_H265_MAX_LONG_TERM_REF_PICS_SPS 32
#define STD_VIDEO_H265_CHROMA_QP_OFFSET_LIST_SIZE 6
#define STD_VIDEO_H265_CHROMA_QP_OFFSET_TILE_COLS_LIST_SIZE 19
#define STD_VIDEO_H265_CHROMA_QP_OFFSET_TILE_ROWS_LIST_SIZE 21
#define STD_VIDEO_H265_MAX_NUM_LIST_REF 15
#define STD_VIDEO_H265_MAX_CHROMA_PLANES 2
#define STD_VIDEO_H265_MAX_SHORT_TERM_REF_PIC_SETS 64
#define STD_VIDEO_H265_MAX_LONG_TERM_PICS 16
#define STD_VIDEO_H265_MAX_DELTA_POC 48
typedef enum StdVideoH265ChromaFormatIdc {
STD_VIDEO_H265_CHROMA_FORMAT_IDC_MONOCHROME = 0,
STD_VIDEO_H265_CHROMA_FORMAT_IDC_420 = 1,
STD_VIDEO_H265_CHROMA_FORMAT_IDC_422 = 2,
STD_VIDEO_H265_CHROMA_FORMAT_IDC_444 = 3,
STD_VIDEO_H265_CHROMA_FORMAT_IDC_INVALID = 0x7FFFFFFF,
STD_VIDEO_H265_CHROMA_FORMAT_IDC_MAX_ENUM = 0x7FFFFFFF
} StdVideoH265ChromaFormatIdc;
typedef enum StdVideoH265ProfileIdc {
STD_VIDEO_H265_PROFILE_IDC_MAIN = 1,
STD_VIDEO_H265_PROFILE_IDC_MAIN_10 = 2,
STD_VIDEO_H265_PROFILE_IDC_MAIN_STILL_PICTURE = 3,
STD_VIDEO_H265_PROFILE_IDC_FORMAT_RANGE_EXTENSIONS = 4,
STD_VIDEO_H265_PROFILE_IDC_SCC_EXTENSIONS = 9,
STD_VIDEO_H265_PROFILE_IDC_INVALID = 0x7FFFFFFF,
STD_VIDEO_H265_PROFILE_IDC_MAX_ENUM = 0x7FFFFFFF
} StdVideoH265ProfileIdc;
typedef enum StdVideoH265LevelIdc {
STD_VIDEO_H265_LEVEL_IDC_1_0 = 0,
STD_VIDEO_H265_LEVEL_IDC_2_0 = 1,
STD_VIDEO_H265_LEVEL_IDC_2_1 = 2,
STD_VIDEO_H265_LEVEL_IDC_3_0 = 3,
STD_VIDEO_H265_LEVEL_IDC_3_1 = 4,
STD_VIDEO_H265_LEVEL_IDC_4_0 = 5,
STD_VIDEO_H265_LEVEL_IDC_4_1 = 6,
STD_VIDEO_H265_LEVEL_IDC_5_0 = 7,
STD_VIDEO_H265_LEVEL_IDC_5_1 = 8,
STD_VIDEO_H265_LEVEL_IDC_5_2 = 9,
STD_VIDEO_H265_LEVEL_IDC_6_0 = 10,
STD_VIDEO_H265_LEVEL_IDC_6_1 = 11,
STD_VIDEO_H265_LEVEL_IDC_6_2 = 12,
STD_VIDEO_H265_LEVEL_IDC_INVALID = 0x7FFFFFFF,
STD_VIDEO_H265_LEVEL_IDC_MAX_ENUM = 0x7FFFFFFF
} StdVideoH265LevelIdc;
typedef enum StdVideoH265SliceType {
STD_VIDEO_H265_SLICE_TYPE_B = 0,
STD_VIDEO_H265_SLICE_TYPE_P = 1,
STD_VIDEO_H265_SLICE_TYPE_I = 2,
STD_VIDEO_H265_SLICE_TYPE_INVALID = 0x7FFFFFFF,
STD_VIDEO_H265_SLICE_TYPE_MAX_ENUM = 0x7FFFFFFF
} StdVideoH265SliceType;
typedef enum StdVideoH265PictureType {
STD_VIDEO_H265_PICTURE_TYPE_P = 0,
STD_VIDEO_H265_PICTURE_TYPE_B = 1,
STD_VIDEO_H265_PICTURE_TYPE_I = 2,
STD_VIDEO_H265_PICTURE_TYPE_IDR = 3,
STD_VIDEO_H265_PICTURE_TYPE_INVALID = 0x7FFFFFFF,
STD_VIDEO_H265_PICTURE_TYPE_MAX_ENUM = 0x7FFFFFFF
} StdVideoH265PictureType;
typedef enum StdVideoH265AspectRatioIdc {
STD_VIDEO_H265_ASPECT_RATIO_IDC_UNSPECIFIED = 0,
STD_VIDEO_H265_ASPECT_RATIO_IDC_SQUARE = 1,
STD_VIDEO_H265_ASPECT_RATIO_IDC_12_11 = 2,
STD_VIDEO_H265_ASPECT_RATIO_IDC_10_11 = 3,
STD_VIDEO_H265_ASPECT_RATIO_IDC_16_11 = 4,
STD_VIDEO_H265_ASPECT_RATIO_IDC_40_33 = 5,
STD_VIDEO_H265_ASPECT_RATIO_IDC_24_11 = 6,
STD_VIDEO_H265_ASPECT_RATIO_IDC_20_11 = 7,
STD_VIDEO_H265_ASPECT_RATIO_IDC_32_11 = 8,
STD_VIDEO_H265_ASPECT_RATIO_IDC_80_33 = 9,
STD_VIDEO_H265_ASPECT_RATIO_IDC_18_11 = 10,
STD_VIDEO_H265_ASPECT_RATIO_IDC_15_11 = 11,
STD_VIDEO_H265_ASPECT_RATIO_IDC_64_33 = 12,
STD_VIDEO_H265_ASPECT_RATIO_IDC_160_99 = 13,
STD_VIDEO_H265_ASPECT_RATIO_IDC_4_3 = 14,
STD_VIDEO_H265_ASPECT_RATIO_IDC_3_2 = 15,
STD_VIDEO_H265_ASPECT_RATIO_IDC_2_1 = 16,
STD_VIDEO_H265_ASPECT_RATIO_IDC_EXTENDED_SAR = 255,
STD_VIDEO_H265_ASPECT_RATIO_IDC_INVALID = 0x7FFFFFFF,
STD_VIDEO_H265_ASPECT_RATIO_IDC_MAX_ENUM = 0x7FFFFFFF
} StdVideoH265AspectRatioIdc;
typedef struct StdVideoH265DecPicBufMgr {
uint32_t max_latency_increase_plus1[STD_VIDEO_H265_SUBLAYERS_LIST_SIZE];
uint8_t max_dec_pic_buffering_minus1[STD_VIDEO_H265_SUBLAYERS_LIST_SIZE];
uint8_t max_num_reorder_pics[STD_VIDEO_H265_SUBLAYERS_LIST_SIZE];
} StdVideoH265DecPicBufMgr;
typedef struct StdVideoH265SubLayerHrdParameters {
uint32_t bit_rate_value_minus1[STD_VIDEO_H265_CPB_CNT_LIST_SIZE];
uint32_t cpb_size_value_minus1[STD_VIDEO_H265_CPB_CNT_LIST_SIZE];
uint32_t cpb_size_du_value_minus1[STD_VIDEO_H265_CPB_CNT_LIST_SIZE];
uint32_t bit_rate_du_value_minus1[STD_VIDEO_H265_CPB_CNT_LIST_SIZE];
uint32_t cbr_flag;
} StdVideoH265SubLayerHrdParameters;
typedef struct StdVideoH265HrdFlags {
uint32_t nal_hrd_parameters_present_flag : 1;
uint32_t vcl_hrd_parameters_present_flag : 1;
uint32_t sub_pic_hrd_params_present_flag : 1;
uint32_t sub_pic_cpb_params_in_pic_timing_sei_flag : 1;
uint32_t fixed_pic_rate_general_flag : 8;
uint32_t fixed_pic_rate_within_cvs_flag : 8;
uint32_t low_delay_hrd_flag : 8;
} StdVideoH265HrdFlags;
typedef struct StdVideoH265HrdParameters {
StdVideoH265HrdFlags flags;
uint8_t tick_divisor_minus2;
uint8_t du_cpb_removal_delay_increment_length_minus1;
uint8_t dpb_output_delay_du_length_minus1;
uint8_t bit_rate_scale;
uint8_t cpb_size_scale;
uint8_t cpb_size_du_scale;
uint8_t initial_cpb_removal_delay_length_minus1;
uint8_t au_cpb_removal_delay_length_minus1;
uint8_t dpb_output_delay_length_minus1;
uint8_t cpb_cnt_minus1[STD_VIDEO_H265_SUBLAYERS_LIST_SIZE];
uint16_t elemental_duration_in_tc_minus1[STD_VIDEO_H265_SUBLAYERS_LIST_SIZE];
uint16_t reserved[3];
const StdVideoH265SubLayerHrdParameters* pSubLayerHrdParametersNal;
const StdVideoH265SubLayerHrdParameters* pSubLayerHrdParametersVcl;
} StdVideoH265HrdParameters;
typedef struct StdVideoH265VpsFlags {
uint32_t vps_temporal_id_nesting_flag : 1;
uint32_t vps_sub_layer_ordering_info_present_flag : 1;
uint32_t vps_timing_info_present_flag : 1;
uint32_t vps_poc_proportional_to_timing_flag : 1;
} StdVideoH265VpsFlags;
typedef struct StdVideoH265ProfileTierLevelFlags {
uint32_t general_tier_flag : 1;
uint32_t general_progressive_source_flag : 1;
uint32_t general_interlaced_source_flag : 1;
uint32_t general_non_packed_constraint_flag : 1;
uint32_t general_frame_only_constraint_flag : 1;
} StdVideoH265ProfileTierLevelFlags;
typedef struct StdVideoH265ProfileTierLevel {
StdVideoH265ProfileTierLevelFlags flags;
StdVideoH265ProfileIdc general_profile_idc;
StdVideoH265LevelIdc general_level_idc;
} StdVideoH265ProfileTierLevel;
typedef struct StdVideoH265VideoParameterSet {
StdVideoH265VpsFlags flags;
uint8_t vps_video_parameter_set_id;
uint8_t vps_max_sub_layers_minus1;
uint8_t reserved1;
uint8_t reserved2;
uint32_t vps_num_units_in_tick;
uint32_t vps_time_scale;
uint32_t vps_num_ticks_poc_diff_one_minus1;
uint32_t reserved3;
const StdVideoH265DecPicBufMgr* pDecPicBufMgr;
const StdVideoH265HrdParameters* pHrdParameters;
const StdVideoH265ProfileTierLevel* pProfileTierLevel;
} StdVideoH265VideoParameterSet;
typedef struct StdVideoH265ScalingLists {
uint8_t ScalingList4x4[STD_VIDEO_H265_SCALING_LIST_4X4_NUM_LISTS][STD_VIDEO_H265_SCALING_LIST_4X4_NUM_ELEMENTS];
uint8_t ScalingList8x8[STD_VIDEO_H265_SCALING_LIST_8X8_NUM_LISTS][STD_VIDEO_H265_SCALING_LIST_8X8_NUM_ELEMENTS];
uint8_t ScalingList16x16[STD_VIDEO_H265_SCALING_LIST_16X16_NUM_LISTS][STD_VIDEO_H265_SCALING_LIST_16X16_NUM_ELEMENTS];
uint8_t ScalingList32x32[STD_VIDEO_H265_SCALING_LIST_32X32_NUM_LISTS][STD_VIDEO_H265_SCALING_LIST_32X32_NUM_ELEMENTS];
uint8_t ScalingListDCCoef16x16[STD_VIDEO_H265_SCALING_LIST_16X16_NUM_LISTS];
uint8_t ScalingListDCCoef32x32[STD_VIDEO_H265_SCALING_LIST_32X32_NUM_LISTS];
} StdVideoH265ScalingLists;
typedef struct StdVideoH265SpsVuiFlags {
uint32_t aspect_ratio_info_present_flag : 1;
uint32_t overscan_info_present_flag : 1;
uint32_t overscan_appropriate_flag : 1;
uint32_t video_signal_type_present_flag : 1;
uint32_t video_full_range_flag : 1;
uint32_t colour_description_present_flag : 1;
uint32_t chroma_loc_info_present_flag : 1;
uint32_t neutral_chroma_indication_flag : 1;
uint32_t field_seq_flag : 1;
uint32_t frame_field_info_present_flag : 1;
uint32_t default_display_window_flag : 1;
uint32_t vui_timing_info_present_flag : 1;
uint32_t vui_poc_proportional_to_timing_flag : 1;
uint32_t vui_hrd_parameters_present_flag : 1;
uint32_t bitstream_restriction_flag : 1;
uint32_t tiles_fixed_structure_flag : 1;
uint32_t motion_vectors_over_pic_boundaries_flag : 1;
uint32_t restricted_ref_pic_lists_flag : 1;
} StdVideoH265SpsVuiFlags;
typedef struct StdVideoH265SequenceParameterSetVui {
StdVideoH265SpsVuiFlags flags;
StdVideoH265AspectRatioIdc aspect_ratio_idc;
uint16_t sar_width;
uint16_t sar_height;
uint8_t video_format;
uint8_t colour_primaries;
uint8_t transfer_characteristics;
uint8_t matrix_coeffs;
uint8_t chroma_sample_loc_type_top_field;
uint8_t chroma_sample_loc_type_bottom_field;
uint8_t reserved1;
uint8_t reserved2;
uint16_t def_disp_win_left_offset;
uint16_t def_disp_win_right_offset;
uint16_t def_disp_win_top_offset;
uint16_t def_disp_win_bottom_offset;
uint32_t vui_num_units_in_tick;
uint32_t vui_time_scale;
uint32_t vui_num_ticks_poc_diff_one_minus1;
uint16_t min_spatial_segmentation_idc;
uint16_t reserved3;
uint8_t max_bytes_per_pic_denom;
uint8_t max_bits_per_min_cu_denom;
uint8_t log2_max_mv_length_horizontal;
uint8_t log2_max_mv_length_vertical;
const StdVideoH265HrdParameters* pHrdParameters;
} StdVideoH265SequenceParameterSetVui;
typedef struct StdVideoH265PredictorPaletteEntries {
uint16_t PredictorPaletteEntries[STD_VIDEO_H265_PREDICTOR_PALETTE_COMPONENTS_LIST_SIZE][STD_VIDEO_H265_PREDICTOR_PALETTE_COMP_ENTRIES_LIST_SIZE];
} StdVideoH265PredictorPaletteEntries;
typedef struct StdVideoH265SpsFlags {
uint32_t sps_temporal_id_nesting_flag : 1;
uint32_t separate_colour_plane_flag : 1;
uint32_t conformance_window_flag : 1;
uint32_t sps_sub_layer_ordering_info_present_flag : 1;
uint32_t scaling_list_enabled_flag : 1;
uint32_t sps_scaling_list_data_present_flag : 1;
uint32_t amp_enabled_flag : 1;
uint32_t sample_adaptive_offset_enabled_flag : 1;
uint32_t pcm_enabled_flag : 1;
uint32_t pcm_loop_filter_disabled_flag : 1;
uint32_t long_term_ref_pics_present_flag : 1;
uint32_t sps_temporal_mvp_enabled_flag : 1;
uint32_t strong_intra_smoothing_enabled_flag : 1;
uint32_t vui_parameters_present_flag : 1;
uint32_t sps_extension_present_flag : 1;
uint32_t sps_range_extension_flag : 1;
uint32_t transform_skip_rotation_enabled_flag : 1;
uint32_t transform_skip_context_enabled_flag : 1;
uint32_t implicit_rdpcm_enabled_flag : 1;
uint32_t explicit_rdpcm_enabled_flag : 1;
uint32_t extended_precision_processing_flag : 1;
uint32_t intra_smoothing_disabled_flag : 1;
uint32_t high_precision_offsets_enabled_flag : 1;
uint32_t persistent_rice_adaptation_enabled_flag : 1;
uint32_t cabac_bypass_alignment_enabled_flag : 1;
uint32_t sps_scc_extension_flag : 1;
uint32_t sps_curr_pic_ref_enabled_flag : 1;
uint32_t palette_mode_enabled_flag : 1;
uint32_t sps_palette_predictor_initializers_present_flag : 1;
uint32_t intra_boundary_filtering_disabled_flag : 1;
} StdVideoH265SpsFlags;
typedef struct StdVideoH265ShortTermRefPicSetFlags {
uint32_t inter_ref_pic_set_prediction_flag : 1;
uint32_t delta_rps_sign : 1;
} StdVideoH265ShortTermRefPicSetFlags;
typedef struct StdVideoH265ShortTermRefPicSet {
StdVideoH265ShortTermRefPicSetFlags flags;
uint32_t delta_idx_minus1;
uint16_t use_delta_flag;
uint16_t abs_delta_rps_minus1;
uint16_t used_by_curr_pic_flag;
uint16_t used_by_curr_pic_s0_flag;
uint16_t used_by_curr_pic_s1_flag;
uint16_t reserved1;
uint8_t reserved2;
uint8_t reserved3;
uint8_t num_negative_pics;
uint8_t num_positive_pics;
uint16_t delta_poc_s0_minus1[STD_VIDEO_H265_MAX_DPB_SIZE];
uint16_t delta_poc_s1_minus1[STD_VIDEO_H265_MAX_DPB_SIZE];
} StdVideoH265ShortTermRefPicSet;
typedef struct StdVideoH265LongTermRefPicsSps {
uint32_t used_by_curr_pic_lt_sps_flag;
uint32_t lt_ref_pic_poc_lsb_sps[STD_VIDEO_H265_MAX_LONG_TERM_REF_PICS_SPS];
} StdVideoH265LongTermRefPicsSps;
typedef struct StdVideoH265SequenceParameterSet {
StdVideoH265SpsFlags flags;
StdVideoH265ChromaFormatIdc chroma_format_idc;
uint32_t pic_width_in_luma_samples;
uint32_t pic_height_in_luma_samples;
uint8_t sps_video_parameter_set_id;
uint8_t sps_max_sub_layers_minus1;
uint8_t sps_seq_parameter_set_id;
uint8_t bit_depth_luma_minus8;
uint8_t bit_depth_chroma_minus8;
uint8_t log2_max_pic_order_cnt_lsb_minus4;
uint8_t log2_min_luma_coding_block_size_minus3;
uint8_t log2_diff_max_min_luma_coding_block_size;
uint8_t log2_min_luma_transform_block_size_minus2;
uint8_t log2_diff_max_min_luma_transform_block_size;
uint8_t max_transform_hierarchy_depth_inter;
uint8_t max_transform_hierarchy_depth_intra;
uint8_t num_short_term_ref_pic_sets;
uint8_t num_long_term_ref_pics_sps;
uint8_t pcm_sample_bit_depth_luma_minus1;
uint8_t pcm_sample_bit_depth_chroma_minus1;
uint8_t log2_min_pcm_luma_coding_block_size_minus3;
uint8_t log2_diff_max_min_pcm_luma_coding_block_size;
uint8_t reserved1;
uint8_t reserved2;
uint8_t palette_max_size;
uint8_t delta_palette_max_predictor_size;
uint8_t motion_vector_resolution_control_idc;
uint8_t sps_num_palette_predictor_initializers_minus1;
uint32_t conf_win_left_offset;
uint32_t conf_win_right_offset;
uint32_t conf_win_top_offset;
uint32_t conf_win_bottom_offset;
const StdVideoH265ProfileTierLevel* pProfileTierLevel;
const StdVideoH265DecPicBufMgr* pDecPicBufMgr;
const StdVideoH265ScalingLists* pScalingLists;
const StdVideoH265ShortTermRefPicSet* pShortTermRefPicSet;
const StdVideoH265LongTermRefPicsSps* pLongTermRefPicsSps;
const StdVideoH265SequenceParameterSetVui* pSequenceParameterSetVui;
const StdVideoH265PredictorPaletteEntries* pPredictorPaletteEntries;
} StdVideoH265SequenceParameterSet;
typedef struct StdVideoH265PpsFlags {
uint32_t dependent_slice_segments_enabled_flag : 1;
uint32_t output_flag_present_flag : 1;
uint32_t sign_data_hiding_enabled_flag : 1;
uint32_t cabac_init_present_flag : 1;
uint32_t constrained_intra_pred_flag : 1;
uint32_t transform_skip_enabled_flag : 1;
uint32_t cu_qp_delta_enabled_flag : 1;
uint32_t pps_slice_chroma_qp_offsets_present_flag : 1;
uint32_t weighted_pred_flag : 1;
uint32_t weighted_bipred_flag : 1;
uint32_t transquant_bypass_enabled_flag : 1;
uint32_t tiles_enabled_flag : 1;
uint32_t entropy_coding_sync_enabled_flag : 1;
uint32_t uniform_spacing_flag : 1;
uint32_t loop_filter_across_tiles_enabled_flag : 1;
uint32_t pps_loop_filter_across_slices_enabled_flag : 1;
uint32_t deblocking_filter_control_present_flag : 1;
uint32_t deblocking_filter_override_enabled_flag : 1;
uint32_t pps_deblocking_filter_disabled_flag : 1;
uint32_t pps_scaling_list_data_present_flag : 1;
uint32_t lists_modification_present_flag : 1;
uint32_t slice_segment_header_extension_present_flag : 1;
uint32_t pps_extension_present_flag : 1;
uint32_t cross_component_prediction_enabled_flag : 1;
uint32_t chroma_qp_offset_list_enabled_flag : 1;
uint32_t pps_curr_pic_ref_enabled_flag : 1;
uint32_t residual_adaptive_colour_transform_enabled_flag : 1;
uint32_t pps_slice_act_qp_offsets_present_flag : 1;
uint32_t pps_palette_predictor_initializers_present_flag : 1;
uint32_t monochrome_palette_flag : 1;
uint32_t pps_range_extension_flag : 1;
} StdVideoH265PpsFlags;
typedef struct StdVideoH265PictureParameterSet {
StdVideoH265PpsFlags flags;
uint8_t pps_pic_parameter_set_id;
uint8_t pps_seq_parameter_set_id;
uint8_t sps_video_parameter_set_id;
uint8_t num_extra_slice_header_bits;
uint8_t num_ref_idx_l0_default_active_minus1;
uint8_t num_ref_idx_l1_default_active_minus1;
int8_t init_qp_minus26;
uint8_t diff_cu_qp_delta_depth;
int8_t pps_cb_qp_offset;
int8_t pps_cr_qp_offset;
int8_t pps_beta_offset_div2;
int8_t pps_tc_offset_div2;
uint8_t log2_parallel_merge_level_minus2;
uint8_t log2_max_transform_skip_block_size_minus2;
uint8_t diff_cu_chroma_qp_offset_depth;
uint8_t chroma_qp_offset_list_len_minus1;
int8_t cb_qp_offset_list[STD_VIDEO_H265_CHROMA_QP_OFFSET_LIST_SIZE];
int8_t cr_qp_offset_list[STD_VIDEO_H265_CHROMA_QP_OFFSET_LIST_SIZE];
uint8_t log2_sao_offset_scale_luma;
uint8_t log2_sao_offset_scale_chroma;
int8_t pps_act_y_qp_offset_plus5;
int8_t pps_act_cb_qp_offset_plus5;
int8_t pps_act_cr_qp_offset_plus3;
uint8_t pps_num_palette_predictor_initializers;
uint8_t luma_bit_depth_entry_minus8;
uint8_t chroma_bit_depth_entry_minus8;
uint8_t num_tile_columns_minus1;
uint8_t num_tile_rows_minus1;
uint8_t reserved1;
uint8_t reserved2;
uint16_t column_width_minus1[STD_VIDEO_H265_CHROMA_QP_OFFSET_TILE_COLS_LIST_SIZE];
uint16_t row_height_minus1[STD_VIDEO_H265_CHROMA_QP_OFFSET_TILE_ROWS_LIST_SIZE];
uint32_t reserved3;
const StdVideoH265ScalingLists* pScalingLists;
const StdVideoH265PredictorPaletteEntries* pPredictorPaletteEntries;
} StdVideoH265PictureParameterSet;
#ifdef __cplusplus
}
#endif
#endif
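The H.265 structures follow the same pattern. For instance, a profile/tier/level block for a hypothetical Main-profile, Main-tier, Level 5.1 stream might be filled like this (values are illustrative assumptions only):
// Illustrative only: a Main profile, Main tier, Level 5.1 profile_tier_level().
#include <string.h>
#include <vk_video/vulkan_video_codec_h265std.h> // assumed install path for this header

static StdVideoH265ProfileTierLevel makeExamplePtl(void) {
    StdVideoH265ProfileTierLevel ptl;
    memset(&ptl, 0, sizeof(ptl)); // general_tier_flag = 0 selects the Main tier
    ptl.flags.general_progressive_source_flag = 1;
    ptl.flags.general_frame_only_constraint_flag = 1;
    ptl.general_profile_idc = STD_VIDEO_H265_PROFILE_IDC_MAIN;
    ptl.general_level_idc = STD_VIDEO_H265_LEVEL_IDC_5_1;
    return ptl;
}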

View File

@@ -0,0 +1,65 @@
#ifndef VULKAN_VIDEO_CODEC_H265STD_DECODE_H_
#define VULKAN_VIDEO_CODEC_H265STD_DECODE_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define vulkan_video_codec_h265std_decode 1
// Vulkan 0.9 provisional Vulkan video H.265 decode std specification version number
#define VK_STD_VULKAN_VIDEO_CODEC_H265_DECODE_API_VERSION_0_9_9 VK_MAKE_VIDEO_STD_VERSION(0, 9, 9) // Patch version should always be set to 0
#define STD_VIDEO_DECODE_H265_REF_PIC_SET_LIST_SIZE 8
#define VK_STD_VULKAN_VIDEO_CODEC_H265_DECODE_SPEC_VERSION VK_STD_VULKAN_VIDEO_CODEC_H265_DECODE_API_VERSION_0_9_9
#define VK_STD_VULKAN_VIDEO_CODEC_H265_DECODE_EXTENSION_NAME "VK_STD_vulkan_video_codec_h265_decode"
typedef struct StdVideoDecodeH265PictureInfoFlags {
uint32_t IrapPicFlag : 1;
uint32_t IdrPicFlag : 1;
uint32_t IsReference : 1;
uint32_t short_term_ref_pic_set_sps_flag : 1;
} StdVideoDecodeH265PictureInfoFlags;
typedef struct StdVideoDecodeH265PictureInfo {
StdVideoDecodeH265PictureInfoFlags flags;
uint8_t sps_video_parameter_set_id;
uint8_t pps_seq_parameter_set_id;
uint8_t pps_pic_parameter_set_id;
uint8_t NumDeltaPocsOfRefRpsIdx;
int32_t PicOrderCntVal;
uint16_t NumBitsForSTRefPicSetInSlice;
uint16_t reserved;
uint8_t RefPicSetStCurrBefore[STD_VIDEO_DECODE_H265_REF_PIC_SET_LIST_SIZE];
uint8_t RefPicSetStCurrAfter[STD_VIDEO_DECODE_H265_REF_PIC_SET_LIST_SIZE];
uint8_t RefPicSetLtCurr[STD_VIDEO_DECODE_H265_REF_PIC_SET_LIST_SIZE];
} StdVideoDecodeH265PictureInfo;
typedef struct StdVideoDecodeH265ReferenceInfoFlags {
uint32_t used_for_long_term_reference : 1;
uint32_t unused_for_reference : 1;
} StdVideoDecodeH265ReferenceInfoFlags;
typedef struct StdVideoDecodeH265ReferenceInfo {
StdVideoDecodeH265ReferenceInfoFlags flags;
int32_t PicOrderCntVal;
} StdVideoDecodeH265ReferenceInfo;
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,146 @@
#ifndef VULKAN_VIDEO_CODEC_H265STD_ENCODE_H_
#define VULKAN_VIDEO_CODEC_H265STD_ENCODE_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define vulkan_video_codec_h265std_encode 1
// Vulkan 0.9 provisional Vulkan video H.265 encode std specification version number
#define VK_STD_VULKAN_VIDEO_CODEC_H265_ENCODE_API_VERSION_0_9_9 VK_MAKE_VIDEO_STD_VERSION(0, 9, 9) // Patch version should always be set to 0
#define VK_STD_VULKAN_VIDEO_CODEC_H265_ENCODE_SPEC_VERSION VK_STD_VULKAN_VIDEO_CODEC_H265_ENCODE_API_VERSION_0_9_9
#define VK_STD_VULKAN_VIDEO_CODEC_H265_ENCODE_EXTENSION_NAME "VK_STD_vulkan_video_codec_h265_encode"
typedef struct StdVideoEncodeH265WeightTableFlags {
uint16_t luma_weight_l0_flag;
uint16_t chroma_weight_l0_flag;
uint16_t luma_weight_l1_flag;
uint16_t chroma_weight_l1_flag;
} StdVideoEncodeH265WeightTableFlags;
typedef struct StdVideoEncodeH265WeightTable {
StdVideoEncodeH265WeightTableFlags flags;
uint8_t luma_log2_weight_denom;
int8_t delta_chroma_log2_weight_denom;
int8_t delta_luma_weight_l0[STD_VIDEO_H265_MAX_NUM_LIST_REF];
int8_t luma_offset_l0[STD_VIDEO_H265_MAX_NUM_LIST_REF];
int8_t delta_chroma_weight_l0[STD_VIDEO_H265_MAX_NUM_LIST_REF][STD_VIDEO_H265_MAX_CHROMA_PLANES];
int8_t delta_chroma_offset_l0[STD_VIDEO_H265_MAX_NUM_LIST_REF][STD_VIDEO_H265_MAX_CHROMA_PLANES];
int8_t delta_luma_weight_l1[STD_VIDEO_H265_MAX_NUM_LIST_REF];
int8_t luma_offset_l1[STD_VIDEO_H265_MAX_NUM_LIST_REF];
int8_t delta_chroma_weight_l1[STD_VIDEO_H265_MAX_NUM_LIST_REF][STD_VIDEO_H265_MAX_CHROMA_PLANES];
int8_t delta_chroma_offset_l1[STD_VIDEO_H265_MAX_NUM_LIST_REF][STD_VIDEO_H265_MAX_CHROMA_PLANES];
} StdVideoEncodeH265WeightTable;
typedef struct StdVideoEncodeH265SliceSegmentHeaderFlags {
uint32_t first_slice_segment_in_pic_flag : 1;
uint32_t no_output_of_prior_pics_flag : 1;
uint32_t dependent_slice_segment_flag : 1;
uint32_t pic_output_flag : 1;
uint32_t short_term_ref_pic_set_sps_flag : 1;
uint32_t slice_temporal_mvp_enable_flag : 1;
uint32_t slice_sao_luma_flag : 1;
uint32_t slice_sao_chroma_flag : 1;
uint32_t num_ref_idx_active_override_flag : 1;
uint32_t mvd_l1_zero_flag : 1;
uint32_t cabac_init_flag : 1;
uint32_t cu_chroma_qp_offset_enabled_flag : 1;
uint32_t deblocking_filter_override_flag : 1;
uint32_t slice_deblocking_filter_disabled_flag : 1;
uint32_t collocated_from_l0_flag : 1;
uint32_t slice_loop_filter_across_slices_enabled_flag : 1;
} StdVideoEncodeH265SliceSegmentHeaderFlags;
typedef struct StdVideoEncodeH265SliceSegmentLongTermRefPics {
uint8_t num_long_term_sps;
uint8_t num_long_term_pics;
uint8_t lt_idx_sps[STD_VIDEO_H265_MAX_LONG_TERM_REF_PICS_SPS];
uint8_t poc_lsb_lt[STD_VIDEO_H265_MAX_LONG_TERM_PICS];
uint16_t used_by_curr_pic_lt_flag;
uint8_t delta_poc_msb_present_flag[STD_VIDEO_H265_MAX_DELTA_POC];
uint8_t delta_poc_msb_cycle_lt[STD_VIDEO_H265_MAX_DELTA_POC];
} StdVideoEncodeH265SliceSegmentLongTermRefPics;
typedef struct StdVideoEncodeH265SliceSegmentHeader {
StdVideoEncodeH265SliceSegmentHeaderFlags flags;
StdVideoH265SliceType slice_type;
uint32_t slice_segment_address;
uint8_t short_term_ref_pic_set_idx;
uint8_t collocated_ref_idx;
uint8_t num_ref_idx_l0_active_minus1;
uint8_t num_ref_idx_l1_active_minus1;
uint8_t MaxNumMergeCand;
int8_t slice_cb_qp_offset;
int8_t slice_cr_qp_offset;
int8_t slice_beta_offset_div2;
int8_t slice_tc_offset_div2;
int8_t slice_act_y_qp_offset;
int8_t slice_act_cb_qp_offset;
int8_t slice_act_cr_qp_offset;
const StdVideoH265ShortTermRefPicSet* pShortTermRefPicSet;
const StdVideoEncodeH265SliceSegmentLongTermRefPics* pLongTermRefPics;
const StdVideoEncodeH265WeightTable* pWeightTable;
} StdVideoEncodeH265SliceSegmentHeader;
typedef struct StdVideoEncodeH265ReferenceModificationFlags {
uint32_t ref_pic_list_modification_flag_l0 : 1;
uint32_t ref_pic_list_modification_flag_l1 : 1;
} StdVideoEncodeH265ReferenceModificationFlags;
typedef struct StdVideoEncodeH265ReferenceModifications {
StdVideoEncodeH265ReferenceModificationFlags flags;
uint8_t referenceList0ModificationsCount;
const uint8_t* pReferenceList0Modifications;
uint8_t referenceList1ModificationsCount;
const uint8_t* pReferenceList1Modifications;
} StdVideoEncodeH265ReferenceModifications;
typedef struct StdVideoEncodeH265PictureInfoFlags {
uint32_t is_reference_flag : 1;
uint32_t IrapPicFlag : 1;
uint32_t long_term_flag : 1;
uint32_t discardable_flag : 1;
uint32_t cross_layer_bla_flag : 1;
} StdVideoEncodeH265PictureInfoFlags;
typedef struct StdVideoEncodeH265PictureInfo {
StdVideoEncodeH265PictureInfoFlags flags;
StdVideoH265PictureType PictureType;
uint8_t sps_video_parameter_set_id;
uint8_t pps_seq_parameter_set_id;
uint8_t pps_pic_parameter_set_id;
int32_t PicOrderCntVal;
uint8_t TemporalId;
} StdVideoEncodeH265PictureInfo;
typedef struct StdVideoEncodeH265ReferenceInfoFlags {
uint32_t used_for_long_term_reference : 1;
uint32_t unused_for_reference : 1;
} StdVideoEncodeH265ReferenceInfoFlags;
typedef struct StdVideoEncodeH265ReferenceInfo {
StdVideoEncodeH265ReferenceInfoFlags flags;
int32_t PicOrderCntVal;
uint8_t TemporalId;
} StdVideoEncodeH265ReferenceInfo;
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,31 @@
#ifndef VULKAN_VIDEO_CODECS_COMMON_H_
#define VULKAN_VIDEO_CODECS_COMMON_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define vulkan_video_codecs_common 1
#define VK_MAKE_VIDEO_STD_VERSION(major, minor, patch) \
((((uint32_t)(major)) << 22) | (((uint32_t)(minor)) << 12) | ((uint32_t)(patch)))
#ifdef __cplusplus
}
#endif
#endif
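The macro simply packs the three components into one 32-bit value: major in bits 22 and up, minor in bits 12 through 21, patch in the low 12 bits. A worked example follows; the assert and the include path are only for illustration.
// Worked example of the packing: VK_MAKE_VIDEO_STD_VERSION(0, 9, 8)
//   = (0u << 22) | (9u << 12) | 8u
//   = 0x0000 | 0x9000 | 0x0008
//   = 0x9008 (36872 decimal)
#include <assert.h>
#include <stdint.h>
#include <vk_video/vulkan_video_codecs_common.h> // assumed install path for this header

static void checkVideoStdVersionPacking(void) {
    assert(VK_MAKE_VIDEO_STD_VERSION(0, 9, 8) == 0x9008u);
    assert(VK_MAKE_VIDEO_STD_VERSION(1, 0, 0) == (1u << 22));
}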

View File

@@ -0,0 +1,245 @@
//
// File: vk_icd.h
//
/*
* Copyright (c) 2015-2016 The Khronos Group Inc.
* Copyright (c) 2015-2016 Valve Corporation
* Copyright (c) 2015-2016 LunarG, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef VKICD_H
#define VKICD_H
#include "vulkan.h"
#include <stdbool.h>
// Loader-ICD version negotiation API. Versions add the following features:
// Version 0 - Initial. Doesn't support vk_icdGetInstanceProcAddr
// or vk_icdNegotiateLoaderICDInterfaceVersion.
// Version 1 - Add support for vk_icdGetInstanceProcAddr.
// Version 2 - Add Loader/ICD Interface version negotiation
// via vk_icdNegotiateLoaderICDInterfaceVersion.
// Version 3 - Add ICD creation/destruction of KHR_surface objects.
// Version 4 - Add unknown physical device extension querying via
// vk_icdGetPhysicalDeviceProcAddr.
// Version 5 - Tells ICDs that the loader is now paying attention to the
// application version of Vulkan passed into the ApplicationInfo
// structure during vkCreateInstance. This will tell the ICD
// that if the loader is older, it should automatically fail a
// call for any API version > 1.0. Otherwise, the loader will
// manually determine if it can support the expected version.
// Version 6 - Add support for vk_icdEnumerateAdapterPhysicalDevices.
#define CURRENT_LOADER_ICD_INTERFACE_VERSION 6
#define MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION 0
#define MIN_PHYS_DEV_EXTENSION_ICD_INTERFACE_VERSION 4
// Old typedefs that don't follow a proper naming convention but are preserved for compatibility
typedef VkResult(VKAPI_PTR *PFN_vkNegotiateLoaderICDInterfaceVersion)(uint32_t *pVersion);
// This is defined in vk_layer.h which will be found by the loader, but if an ICD is building against this
// file directly, it won't be found.
#ifndef PFN_GetPhysicalDeviceProcAddr
typedef PFN_vkVoidFunction(VKAPI_PTR *PFN_GetPhysicalDeviceProcAddr)(VkInstance instance, const char *pName);
#endif
// Typedefs for loader/ICD interface
typedef VkResult (VKAPI_PTR *PFN_vk_icdNegotiateLoaderICDInterfaceVersion)(uint32_t* pVersion);
typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vk_icdGetInstanceProcAddr)(VkInstance instance, const char* pName);
typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vk_icdGetPhysicalDeviceProcAddr)(VkInstance instance, const char* pName);
#if defined(VK_USE_PLATFORM_WIN32_KHR)
typedef VkResult (VKAPI_PTR *PFN_vk_icdEnumerateAdapterPhysicalDevices)(VkInstance instance, LUID adapterLUID,
uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices);
#endif
// Prototypes for loader/ICD interface
#if !defined(VK_NO_PROTOTYPES)
#ifdef __cplusplus
extern "C" {
#endif
VKAPI_ATTR VkResult VKAPI_CALL vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pVersion);
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(VkInstance instance, const char* pName);
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetPhysicalDeviceProcAddr(VkInstance instance, const char* pName);
#if defined(VK_USE_PLATFORM_WIN32_KHR)
VKAPI_ATTR VkResult VKAPI_CALL vk_icdEnumerateAdapterPhysicalDevices(VkInstance instance, LUID adapterLUID,
uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices);
#endif
#ifdef __cplusplus
}
#endif
#endif
/*
* The ICD must reserve space for a pointer for the loader's dispatch
* table, at the start of <each object>.
* The ICD must initialize this variable using the SET_LOADER_MAGIC_VALUE macro.
*/
#define ICD_LOADER_MAGIC 0x01CDC0DE
typedef union {
uintptr_t loaderMagic;
void *loaderData;
} VK_LOADER_DATA;
static inline void set_loader_magic_value(void *pNewObject) {
VK_LOADER_DATA *loader_info = (VK_LOADER_DATA *)pNewObject;
loader_info->loaderMagic = ICD_LOADER_MAGIC;
}
static inline bool valid_loader_magic_value(void *pNewObject) {
const VK_LOADER_DATA *loader_info = (VK_LOADER_DATA *)pNewObject;
return (loader_info->loaderMagic & 0xffffffff) == ICD_LOADER_MAGIC;
}
/*
* Windows and Linux ICDs will treat VkSurfaceKHR as a pointer to a struct that
* contains the platform-specific connection and surface information.
*/
typedef enum {
VK_ICD_WSI_PLATFORM_MIR,
VK_ICD_WSI_PLATFORM_WAYLAND,
VK_ICD_WSI_PLATFORM_WIN32,
VK_ICD_WSI_PLATFORM_XCB,
VK_ICD_WSI_PLATFORM_XLIB,
VK_ICD_WSI_PLATFORM_ANDROID,
VK_ICD_WSI_PLATFORM_MACOS,
VK_ICD_WSI_PLATFORM_IOS,
VK_ICD_WSI_PLATFORM_DISPLAY,
VK_ICD_WSI_PLATFORM_HEADLESS,
VK_ICD_WSI_PLATFORM_METAL,
VK_ICD_WSI_PLATFORM_DIRECTFB,
VK_ICD_WSI_PLATFORM_VI,
VK_ICD_WSI_PLATFORM_GGP,
VK_ICD_WSI_PLATFORM_SCREEN,
} VkIcdWsiPlatform;
typedef struct {
VkIcdWsiPlatform platform;
} VkIcdSurfaceBase;
#ifdef VK_USE_PLATFORM_MIR_KHR
typedef struct {
VkIcdSurfaceBase base;
MirConnection *connection;
MirSurface *mirSurface;
} VkIcdSurfaceMir;
#endif // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
typedef struct {
VkIcdSurfaceBase base;
struct wl_display *display;
struct wl_surface *surface;
} VkIcdSurfaceWayland;
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
typedef struct {
VkIcdSurfaceBase base;
HINSTANCE hinstance;
HWND hwnd;
} VkIcdSurfaceWin32;
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
typedef struct {
VkIcdSurfaceBase base;
xcb_connection_t *connection;
xcb_window_t window;
} VkIcdSurfaceXcb;
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
typedef struct {
VkIcdSurfaceBase base;
Display *dpy;
Window window;
} VkIcdSurfaceXlib;
#endif // VK_USE_PLATFORM_XLIB_KHR
#ifdef VK_USE_PLATFORM_DIRECTFB_EXT
typedef struct {
VkIcdSurfaceBase base;
IDirectFB *dfb;
IDirectFBSurface *surface;
} VkIcdSurfaceDirectFB;
#endif // VK_USE_PLATFORM_DIRECTFB_EXT
#ifdef VK_USE_PLATFORM_ANDROID_KHR
typedef struct {
VkIcdSurfaceBase base;
struct ANativeWindow *window;
} VkIcdSurfaceAndroid;
#endif // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_MACOS_MVK
typedef struct {
VkIcdSurfaceBase base;
const void *pView;
} VkIcdSurfaceMacOS;
#endif // VK_USE_PLATFORM_MACOS_MVK
#ifdef VK_USE_PLATFORM_IOS_MVK
typedef struct {
VkIcdSurfaceBase base;
const void *pView;
} VkIcdSurfaceIOS;
#endif // VK_USE_PLATFORM_IOS_MVK
#ifdef VK_USE_PLATFORM_GGP
typedef struct {
VkIcdSurfaceBase base;
GgpStreamDescriptor streamDescriptor;
} VkIcdSurfaceGgp;
#endif // VK_USE_PLATFORM_GGP
typedef struct {
VkIcdSurfaceBase base;
VkDisplayModeKHR displayMode;
uint32_t planeIndex;
uint32_t planeStackIndex;
VkSurfaceTransformFlagBitsKHR transform;
float globalAlpha;
VkDisplayPlaneAlphaFlagBitsKHR alphaMode;
VkExtent2D imageExtent;
} VkIcdSurfaceDisplay;
typedef struct {
VkIcdSurfaceBase base;
} VkIcdSurfaceHeadless;
#ifdef VK_USE_PLATFORM_METAL_EXT
typedef struct {
VkIcdSurfaceBase base;
const CAMetalLayer *pLayer;
} VkIcdSurfaceMetal;
#endif // VK_USE_PLATFORM_METAL_EXT
#ifdef VK_USE_PLATFORM_VI_NN
typedef struct {
VkIcdSurfaceBase base;
void *window;
} VkIcdSurfaceVi;
#endif // VK_USE_PLATFORM_VI_NN
#ifdef VK_USE_PLATFORM_SCREEN_QNX
typedef struct {
VkIcdSurfaceBase base;
struct _screen_context *context;
struct _screen_window *window;
} VkIcdSurfaceScreen;
#endif // VK_USE_PLATFORM_SCREEN_QNX
#endif // VKICD_H
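A minimal sketch of the ICD side of the negotiation described above: the loader passes in the newest interface version it understands, and the driver lowers it to the newest version it can honour. The clamping policy shown is a common convention, not something this header mandates.
// Illustrative ICD-side implementation of the negotiation entry point.
#include "vk_icd.h" // assumes the Vulkan headers are on the include path

VKAPI_ATTR VkResult VKAPI_CALL vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pVersion) {
    // Reply with the newest interface version this ICD supports, never
    // exceeding what the loader asked for.
    if (*pVersion > CURRENT_LOADER_ICD_INTERFACE_VERSION) {
        *pVersion = CURRENT_LOADER_ICD_INTERFACE_VERSION;
    }
    return VK_SUCCESS;
}
The same header's set_loader_magic_value and valid_loader_magic_value helpers are what a driver and the loader then use to stamp and check each dispatchable object created through such an ICD.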

View File

@@ -0,0 +1,210 @@
//
// File: vk_layer.h
//
/*
* Copyright (c) 2015-2017 The Khronos Group Inc.
* Copyright (c) 2015-2017 Valve Corporation
* Copyright (c) 2015-2017 LunarG, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* Need to define dispatch table
* Core struct can then have ptr to dispatch table at the top
* Along with object ptrs for current and next OBJ
*/
#pragma once
#include "vulkan.h"
#if defined(__GNUC__) && __GNUC__ >= 4
#define VK_LAYER_EXPORT __attribute__((visibility("default")))
#elif defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590)
#define VK_LAYER_EXPORT __attribute__((visibility("default")))
#else
#define VK_LAYER_EXPORT
#endif
#define MAX_NUM_UNKNOWN_EXTS 250
// Loader-Layer version negotiation API. Versions add the following features:
// Versions 0/1 - Initial. Doesn't support vk_layerGetPhysicalDeviceProcAddr
// or vk_icdNegotiateLoaderLayerInterfaceVersion.
// Version 2 - Add support for vk_layerGetPhysicalDeviceProcAddr and
// vk_icdNegotiateLoaderLayerInterfaceVersion.
#define CURRENT_LOADER_LAYER_INTERFACE_VERSION 2
#define MIN_SUPPORTED_LOADER_LAYER_INTERFACE_VERSION 1
#define VK_CURRENT_CHAIN_VERSION 1
// Typedef for use in the interfaces below
typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_GetPhysicalDeviceProcAddr)(VkInstance instance, const char* pName);
// Version negotiation values
typedef enum VkNegotiateLayerStructType {
LAYER_NEGOTIATE_UNINTIALIZED = 0,
LAYER_NEGOTIATE_INTERFACE_STRUCT = 1,
} VkNegotiateLayerStructType;
// Version negotiation structures
typedef struct VkNegotiateLayerInterface {
VkNegotiateLayerStructType sType;
void *pNext;
uint32_t loaderLayerInterfaceVersion;
PFN_vkGetInstanceProcAddr pfnGetInstanceProcAddr;
PFN_vkGetDeviceProcAddr pfnGetDeviceProcAddr;
PFN_GetPhysicalDeviceProcAddr pfnGetPhysicalDeviceProcAddr;
} VkNegotiateLayerInterface;
// Version negotiation functions
typedef VkResult (VKAPI_PTR *PFN_vkNegotiateLoaderLayerInterfaceVersion)(VkNegotiateLayerInterface *pVersionStruct);
// Function prototype for unknown physical device extension command
typedef VkResult(VKAPI_PTR *PFN_PhysDevExt)(VkPhysicalDevice phys_device);
// ------------------------------------------------------------------------------------------------
// CreateInstance and CreateDevice support structures
/* Sub type of structure for instance and device loader ext of CreateInfo.
* When sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO
* or sType == VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO
* then VkLayerFunction indicates struct type pointed to by pNext
*/
typedef enum VkLayerFunction_ {
VK_LAYER_LINK_INFO = 0,
VK_LOADER_DATA_CALLBACK = 1,
VK_LOADER_LAYER_CREATE_DEVICE_CALLBACK = 2,
VK_LOADER_FEATURES = 3,
} VkLayerFunction;
typedef struct VkLayerInstanceLink_ {
struct VkLayerInstanceLink_ *pNext;
PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr;
PFN_GetPhysicalDeviceProcAddr pfnNextGetPhysicalDeviceProcAddr;
} VkLayerInstanceLink;
/*
* When creating the device chain the loader needs to pass
 * down information about its device structure needed at
* the end of the chain. Passing the data via the
* VkLayerDeviceInfo avoids issues with finding the
* exact instance being used.
*/
typedef struct VkLayerDeviceInfo_ {
void *device_info;
PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr;
} VkLayerDeviceInfo;
typedef VkResult (VKAPI_PTR *PFN_vkSetInstanceLoaderData)(VkInstance instance,
void *object);
typedef VkResult (VKAPI_PTR *PFN_vkSetDeviceLoaderData)(VkDevice device,
void *object);
typedef VkResult (VKAPI_PTR *PFN_vkLayerCreateDevice)(VkInstance instance, VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, PFN_vkGetInstanceProcAddr layerGIPA, PFN_vkGetDeviceProcAddr *nextGDPA);
typedef void (VKAPI_PTR *PFN_vkLayerDestroyDevice)(VkDevice physicalDevice, const VkAllocationCallbacks *pAllocator, PFN_vkDestroyDevice destroyFunction);
typedef enum VkLoaderFeastureFlagBits {
VK_LOADER_FEATURE_PHYSICAL_DEVICE_SORTING = 0x00000001,
} VkLoaderFlagBits;
typedef VkFlags VkLoaderFeatureFlags;
typedef struct {
VkStructureType sType; // VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO
const void *pNext;
VkLayerFunction function;
union {
VkLayerInstanceLink *pLayerInfo;
PFN_vkSetInstanceLoaderData pfnSetInstanceLoaderData;
struct {
PFN_vkLayerCreateDevice pfnLayerCreateDevice;
PFN_vkLayerDestroyDevice pfnLayerDestroyDevice;
} layerDevice;
VkLoaderFeatureFlags loaderFeatures;
} u;
} VkLayerInstanceCreateInfo;
typedef struct VkLayerDeviceLink_ {
struct VkLayerDeviceLink_ *pNext;
PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr;
PFN_vkGetDeviceProcAddr pfnNextGetDeviceProcAddr;
} VkLayerDeviceLink;
typedef struct {
VkStructureType sType; // VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO
const void *pNext;
VkLayerFunction function;
union {
VkLayerDeviceLink *pLayerInfo;
PFN_vkSetDeviceLoaderData pfnSetDeviceLoaderData;
} u;
} VkLayerDeviceCreateInfo;
#ifdef __cplusplus
extern "C" {
#endif
VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct);
typedef enum VkChainType {
VK_CHAIN_TYPE_UNKNOWN = 0,
VK_CHAIN_TYPE_ENUMERATE_INSTANCE_EXTENSION_PROPERTIES = 1,
VK_CHAIN_TYPE_ENUMERATE_INSTANCE_LAYER_PROPERTIES = 2,
VK_CHAIN_TYPE_ENUMERATE_INSTANCE_VERSION = 3,
} VkChainType;
typedef struct VkChainHeader {
VkChainType type;
uint32_t version;
uint32_t size;
} VkChainHeader;
typedef struct VkEnumerateInstanceExtensionPropertiesChain {
VkChainHeader header;
VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceExtensionPropertiesChain *, const char *, uint32_t *,
VkExtensionProperties *);
const struct VkEnumerateInstanceExtensionPropertiesChain *pNextLink;
#if defined(__cplusplus)
inline VkResult CallDown(const char *pLayerName, uint32_t *pPropertyCount, VkExtensionProperties *pProperties) const {
return pfnNextLayer(pNextLink, pLayerName, pPropertyCount, pProperties);
}
#endif
} VkEnumerateInstanceExtensionPropertiesChain;
typedef struct VkEnumerateInstanceLayerPropertiesChain {
VkChainHeader header;
VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceLayerPropertiesChain *, uint32_t *, VkLayerProperties *);
const struct VkEnumerateInstanceLayerPropertiesChain *pNextLink;
#if defined(__cplusplus)
inline VkResult CallDown(uint32_t *pPropertyCount, VkLayerProperties *pProperties) const {
return pfnNextLayer(pNextLink, pPropertyCount, pProperties);
}
#endif
} VkEnumerateInstanceLayerPropertiesChain;
typedef struct VkEnumerateInstanceVersionChain {
VkChainHeader header;
VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceVersionChain *, uint32_t *);
const struct VkEnumerateInstanceVersionChain *pNextLink;
#if defined(__cplusplus)
inline VkResult CallDown(uint32_t *pApiVersion) const {
return pfnNextLayer(pNextLink, pApiVersion);
}
#endif
} VkEnumerateInstanceVersionChain;
#ifdef __cplusplus
}
#endif
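For symmetry with the ICD header, here is a hedged sketch of a layer answering the loader's negotiation call. The layer_GetInstanceProcAddr and layer_GetDeviceProcAddr stubs are hypothetical names standing in for a real layer's dispatch entry points.
// Illustrative layer-side negotiation; the two stub entry points below are
// placeholders for a real layer's interception tables.
#include <stddef.h>
#include "vk_layer.h" // assumes the Vulkan headers are on the include path

static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL layer_GetInstanceProcAddr(VkInstance instance, const char* pName) {
    (void)instance; (void)pName;
    return NULL; // a real layer returns its intercepted instance-level functions here
}

static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL layer_GetDeviceProcAddr(VkDevice device, const char* pName) {
    (void)device; (void)pName;
    return NULL; // a real layer returns its intercepted device-level functions here
}

VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface* pVersionStruct) {
    if (pVersionStruct == NULL || pVersionStruct->sType != LAYER_NEGOTIATE_INTERFACE_STRUCT) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
    }
    pVersionStruct->pfnGetInstanceProcAddr = layer_GetInstanceProcAddr;
    pVersionStruct->pfnGetDeviceProcAddr = layer_GetDeviceProcAddr;
    pVersionStruct->pfnGetPhysicalDeviceProcAddr = NULL; // optional; NULL when not implemented
    return VK_SUCCESS;
}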

View File

@@ -0,0 +1,84 @@
//
// File: vk_platform.h
//
/*
** Copyright 2014-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
#ifndef VK_PLATFORM_H_
#define VK_PLATFORM_H_
#ifdef __cplusplus
extern "C"
{
#endif // __cplusplus
/*
***************************************************************************************************
* Platform-specific directives and type declarations
***************************************************************************************************
*/
/* Platform-specific calling convention macros.
*
* Platforms should define these so that Vulkan clients call Vulkan commands
* with the same calling conventions that the Vulkan implementation expects.
*
* VKAPI_ATTR - Placed before the return type in function declarations.
* Useful for C++11 and GCC/Clang-style function attribute syntax.
* VKAPI_CALL - Placed after the return type in function declarations.
* Useful for MSVC-style calling convention syntax.
* VKAPI_PTR - Placed between the '(' and '*' in function pointer types.
*
* Function declaration: VKAPI_ATTR void VKAPI_CALL vkCommand(void);
* Function pointer type: typedef void (VKAPI_PTR *PFN_vkCommand)(void);
*/
#if defined(_WIN32)
// On Windows, Vulkan commands use the stdcall convention
#define VKAPI_ATTR
#define VKAPI_CALL __stdcall
#define VKAPI_PTR VKAPI_CALL
#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH < 7
#error "Vulkan is not supported for the 'armeabi' NDK ABI"
#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH >= 7 && defined(__ARM_32BIT_STATE)
// On Android 32-bit ARM targets, Vulkan functions use the "hardfloat"
// calling convention, i.e. float parameters are passed in registers. This
// is true even if the rest of the application passes floats on the stack,
// as it does by default when compiling for the armeabi-v7a NDK ABI.
#define VKAPI_ATTR __attribute__((pcs("aapcs-vfp")))
#define VKAPI_CALL
#define VKAPI_PTR VKAPI_ATTR
#else
// On other platforms, use the default calling convention
#define VKAPI_ATTR
#define VKAPI_CALL
#define VKAPI_PTR
#endif
#if !defined(VK_NO_STDDEF_H)
#include <stddef.h>
#endif // !defined(VK_NO_STDDEF_H)
#if !defined(VK_NO_STDINT_H)
#if defined(_MSC_VER) && (_MSC_VER < 1600)
typedef signed __int8 int8_t;
typedef unsigned __int8 uint8_t;
typedef signed __int16 int16_t;
typedef unsigned __int16 uint16_t;
typedef signed __int32 int32_t;
typedef unsigned __int32 uint32_t;
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
#else
#include <stdint.h>
#endif
#endif // !defined(VK_NO_STDINT_H)
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
#endif

View File

@@ -0,0 +1,69 @@
//
// File: vk_sdk_platform.h
//
/*
* Copyright (c) 2015-2016 The Khronos Group Inc.
* Copyright (c) 2015-2016 Valve Corporation
* Copyright (c) 2015-2016 LunarG, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef VK_SDK_PLATFORM_H
#define VK_SDK_PLATFORM_H
#if defined(_WIN32)
#define NOMINMAX
#ifndef __cplusplus
#undef inline
#define inline __inline
#endif // __cplusplus
#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/)
// C99:
// Microsoft didn't implement C99 in Visual Studio; but started adding it with
// VS2013. However, VS2013 still didn't have snprintf(). The following is a
// work-around (Note: The _CRT_SECURE_NO_WARNINGS macro must be set in the
// "CMakeLists.txt" file).
// NOTE: This is fixed in Visual Studio 2015.
#define snprintf _snprintf
#endif
#define strdup _strdup
#endif // _WIN32
// Check for noexcept support using clang, with fallback to Windows or GCC version numbers
#ifndef NOEXCEPT
#if defined(__clang__)
#if __has_feature(cxx_noexcept)
#define HAS_NOEXCEPT
#endif
#else
#if defined(__GXX_EXPERIMENTAL_CXX0X__) && __GNUC__ * 10 + __GNUC_MINOR__ >= 46
#define HAS_NOEXCEPT
#else
#if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023026 && defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS
#define HAS_NOEXCEPT
#endif
#endif
#endif
#ifdef HAS_NOEXCEPT
#define NOEXCEPT noexcept
#else
#define NOEXCEPT
#endif
#endif
#endif // VK_SDK_PLATFORM_H
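The NOEXCEPT macro defined above is meant to stand in for the keyword itself, so the same declaration builds both on toolchains where noexcept support was detected and on those where it was not. A trivial, hypothetical usage sketch:

// Hypothetical example; not part of vk_sdk_platform.h.
struct LoaderHandle {
    void release() NOEXCEPT;  // expands to `noexcept` when HAS_NOEXCEPT is defined, to nothing otherwise
};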

View File

@@ -0,0 +1,91 @@
#ifndef VULKAN_H_
#define VULKAN_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
#include "vk_platform.h"
#include "vulkan_core.h"
#ifdef VK_USE_PLATFORM_ANDROID_KHR
#include "vulkan_android.h"
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
#include <zircon/types.h>
#include "vulkan_fuchsia.h"
#endif
#ifdef VK_USE_PLATFORM_IOS_MVK
#include "vulkan_ios.h"
#endif
#ifdef VK_USE_PLATFORM_MACOS_MVK
#include "vulkan_macos.h"
#endif
#ifdef VK_USE_PLATFORM_METAL_EXT
#include "vulkan_metal.h"
#endif
#ifdef VK_USE_PLATFORM_VI_NN
#include "vulkan_vi.h"
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
#include "vulkan_wayland.h"
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
#include <windows.h>
#include "vulkan_win32.h"
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
#include <xcb/xcb.h>
#include "vulkan_xcb.h"
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
#include <X11/Xlib.h>
#include "vulkan_xlib.h"
#endif
#ifdef VK_USE_PLATFORM_DIRECTFB_EXT
#include <directfb.h>
#include "vulkan_directfb.h"
#endif
#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
#include <X11/Xlib.h>
#include <X11/extensions/Xrandr.h>
#include "vulkan_xlib_xrandr.h"
#endif
#ifdef VK_USE_PLATFORM_GGP
#include <ggp_c/vulkan_types.h>
#include "vulkan_ggp.h"
#endif
#ifdef VK_USE_PLATFORM_SCREEN_QNX
#include <screen/screen.h>
#include "vulkan_screen.h"
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
#include "vulkan_beta.h"
#endif
#endif // VULKAN_H_
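Each platform block above is compiled only when the matching VK_USE_PLATFORM_* macro is already defined at the point this header is included. As a hypothetical, standalone illustration (independent of how this repository sets those macros up), a Windows translation unit that wants the Win32 WSI declarations would do:

// Hypothetical standalone usage; the defines must precede the include.
#define VK_USE_PLATFORM_WIN32_KHR 1
#define VK_NO_PROTOTYPES 1      // entry points are resolved at runtime rather than via an import library
#include <vulkan/vulkan.h>      // now also brings in <windows.h> and vulkan_win32.h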

View File

@@ -0,0 +1,125 @@
#ifndef VULKAN_ANDROID_H_
#define VULKAN_ANDROID_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_KHR_android_surface 1
struct ANativeWindow;
#define VK_KHR_ANDROID_SURFACE_SPEC_VERSION 6
#define VK_KHR_ANDROID_SURFACE_EXTENSION_NAME "VK_KHR_android_surface"
typedef VkFlags VkAndroidSurfaceCreateFlagsKHR;
typedef struct VkAndroidSurfaceCreateInfoKHR {
VkStructureType sType;
const void* pNext;
VkAndroidSurfaceCreateFlagsKHR flags;
struct ANativeWindow* window;
} VkAndroidSurfaceCreateInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkCreateAndroidSurfaceKHR)(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR(
VkInstance instance,
const VkAndroidSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
#endif
#define VK_ANDROID_external_memory_android_hardware_buffer 1
struct AHardwareBuffer;
#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_SPEC_VERSION 5
#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME "VK_ANDROID_external_memory_android_hardware_buffer"
typedef struct VkAndroidHardwareBufferUsageANDROID {
VkStructureType sType;
void* pNext;
uint64_t androidHardwareBufferUsage;
} VkAndroidHardwareBufferUsageANDROID;
typedef struct VkAndroidHardwareBufferPropertiesANDROID {
VkStructureType sType;
void* pNext;
VkDeviceSize allocationSize;
uint32_t memoryTypeBits;
} VkAndroidHardwareBufferPropertiesANDROID;
typedef struct VkAndroidHardwareBufferFormatPropertiesANDROID {
VkStructureType sType;
void* pNext;
VkFormat format;
uint64_t externalFormat;
VkFormatFeatureFlags formatFeatures;
VkComponentMapping samplerYcbcrConversionComponents;
VkSamplerYcbcrModelConversion suggestedYcbcrModel;
VkSamplerYcbcrRange suggestedYcbcrRange;
VkChromaLocation suggestedXChromaOffset;
VkChromaLocation suggestedYChromaOffset;
} VkAndroidHardwareBufferFormatPropertiesANDROID;
typedef struct VkImportAndroidHardwareBufferInfoANDROID {
VkStructureType sType;
const void* pNext;
struct AHardwareBuffer* buffer;
} VkImportAndroidHardwareBufferInfoANDROID;
typedef struct VkMemoryGetAndroidHardwareBufferInfoANDROID {
VkStructureType sType;
const void* pNext;
VkDeviceMemory memory;
} VkMemoryGetAndroidHardwareBufferInfoANDROID;
typedef struct VkExternalFormatANDROID {
VkStructureType sType;
void* pNext;
uint64_t externalFormat;
} VkExternalFormatANDROID;
typedef struct VkAndroidHardwareBufferFormatProperties2ANDROID {
VkStructureType sType;
void* pNext;
VkFormat format;
uint64_t externalFormat;
VkFormatFeatureFlags2 formatFeatures;
VkComponentMapping samplerYcbcrConversionComponents;
VkSamplerYcbcrModelConversion suggestedYcbcrModel;
VkSamplerYcbcrRange suggestedYcbcrRange;
VkChromaLocation suggestedXChromaOffset;
VkChromaLocation suggestedYChromaOffset;
} VkAndroidHardwareBufferFormatProperties2ANDROID;
typedef VkResult (VKAPI_PTR *PFN_vkGetAndroidHardwareBufferPropertiesANDROID)(VkDevice device, const struct AHardwareBuffer* buffer, VkAndroidHardwareBufferPropertiesANDROID* pProperties);
typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryAndroidHardwareBufferANDROID)(VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkGetAndroidHardwareBufferPropertiesANDROID(
VkDevice device,
const struct AHardwareBuffer* buffer,
VkAndroidHardwareBufferPropertiesANDROID* pProperties);
VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryAndroidHardwareBufferANDROID(
VkDevice device,
const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
struct AHardwareBuffer** pBuffer);
#endif
#ifdef __cplusplus
}
#endif
#endif
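For context, the usual flow with VK_ANDROID_external_memory_android_hardware_buffer is to query the buffer's properties first and then allocate matching device memory. A minimal sketch, assuming `device` and an AHardwareBuffer* named `ahb` exist, the extension is enabled on the device, and the entry point has been resolved (the `bluevk::` qualification is an assumption about how this codebase reaches it):

// Hypothetical sketch; error handling omitted.
VkAndroidHardwareBufferPropertiesANDROID props{};
props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
VkResult result = bluevk::vkGetAndroidHardwareBufferPropertiesANDROID(device, ahb, &props);
// props.allocationSize and props.memoryTypeBits then drive the vkAllocateMemory call
// that imports the buffer via VkImportAndroidHardwareBufferInfoANDROID.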

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,54 @@
#ifndef VULKAN_DIRECTFB_H_
#define VULKAN_DIRECTFB_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_EXT_directfb_surface 1
#define VK_EXT_DIRECTFB_SURFACE_SPEC_VERSION 1
#define VK_EXT_DIRECTFB_SURFACE_EXTENSION_NAME "VK_EXT_directfb_surface"
typedef VkFlags VkDirectFBSurfaceCreateFlagsEXT;
typedef struct VkDirectFBSurfaceCreateInfoEXT {
VkStructureType sType;
const void* pNext;
VkDirectFBSurfaceCreateFlagsEXT flags;
IDirectFB* dfb;
IDirectFBSurface* surface;
} VkDirectFBSurfaceCreateInfoEXT;
typedef VkResult (VKAPI_PTR *PFN_vkCreateDirectFBSurfaceEXT)(VkInstance instance, const VkDirectFBSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, IDirectFB* dfb);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateDirectFBSurfaceEXT(
VkInstance instance,
const VkDirectFBSurfaceCreateInfoEXT* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceDirectFBPresentationSupportEXT(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
IDirectFB* dfb);
#endif
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,258 @@
#ifndef VULKAN_FUCHSIA_H_
#define VULKAN_FUCHSIA_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_FUCHSIA_imagepipe_surface 1
#define VK_FUCHSIA_IMAGEPIPE_SURFACE_SPEC_VERSION 1
#define VK_FUCHSIA_IMAGEPIPE_SURFACE_EXTENSION_NAME "VK_FUCHSIA_imagepipe_surface"
typedef VkFlags VkImagePipeSurfaceCreateFlagsFUCHSIA;
typedef struct VkImagePipeSurfaceCreateInfoFUCHSIA {
VkStructureType sType;
const void* pNext;
VkImagePipeSurfaceCreateFlagsFUCHSIA flags;
zx_handle_t imagePipeHandle;
} VkImagePipeSurfaceCreateInfoFUCHSIA;
typedef VkResult (VKAPI_PTR *PFN_vkCreateImagePipeSurfaceFUCHSIA)(VkInstance instance, const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateImagePipeSurfaceFUCHSIA(
VkInstance instance,
const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
#endif
#define VK_FUCHSIA_external_memory 1
#define VK_FUCHSIA_EXTERNAL_MEMORY_SPEC_VERSION 1
#define VK_FUCHSIA_EXTERNAL_MEMORY_EXTENSION_NAME "VK_FUCHSIA_external_memory"
typedef struct VkImportMemoryZirconHandleInfoFUCHSIA {
VkStructureType sType;
const void* pNext;
VkExternalMemoryHandleTypeFlagBits handleType;
zx_handle_t handle;
} VkImportMemoryZirconHandleInfoFUCHSIA;
typedef struct VkMemoryZirconHandlePropertiesFUCHSIA {
VkStructureType sType;
void* pNext;
uint32_t memoryTypeBits;
} VkMemoryZirconHandlePropertiesFUCHSIA;
typedef struct VkMemoryGetZirconHandleInfoFUCHSIA {
VkStructureType sType;
const void* pNext;
VkDeviceMemory memory;
VkExternalMemoryHandleTypeFlagBits handleType;
} VkMemoryGetZirconHandleInfoFUCHSIA;
typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryZirconHandleFUCHSIA)(VkDevice device, const VkMemoryGetZirconHandleInfoFUCHSIA* pGetZirconHandleInfo, zx_handle_t* pZirconHandle);
typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, zx_handle_t zirconHandle, VkMemoryZirconHandlePropertiesFUCHSIA* pMemoryZirconHandleProperties);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryZirconHandleFUCHSIA(
VkDevice device,
const VkMemoryGetZirconHandleInfoFUCHSIA* pGetZirconHandleInfo,
zx_handle_t* pZirconHandle);
VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryZirconHandlePropertiesFUCHSIA(
VkDevice device,
VkExternalMemoryHandleTypeFlagBits handleType,
zx_handle_t zirconHandle,
VkMemoryZirconHandlePropertiesFUCHSIA* pMemoryZirconHandleProperties);
#endif
#define VK_FUCHSIA_external_semaphore 1
#define VK_FUCHSIA_EXTERNAL_SEMAPHORE_SPEC_VERSION 1
#define VK_FUCHSIA_EXTERNAL_SEMAPHORE_EXTENSION_NAME "VK_FUCHSIA_external_semaphore"
typedef struct VkImportSemaphoreZirconHandleInfoFUCHSIA {
VkStructureType sType;
const void* pNext;
VkSemaphore semaphore;
VkSemaphoreImportFlags flags;
VkExternalSemaphoreHandleTypeFlagBits handleType;
zx_handle_t zirconHandle;
} VkImportSemaphoreZirconHandleInfoFUCHSIA;
typedef struct VkSemaphoreGetZirconHandleInfoFUCHSIA {
VkStructureType sType;
const void* pNext;
VkSemaphore semaphore;
VkExternalSemaphoreHandleTypeFlagBits handleType;
} VkSemaphoreGetZirconHandleInfoFUCHSIA;
typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreZirconHandleFUCHSIA)(VkDevice device, const VkImportSemaphoreZirconHandleInfoFUCHSIA* pImportSemaphoreZirconHandleInfo);
typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreZirconHandleFUCHSIA)(VkDevice device, const VkSemaphoreGetZirconHandleInfoFUCHSIA* pGetZirconHandleInfo, zx_handle_t* pZirconHandle);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreZirconHandleFUCHSIA(
VkDevice device,
const VkImportSemaphoreZirconHandleInfoFUCHSIA* pImportSemaphoreZirconHandleInfo);
VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreZirconHandleFUCHSIA(
VkDevice device,
const VkSemaphoreGetZirconHandleInfoFUCHSIA* pGetZirconHandleInfo,
zx_handle_t* pZirconHandle);
#endif
#define VK_FUCHSIA_buffer_collection 1
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBufferCollectionFUCHSIA)
#define VK_FUCHSIA_BUFFER_COLLECTION_SPEC_VERSION 2
#define VK_FUCHSIA_BUFFER_COLLECTION_EXTENSION_NAME "VK_FUCHSIA_buffer_collection"
typedef VkFlags VkImageFormatConstraintsFlagsFUCHSIA;
typedef enum VkImageConstraintsInfoFlagBitsFUCHSIA {
VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA = 0x00000001,
VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA = 0x00000002,
VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA = 0x00000004,
VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA = 0x00000008,
VK_IMAGE_CONSTRAINTS_INFO_PROTECTED_OPTIONAL_FUCHSIA = 0x00000010,
VK_IMAGE_CONSTRAINTS_INFO_FLAG_BITS_MAX_ENUM_FUCHSIA = 0x7FFFFFFF
} VkImageConstraintsInfoFlagBitsFUCHSIA;
typedef VkFlags VkImageConstraintsInfoFlagsFUCHSIA;
typedef struct VkBufferCollectionCreateInfoFUCHSIA {
VkStructureType sType;
const void* pNext;
zx_handle_t collectionToken;
} VkBufferCollectionCreateInfoFUCHSIA;
typedef struct VkImportMemoryBufferCollectionFUCHSIA {
VkStructureType sType;
const void* pNext;
VkBufferCollectionFUCHSIA collection;
uint32_t index;
} VkImportMemoryBufferCollectionFUCHSIA;
typedef struct VkBufferCollectionImageCreateInfoFUCHSIA {
VkStructureType sType;
const void* pNext;
VkBufferCollectionFUCHSIA collection;
uint32_t index;
} VkBufferCollectionImageCreateInfoFUCHSIA;
typedef struct VkBufferCollectionConstraintsInfoFUCHSIA {
VkStructureType sType;
const void* pNext;
uint32_t minBufferCount;
uint32_t maxBufferCount;
uint32_t minBufferCountForCamping;
uint32_t minBufferCountForDedicatedSlack;
uint32_t minBufferCountForSharedSlack;
} VkBufferCollectionConstraintsInfoFUCHSIA;
typedef struct VkBufferConstraintsInfoFUCHSIA {
VkStructureType sType;
const void* pNext;
VkBufferCreateInfo createInfo;
VkFormatFeatureFlags requiredFormatFeatures;
VkBufferCollectionConstraintsInfoFUCHSIA bufferCollectionConstraints;
} VkBufferConstraintsInfoFUCHSIA;
typedef struct VkBufferCollectionBufferCreateInfoFUCHSIA {
VkStructureType sType;
const void* pNext;
VkBufferCollectionFUCHSIA collection;
uint32_t index;
} VkBufferCollectionBufferCreateInfoFUCHSIA;
typedef struct VkSysmemColorSpaceFUCHSIA {
VkStructureType sType;
const void* pNext;
uint32_t colorSpace;
} VkSysmemColorSpaceFUCHSIA;
typedef struct VkBufferCollectionPropertiesFUCHSIA {
VkStructureType sType;
void* pNext;
uint32_t memoryTypeBits;
uint32_t bufferCount;
uint32_t createInfoIndex;
uint64_t sysmemPixelFormat;
VkFormatFeatureFlags formatFeatures;
VkSysmemColorSpaceFUCHSIA sysmemColorSpaceIndex;
VkComponentMapping samplerYcbcrConversionComponents;
VkSamplerYcbcrModelConversion suggestedYcbcrModel;
VkSamplerYcbcrRange suggestedYcbcrRange;
VkChromaLocation suggestedXChromaOffset;
VkChromaLocation suggestedYChromaOffset;
} VkBufferCollectionPropertiesFUCHSIA;
typedef struct VkImageFormatConstraintsInfoFUCHSIA {
VkStructureType sType;
const void* pNext;
VkImageCreateInfo imageCreateInfo;
VkFormatFeatureFlags requiredFormatFeatures;
VkImageFormatConstraintsFlagsFUCHSIA flags;
uint64_t sysmemPixelFormat;
uint32_t colorSpaceCount;
const VkSysmemColorSpaceFUCHSIA* pColorSpaces;
} VkImageFormatConstraintsInfoFUCHSIA;
typedef struct VkImageConstraintsInfoFUCHSIA {
VkStructureType sType;
const void* pNext;
uint32_t formatConstraintsCount;
const VkImageFormatConstraintsInfoFUCHSIA* pFormatConstraints;
VkBufferCollectionConstraintsInfoFUCHSIA bufferCollectionConstraints;
VkImageConstraintsInfoFlagsFUCHSIA flags;
} VkImageConstraintsInfoFUCHSIA;
typedef VkResult (VKAPI_PTR *PFN_vkCreateBufferCollectionFUCHSIA)(VkDevice device, const VkBufferCollectionCreateInfoFUCHSIA* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferCollectionFUCHSIA* pCollection);
typedef VkResult (VKAPI_PTR *PFN_vkSetBufferCollectionImageConstraintsFUCHSIA)(VkDevice device, VkBufferCollectionFUCHSIA collection, const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo);
typedef VkResult (VKAPI_PTR *PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA)(VkDevice device, VkBufferCollectionFUCHSIA collection, const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo);
typedef void (VKAPI_PTR *PFN_vkDestroyBufferCollectionFUCHSIA)(VkDevice device, VkBufferCollectionFUCHSIA collection, const VkAllocationCallbacks* pAllocator);
typedef VkResult (VKAPI_PTR *PFN_vkGetBufferCollectionPropertiesFUCHSIA)(VkDevice device, VkBufferCollectionFUCHSIA collection, VkBufferCollectionPropertiesFUCHSIA* pProperties);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferCollectionFUCHSIA(
VkDevice device,
const VkBufferCollectionCreateInfoFUCHSIA* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkBufferCollectionFUCHSIA* pCollection);
VKAPI_ATTR VkResult VKAPI_CALL vkSetBufferCollectionImageConstraintsFUCHSIA(
VkDevice device,
VkBufferCollectionFUCHSIA collection,
const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo);
VKAPI_ATTR VkResult VKAPI_CALL vkSetBufferCollectionBufferConstraintsFUCHSIA(
VkDevice device,
VkBufferCollectionFUCHSIA collection,
const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo);
VKAPI_ATTR void VKAPI_CALL vkDestroyBufferCollectionFUCHSIA(
VkDevice device,
VkBufferCollectionFUCHSIA collection,
const VkAllocationCallbacks* pAllocator);
VKAPI_ATTR VkResult VKAPI_CALL vkGetBufferCollectionPropertiesFUCHSIA(
VkDevice device,
VkBufferCollectionFUCHSIA collection,
VkBufferCollectionPropertiesFUCHSIA* pProperties);
#endif
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,58 @@
#ifndef VULKAN_GGP_H_
#define VULKAN_GGP_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_GGP_stream_descriptor_surface 1
#define VK_GGP_STREAM_DESCRIPTOR_SURFACE_SPEC_VERSION 1
#define VK_GGP_STREAM_DESCRIPTOR_SURFACE_EXTENSION_NAME "VK_GGP_stream_descriptor_surface"
typedef VkFlags VkStreamDescriptorSurfaceCreateFlagsGGP;
typedef struct VkStreamDescriptorSurfaceCreateInfoGGP {
VkStructureType sType;
const void* pNext;
VkStreamDescriptorSurfaceCreateFlagsGGP flags;
GgpStreamDescriptor streamDescriptor;
} VkStreamDescriptorSurfaceCreateInfoGGP;
typedef VkResult (VKAPI_PTR *PFN_vkCreateStreamDescriptorSurfaceGGP)(VkInstance instance, const VkStreamDescriptorSurfaceCreateInfoGGP* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateStreamDescriptorSurfaceGGP(
VkInstance instance,
const VkStreamDescriptorSurfaceCreateInfoGGP* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
#endif
#define VK_GGP_frame_token 1
#define VK_GGP_FRAME_TOKEN_SPEC_VERSION 1
#define VK_GGP_FRAME_TOKEN_EXTENSION_NAME "VK_GGP_frame_token"
typedef struct VkPresentFrameTokenGGP {
VkStructureType sType;
const void* pNext;
GgpFrameToken frameToken;
} VkPresentFrameTokenGGP;
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,47 @@
#ifndef VULKAN_IOS_H_
#define VULKAN_IOS_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_MVK_ios_surface 1
#define VK_MVK_IOS_SURFACE_SPEC_VERSION 3
#define VK_MVK_IOS_SURFACE_EXTENSION_NAME "VK_MVK_ios_surface"
typedef VkFlags VkIOSSurfaceCreateFlagsMVK;
typedef struct VkIOSSurfaceCreateInfoMVK {
VkStructureType sType;
const void* pNext;
VkIOSSurfaceCreateFlagsMVK flags;
const void* pView;
} VkIOSSurfaceCreateInfoMVK;
typedef VkResult (VKAPI_PTR *PFN_vkCreateIOSSurfaceMVK)(VkInstance instance, const VkIOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateIOSSurfaceMVK(
VkInstance instance,
const VkIOSSurfaceCreateInfoMVK* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
#endif
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,47 @@
#ifndef VULKAN_MACOS_H_
#define VULKAN_MACOS_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_MVK_macos_surface 1
#define VK_MVK_MACOS_SURFACE_SPEC_VERSION 3
#define VK_MVK_MACOS_SURFACE_EXTENSION_NAME "VK_MVK_macos_surface"
typedef VkFlags VkMacOSSurfaceCreateFlagsMVK;
typedef struct VkMacOSSurfaceCreateInfoMVK {
VkStructureType sType;
const void* pNext;
VkMacOSSurfaceCreateFlagsMVK flags;
const void* pView;
} VkMacOSSurfaceCreateInfoMVK;
typedef VkResult (VKAPI_PTR *PFN_vkCreateMacOSSurfaceMVK)(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateMacOSSurfaceMVK(
VkInstance instance,
const VkMacOSSurfaceCreateInfoMVK* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
#endif
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,193 @@
#ifndef VULKAN_METAL_H_
#define VULKAN_METAL_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_EXT_metal_surface 1
#ifdef __OBJC__
@class CAMetalLayer;
#else
typedef void CAMetalLayer;
#endif
#define VK_EXT_METAL_SURFACE_SPEC_VERSION 1
#define VK_EXT_METAL_SURFACE_EXTENSION_NAME "VK_EXT_metal_surface"
typedef VkFlags VkMetalSurfaceCreateFlagsEXT;
typedef struct VkMetalSurfaceCreateInfoEXT {
VkStructureType sType;
const void* pNext;
VkMetalSurfaceCreateFlagsEXT flags;
const CAMetalLayer* pLayer;
} VkMetalSurfaceCreateInfoEXT;
typedef VkResult (VKAPI_PTR *PFN_vkCreateMetalSurfaceEXT)(VkInstance instance, const VkMetalSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateMetalSurfaceEXT(
VkInstance instance,
const VkMetalSurfaceCreateInfoEXT* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
#endif
#define VK_EXT_metal_objects 1
#ifdef __OBJC__
@protocol MTLDevice;
typedef id<MTLDevice> MTLDevice_id;
#else
typedef void* MTLDevice_id;
#endif
#ifdef __OBJC__
@protocol MTLCommandQueue;
typedef id<MTLCommandQueue> MTLCommandQueue_id;
#else
typedef void* MTLCommandQueue_id;
#endif
#ifdef __OBJC__
@protocol MTLBuffer;
typedef id<MTLBuffer> MTLBuffer_id;
#else
typedef void* MTLBuffer_id;
#endif
#ifdef __OBJC__
@protocol MTLTexture;
typedef id<MTLTexture> MTLTexture_id;
#else
typedef void* MTLTexture_id;
#endif
typedef struct __IOSurface* IOSurfaceRef;
#ifdef __OBJC__
@protocol MTLSharedEvent;
typedef id<MTLSharedEvent> MTLSharedEvent_id;
#else
typedef void* MTLSharedEvent_id;
#endif
#define VK_EXT_METAL_OBJECTS_SPEC_VERSION 1
#define VK_EXT_METAL_OBJECTS_EXTENSION_NAME "VK_EXT_metal_objects"
typedef enum VkExportMetalObjectTypeFlagBitsEXT {
VK_EXPORT_METAL_OBJECT_TYPE_METAL_DEVICE_BIT_EXT = 0x00000001,
VK_EXPORT_METAL_OBJECT_TYPE_METAL_COMMAND_QUEUE_BIT_EXT = 0x00000002,
VK_EXPORT_METAL_OBJECT_TYPE_METAL_BUFFER_BIT_EXT = 0x00000004,
VK_EXPORT_METAL_OBJECT_TYPE_METAL_TEXTURE_BIT_EXT = 0x00000008,
VK_EXPORT_METAL_OBJECT_TYPE_METAL_IOSURFACE_BIT_EXT = 0x00000010,
VK_EXPORT_METAL_OBJECT_TYPE_METAL_SHARED_EVENT_BIT_EXT = 0x00000020,
VK_EXPORT_METAL_OBJECT_TYPE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
} VkExportMetalObjectTypeFlagBitsEXT;
typedef VkFlags VkExportMetalObjectTypeFlagsEXT;
typedef struct VkExportMetalObjectCreateInfoEXT {
VkStructureType sType;
const void* pNext;
VkExportMetalObjectTypeFlagBitsEXT exportObjectType;
} VkExportMetalObjectCreateInfoEXT;
typedef struct VkExportMetalObjectsInfoEXT {
VkStructureType sType;
const void* pNext;
} VkExportMetalObjectsInfoEXT;
typedef struct VkExportMetalDeviceInfoEXT {
VkStructureType sType;
const void* pNext;
MTLDevice_id mtlDevice;
} VkExportMetalDeviceInfoEXT;
typedef struct VkExportMetalCommandQueueInfoEXT {
VkStructureType sType;
const void* pNext;
VkQueue queue;
MTLCommandQueue_id mtlCommandQueue;
} VkExportMetalCommandQueueInfoEXT;
typedef struct VkExportMetalBufferInfoEXT {
VkStructureType sType;
const void* pNext;
VkDeviceMemory memory;
MTLBuffer_id mtlBuffer;
} VkExportMetalBufferInfoEXT;
typedef struct VkImportMetalBufferInfoEXT {
VkStructureType sType;
const void* pNext;
MTLBuffer_id mtlBuffer;
} VkImportMetalBufferInfoEXT;
typedef struct VkExportMetalTextureInfoEXT {
VkStructureType sType;
const void* pNext;
VkImage image;
VkImageView imageView;
VkBufferView bufferView;
VkImageAspectFlagBits plane;
MTLTexture_id mtlTexture;
} VkExportMetalTextureInfoEXT;
typedef struct VkImportMetalTextureInfoEXT {
VkStructureType sType;
const void* pNext;
VkImageAspectFlagBits plane;
MTLTexture_id mtlTexture;
} VkImportMetalTextureInfoEXT;
typedef struct VkExportMetalIOSurfaceInfoEXT {
VkStructureType sType;
const void* pNext;
VkImage image;
IOSurfaceRef ioSurface;
} VkExportMetalIOSurfaceInfoEXT;
typedef struct VkImportMetalIOSurfaceInfoEXT {
VkStructureType sType;
const void* pNext;
IOSurfaceRef ioSurface;
} VkImportMetalIOSurfaceInfoEXT;
typedef struct VkExportMetalSharedEventInfoEXT {
VkStructureType sType;
const void* pNext;
VkSemaphore semaphore;
VkEvent event;
MTLSharedEvent_id mtlSharedEvent;
} VkExportMetalSharedEventInfoEXT;
typedef struct VkImportMetalSharedEventInfoEXT {
VkStructureType sType;
const void* pNext;
MTLSharedEvent_id mtlSharedEvent;
} VkImportMetalSharedEventInfoEXT;
typedef void (VKAPI_PTR *PFN_vkExportMetalObjectsEXT)(VkDevice device, VkExportMetalObjectsInfoEXT* pMetalObjectsInfo);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR void VKAPI_CALL vkExportMetalObjectsEXT(
VkDevice device,
VkExportMetalObjectsInfoEXT* pMetalObjectsInfo);
#endif
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,54 @@
#ifndef VULKAN_SCREEN_H_
#define VULKAN_SCREEN_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_QNX_screen_surface 1
#define VK_QNX_SCREEN_SURFACE_SPEC_VERSION 1
#define VK_QNX_SCREEN_SURFACE_EXTENSION_NAME "VK_QNX_screen_surface"
typedef VkFlags VkScreenSurfaceCreateFlagsQNX;
typedef struct VkScreenSurfaceCreateInfoQNX {
VkStructureType sType;
const void* pNext;
VkScreenSurfaceCreateFlagsQNX flags;
struct _screen_context* context;
struct _screen_window* window;
} VkScreenSurfaceCreateInfoQNX;
typedef VkResult (VKAPI_PTR *PFN_vkCreateScreenSurfaceQNX)(VkInstance instance, const VkScreenSurfaceCreateInfoQNX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceScreenPresentationSupportQNX)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, struct _screen_window* window);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateScreenSurfaceQNX(
VkInstance instance,
const VkScreenSurfaceCreateInfoQNX* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceScreenPresentationSupportQNX(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
struct _screen_window* window);
#endif
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,47 @@
#ifndef VULKAN_VI_H_
#define VULKAN_VI_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_NN_vi_surface 1
#define VK_NN_VI_SURFACE_SPEC_VERSION 1
#define VK_NN_VI_SURFACE_EXTENSION_NAME "VK_NN_vi_surface"
typedef VkFlags VkViSurfaceCreateFlagsNN;
typedef struct VkViSurfaceCreateInfoNN {
VkStructureType sType;
const void* pNext;
VkViSurfaceCreateFlagsNN flags;
void* window;
} VkViSurfaceCreateInfoNN;
typedef VkResult (VKAPI_PTR *PFN_vkCreateViSurfaceNN)(VkInstance instance, const VkViSurfaceCreateInfoNN* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateViSurfaceNN(
VkInstance instance,
const VkViSurfaceCreateInfoNN* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
#endif
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,54 @@
#ifndef VULKAN_WAYLAND_H_
#define VULKAN_WAYLAND_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_KHR_wayland_surface 1
#define VK_KHR_WAYLAND_SURFACE_SPEC_VERSION 6
#define VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME "VK_KHR_wayland_surface"
typedef VkFlags VkWaylandSurfaceCreateFlagsKHR;
typedef struct VkWaylandSurfaceCreateInfoKHR {
VkStructureType sType;
const void* pNext;
VkWaylandSurfaceCreateFlagsKHR flags;
struct wl_display* display;
struct wl_surface* surface;
} VkWaylandSurfaceCreateInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkCreateWaylandSurfaceKHR)(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, struct wl_display* display);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateWaylandSurfaceKHR(
VkInstance instance,
const VkWaylandSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWaylandPresentationSupportKHR(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
struct wl_display* display);
#endif
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,315 @@
#ifndef VULKAN_WIN32_H_
#define VULKAN_WIN32_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_KHR_win32_surface 1
#define VK_KHR_WIN32_SURFACE_SPEC_VERSION 6
#define VK_KHR_WIN32_SURFACE_EXTENSION_NAME "VK_KHR_win32_surface"
typedef VkFlags VkWin32SurfaceCreateFlagsKHR;
typedef struct VkWin32SurfaceCreateInfoKHR {
VkStructureType sType;
const void* pNext;
VkWin32SurfaceCreateFlagsKHR flags;
HINSTANCE hinstance;
HWND hwnd;
} VkWin32SurfaceCreateInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkCreateWin32SurfaceKHR)(VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR(
VkInstance instance,
const VkWin32SurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWin32PresentationSupportKHR(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex);
#endif
#define VK_KHR_external_memory_win32 1
#define VK_KHR_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1
#define VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_KHR_external_memory_win32"
typedef struct VkImportMemoryWin32HandleInfoKHR {
VkStructureType sType;
const void* pNext;
VkExternalMemoryHandleTypeFlagBits handleType;
HANDLE handle;
LPCWSTR name;
} VkImportMemoryWin32HandleInfoKHR;
typedef struct VkExportMemoryWin32HandleInfoKHR {
VkStructureType sType;
const void* pNext;
const SECURITY_ATTRIBUTES* pAttributes;
DWORD dwAccess;
LPCWSTR name;
} VkExportMemoryWin32HandleInfoKHR;
typedef struct VkMemoryWin32HandlePropertiesKHR {
VkStructureType sType;
void* pNext;
uint32_t memoryTypeBits;
} VkMemoryWin32HandlePropertiesKHR;
typedef struct VkMemoryGetWin32HandleInfoKHR {
VkStructureType sType;
const void* pNext;
VkDeviceMemory memory;
VkExternalMemoryHandleTypeFlagBits handleType;
} VkMemoryGetWin32HandleInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleKHR)(VkDevice device, const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle);
typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandlePropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleKHR(
VkDevice device,
const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo,
HANDLE* pHandle);
VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandlePropertiesKHR(
VkDevice device,
VkExternalMemoryHandleTypeFlagBits handleType,
HANDLE handle,
VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties);
#endif
#define VK_KHR_win32_keyed_mutex 1
#define VK_KHR_WIN32_KEYED_MUTEX_SPEC_VERSION 1
#define VK_KHR_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_KHR_win32_keyed_mutex"
typedef struct VkWin32KeyedMutexAcquireReleaseInfoKHR {
VkStructureType sType;
const void* pNext;
uint32_t acquireCount;
const VkDeviceMemory* pAcquireSyncs;
const uint64_t* pAcquireKeys;
const uint32_t* pAcquireTimeouts;
uint32_t releaseCount;
const VkDeviceMemory* pReleaseSyncs;
const uint64_t* pReleaseKeys;
} VkWin32KeyedMutexAcquireReleaseInfoKHR;
#define VK_KHR_external_semaphore_win32 1
#define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_SPEC_VERSION 1
#define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME "VK_KHR_external_semaphore_win32"
typedef struct VkImportSemaphoreWin32HandleInfoKHR {
VkStructureType sType;
const void* pNext;
VkSemaphore semaphore;
VkSemaphoreImportFlags flags;
VkExternalSemaphoreHandleTypeFlagBits handleType;
HANDLE handle;
LPCWSTR name;
} VkImportSemaphoreWin32HandleInfoKHR;
typedef struct VkExportSemaphoreWin32HandleInfoKHR {
VkStructureType sType;
const void* pNext;
const SECURITY_ATTRIBUTES* pAttributes;
DWORD dwAccess;
LPCWSTR name;
} VkExportSemaphoreWin32HandleInfoKHR;
typedef struct VkD3D12FenceSubmitInfoKHR {
VkStructureType sType;
const void* pNext;
uint32_t waitSemaphoreValuesCount;
const uint64_t* pWaitSemaphoreValues;
uint32_t signalSemaphoreValuesCount;
const uint64_t* pSignalSemaphoreValues;
} VkD3D12FenceSubmitInfoKHR;
typedef struct VkSemaphoreGetWin32HandleInfoKHR {
VkStructureType sType;
const void* pNext;
VkSemaphore semaphore;
VkExternalSemaphoreHandleTypeFlagBits handleType;
} VkSemaphoreGetWin32HandleInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreWin32HandleKHR)(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo);
typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreWin32HandleKHR)(VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreWin32HandleKHR(
VkDevice device,
const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo);
VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreWin32HandleKHR(
VkDevice device,
const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo,
HANDLE* pHandle);
#endif
#define VK_KHR_external_fence_win32 1
#define VK_KHR_EXTERNAL_FENCE_WIN32_SPEC_VERSION 1
#define VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME "VK_KHR_external_fence_win32"
typedef struct VkImportFenceWin32HandleInfoKHR {
VkStructureType sType;
const void* pNext;
VkFence fence;
VkFenceImportFlags flags;
VkExternalFenceHandleTypeFlagBits handleType;
HANDLE handle;
LPCWSTR name;
} VkImportFenceWin32HandleInfoKHR;
typedef struct VkExportFenceWin32HandleInfoKHR {
VkStructureType sType;
const void* pNext;
const SECURITY_ATTRIBUTES* pAttributes;
DWORD dwAccess;
LPCWSTR name;
} VkExportFenceWin32HandleInfoKHR;
typedef struct VkFenceGetWin32HandleInfoKHR {
VkStructureType sType;
const void* pNext;
VkFence fence;
VkExternalFenceHandleTypeFlagBits handleType;
} VkFenceGetWin32HandleInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkImportFenceWin32HandleKHR)(VkDevice device, const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo);
typedef VkResult (VKAPI_PTR *PFN_vkGetFenceWin32HandleKHR)(VkDevice device, const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceWin32HandleKHR(
VkDevice device,
const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo);
VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceWin32HandleKHR(
VkDevice device,
const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo,
HANDLE* pHandle);
#endif
#define VK_NV_external_memory_win32 1
#define VK_NV_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1
#define VK_NV_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_NV_external_memory_win32"
typedef struct VkImportMemoryWin32HandleInfoNV {
VkStructureType sType;
const void* pNext;
VkExternalMemoryHandleTypeFlagsNV handleType;
HANDLE handle;
} VkImportMemoryWin32HandleInfoNV;
typedef struct VkExportMemoryWin32HandleInfoNV {
VkStructureType sType;
const void* pNext;
const SECURITY_ATTRIBUTES* pAttributes;
DWORD dwAccess;
} VkExportMemoryWin32HandleInfoNV;
typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleNV)(VkDevice device, VkDeviceMemory memory, VkExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleNV(
VkDevice device,
VkDeviceMemory memory,
VkExternalMemoryHandleTypeFlagsNV handleType,
HANDLE* pHandle);
#endif
#define VK_NV_win32_keyed_mutex 1
#define VK_NV_WIN32_KEYED_MUTEX_SPEC_VERSION 2
#define VK_NV_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_NV_win32_keyed_mutex"
typedef struct VkWin32KeyedMutexAcquireReleaseInfoNV {
VkStructureType sType;
const void* pNext;
uint32_t acquireCount;
const VkDeviceMemory* pAcquireSyncs;
const uint64_t* pAcquireKeys;
const uint32_t* pAcquireTimeoutMilliseconds;
uint32_t releaseCount;
const VkDeviceMemory* pReleaseSyncs;
const uint64_t* pReleaseKeys;
} VkWin32KeyedMutexAcquireReleaseInfoNV;
#define VK_EXT_full_screen_exclusive 1
#define VK_EXT_FULL_SCREEN_EXCLUSIVE_SPEC_VERSION 4
#define VK_EXT_FULL_SCREEN_EXCLUSIVE_EXTENSION_NAME "VK_EXT_full_screen_exclusive"
typedef enum VkFullScreenExclusiveEXT {
VK_FULL_SCREEN_EXCLUSIVE_DEFAULT_EXT = 0,
VK_FULL_SCREEN_EXCLUSIVE_ALLOWED_EXT = 1,
VK_FULL_SCREEN_EXCLUSIVE_DISALLOWED_EXT = 2,
VK_FULL_SCREEN_EXCLUSIVE_APPLICATION_CONTROLLED_EXT = 3,
VK_FULL_SCREEN_EXCLUSIVE_MAX_ENUM_EXT = 0x7FFFFFFF
} VkFullScreenExclusiveEXT;
typedef struct VkSurfaceFullScreenExclusiveInfoEXT {
VkStructureType sType;
void* pNext;
VkFullScreenExclusiveEXT fullScreenExclusive;
} VkSurfaceFullScreenExclusiveInfoEXT;
typedef struct VkSurfaceCapabilitiesFullScreenExclusiveEXT {
VkStructureType sType;
void* pNext;
VkBool32 fullScreenExclusiveSupported;
} VkSurfaceCapabilitiesFullScreenExclusiveEXT;
typedef struct VkSurfaceFullScreenExclusiveWin32InfoEXT {
VkStructureType sType;
const void* pNext;
HMONITOR hmonitor;
} VkSurfaceFullScreenExclusiveWin32InfoEXT;
typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes);
typedef VkResult (VKAPI_PTR *PFN_vkAcquireFullScreenExclusiveModeEXT)(VkDevice device, VkSwapchainKHR swapchain);
typedef VkResult (VKAPI_PTR *PFN_vkReleaseFullScreenExclusiveModeEXT)(VkDevice device, VkSwapchainKHR swapchain);
typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupSurfacePresentModes2EXT)(VkDevice device, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkDeviceGroupPresentModeFlagsKHR* pModes);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModes2EXT(
VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
uint32_t* pPresentModeCount,
VkPresentModeKHR* pPresentModes);
VKAPI_ATTR VkResult VKAPI_CALL vkAcquireFullScreenExclusiveModeEXT(
VkDevice device,
VkSwapchainKHR swapchain);
VKAPI_ATTR VkResult VKAPI_CALL vkReleaseFullScreenExclusiveModeEXT(
VkDevice device,
VkSwapchainKHR swapchain);
VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModes2EXT(
VkDevice device,
const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
VkDeviceGroupPresentModeFlagsKHR* pModes);
#endif
#ifdef __cplusplus
}
#endif
#endif
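Since this commit is about the Windows headers, it is worth noting how VK_KHR_win32_surface is typically used: fill VkWin32SurfaceCreateInfoKHR with the HINSTANCE/HWND pair and call vkCreateWin32SurfaceKHR. A minimal sketch, assuming `instance` was created with the surface extensions enabled, `hwnd` is a valid window handle, and the entry point has been resolved (the `bluevk::` qualification is an assumption):

// Hypothetical sketch; error handling omitted.
VkWin32SurfaceCreateInfoKHR surfaceInfo{};
surfaceInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
surfaceInfo.hinstance = GetModuleHandle(nullptr);
surfaceInfo.hwnd = hwnd;
VkSurfaceKHR surface = VK_NULL_HANDLE;
VkResult result = bluevk::vkCreateWin32SurfaceKHR(instance, &surfaceInfo, nullptr, &surface);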

View File

@@ -0,0 +1,55 @@
#ifndef VULKAN_XCB_H_
#define VULKAN_XCB_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_KHR_xcb_surface 1
#define VK_KHR_XCB_SURFACE_SPEC_VERSION 6
#define VK_KHR_XCB_SURFACE_EXTENSION_NAME "VK_KHR_xcb_surface"
typedef VkFlags VkXcbSurfaceCreateFlagsKHR;
typedef struct VkXcbSurfaceCreateInfoKHR {
VkStructureType sType;
const void* pNext;
VkXcbSurfaceCreateFlagsKHR flags;
xcb_connection_t* connection;
xcb_window_t window;
} VkXcbSurfaceCreateInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkCreateXcbSurfaceKHR)(VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR(
VkInstance instance,
const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXcbPresentationSupportKHR(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
xcb_connection_t* connection,
xcb_visualid_t visual_id);
#endif
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,55 @@
#ifndef VULKAN_XLIB_H_
#define VULKAN_XLIB_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_KHR_xlib_surface 1
#define VK_KHR_XLIB_SURFACE_SPEC_VERSION 6
#define VK_KHR_XLIB_SURFACE_EXTENSION_NAME "VK_KHR_xlib_surface"
typedef VkFlags VkXlibSurfaceCreateFlagsKHR;
typedef struct VkXlibSurfaceCreateInfoKHR {
VkStructureType sType;
const void* pNext;
VkXlibSurfaceCreateFlagsKHR flags;
Display* dpy;
Window window;
} VkXlibSurfaceCreateInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkCreateXlibSurfaceKHR)(VkInstance instance, const VkXlibSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display* dpy, VisualID visualID);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateXlibSurfaceKHR(
VkInstance instance,
const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXlibPresentationSupportKHR(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
Display* dpy,
VisualID visualID);
#endif
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,45 @@
#ifndef VULKAN_XLIB_XRANDR_H_
#define VULKAN_XLIB_XRANDR_H_ 1
/*
** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_EXT_acquire_xlib_display 1
#define VK_EXT_ACQUIRE_XLIB_DISPLAY_SPEC_VERSION 1
#define VK_EXT_ACQUIRE_XLIB_DISPLAY_EXTENSION_NAME "VK_EXT_acquire_xlib_display"
typedef VkResult (VKAPI_PTR *PFN_vkAcquireXlibDisplayEXT)(VkPhysicalDevice physicalDevice, Display* dpy, VkDisplayKHR display);
typedef VkResult (VKAPI_PTR *PFN_vkGetRandROutputDisplayEXT)(VkPhysicalDevice physicalDevice, Display* dpy, RROutput rrOutput, VkDisplayKHR* pDisplay);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkAcquireXlibDisplayEXT(
VkPhysicalDevice physicalDevice,
Display* dpy,
VkDisplayKHR display);
VKAPI_ATTR VkResult VKAPI_CALL vkGetRandROutputDisplayEXT(
VkPhysicalDevice physicalDevice,
Display* dpy,
RROutput rrOutput,
VkDisplayKHR* pDisplay);
#endif
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -31,36 +31,40 @@ namespace thermion::windows::vulkan {
class TVulkanPlatform : public filament::backend::VulkanPlatform {
public:
TVulkanPlatform() {
_customization.gpu.index = 0;
}
virtual VulkanPlatform::Customization getCustomization() const noexcept override {
return _customization;
}
SwapChainPtr createSwapChain(void* nativeWindow, uint64_t flags,
VkExtent2D extent = {0, 0}) override {
std::lock_guard lock(mutex);
_current = filament::backend::VulkanPlatform::createSwapChain(nativeWindow, flags, extent);
current = filament::backend::VulkanPlatform::createSwapChain(nativeWindow, flags, extent);
std::cout << "Created swap chain with flags " << flags << std::endl;
return _current;
return current;
}
void destroy(SwapChainPtr handle) override {
std::lock_guard lock(mutex);
_current = nullptr;
current = nullptr;
std::cout << "Destroyed swap chain" << std::endl;
}
// VkResult acquire(SwapChainPtr handle, VkSemaphore clientSignal, uint32_t* index) override {
// auto result = filament::backend::VulkanPlatform::acquire(handle, clientSignal, index);
// _currentColorIndex = *index;
// return result;
// }
VkResult present(SwapChainPtr handle, uint32_t index, VkSemaphore finishedDrawing) override {
auto result = filament::backend::VulkanPlatform::present(handle, index, finishedDrawing);
_currentColorIndex = index;
currentColorIndex = index;
return result;
}
SwapChainPtr current;
std::mutex mutex;
uint32_t currentColorIndex = 0;
SwapChainPtr _current;
std::mutex mutex;
uint32_t _currentColorIndex = 0;
private:
filament::backend::VulkanPlatform::Customization _customization;
};
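For context, a customized VulkanPlatform like this one is handed to Filament when the engine is created, so that swap chain creation and presentation route through the overrides above. A minimal usage sketch, not taken from this commit (the exact engine-creation call used elsewhere in the project may differ):

// Hypothetical usage sketch.
auto* platform = new thermion::windows::vulkan::TVulkanPlatform();
auto* engine = filament::Engine::create(
        filament::Engine::Backend::VULKAN,  // force the Vulkan backend
        platform,                           // this platform owns swap chain creation/present
        nullptr);                           // no shared context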

View File

@@ -121,9 +121,32 @@ namespace thermion::windows::d3d
}
_d3dTexture2D->AddRef();
Flush();
std::cout << "Created external D3D texture " << width << "x" << height << std::endl;
return std::make_unique<D3DTexture>(_d3dTexture2D, _d3dTexture2DHandle, width, height);
auto texture = std::make_unique<D3DTexture>(_d3dTexture2D, _d3dTexture2DHandle, width, height);
ID3D11RenderTargetView* rtv = nullptr;
D3D11_RENDER_TARGET_VIEW_DESC rtvDesc = {};
rtvDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
rtvDesc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
rtvDesc.Texture2D.MipSlice = 0;
hr = _D3D11Device->CreateRenderTargetView(_d3dTexture2D.Get(), &rtvDesc, &rtv);
if (FAILED(hr)) {
std::cout << "Failed to create render target view" << std::endl;
// return;
}
std::cout << "Created render target view" << std::endl;
// Clear the texture to magenta (the value below is {1, 0, 1, 1} RGBA, i.e. magenta, despite the variable name)
float blueColor[4] = { 1.0f, 0.0f, 1.0f, 1.0f }; // RGBA
_D3D11DeviceContext->ClearRenderTargetView(rtv, blueColor);
std::cout << "FLUSH RENDER TARGET" << std::endl;
Flush();
return texture;
}
void D3DContext::Flush()
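One detail the hunk above leaves open is the lifetime of the transient render-target view: it is created only to clear the texture and is never released in the code shown. If the view really is only needed for that initial clear, a hypothetical cleanup (standard COM reference counting) would look like:

// Hypothetical cleanup for the transient view created above.
if (rtv) {
    rtv->Release();
    rtv = nullptr;
}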

View File

@@ -138,12 +138,12 @@ void ThermionVulkanContext::Flush() {
// Function to perform the blit operation
void ThermionVulkanContext::BlitFromSwapchain() {
std::lock_guard lock(_platform->mutex);
if(!_platform->_current || _d3dTextures.size() == 0) {
if(!_platform->current || _d3dTextures.size() == 0) {
return;
}
auto&& vkTexture = _vulkanTextures.back();
auto image = vkTexture->GetImage();
@@ -152,8 +152,8 @@ void ThermionVulkanContext::BlitFromSwapchain() {
auto height = texture->GetHeight();
auto width = texture->GetWidth();
auto bundle = _platform->getSwapChainBundle(_platform->_current);
VkImage swapchainImage = bundle.colors[_platform->_currentColorIndex];
auto bundle = _platform->getSwapChainBundle(_platform->current);
VkImage swapchainImage = bundle.colors[_platform->currentColorIndex];
// Command buffer allocation
VkCommandBufferAllocateInfo cmdBufInfo{};
cmdBufInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
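The hunk stops at the command-buffer allocation, but the shape of the copy that follows is the standard one: transition both images, record vkCmdBlitImage, then submit and wait. A condensed, hypothetical sketch of just the blit record (layout transitions and submission omitted; `cmd`, `width`, and `height` are assumed to come from the surrounding function):

// Hypothetical sketch of the blit itself; not the project's exact code.
VkImageBlit region{};
region.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
region.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
region.srcOffsets[1] = { static_cast<int32_t>(width), static_cast<int32_t>(height), 1 };
region.dstOffsets[1] = { static_cast<int32_t>(width), static_cast<int32_t>(height), 1 };
bluevk::vkCmdBlitImage(cmd,
        swapchainImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
        image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
        1, &region, VK_FILTER_NEAREST);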

View File

@@ -1,850 +0,0 @@
#ifndef __wglext_h_
#define __wglext_h_ 1
#ifdef __cplusplus
extern "C" {
#endif
/*
** Copyright (c) 2013-2017 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a
** copy of this software and/or associated documentation files (the
** "Materials"), to deal in the Materials without restriction, including
** without limitation the rights to use, copy, modify, merge, publish,
** distribute, sublicense, and/or sell copies of the Materials, and to
** permit persons to whom the Materials are furnished to do so, subject to
** the following conditions:
**
** The above copyright notice and this permission notice shall be included
** in all copies or substantial portions of the Materials.
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
*/
/*
** This header is generated from the Khronos OpenGL / OpenGL ES XML
** API Registry. The current version of the Registry, generator scripts
** used to make the header, and the header can be found at
** https://github.com/KhronosGroup/OpenGL-Registry
*/
#if defined(_WIN32) && !defined(APIENTRY) && !defined(__CYGWIN__) && !defined(__SCITECH_SNAP__)
#define WIN32_LEAN_AND_MEAN 1
#include <windows.h>
#endif
#define WGL_WGLEXT_VERSION 20171125
/* Generated C header for:
* API: wgl
* Versions considered: .*
* Versions emitted: _nomatch_^
* Default extensions included: wgl
* Additional extensions included: _nomatch_^
* Extensions removed: _nomatch_^
*/
#ifndef WGL_ARB_buffer_region
#define WGL_ARB_buffer_region 1
#define WGL_FRONT_COLOR_BUFFER_BIT_ARB 0x00000001
#define WGL_BACK_COLOR_BUFFER_BIT_ARB 0x00000002
#define WGL_DEPTH_BUFFER_BIT_ARB 0x00000004
#define WGL_STENCIL_BUFFER_BIT_ARB 0x00000008
typedef HANDLE (WINAPI * PFNWGLCREATEBUFFERREGIONARBPROC) (HDC hDC, int iLayerPlane, UINT uType);
typedef VOID (WINAPI * PFNWGLDELETEBUFFERREGIONARBPROC) (HANDLE hRegion);
typedef BOOL (WINAPI * PFNWGLSAVEBUFFERREGIONARBPROC) (HANDLE hRegion, int x, int y, int width, int height);
typedef BOOL (WINAPI * PFNWGLRESTOREBUFFERREGIONARBPROC) (HANDLE hRegion, int x, int y, int width, int height, int xSrc, int ySrc);
#ifdef WGL_WGLEXT_PROTOTYPES
HANDLE WINAPI wglCreateBufferRegionARB (HDC hDC, int iLayerPlane, UINT uType);
VOID WINAPI wglDeleteBufferRegionARB (HANDLE hRegion);
BOOL WINAPI wglSaveBufferRegionARB (HANDLE hRegion, int x, int y, int width, int height);
BOOL WINAPI wglRestoreBufferRegionARB (HANDLE hRegion, int x, int y, int width, int height, int xSrc, int ySrc);
#endif
#endif /* WGL_ARB_buffer_region */
#ifndef WGL_ARB_context_flush_control
#define WGL_ARB_context_flush_control 1
#define WGL_CONTEXT_RELEASE_BEHAVIOR_ARB 0x2097
#define WGL_CONTEXT_RELEASE_BEHAVIOR_NONE_ARB 0
#define WGL_CONTEXT_RELEASE_BEHAVIOR_FLUSH_ARB 0x2098
#endif /* WGL_ARB_context_flush_control */
#ifndef WGL_ARB_create_context
#define WGL_ARB_create_context 1
#define WGL_CONTEXT_DEBUG_BIT_ARB 0x00000001
#define WGL_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB 0x00000002
#define WGL_CONTEXT_MAJOR_VERSION_ARB 0x2091
#define WGL_CONTEXT_MINOR_VERSION_ARB 0x2092
#define WGL_CONTEXT_LAYER_PLANE_ARB 0x2093
#define WGL_CONTEXT_FLAGS_ARB 0x2094
#define ERROR_INVALID_VERSION_ARB 0x2095
typedef HGLRC (WINAPI * PFNWGLCREATECONTEXTATTRIBSARBPROC) (HDC hDC, HGLRC hShareContext, const int *attribList);
#ifdef WGL_WGLEXT_PROTOTYPES
HGLRC WINAPI wglCreateContextAttribsARB (HDC hDC, HGLRC hShareContext, const int *attribList);
#endif
#endif /* WGL_ARB_create_context */
#ifndef WGL_ARB_create_context_no_error
#define WGL_ARB_create_context_no_error 1
#define WGL_CONTEXT_OPENGL_NO_ERROR_ARB 0x31B3
#endif /* WGL_ARB_create_context_no_error */
#ifndef WGL_ARB_create_context_profile
#define WGL_ARB_create_context_profile 1
#define WGL_CONTEXT_PROFILE_MASK_ARB 0x9126
#define WGL_CONTEXT_CORE_PROFILE_BIT_ARB 0x00000001
#define WGL_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB 0x00000002
#define ERROR_INVALID_PROFILE_ARB 0x2096
#endif /* WGL_ARB_create_context_profile */
#ifndef WGL_ARB_create_context_robustness
#define WGL_ARB_create_context_robustness 1
#define WGL_CONTEXT_ROBUST_ACCESS_BIT_ARB 0x00000004
#define WGL_LOSE_CONTEXT_ON_RESET_ARB 0x8252
#define WGL_CONTEXT_RESET_NOTIFICATION_STRATEGY_ARB 0x8256
#define WGL_NO_RESET_NOTIFICATION_ARB 0x8261
#endif /* WGL_ARB_create_context_robustness */
#ifndef WGL_ARB_extensions_string
#define WGL_ARB_extensions_string 1
typedef const char *(WINAPI * PFNWGLGETEXTENSIONSSTRINGARBPROC) (HDC hdc);
#ifdef WGL_WGLEXT_PROTOTYPES
const char *WINAPI wglGetExtensionsStringARB (HDC hdc);
#endif
#endif /* WGL_ARB_extensions_string */
#ifndef WGL_ARB_framebuffer_sRGB
#define WGL_ARB_framebuffer_sRGB 1
#define WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB 0x20A9
#endif /* WGL_ARB_framebuffer_sRGB */
#ifndef WGL_ARB_make_current_read
#define WGL_ARB_make_current_read 1
#define ERROR_INVALID_PIXEL_TYPE_ARB 0x2043
#define ERROR_INCOMPATIBLE_DEVICE_CONTEXTS_ARB 0x2054
typedef BOOL (WINAPI * PFNWGLMAKECONTEXTCURRENTARBPROC) (HDC hDrawDC, HDC hReadDC, HGLRC hglrc);
typedef HDC (WINAPI * PFNWGLGETCURRENTREADDCARBPROC) (void);
#ifdef WGL_WGLEXT_PROTOTYPES
BOOL WINAPI wglMakeContextCurrentARB (HDC hDrawDC, HDC hReadDC, HGLRC hglrc);
HDC WINAPI wglGetCurrentReadDCARB (void);
#endif
#endif /* WGL_ARB_make_current_read */
#ifndef WGL_ARB_multisample
#define WGL_ARB_multisample 1
#define WGL_SAMPLE_BUFFERS_ARB 0x2041
#define WGL_SAMPLES_ARB 0x2042
#endif /* WGL_ARB_multisample */
#ifndef WGL_ARB_pbuffer
#define WGL_ARB_pbuffer 1
DECLARE_HANDLE(HPBUFFERARB);
#define WGL_DRAW_TO_PBUFFER_ARB 0x202D
#define WGL_MAX_PBUFFER_PIXELS_ARB 0x202E
#define WGL_MAX_PBUFFER_WIDTH_ARB 0x202F
#define WGL_MAX_PBUFFER_HEIGHT_ARB 0x2030
#define WGL_PBUFFER_LARGEST_ARB 0x2033
#define WGL_PBUFFER_WIDTH_ARB 0x2034
#define WGL_PBUFFER_HEIGHT_ARB 0x2035
#define WGL_PBUFFER_LOST_ARB 0x2036
typedef HPBUFFERARB (WINAPI * PFNWGLCREATEPBUFFERARBPROC) (HDC hDC, int iPixelFormat, int iWidth, int iHeight, const int *piAttribList);
typedef HDC (WINAPI * PFNWGLGETPBUFFERDCARBPROC) (HPBUFFERARB hPbuffer);
typedef int (WINAPI * PFNWGLRELEASEPBUFFERDCARBPROC) (HPBUFFERARB hPbuffer, HDC hDC);
typedef BOOL (WINAPI * PFNWGLDESTROYPBUFFERARBPROC) (HPBUFFERARB hPbuffer);
typedef BOOL (WINAPI * PFNWGLQUERYPBUFFERARBPROC) (HPBUFFERARB hPbuffer, int iAttribute, int *piValue);
#ifdef WGL_WGLEXT_PROTOTYPES
HPBUFFERARB WINAPI wglCreatePbufferARB (HDC hDC, int iPixelFormat, int iWidth, int iHeight, const int *piAttribList);
HDC WINAPI wglGetPbufferDCARB (HPBUFFERARB hPbuffer);
int WINAPI wglReleasePbufferDCARB (HPBUFFERARB hPbuffer, HDC hDC);
BOOL WINAPI wglDestroyPbufferARB (HPBUFFERARB hPbuffer);
BOOL WINAPI wglQueryPbufferARB (HPBUFFERARB hPbuffer, int iAttribute, int *piValue);
#endif
#endif /* WGL_ARB_pbuffer */
#ifndef WGL_ARB_pixel_format
#define WGL_ARB_pixel_format 1
#define WGL_NUMBER_PIXEL_FORMATS_ARB 0x2000
#define WGL_DRAW_TO_WINDOW_ARB 0x2001
#define WGL_DRAW_TO_BITMAP_ARB 0x2002
#define WGL_ACCELERATION_ARB 0x2003
#define WGL_NEED_PALETTE_ARB 0x2004
#define WGL_NEED_SYSTEM_PALETTE_ARB 0x2005
#define WGL_SWAP_LAYER_BUFFERS_ARB 0x2006
#define WGL_SWAP_METHOD_ARB 0x2007
#define WGL_NUMBER_OVERLAYS_ARB 0x2008
#define WGL_NUMBER_UNDERLAYS_ARB 0x2009
#define WGL_TRANSPARENT_ARB 0x200A
#define WGL_TRANSPARENT_RED_VALUE_ARB 0x2037
#define WGL_TRANSPARENT_GREEN_VALUE_ARB 0x2038
#define WGL_TRANSPARENT_BLUE_VALUE_ARB 0x2039
#define WGL_TRANSPARENT_ALPHA_VALUE_ARB 0x203A
#define WGL_TRANSPARENT_INDEX_VALUE_ARB 0x203B
#define WGL_SHARE_DEPTH_ARB 0x200C
#define WGL_SHARE_STENCIL_ARB 0x200D
#define WGL_SHARE_ACCUM_ARB 0x200E
#define WGL_SUPPORT_GDI_ARB 0x200F
#define WGL_SUPPORT_OPENGL_ARB 0x2010
#define WGL_DOUBLE_BUFFER_ARB 0x2011
#define WGL_STEREO_ARB 0x2012
#define WGL_PIXEL_TYPE_ARB 0x2013
#define WGL_COLOR_BITS_ARB 0x2014
#define WGL_RED_BITS_ARB 0x2015
#define WGL_RED_SHIFT_ARB 0x2016
#define WGL_GREEN_BITS_ARB 0x2017
#define WGL_GREEN_SHIFT_ARB 0x2018
#define WGL_BLUE_BITS_ARB 0x2019
#define WGL_BLUE_SHIFT_ARB 0x201A
#define WGL_ALPHA_BITS_ARB 0x201B
#define WGL_ALPHA_SHIFT_ARB 0x201C
#define WGL_ACCUM_BITS_ARB 0x201D
#define WGL_ACCUM_RED_BITS_ARB 0x201E
#define WGL_ACCUM_GREEN_BITS_ARB 0x201F
#define WGL_ACCUM_BLUE_BITS_ARB 0x2020
#define WGL_ACCUM_ALPHA_BITS_ARB 0x2021
#define WGL_DEPTH_BITS_ARB 0x2022
#define WGL_STENCIL_BITS_ARB 0x2023
#define WGL_AUX_BUFFERS_ARB 0x2024
#define WGL_NO_ACCELERATION_ARB 0x2025
#define WGL_GENERIC_ACCELERATION_ARB 0x2026
#define WGL_FULL_ACCELERATION_ARB 0x2027
#define WGL_SWAP_EXCHANGE_ARB 0x2028
#define WGL_SWAP_COPY_ARB 0x2029
#define WGL_SWAP_UNDEFINED_ARB 0x202A
#define WGL_TYPE_RGBA_ARB 0x202B
#define WGL_TYPE_COLORINDEX_ARB 0x202C
typedef BOOL (WINAPI * PFNWGLGETPIXELFORMATATTRIBIVARBPROC) (HDC hdc, int iPixelFormat, int iLayerPlane, UINT nAttributes, const int *piAttributes, int *piValues);
typedef BOOL (WINAPI * PFNWGLGETPIXELFORMATATTRIBFVARBPROC) (HDC hdc, int iPixelFormat, int iLayerPlane, UINT nAttributes, const int *piAttributes, FLOAT *pfValues);
typedef BOOL (WINAPI * PFNWGLCHOOSEPIXELFORMATARBPROC) (HDC hdc, const int *piAttribIList, const FLOAT *pfAttribFList, UINT nMaxFormats, int *piFormats, UINT *nNumFormats);
#ifdef WGL_WGLEXT_PROTOTYPES
BOOL WINAPI wglGetPixelFormatAttribivARB (HDC hdc, int iPixelFormat, int iLayerPlane, UINT nAttributes, const int *piAttributes, int *piValues);
BOOL WINAPI wglGetPixelFormatAttribfvARB (HDC hdc, int iPixelFormat, int iLayerPlane, UINT nAttributes, const int *piAttributes, FLOAT *pfValues);
BOOL WINAPI wglChoosePixelFormatARB (HDC hdc, const int *piAttribIList, const FLOAT *pfAttribFList, UINT nMaxFormats, int *piFormats, UINT *nNumFormats);
#endif
#endif /* WGL_ARB_pixel_format */
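/*
 * Illustrative sketch, not part of the Khronos registry header: selecting a
 * hardware-accelerated, double-buffered RGBA8 format with a 24-bit depth
 * buffer through WGL_ARB_pixel_format. Assumes a current GL context so the
 * entry point can be resolved; returns 0 if no matching format is found.
 */
static int exampleChoosePixelFormatARB(HDC hdc) {
    PFNWGLCHOOSEPIXELFORMATARBPROC pfnChoose =
            (PFNWGLCHOOSEPIXELFORMATARBPROC) wglGetProcAddress("wglChoosePixelFormatARB");
    if (!pfnChoose) {
        return 0;
    }
    const int attribs[] = {
        WGL_DRAW_TO_WINDOW_ARB, TRUE,
        WGL_SUPPORT_OPENGL_ARB, TRUE,
        WGL_DOUBLE_BUFFER_ARB,  TRUE,
        WGL_ACCELERATION_ARB,   WGL_FULL_ACCELERATION_ARB,
        WGL_PIXEL_TYPE_ARB,     WGL_TYPE_RGBA_ARB,
        WGL_COLOR_BITS_ARB,     32,
        WGL_DEPTH_BITS_ARB,     24,
        0 /* terminator */
    };
    int format = 0;
    UINT count = 0;
    if (!pfnChoose(hdc, attribs, NULL, 1, &format, &count) || count == 0) {
        return 0;
    }
    return format;
}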
#ifndef WGL_ARB_pixel_format_float
#define WGL_ARB_pixel_format_float 1
#define WGL_TYPE_RGBA_FLOAT_ARB 0x21A0
#endif /* WGL_ARB_pixel_format_float */
#ifndef WGL_ARB_render_texture
#define WGL_ARB_render_texture 1
#define WGL_BIND_TO_TEXTURE_RGB_ARB 0x2070
#define WGL_BIND_TO_TEXTURE_RGBA_ARB 0x2071
#define WGL_TEXTURE_FORMAT_ARB 0x2072
#define WGL_TEXTURE_TARGET_ARB 0x2073
#define WGL_MIPMAP_TEXTURE_ARB 0x2074
#define WGL_TEXTURE_RGB_ARB 0x2075
#define WGL_TEXTURE_RGBA_ARB 0x2076
#define WGL_NO_TEXTURE_ARB 0x2077
#define WGL_TEXTURE_CUBE_MAP_ARB 0x2078
#define WGL_TEXTURE_1D_ARB 0x2079
#define WGL_TEXTURE_2D_ARB 0x207A
#define WGL_MIPMAP_LEVEL_ARB 0x207B
#define WGL_CUBE_MAP_FACE_ARB 0x207C
#define WGL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB 0x207D
#define WGL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB 0x207E
#define WGL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB 0x207F
#define WGL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB 0x2080
#define WGL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB 0x2081
#define WGL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB 0x2082
#define WGL_FRONT_LEFT_ARB 0x2083
#define WGL_FRONT_RIGHT_ARB 0x2084
#define WGL_BACK_LEFT_ARB 0x2085
#define WGL_BACK_RIGHT_ARB 0x2086
#define WGL_AUX0_ARB 0x2087
#define WGL_AUX1_ARB 0x2088
#define WGL_AUX2_ARB 0x2089
#define WGL_AUX3_ARB 0x208A
#define WGL_AUX4_ARB 0x208B
#define WGL_AUX5_ARB 0x208C
#define WGL_AUX6_ARB 0x208D
#define WGL_AUX7_ARB 0x208E
#define WGL_AUX8_ARB 0x208F
#define WGL_AUX9_ARB 0x2090
typedef BOOL (WINAPI * PFNWGLBINDTEXIMAGEARBPROC) (HPBUFFERARB hPbuffer, int iBuffer);
typedef BOOL (WINAPI * PFNWGLRELEASETEXIMAGEARBPROC) (HPBUFFERARB hPbuffer, int iBuffer);
typedef BOOL (WINAPI * PFNWGLSETPBUFFERATTRIBARBPROC) (HPBUFFERARB hPbuffer, const int *piAttribList);
#ifdef WGL_WGLEXT_PROTOTYPES
BOOL WINAPI wglBindTexImageARB (HPBUFFERARB hPbuffer, int iBuffer);
BOOL WINAPI wglReleaseTexImageARB (HPBUFFERARB hPbuffer, int iBuffer);
BOOL WINAPI wglSetPbufferAttribARB (HPBUFFERARB hPbuffer, const int *piAttribList);
#endif
#endif /* WGL_ARB_render_texture */
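/*
 * Illustrative sketch, not part of the Khronos registry header: the
 * bind/release bracket WGL_ARB_render_texture requires when sampling a
 * pbuffer as a texture. The pbuffer is assumed to have been created with
 * WGL_TEXTURE_FORMAT_ARB / WGL_TEXTURE_TARGET_ARB attributes, and the
 * function pointers are assumed to be resolved already.
 */
static BOOL exampleSamplePbufferAsTexture(PFNWGLBINDTEXIMAGEARBPROC pfnBind,
                                          PFNWGLRELEASETEXIMAGEARBPROC pfnRelease,
                                          HPBUFFERARB hPbuffer) {
    /* Attach the pbuffer's front-left color buffer to the currently bound texture object. */
    if (!pfnBind(hPbuffer, WGL_FRONT_LEFT_ARB)) {
        return FALSE;
    }
    /* ... draw here using the texture; the pbuffer must not be rendered to while bound ... */
    return pfnRelease(hPbuffer, WGL_FRONT_LEFT_ARB);
}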
#ifndef WGL_ARB_robustness_application_isolation
#define WGL_ARB_robustness_application_isolation 1
#define WGL_CONTEXT_RESET_ISOLATION_BIT_ARB 0x00000008
#endif /* WGL_ARB_robustness_application_isolation */
#ifndef WGL_ARB_robustness_share_group_isolation
#define WGL_ARB_robustness_share_group_isolation 1
#endif /* WGL_ARB_robustness_share_group_isolation */
#ifndef WGL_3DFX_multisample
#define WGL_3DFX_multisample 1
#define WGL_SAMPLE_BUFFERS_3DFX 0x2060
#define WGL_SAMPLES_3DFX 0x2061
#endif /* WGL_3DFX_multisample */
#ifndef WGL_3DL_stereo_control
#define WGL_3DL_stereo_control 1
#define WGL_STEREO_EMITTER_ENABLE_3DL 0x2055
#define WGL_STEREO_EMITTER_DISABLE_3DL 0x2056
#define WGL_STEREO_POLARITY_NORMAL_3DL 0x2057
#define WGL_STEREO_POLARITY_INVERT_3DL 0x2058
typedef BOOL (WINAPI * PFNWGLSETSTEREOEMITTERSTATE3DLPROC) (HDC hDC, UINT uState);
#ifdef WGL_WGLEXT_PROTOTYPES
BOOL WINAPI wglSetStereoEmitterState3DL (HDC hDC, UINT uState);
#endif
#endif /* WGL_3DL_stereo_control */
#ifndef WGL_AMD_gpu_association
#define WGL_AMD_gpu_association 1
#define WGL_GPU_VENDOR_AMD 0x1F00
#define WGL_GPU_RENDERER_STRING_AMD 0x1F01
#define WGL_GPU_OPENGL_VERSION_STRING_AMD 0x1F02
#define WGL_GPU_FASTEST_TARGET_GPUS_AMD 0x21A2
#define WGL_GPU_RAM_AMD 0x21A3
#define WGL_GPU_CLOCK_AMD 0x21A4
#define WGL_GPU_NUM_PIPES_AMD 0x21A5
#define WGL_GPU_NUM_SIMD_AMD 0x21A6
#define WGL_GPU_NUM_RB_AMD 0x21A7
#define WGL_GPU_NUM_SPI_AMD 0x21A8
typedef UINT (WINAPI * PFNWGLGETGPUIDSAMDPROC) (UINT maxCount, UINT *ids);
typedef INT (WINAPI * PFNWGLGETGPUINFOAMDPROC) (UINT id, int property, GLenum dataType, UINT size, void *data);
typedef UINT (WINAPI * PFNWGLGETCONTEXTGPUIDAMDPROC) (HGLRC hglrc);
typedef HGLRC (WINAPI * PFNWGLCREATEASSOCIATEDCONTEXTAMDPROC) (UINT id);
typedef HGLRC (WINAPI * PFNWGLCREATEASSOCIATEDCONTEXTATTRIBSAMDPROC) (UINT id, HGLRC hShareContext, const int *attribList);
typedef BOOL (WINAPI * PFNWGLDELETEASSOCIATEDCONTEXTAMDPROC) (HGLRC hglrc);
typedef BOOL (WINAPI * PFNWGLMAKEASSOCIATEDCONTEXTCURRENTAMDPROC) (HGLRC hglrc);
typedef HGLRC (WINAPI * PFNWGLGETCURRENTASSOCIATEDCONTEXTAMDPROC) (void);
typedef VOID (WINAPI * PFNWGLBLITCONTEXTFRAMEBUFFERAMDPROC) (HGLRC dstCtx, GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
#ifdef WGL_WGLEXT_PROTOTYPES
UINT WINAPI wglGetGPUIDsAMD (UINT maxCount, UINT *ids);
INT WINAPI wglGetGPUInfoAMD (UINT id, int property, GLenum dataType, UINT size, void *data);
UINT WINAPI wglGetContextGPUIDAMD (HGLRC hglrc);
HGLRC WINAPI wglCreateAssociatedContextAMD (UINT id);
HGLRC WINAPI wglCreateAssociatedContextAttribsAMD (UINT id, HGLRC hShareContext, const int *attribList);
BOOL WINAPI wglDeleteAssociatedContextAMD (HGLRC hglrc);
BOOL WINAPI wglMakeAssociatedContextCurrentAMD (HGLRC hglrc);
HGLRC WINAPI wglGetCurrentAssociatedContextAMD (void);
VOID WINAPI wglBlitContextFramebufferAMD (HGLRC dstCtx, GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
#endif
#endif /* WGL_AMD_gpu_association */
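/*
 * Illustrative sketch, not part of the Khronos registry header: querying the
 * memory size of the first GPU reported by WGL_AMD_gpu_association. The entry
 * points are assumed to have been resolved with wglGetProcAddress, and the
 * extension is assumed to report WGL_GPU_RAM_AMD as a single unsigned integer
 * in megabytes.
 */
static UINT exampleQueryFirstGpuRamMB(PFNWGLGETGPUIDSAMDPROC pfnGetIds,
                                      PFNWGLGETGPUINFOAMDPROC pfnGetInfo) {
    UINT id = 0;
    if (pfnGetIds(1, &id) == 0) {
        return 0; /* no GPUs reported */
    }
    UINT ramMB = 0;
    if (pfnGetInfo(id, WGL_GPU_RAM_AMD, GL_UNSIGNED_INT, 1, &ramMB) < 1) {
        return 0;
    }
    return ramMB;
}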
#ifndef WGL_ATI_pixel_format_float
#define WGL_ATI_pixel_format_float 1
#define WGL_TYPE_RGBA_FLOAT_ATI 0x21A0
#endif /* WGL_ATI_pixel_format_float */
#ifndef WGL_EXT_colorspace
#define WGL_EXT_colorspace 1
#define WGL_COLORSPACE_EXT 0x3087
#define WGL_COLORSPACE_SRGB_EXT 0x3089
#define WGL_COLORSPACE_LINEAR_EXT 0x308A
#endif /* WGL_EXT_colorspace */
#ifndef WGL_EXT_create_context_es2_profile
#define WGL_EXT_create_context_es2_profile 1
#define WGL_CONTEXT_ES2_PROFILE_BIT_EXT 0x00000004
#endif /* WGL_EXT_create_context_es2_profile */
#ifndef WGL_EXT_create_context_es_profile
#define WGL_EXT_create_context_es_profile 1
#define WGL_CONTEXT_ES_PROFILE_BIT_EXT 0x00000004
#endif /* WGL_EXT_create_context_es_profile */
#ifndef WGL_EXT_depth_float
#define WGL_EXT_depth_float 1
#define WGL_DEPTH_FLOAT_EXT 0x2040
#endif /* WGL_EXT_depth_float */
#ifndef WGL_EXT_display_color_table
#define WGL_EXT_display_color_table 1
typedef GLboolean (WINAPI * PFNWGLCREATEDISPLAYCOLORTABLEEXTPROC) (GLushort id);
typedef GLboolean (WINAPI * PFNWGLLOADDISPLAYCOLORTABLEEXTPROC) (const GLushort *table, GLuint length);
typedef GLboolean (WINAPI * PFNWGLBINDDISPLAYCOLORTABLEEXTPROC) (GLushort id);
typedef VOID (WINAPI * PFNWGLDESTROYDISPLAYCOLORTABLEEXTPROC) (GLushort id);
#ifdef WGL_WGLEXT_PROTOTYPES
GLboolean WINAPI wglCreateDisplayColorTableEXT (GLushort id);
GLboolean WINAPI wglLoadDisplayColorTableEXT (const GLushort *table, GLuint length);
GLboolean WINAPI wglBindDisplayColorTableEXT (GLushort id);
VOID WINAPI wglDestroyDisplayColorTableEXT (GLushort id);
#endif
#endif /* WGL_EXT_display_color_table */
#ifndef WGL_EXT_extensions_string
#define WGL_EXT_extensions_string 1
typedef const char *(WINAPI * PFNWGLGETEXTENSIONSSTRINGEXTPROC) (void);
#ifdef WGL_WGLEXT_PROTOTYPES
const char *WINAPI wglGetExtensionsStringEXT (void);
#endif
#endif /* WGL_EXT_extensions_string */
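/*
 * Illustrative sketch, not part of the Khronos registry header: testing
 * whether a WGL extension is advertised via WGL_EXT_extensions_string.
 * Whole space-separated tokens are compared so that, for example,
 * "WGL_EXT_swap_control" does not match inside "WGL_EXT_swap_control_tear".
 * Assumes a current GL context.
 */
static BOOL exampleHasWGLExtension(const char* name) {
    PFNWGLGETEXTENSIONSSTRINGEXTPROC pfnGetExtensions =
            (PFNWGLGETEXTENSIONSSTRINGEXTPROC) wglGetProcAddress("wglGetExtensionsStringEXT");
    const char* list = pfnGetExtensions ? pfnGetExtensions() : NULL;
    if (!list) {
        return FALSE;
    }
    while (*list) {
        const char* n = name;
        while (*list && *n && *list == *n) { ++list; ++n; }
        if (*n == '\0' && (*list == ' ' || *list == '\0')) {
            return TRUE;
        }
        while (*list && *list != ' ') ++list;   /* skip the rest of this token */
        while (*list == ' ') ++list;            /* skip separators */
    }
    return FALSE;
}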
#ifndef WGL_EXT_framebuffer_sRGB
#define WGL_EXT_framebuffer_sRGB 1
#define WGL_FRAMEBUFFER_SRGB_CAPABLE_EXT 0x20A9
#endif /* WGL_EXT_framebuffer_sRGB */
#ifndef WGL_EXT_make_current_read
#define WGL_EXT_make_current_read 1
#define ERROR_INVALID_PIXEL_TYPE_EXT 0x2043
typedef BOOL (WINAPI * PFNWGLMAKECONTEXTCURRENTEXTPROC) (HDC hDrawDC, HDC hReadDC, HGLRC hglrc);
typedef HDC (WINAPI * PFNWGLGETCURRENTREADDCEXTPROC) (void);
#ifdef WGL_WGLEXT_PROTOTYPES
BOOL WINAPI wglMakeContextCurrentEXT (HDC hDrawDC, HDC hReadDC, HGLRC hglrc);
HDC WINAPI wglGetCurrentReadDCEXT (void);
#endif
#endif /* WGL_EXT_make_current_read */
#ifndef WGL_EXT_multisample
#define WGL_EXT_multisample 1
#define WGL_SAMPLE_BUFFERS_EXT 0x2041
#define WGL_SAMPLES_EXT 0x2042
#endif /* WGL_EXT_multisample */
#ifndef WGL_EXT_pbuffer
#define WGL_EXT_pbuffer 1
DECLARE_HANDLE(HPBUFFEREXT);
#define WGL_DRAW_TO_PBUFFER_EXT 0x202D
#define WGL_MAX_PBUFFER_PIXELS_EXT 0x202E
#define WGL_MAX_PBUFFER_WIDTH_EXT 0x202F
#define WGL_MAX_PBUFFER_HEIGHT_EXT 0x2030
#define WGL_OPTIMAL_PBUFFER_WIDTH_EXT 0x2031
#define WGL_OPTIMAL_PBUFFER_HEIGHT_EXT 0x2032
#define WGL_PBUFFER_LARGEST_EXT 0x2033
#define WGL_PBUFFER_WIDTH_EXT 0x2034
#define WGL_PBUFFER_HEIGHT_EXT 0x2035
typedef HPBUFFEREXT (WINAPI * PFNWGLCREATEPBUFFEREXTPROC) (HDC hDC, int iPixelFormat, int iWidth, int iHeight, const int *piAttribList);
typedef HDC (WINAPI * PFNWGLGETPBUFFERDCEXTPROC) (HPBUFFEREXT hPbuffer);
typedef int (WINAPI * PFNWGLRELEASEPBUFFERDCEXTPROC) (HPBUFFEREXT hPbuffer, HDC hDC);
typedef BOOL (WINAPI * PFNWGLDESTROYPBUFFEREXTPROC) (HPBUFFEREXT hPbuffer);
typedef BOOL (WINAPI * PFNWGLQUERYPBUFFEREXTPROC) (HPBUFFEREXT hPbuffer, int iAttribute, int *piValue);
#ifdef WGL_WGLEXT_PROTOTYPES
HPBUFFEREXT WINAPI wglCreatePbufferEXT (HDC hDC, int iPixelFormat, int iWidth, int iHeight, const int *piAttribList);
HDC WINAPI wglGetPbufferDCEXT (HPBUFFEREXT hPbuffer);
int WINAPI wglReleasePbufferDCEXT (HPBUFFEREXT hPbuffer, HDC hDC);
BOOL WINAPI wglDestroyPbufferEXT (HPBUFFEREXT hPbuffer);
BOOL WINAPI wglQueryPbufferEXT (HPBUFFEREXT hPbuffer, int iAttribute, int *piValue);
#endif
#endif /* WGL_EXT_pbuffer */
#ifndef WGL_EXT_pixel_format
#define WGL_EXT_pixel_format 1
#define WGL_NUMBER_PIXEL_FORMATS_EXT 0x2000
#define WGL_DRAW_TO_WINDOW_EXT 0x2001
#define WGL_DRAW_TO_BITMAP_EXT 0x2002
#define WGL_ACCELERATION_EXT 0x2003
#define WGL_NEED_PALETTE_EXT 0x2004
#define WGL_NEED_SYSTEM_PALETTE_EXT 0x2005
#define WGL_SWAP_LAYER_BUFFERS_EXT 0x2006
#define WGL_SWAP_METHOD_EXT 0x2007
#define WGL_NUMBER_OVERLAYS_EXT 0x2008
#define WGL_NUMBER_UNDERLAYS_EXT 0x2009
#define WGL_TRANSPARENT_EXT 0x200A
#define WGL_TRANSPARENT_VALUE_EXT 0x200B
#define WGL_SHARE_DEPTH_EXT 0x200C
#define WGL_SHARE_STENCIL_EXT 0x200D
#define WGL_SHARE_ACCUM_EXT 0x200E
#define WGL_SUPPORT_GDI_EXT 0x200F
#define WGL_SUPPORT_OPENGL_EXT 0x2010
#define WGL_DOUBLE_BUFFER_EXT 0x2011
#define WGL_STEREO_EXT 0x2012
#define WGL_PIXEL_TYPE_EXT 0x2013
#define WGL_COLOR_BITS_EXT 0x2014
#define WGL_RED_BITS_EXT 0x2015
#define WGL_RED_SHIFT_EXT 0x2016
#define WGL_GREEN_BITS_EXT 0x2017
#define WGL_GREEN_SHIFT_EXT 0x2018
#define WGL_BLUE_BITS_EXT 0x2019
#define WGL_BLUE_SHIFT_EXT 0x201A
#define WGL_ALPHA_BITS_EXT 0x201B
#define WGL_ALPHA_SHIFT_EXT 0x201C
#define WGL_ACCUM_BITS_EXT 0x201D
#define WGL_ACCUM_RED_BITS_EXT 0x201E
#define WGL_ACCUM_GREEN_BITS_EXT 0x201F
#define WGL_ACCUM_BLUE_BITS_EXT 0x2020
#define WGL_ACCUM_ALPHA_BITS_EXT 0x2021
#define WGL_DEPTH_BITS_EXT 0x2022
#define WGL_STENCIL_BITS_EXT 0x2023
#define WGL_AUX_BUFFERS_EXT 0x2024
#define WGL_NO_ACCELERATION_EXT 0x2025
#define WGL_GENERIC_ACCELERATION_EXT 0x2026
#define WGL_FULL_ACCELERATION_EXT 0x2027
#define WGL_SWAP_EXCHANGE_EXT 0x2028
#define WGL_SWAP_COPY_EXT 0x2029
#define WGL_SWAP_UNDEFINED_EXT 0x202A
#define WGL_TYPE_RGBA_EXT 0x202B
#define WGL_TYPE_COLORINDEX_EXT 0x202C
typedef BOOL (WINAPI * PFNWGLGETPIXELFORMATATTRIBIVEXTPROC) (HDC hdc, int iPixelFormat, int iLayerPlane, UINT nAttributes, int *piAttributes, int *piValues);
typedef BOOL (WINAPI * PFNWGLGETPIXELFORMATATTRIBFVEXTPROC) (HDC hdc, int iPixelFormat, int iLayerPlane, UINT nAttributes, int *piAttributes, FLOAT *pfValues);
typedef BOOL (WINAPI * PFNWGLCHOOSEPIXELFORMATEXTPROC) (HDC hdc, const int *piAttribIList, const FLOAT *pfAttribFList, UINT nMaxFormats, int *piFormats, UINT *nNumFormats);
#ifdef WGL_WGLEXT_PROTOTYPES
BOOL WINAPI wglGetPixelFormatAttribivEXT (HDC hdc, int iPixelFormat, int iLayerPlane, UINT nAttributes, int *piAttributes, int *piValues);
BOOL WINAPI wglGetPixelFormatAttribfvEXT (HDC hdc, int iPixelFormat, int iLayerPlane, UINT nAttributes, int *piAttributes, FLOAT *pfValues);
BOOL WINAPI wglChoosePixelFormatEXT (HDC hdc, const int *piAttribIList, const FLOAT *pfAttribFList, UINT nMaxFormats, int *piFormats, UINT *nNumFormats);
#endif
#endif /* WGL_EXT_pixel_format */
#ifndef WGL_EXT_pixel_format_packed_float
#define WGL_EXT_pixel_format_packed_float 1
#define WGL_TYPE_RGBA_UNSIGNED_FLOAT_EXT 0x20A8
#endif /* WGL_EXT_pixel_format_packed_float */
#ifndef WGL_EXT_swap_control
#define WGL_EXT_swap_control 1
typedef BOOL (WINAPI * PFNWGLSWAPINTERVALEXTPROC) (int interval);
typedef int (WINAPI * PFNWGLGETSWAPINTERVALEXTPROC) (void);
#ifdef WGL_WGLEXT_PROTOTYPES
BOOL WINAPI wglSwapIntervalEXT (int interval);
int WINAPI wglGetSwapIntervalEXT (void);
#endif
#endif /* WGL_EXT_swap_control */
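/*
 * Illustrative sketch, not part of the Khronos registry header: setting the
 * swap interval with WGL_EXT_swap_control. An interval of 1 synchronizes
 * SwapBuffers with the monitor refresh, 0 disables vsync, and -1 requests
 * adaptive vsync when WGL_EXT_swap_control_tear is also exposed. Assumes a
 * current GL context.
 */
static BOOL exampleSetSwapInterval(int interval) {
    PFNWGLSWAPINTERVALEXTPROC pfnSwapInterval =
            (PFNWGLSWAPINTERVALEXTPROC) wglGetProcAddress("wglSwapIntervalEXT");
    return pfnSwapInterval ? pfnSwapInterval(interval) : FALSE;
}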
#ifndef WGL_EXT_swap_control_tear
#define WGL_EXT_swap_control_tear 1
#endif /* WGL_EXT_swap_control_tear */
#ifndef WGL_I3D_digital_video_control
#define WGL_I3D_digital_video_control 1
#define WGL_DIGITAL_VIDEO_CURSOR_ALPHA_FRAMEBUFFER_I3D 0x2050
#define WGL_DIGITAL_VIDEO_CURSOR_ALPHA_VALUE_I3D 0x2051
#define WGL_DIGITAL_VIDEO_CURSOR_INCLUDED_I3D 0x2052
#define WGL_DIGITAL_VIDEO_GAMMA_CORRECTED_I3D 0x2053
typedef BOOL (WINAPI * PFNWGLGETDIGITALVIDEOPARAMETERSI3DPROC) (HDC hDC, int iAttribute, int *piValue);
typedef BOOL (WINAPI * PFNWGLSETDIGITALVIDEOPARAMETERSI3DPROC) (HDC hDC, int iAttribute, const int *piValue);
#ifdef WGL_WGLEXT_PROTOTYPES
BOOL WINAPI wglGetDigitalVideoParametersI3D (HDC hDC, int iAttribute, int *piValue);
BOOL WINAPI wglSetDigitalVideoParametersI3D (HDC hDC, int iAttribute, const int *piValue);
#endif
#endif /* WGL_I3D_digital_video_control */
#ifndef WGL_I3D_gamma
#define WGL_I3D_gamma 1
#define WGL_GAMMA_TABLE_SIZE_I3D 0x204E
#define WGL_GAMMA_EXCLUDE_DESKTOP_I3D 0x204F
typedef BOOL (WINAPI * PFNWGLGETGAMMATABLEPARAMETERSI3DPROC) (HDC hDC, int iAttribute, int *piValue);
typedef BOOL (WINAPI * PFNWGLSETGAMMATABLEPARAMETERSI3DPROC) (HDC hDC, int iAttribute, const int *piValue);
typedef BOOL (WINAPI * PFNWGLGETGAMMATABLEI3DPROC) (HDC hDC, int iEntries, USHORT *puRed, USHORT *puGreen, USHORT *puBlue);
typedef BOOL (WINAPI * PFNWGLSETGAMMATABLEI3DPROC) (HDC hDC, int iEntries, const USHORT *puRed, const USHORT *puGreen, const USHORT *puBlue);
#ifdef WGL_WGLEXT_PROTOTYPES
BOOL WINAPI wglGetGammaTableParametersI3D (HDC hDC, int iAttribute, int *piValue);
BOOL WINAPI wglSetGammaTableParametersI3D (HDC hDC, int iAttribute, const int *piValue);
BOOL WINAPI wglGetGammaTableI3D (HDC hDC, int iEntries, USHORT *puRed, USHORT *puGreen, USHORT *puBlue);
BOOL WINAPI wglSetGammaTableI3D (HDC hDC, int iEntries, const USHORT *puRed, const USHORT *puGreen, const USHORT *puBlue);
#endif
#endif /* WGL_I3D_gamma */
#ifndef WGL_I3D_genlock
#define WGL_I3D_genlock 1
#define WGL_GENLOCK_SOURCE_MULTIVIEW_I3D 0x2044
#define WGL_GENLOCK_SOURCE_EXTERNAL_SYNC_I3D 0x2045
#define WGL_GENLOCK_SOURCE_EXTERNAL_FIELD_I3D 0x2046
#define WGL_GENLOCK_SOURCE_EXTERNAL_TTL_I3D 0x2047
#define WGL_GENLOCK_SOURCE_DIGITAL_SYNC_I3D 0x2048
#define WGL_GENLOCK_SOURCE_DIGITAL_FIELD_I3D 0x2049
#define WGL_GENLOCK_SOURCE_EDGE_FALLING_I3D 0x204A
#define WGL_GENLOCK_SOURCE_EDGE_RISING_I3D 0x204B
#define WGL_GENLOCK_SOURCE_EDGE_BOTH_I3D 0x204C
typedef BOOL (WINAPI * PFNWGLENABLEGENLOCKI3DPROC) (HDC hDC);
typedef BOOL (WINAPI * PFNWGLDISABLEGENLOCKI3DPROC) (HDC hDC);
typedef BOOL (WINAPI * PFNWGLISENABLEDGENLOCKI3DPROC) (HDC hDC, BOOL *pFlag);
typedef BOOL (WINAPI * PFNWGLGENLOCKSOURCEI3DPROC) (HDC hDC, UINT uSource);
typedef BOOL (WINAPI * PFNWGLGETGENLOCKSOURCEI3DPROC) (HDC hDC, UINT *uSource);
typedef BOOL (WINAPI * PFNWGLGENLOCKSOURCEEDGEI3DPROC) (HDC hDC, UINT uEdge);
typedef BOOL (WINAPI * PFNWGLGETGENLOCKSOURCEEDGEI3DPROC) (HDC hDC, UINT *uEdge);
typedef BOOL (WINAPI * PFNWGLGENLOCKSAMPLERATEI3DPROC) (HDC hDC, UINT uRate);
typedef BOOL (WINAPI * PFNWGLGETGENLOCKSAMPLERATEI3DPROC) (HDC hDC, UINT *uRate);
typedef BOOL (WINAPI * PFNWGLGENLOCKSOURCEDELAYI3DPROC) (HDC hDC, UINT uDelay);
typedef BOOL (WINAPI * PFNWGLGETGENLOCKSOURCEDELAYI3DPROC) (HDC hDC, UINT *uDelay);
typedef BOOL (WINAPI * PFNWGLQUERYGENLOCKMAXSOURCEDELAYI3DPROC) (HDC hDC, UINT *uMaxLineDelay, UINT *uMaxPixelDelay);
#ifdef WGL_WGLEXT_PROTOTYPES
BOOL WINAPI wglEnableGenlockI3D (HDC hDC);
BOOL WINAPI wglDisableGenlockI3D (HDC hDC);
BOOL WINAPI wglIsEnabledGenlockI3D (HDC hDC, BOOL *pFlag);
BOOL WINAPI wglGenlockSourceI3D (HDC hDC, UINT uSource);
BOOL WINAPI wglGetGenlockSourceI3D (HDC hDC, UINT *uSource);
BOOL WINAPI wglGenlockSourceEdgeI3D (HDC hDC, UINT uEdge);
BOOL WINAPI wglGetGenlockSourceEdgeI3D (HDC hDC, UINT *uEdge);
BOOL WINAPI wglGenlockSampleRateI3D (HDC hDC, UINT uRate);
BOOL WINAPI wglGetGenlockSampleRateI3D (HDC hDC, UINT *uRate);
BOOL WINAPI wglGenlockSourceDelayI3D (HDC hDC, UINT uDelay);
BOOL WINAPI wglGetGenlockSourceDelayI3D (HDC hDC, UINT *uDelay);
BOOL WINAPI wglQueryGenlockMaxSourceDelayI3D (HDC hDC, UINT *uMaxLineDelay, UINT *uMaxPixelDelay);
#endif
#endif /* WGL_I3D_genlock */
#ifndef WGL_I3D_image_buffer
#define WGL_I3D_image_buffer 1
#define WGL_IMAGE_BUFFER_MIN_ACCESS_I3D 0x00000001
#define WGL_IMAGE_BUFFER_LOCK_I3D 0x00000002
typedef LPVOID (WINAPI * PFNWGLCREATEIMAGEBUFFERI3DPROC) (HDC hDC, DWORD dwSize, UINT uFlags);
typedef BOOL (WINAPI * PFNWGLDESTROYIMAGEBUFFERI3DPROC) (HDC hDC, LPVOID pAddress);
typedef BOOL (WINAPI * PFNWGLASSOCIATEIMAGEBUFFEREVENTSI3DPROC) (HDC hDC, const HANDLE *pEvent, const LPVOID *pAddress, const DWORD *pSize, UINT count);
typedef BOOL (WINAPI * PFNWGLRELEASEIMAGEBUFFEREVENTSI3DPROC) (HDC hDC, const LPVOID *pAddress, UINT count);
#ifdef WGL_WGLEXT_PROTOTYPES
LPVOID WINAPI wglCreateImageBufferI3D (HDC hDC, DWORD dwSize, UINT uFlags);
BOOL WINAPI wglDestroyImageBufferI3D (HDC hDC, LPVOID pAddress);
BOOL WINAPI wglAssociateImageBufferEventsI3D (HDC hDC, const HANDLE *pEvent, const LPVOID *pAddress, const DWORD *pSize, UINT count);
BOOL WINAPI wglReleaseImageBufferEventsI3D (HDC hDC, const LPVOID *pAddress, UINT count);
#endif
#endif /* WGL_I3D_image_buffer */
#ifndef WGL_I3D_swap_frame_lock
#define WGL_I3D_swap_frame_lock 1
typedef BOOL (WINAPI * PFNWGLENABLEFRAMELOCKI3DPROC) (void);
typedef BOOL (WINAPI * PFNWGLDISABLEFRAMELOCKI3DPROC) (void);
typedef BOOL (WINAPI * PFNWGLISENABLEDFRAMELOCKI3DPROC) (BOOL *pFlag);
typedef BOOL (WINAPI * PFNWGLQUERYFRAMELOCKMASTERI3DPROC) (BOOL *pFlag);
#ifdef WGL_WGLEXT_PROTOTYPES
BOOL WINAPI wglEnableFrameLockI3D (void);
BOOL WINAPI wglDisableFrameLockI3D (void);
BOOL WINAPI wglIsEnabledFrameLockI3D (BOOL *pFlag);
BOOL WINAPI wglQueryFrameLockMasterI3D (BOOL *pFlag);
#endif
#endif /* WGL_I3D_swap_frame_lock */
#ifndef WGL_I3D_swap_frame_usage
#define WGL_I3D_swap_frame_usage 1
typedef BOOL (WINAPI * PFNWGLGETFRAMEUSAGEI3DPROC) (float *pUsage);
typedef BOOL (WINAPI * PFNWGLBEGINFRAMETRACKINGI3DPROC) (void);
typedef BOOL (WINAPI * PFNWGLENDFRAMETRACKINGI3DPROC) (void);
typedef BOOL (WINAPI * PFNWGLQUERYFRAMETRACKINGI3DPROC) (DWORD *pFrameCount, DWORD *pMissedFrames, float *pLastMissedUsage);
#ifdef WGL_WGLEXT_PROTOTYPES
BOOL WINAPI wglGetFrameUsageI3D (float *pUsage);
BOOL WINAPI wglBeginFrameTrackingI3D (void);
BOOL WINAPI wglEndFrameTrackingI3D (void);
BOOL WINAPI wglQueryFrameTrackingI3D (DWORD *pFrameCount, DWORD *pMissedFrames, float *pLastMissedUsage);
#endif
#endif /* WGL_I3D_swap_frame_usage */
#ifndef WGL_NV_DX_interop
#define WGL_NV_DX_interop 1
#define WGL_ACCESS_READ_ONLY_NV 0x00000000
#define WGL_ACCESS_READ_WRITE_NV 0x00000001
#define WGL_ACCESS_WRITE_DISCARD_NV 0x00000002
typedef BOOL (WINAPI * PFNWGLDXSETRESOURCESHAREHANDLENVPROC) (void *dxObject, HANDLE shareHandle);
typedef HANDLE (WINAPI * PFNWGLDXOPENDEVICENVPROC) (void *dxDevice);
typedef BOOL (WINAPI * PFNWGLDXCLOSEDEVICENVPROC) (HANDLE hDevice);
typedef HANDLE (WINAPI * PFNWGLDXREGISTEROBJECTNVPROC) (HANDLE hDevice, void *dxObject, GLuint name, GLenum type, GLenum access);
typedef BOOL (WINAPI * PFNWGLDXUNREGISTEROBJECTNVPROC) (HANDLE hDevice, HANDLE hObject);
typedef BOOL (WINAPI * PFNWGLDXOBJECTACCESSNVPROC) (HANDLE hObject, GLenum access);
typedef BOOL (WINAPI * PFNWGLDXLOCKOBJECTSNVPROC) (HANDLE hDevice, GLint count, HANDLE *hObjects);
typedef BOOL (WINAPI * PFNWGLDXUNLOCKOBJECTSNVPROC) (HANDLE hDevice, GLint count, HANDLE *hObjects);
#ifdef WGL_WGLEXT_PROTOTYPES
BOOL WINAPI wglDXSetResourceShareHandleNV (void *dxObject, HANDLE shareHandle);
HANDLE WINAPI wglDXOpenDeviceNV (void *dxDevice);
BOOL WINAPI wglDXCloseDeviceNV (HANDLE hDevice);
HANDLE WINAPI wglDXRegisterObjectNV (HANDLE hDevice, void *dxObject, GLuint name, GLenum type, GLenum access);
BOOL WINAPI wglDXUnregisterObjectNV (HANDLE hDevice, HANDLE hObject);
BOOL WINAPI wglDXObjectAccessNV (HANDLE hObject, GLenum access);
BOOL WINAPI wglDXLockObjectsNV (HANDLE hDevice, GLint count, HANDLE *hObjects);
BOOL WINAPI wglDXUnlockObjectsNV (HANDLE hDevice, GLint count, HANDLE *hObjects);
#endif
#endif /* WGL_NV_DX_interop */
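/*
 * Illustrative sketch, not part of the Khronos registry header: the
 * lock/unlock bracket WGL_NV_DX_interop requires around GL access to a
 * registered Direct3D resource. hDevice comes from wglDXOpenDeviceNV and
 * hObject from wglDXRegisterObjectNV; the function pointers are assumed to
 * have been resolved beforehand.
 */
static BOOL exampleAccessSharedDXObject(PFNWGLDXLOCKOBJECTSNVPROC pfnLock,
                                        PFNWGLDXUNLOCKOBJECTSNVPROC pfnUnlock,
                                        HANDLE hDevice, HANDLE hObject) {
    if (!pfnLock(hDevice, 1, &hObject)) {
        return FALSE;
    }
    /* ... issue GL commands that read or write the shared resource here ... */
    return pfnUnlock(hDevice, 1, &hObject);
}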
#ifndef WGL_NV_DX_interop2
#define WGL_NV_DX_interop2 1
#endif /* WGL_NV_DX_interop2 */
#ifndef WGL_NV_copy_image
#define WGL_NV_copy_image 1
typedef BOOL (WINAPI * PFNWGLCOPYIMAGESUBDATANVPROC) (HGLRC hSrcRC, GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, HGLRC hDstRC, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei width, GLsizei height, GLsizei depth);
#ifdef WGL_WGLEXT_PROTOTYPES
BOOL WINAPI wglCopyImageSubDataNV (HGLRC hSrcRC, GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, HGLRC hDstRC, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei width, GLsizei height, GLsizei depth);
#endif
#endif /* WGL_NV_copy_image */
#ifndef WGL_NV_delay_before_swap
#define WGL_NV_delay_before_swap 1
typedef BOOL (WINAPI * PFNWGLDELAYBEFORESWAPNVPROC) (HDC hDC, GLfloat seconds);
#ifdef WGL_WGLEXT_PROTOTYPES
BOOL WINAPI wglDelayBeforeSwapNV (HDC hDC, GLfloat seconds);
#endif
#endif /* WGL_NV_delay_before_swap */
#ifndef WGL_NV_float_buffer
#define WGL_NV_float_buffer 1
#define WGL_FLOAT_COMPONENTS_NV 0x20B0
#define WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_R_NV 0x20B1
#define WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RG_NV 0x20B2
#define WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RGB_NV 0x20B3
#define WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RGBA_NV 0x20B4
#define WGL_TEXTURE_FLOAT_R_NV 0x20B5
#define WGL_TEXTURE_FLOAT_RG_NV 0x20B6
#define WGL_TEXTURE_FLOAT_RGB_NV 0x20B7
#define WGL_TEXTURE_FLOAT_RGBA_NV 0x20B8
#endif /* WGL_NV_float_buffer */
#ifndef WGL_NV_gpu_affinity
#define WGL_NV_gpu_affinity 1
DECLARE_HANDLE(HGPUNV);
struct _GPU_DEVICE {
DWORD cb;
CHAR DeviceName[32];
CHAR DeviceString[128];
DWORD Flags;
RECT rcVirtualScreen;
};
typedef struct _GPU_DEVICE *PGPU_DEVICE;
#define ERROR_INCOMPATIBLE_AFFINITY_MASKS_NV 0x20D0
#define ERROR_MISSING_AFFINITY_MASK_NV 0x20D1
typedef BOOL (WINAPI * PFNWGLENUMGPUSNVPROC) (UINT iGpuIndex, HGPUNV *phGpu);
typedef BOOL (WINAPI * PFNWGLENUMGPUDEVICESNVPROC) (HGPUNV hGpu, UINT iDeviceIndex, PGPU_DEVICE lpGpuDevice);
typedef HDC (WINAPI * PFNWGLCREATEAFFINITYDCNVPROC) (const HGPUNV *phGpuList);
typedef BOOL (WINAPI * PFNWGLENUMGPUSFROMAFFINITYDCNVPROC) (HDC hAffinityDC, UINT iGpuIndex, HGPUNV *hGpu);
typedef BOOL (WINAPI * PFNWGLDELETEDCNVPROC) (HDC hdc);
#ifdef WGL_WGLEXT_PROTOTYPES
BOOL WINAPI wglEnumGpusNV (UINT iGpuIndex, HGPUNV *phGpu);
BOOL WINAPI wglEnumGpuDevicesNV (HGPUNV hGpu, UINT iDeviceIndex, PGPU_DEVICE lpGpuDevice);
HDC WINAPI wglCreateAffinityDCNV (const HGPUNV *phGpuList);
BOOL WINAPI wglEnumGpusFromAffinityDCNV (HDC hAffinityDC, UINT iGpuIndex, HGPUNV *hGpu);
BOOL WINAPI wglDeleteDCNV (HDC hdc);
#endif
#endif /* WGL_NV_gpu_affinity */
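/*
 * Illustrative sketch, not part of the Khronos registry header: counting the
 * GPUs exposed by WGL_NV_gpu_affinity. The entry point is assumed to have
 * been resolved with wglGetProcAddress.
 */
static UINT exampleCountAffinityGpus(PFNWGLENUMGPUSNVPROC pfnEnumGpus) {
    UINT count = 0;
    HGPUNV gpu;
    /* wglEnumGpusNV returns FALSE once the index runs past the last GPU. */
    while (pfnEnumGpus(count, &gpu)) {
        ++count;
    }
    return count;
}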
#ifndef WGL_NV_multisample_coverage
#define WGL_NV_multisample_coverage 1
#define WGL_COVERAGE_SAMPLES_NV 0x2042
#define WGL_COLOR_SAMPLES_NV 0x20B9
#endif /* WGL_NV_multisample_coverage */
#ifndef WGL_NV_present_video
#define WGL_NV_present_video 1
DECLARE_HANDLE(HVIDEOOUTPUTDEVICENV);
#define WGL_NUM_VIDEO_SLOTS_NV 0x20F0
typedef int (WINAPI * PFNWGLENUMERATEVIDEODEVICESNVPROC) (HDC hDC, HVIDEOOUTPUTDEVICENV *phDeviceList);
typedef BOOL (WINAPI * PFNWGLBINDVIDEODEVICENVPROC) (HDC hDC, unsigned int uVideoSlot, HVIDEOOUTPUTDEVICENV hVideoDevice, const int *piAttribList);
typedef BOOL (WINAPI * PFNWGLQUERYCURRENTCONTEXTNVPROC) (int iAttribute, int *piValue);
#ifdef WGL_WGLEXT_PROTOTYPES
int WINAPI wglEnumerateVideoDevicesNV (HDC hDC, HVIDEOOUTPUTDEVICENV *phDeviceList);
BOOL WINAPI wglBindVideoDeviceNV (HDC hDC, unsigned int uVideoSlot, HVIDEOOUTPUTDEVICENV hVideoDevice, const int *piAttribList);
BOOL WINAPI wglQueryCurrentContextNV (int iAttribute, int *piValue);
#endif
#endif /* WGL_NV_present_video */
#ifndef WGL_NV_render_depth_texture
#define WGL_NV_render_depth_texture 1
#define WGL_BIND_TO_TEXTURE_DEPTH_NV 0x20A3
#define WGL_BIND_TO_TEXTURE_RECTANGLE_DEPTH_NV 0x20A4
#define WGL_DEPTH_TEXTURE_FORMAT_NV 0x20A5
#define WGL_TEXTURE_DEPTH_COMPONENT_NV 0x20A6
#define WGL_DEPTH_COMPONENT_NV 0x20A7
#endif /* WGL_NV_render_depth_texture */
#ifndef WGL_NV_render_texture_rectangle
#define WGL_NV_render_texture_rectangle 1
#define WGL_BIND_TO_TEXTURE_RECTANGLE_RGB_NV 0x20A0
#define WGL_BIND_TO_TEXTURE_RECTANGLE_RGBA_NV 0x20A1
#define WGL_TEXTURE_RECTANGLE_NV 0x20A2
#endif /* WGL_NV_render_texture_rectangle */
#ifndef WGL_NV_swap_group
#define WGL_NV_swap_group 1
typedef BOOL (WINAPI * PFNWGLJOINSWAPGROUPNVPROC) (HDC hDC, GLuint group);
typedef BOOL (WINAPI * PFNWGLBINDSWAPBARRIERNVPROC) (GLuint group, GLuint barrier);
typedef BOOL (WINAPI * PFNWGLQUERYSWAPGROUPNVPROC) (HDC hDC, GLuint *group, GLuint *barrier);
typedef BOOL (WINAPI * PFNWGLQUERYMAXSWAPGROUPSNVPROC) (HDC hDC, GLuint *maxGroups, GLuint *maxBarriers);
typedef BOOL (WINAPI * PFNWGLQUERYFRAMECOUNTNVPROC) (HDC hDC, GLuint *count);
typedef BOOL (WINAPI * PFNWGLRESETFRAMECOUNTNVPROC) (HDC hDC);
#ifdef WGL_WGLEXT_PROTOTYPES
BOOL WINAPI wglJoinSwapGroupNV (HDC hDC, GLuint group);
BOOL WINAPI wglBindSwapBarrierNV (GLuint group, GLuint barrier);
BOOL WINAPI wglQuerySwapGroupNV (HDC hDC, GLuint *group, GLuint *barrier);
BOOL WINAPI wglQueryMaxSwapGroupsNV (HDC hDC, GLuint *maxGroups, GLuint *maxBarriers);
BOOL WINAPI wglQueryFrameCountNV (HDC hDC, GLuint *count);
BOOL WINAPI wglResetFrameCountNV (HDC hDC);
#endif
#endif /* WGL_NV_swap_group */
#ifndef WGL_NV_vertex_array_range
#define WGL_NV_vertex_array_range 1
typedef void *(WINAPI * PFNWGLALLOCATEMEMORYNVPROC) (GLsizei size, GLfloat readfreq, GLfloat writefreq, GLfloat priority);
typedef void (WINAPI * PFNWGLFREEMEMORYNVPROC) (void *pointer);
#ifdef WGL_WGLEXT_PROTOTYPES
void *WINAPI wglAllocateMemoryNV (GLsizei size, GLfloat readfreq, GLfloat writefreq, GLfloat priority);
void WINAPI wglFreeMemoryNV (void *pointer);
#endif
#endif /* WGL_NV_vertex_array_range */
#ifndef WGL_NV_video_capture
#define WGL_NV_video_capture 1
DECLARE_HANDLE(HVIDEOINPUTDEVICENV);
#define WGL_UNIQUE_ID_NV 0x20CE
#define WGL_NUM_VIDEO_CAPTURE_SLOTS_NV 0x20CF
typedef BOOL (WINAPI * PFNWGLBINDVIDEOCAPTUREDEVICENVPROC) (UINT uVideoSlot, HVIDEOINPUTDEVICENV hDevice);
typedef UINT (WINAPI * PFNWGLENUMERATEVIDEOCAPTUREDEVICESNVPROC) (HDC hDc, HVIDEOINPUTDEVICENV *phDeviceList);
typedef BOOL (WINAPI * PFNWGLLOCKVIDEOCAPTUREDEVICENVPROC) (HDC hDc, HVIDEOINPUTDEVICENV hDevice);
typedef BOOL (WINAPI * PFNWGLQUERYVIDEOCAPTUREDEVICENVPROC) (HDC hDc, HVIDEOINPUTDEVICENV hDevice, int iAttribute, int *piValue);
typedef BOOL (WINAPI * PFNWGLRELEASEVIDEOCAPTUREDEVICENVPROC) (HDC hDc, HVIDEOINPUTDEVICENV hDevice);
#ifdef WGL_WGLEXT_PROTOTYPES
BOOL WINAPI wglBindVideoCaptureDeviceNV (UINT uVideoSlot, HVIDEOINPUTDEVICENV hDevice);
UINT WINAPI wglEnumerateVideoCaptureDevicesNV (HDC hDc, HVIDEOINPUTDEVICENV *phDeviceList);
BOOL WINAPI wglLockVideoCaptureDeviceNV (HDC hDc, HVIDEOINPUTDEVICENV hDevice);
BOOL WINAPI wglQueryVideoCaptureDeviceNV (HDC hDc, HVIDEOINPUTDEVICENV hDevice, int iAttribute, int *piValue);
BOOL WINAPI wglReleaseVideoCaptureDeviceNV (HDC hDc, HVIDEOINPUTDEVICENV hDevice);
#endif
#endif /* WGL_NV_video_capture */
#ifndef WGL_NV_video_output
#define WGL_NV_video_output 1
DECLARE_HANDLE(HPVIDEODEV);
#define WGL_BIND_TO_VIDEO_RGB_NV 0x20C0
#define WGL_BIND_TO_VIDEO_RGBA_NV 0x20C1
#define WGL_BIND_TO_VIDEO_RGB_AND_DEPTH_NV 0x20C2
#define WGL_VIDEO_OUT_COLOR_NV 0x20C3
#define WGL_VIDEO_OUT_ALPHA_NV 0x20C4
#define WGL_VIDEO_OUT_DEPTH_NV 0x20C5
#define WGL_VIDEO_OUT_COLOR_AND_ALPHA_NV 0x20C6
#define WGL_VIDEO_OUT_COLOR_AND_DEPTH_NV 0x20C7
#define WGL_VIDEO_OUT_FRAME 0x20C8
#define WGL_VIDEO_OUT_FIELD_1 0x20C9
#define WGL_VIDEO_OUT_FIELD_2 0x20CA
#define WGL_VIDEO_OUT_STACKED_FIELDS_1_2 0x20CB
#define WGL_VIDEO_OUT_STACKED_FIELDS_2_1 0x20CC
typedef BOOL (WINAPI * PFNWGLGETVIDEODEVICENVPROC) (HDC hDC, int numDevices, HPVIDEODEV *hVideoDevice);
typedef BOOL (WINAPI * PFNWGLRELEASEVIDEODEVICENVPROC) (HPVIDEODEV hVideoDevice);
typedef BOOL (WINAPI * PFNWGLBINDVIDEOIMAGENVPROC) (HPVIDEODEV hVideoDevice, HPBUFFERARB hPbuffer, int iVideoBuffer);
typedef BOOL (WINAPI * PFNWGLRELEASEVIDEOIMAGENVPROC) (HPBUFFERARB hPbuffer, int iVideoBuffer);
typedef BOOL (WINAPI * PFNWGLSENDPBUFFERTOVIDEONVPROC) (HPBUFFERARB hPbuffer, int iBufferType, unsigned long *pulCounterPbuffer, BOOL bBlock);
typedef BOOL (WINAPI * PFNWGLGETVIDEOINFONVPROC) (HPVIDEODEV hpVideoDevice, unsigned long *pulCounterOutputPbuffer, unsigned long *pulCounterOutputVideo);
#ifdef WGL_WGLEXT_PROTOTYPES
BOOL WINAPI wglGetVideoDeviceNV (HDC hDC, int numDevices, HPVIDEODEV *hVideoDevice);
BOOL WINAPI wglReleaseVideoDeviceNV (HPVIDEODEV hVideoDevice);
BOOL WINAPI wglBindVideoImageNV (HPVIDEODEV hVideoDevice, HPBUFFERARB hPbuffer, int iVideoBuffer);
BOOL WINAPI wglReleaseVideoImageNV (HPBUFFERARB hPbuffer, int iVideoBuffer);
BOOL WINAPI wglSendPbufferToVideoNV (HPBUFFERARB hPbuffer, int iBufferType, unsigned long *pulCounterPbuffer, BOOL bBlock);
BOOL WINAPI wglGetVideoInfoNV (HPVIDEODEV hpVideoDevice, unsigned long *pulCounterOutputPbuffer, unsigned long *pulCounterOutputVideo);
#endif
#endif /* WGL_NV_video_output */
#ifndef WGL_OML_sync_control
#define WGL_OML_sync_control 1
typedef BOOL (WINAPI * PFNWGLGETSYNCVALUESOMLPROC) (HDC hdc, INT64 *ust, INT64 *msc, INT64 *sbc);
typedef BOOL (WINAPI * PFNWGLGETMSCRATEOMLPROC) (HDC hdc, INT32 *numerator, INT32 *denominator);
typedef INT64 (WINAPI * PFNWGLSWAPBUFFERSMSCOMLPROC) (HDC hdc, INT64 target_msc, INT64 divisor, INT64 remainder);
typedef INT64 (WINAPI * PFNWGLSWAPLAYERBUFFERSMSCOMLPROC) (HDC hdc, int fuPlanes, INT64 target_msc, INT64 divisor, INT64 remainder);
typedef BOOL (WINAPI * PFNWGLWAITFORMSCOMLPROC) (HDC hdc, INT64 target_msc, INT64 divisor, INT64 remainder, INT64 *ust, INT64 *msc, INT64 *sbc);
typedef BOOL (WINAPI * PFNWGLWAITFORSBCOMLPROC) (HDC hdc, INT64 target_sbc, INT64 *ust, INT64 *msc, INT64 *sbc);
#ifdef WGL_WGLEXT_PROTOTYPES
BOOL WINAPI wglGetSyncValuesOML (HDC hdc, INT64 *ust, INT64 *msc, INT64 *sbc);
BOOL WINAPI wglGetMscRateOML (HDC hdc, INT32 *numerator, INT32 *denominator);
INT64 WINAPI wglSwapBuffersMscOML (HDC hdc, INT64 target_msc, INT64 divisor, INT64 remainder);
INT64 WINAPI wglSwapLayerBuffersMscOML (HDC hdc, int fuPlanes, INT64 target_msc, INT64 divisor, INT64 remainder);
BOOL WINAPI wglWaitForMscOML (HDC hdc, INT64 target_msc, INT64 divisor, INT64 remainder, INT64 *ust, INT64 *msc, INT64 *sbc);
BOOL WINAPI wglWaitForSbcOML (HDC hdc, INT64 target_sbc, INT64 *ust, INT64 *msc, INT64 *sbc);
#endif
#endif /* WGL_OML_sync_control */
#ifdef __cplusplus
}
#endif
#endif


@@ -1,2 +0,0 @@
DisableFormat: true
SortIncludes: false


@@ -1,290 +0,0 @@
#ifndef __khrplatform_h_
#define __khrplatform_h_
/*
** Copyright (c) 2008-2018 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a
** copy of this software and/or associated documentation files (the
** "Materials"), to deal in the Materials without restriction, including
** without limitation the rights to use, copy, modify, merge, publish,
** distribute, sublicense, and/or sell copies of the Materials, and to
** permit persons to whom the Materials are furnished to do so, subject to
** the following conditions:
**
** The above copyright notice and this permission notice shall be included
** in all copies or substantial portions of the Materials.
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
*/
/* Khronos platform-specific types and definitions.
*
* The master copy of khrplatform.h is maintained in the Khronos EGL
* Registry repository at https://github.com/KhronosGroup/EGL-Registry
* The last semantic modification to khrplatform.h was at commit ID:
* 67a3e0864c2d75ea5287b9f3d2eb74a745936692
*
* Adopters may modify this file to suit their platform. Adopters are
* encouraged to submit platform specific modifications to the Khronos
* group so that they can be included in future versions of this file.
* Please submit changes by filing pull requests or issues on
* the EGL Registry repository linked above.
*
*
* See the Implementer's Guidelines for information about where this file
* should be located on your system and for more details of its use:
* http://www.khronos.org/registry/implementers_guide.pdf
*
* This file should be included as
* #include <KHR/khrplatform.h>
* by Khronos client API header files that use its types and defines.
*
* The types in khrplatform.h should only be used to define API-specific types.
*
* Types defined in khrplatform.h:
* khronos_int8_t signed 8 bit
* khronos_uint8_t unsigned 8 bit
* khronos_int16_t signed 16 bit
* khronos_uint16_t unsigned 16 bit
* khronos_int32_t signed 32 bit
* khronos_uint32_t unsigned 32 bit
* khronos_int64_t signed 64 bit
* khronos_uint64_t unsigned 64 bit
* khronos_intptr_t signed same number of bits as a pointer
* khronos_uintptr_t unsigned same number of bits as a pointer
* khronos_ssize_t signed size
* khronos_usize_t unsigned size
* khronos_float_t signed 32 bit floating point
* khronos_time_ns_t unsigned 64 bit time in nanoseconds
* khronos_utime_nanoseconds_t unsigned time interval or absolute time in
* nanoseconds
* khronos_stime_nanoseconds_t signed time interval in nanoseconds
* khronos_boolean_enum_t enumerated boolean type. This should
* only be used as a base type when a client API's boolean type is
* an enum. Client APIs which use an integer or other type for
* booleans cannot use this as the base type for their boolean.
*
* Tokens defined in khrplatform.h:
*
* KHRONOS_FALSE, KHRONOS_TRUE Enumerated boolean false/true values.
*
* KHRONOS_SUPPORT_INT64 is 1 if 64 bit integers are supported; otherwise 0.
* KHRONOS_SUPPORT_FLOAT is 1 if floats are supported; otherwise 0.
*
* Calling convention macros defined in this file:
* KHRONOS_APICALL
* KHRONOS_APIENTRY
* KHRONOS_APIATTRIBUTES
*
* These may be used in function prototypes as:
*
* KHRONOS_APICALL void KHRONOS_APIENTRY funcname(
* int arg1,
* int arg2) KHRONOS_APIATTRIBUTES;
*/
#if defined(__SCITECH_SNAP__) && !defined(KHRONOS_STATIC)
# define KHRONOS_STATIC 1
#endif
/*-------------------------------------------------------------------------
* Definition of KHRONOS_APICALL
*-------------------------------------------------------------------------
* This precedes the return type of the function in the function prototype.
*/
#if defined(KHRONOS_STATIC)
/* If the preprocessor constant KHRONOS_STATIC is defined, make the
* header compatible with static linking. */
# define KHRONOS_APICALL
#elif defined(_WIN32)
# define KHRONOS_APICALL __declspec(dllimport)
#elif defined (__SYMBIAN32__)
# define KHRONOS_APICALL IMPORT_C
#elif defined(__ANDROID__)
# define KHRONOS_APICALL __attribute__((visibility("default")))
#else
# define KHRONOS_APICALL
#endif
/*-------------------------------------------------------------------------
* Definition of KHRONOS_APIENTRY
*-------------------------------------------------------------------------
* This follows the return type of the function and precedes the function
* name in the function prototype.
*/
#if defined(_WIN32) && !defined(_WIN32_WCE) && !defined(__SCITECH_SNAP__)
/* Win32 but not WinCE */
# define KHRONOS_APIENTRY __stdcall
#else
# define KHRONOS_APIENTRY
#endif
/*-------------------------------------------------------------------------
* Definition of KHRONOS_APIATTRIBUTES
*-------------------------------------------------------------------------
* This follows the closing parenthesis of the function prototype arguments.
*/
#if defined (__ARMCC_2__)
#define KHRONOS_APIATTRIBUTES __softfp
#else
#define KHRONOS_APIATTRIBUTES
#endif
/*-------------------------------------------------------------------------
* basic type definitions
*-----------------------------------------------------------------------*/
#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || defined(__GNUC__) || defined(__SCO__) || defined(__USLC__)
/*
* Using <stdint.h>
*/
#include <stdint.h>
typedef int32_t khronos_int32_t;
typedef uint32_t khronos_uint32_t;
typedef int64_t khronos_int64_t;
typedef uint64_t khronos_uint64_t;
#define KHRONOS_SUPPORT_INT64 1
#define KHRONOS_SUPPORT_FLOAT 1
#elif defined(__VMS ) || defined(__sgi)
/*
* Using <inttypes.h>
*/
#include <inttypes.h>
typedef int32_t khronos_int32_t;
typedef uint32_t khronos_uint32_t;
typedef int64_t khronos_int64_t;
typedef uint64_t khronos_uint64_t;
#define KHRONOS_SUPPORT_INT64 1
#define KHRONOS_SUPPORT_FLOAT 1
#elif defined(_WIN32) && !defined(__SCITECH_SNAP__)
/*
* Win32
*/
typedef __int32 khronos_int32_t;
typedef unsigned __int32 khronos_uint32_t;
typedef __int64 khronos_int64_t;
typedef unsigned __int64 khronos_uint64_t;
#define KHRONOS_SUPPORT_INT64 1
#define KHRONOS_SUPPORT_FLOAT 1
#elif defined(__sun__) || defined(__digital__)
/*
* Sun or Digital
*/
typedef int khronos_int32_t;
typedef unsigned int khronos_uint32_t;
#if defined(__arch64__) || defined(_LP64)
typedef long int khronos_int64_t;
typedef unsigned long int khronos_uint64_t;
#else
typedef long long int khronos_int64_t;
typedef unsigned long long int khronos_uint64_t;
#endif /* __arch64__ */
#define KHRONOS_SUPPORT_INT64 1
#define KHRONOS_SUPPORT_FLOAT 1
#elif 0
/*
* Hypothetical platform with no float or int64 support
*/
typedef int khronos_int32_t;
typedef unsigned int khronos_uint32_t;
#define KHRONOS_SUPPORT_INT64 0
#define KHRONOS_SUPPORT_FLOAT 0
#else
/*
* Generic fallback
*/
#include <stdint.h>
typedef int32_t khronos_int32_t;
typedef uint32_t khronos_uint32_t;
typedef int64_t khronos_int64_t;
typedef uint64_t khronos_uint64_t;
#define KHRONOS_SUPPORT_INT64 1
#define KHRONOS_SUPPORT_FLOAT 1
#endif
/*
* Types that are (so far) the same on all platforms
*/
typedef signed char khronos_int8_t;
typedef unsigned char khronos_uint8_t;
typedef signed short int khronos_int16_t;
typedef unsigned short int khronos_uint16_t;
/*
* Types that differ between LLP64 and LP64 architectures - in LLP64,
* pointers are 64 bits, but 'long' is still 32 bits. Win64 appears
* to be the only LLP64 architecture in current use.
*/
#ifdef _WIN64
typedef signed long long int khronos_intptr_t;
typedef unsigned long long int khronos_uintptr_t;
typedef signed long long int khronos_ssize_t;
typedef unsigned long long int khronos_usize_t;
#else
typedef signed long int khronos_intptr_t;
typedef unsigned long int khronos_uintptr_t;
typedef signed long int khronos_ssize_t;
typedef unsigned long int khronos_usize_t;
#endif
#if KHRONOS_SUPPORT_FLOAT
/*
* Float type
*/
typedef float khronos_float_t;
#endif
#if KHRONOS_SUPPORT_INT64
/* Time types
*
* These types can be used to represent a time interval in nanoseconds or
* an absolute Unadjusted System Time. Unadjusted System Time is the number
* of nanoseconds since some arbitrary system event (e.g. since the last
* time the system booted). The Unadjusted System Time is an unsigned
* 64 bit value that wraps back to 0 every 584 years. Time intervals
* may be either signed or unsigned.
*/
typedef khronos_uint64_t khronos_utime_nanoseconds_t;
typedef khronos_int64_t khronos_stime_nanoseconds_t;
#endif
/*
* Dummy value used to pad enum types to 32 bits.
*/
#ifndef KHRONOS_MAX_ENUM
#define KHRONOS_MAX_ENUM 0x7FFFFFFF
#endif
/*
* Enumerated boolean type
*
* Values other than zero should be considered to be true. Therefore
* comparisons should not be made against KHRONOS_TRUE.
*/
typedef enum {
KHRONOS_FALSE = 0,
KHRONOS_TRUE = 1,
KHRONOS_BOOLEAN_ENUM_FORCE_SIZE = KHRONOS_MAX_ENUM
} khronos_boolean_enum_t;
#endif /* __khrplatform_h_ */


@@ -18,6 +18,7 @@
#define TNT_FILAMENT_BACKEND_PRIVATE_ACQUIREDIMAGE_H
#include <backend/DriverEnums.h>
#include <math/mat3.h>
namespace filament::backend {


@@ -0,0 +1,58 @@
/*
* Copyright (C) 2025 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_FILAMENT_BACKEND_BUFFEROBJECTSTREAMDESCRIPTOR_H
#define TNT_FILAMENT_BACKEND_BUFFEROBJECTSTREAMDESCRIPTOR_H
#include <backend/Handle.h>
#include <vector>
namespace filament::backend {
/**
* The type of association between a buffer object and a stream.
*/
enum class BufferObjectStreamAssociationType { TRANSFORM_MATRIX };
/**
* A descriptor for a buffer object to stream association.
*/
class UTILS_PUBLIC BufferObjectStreamDescriptor {
public:
//! creates an empty descriptor
BufferObjectStreamDescriptor() noexcept = default;
BufferObjectStreamDescriptor(const BufferObjectStreamDescriptor& rhs) = delete;
BufferObjectStreamDescriptor& operator=(const BufferObjectStreamDescriptor& rhs) = delete;
BufferObjectStreamDescriptor(BufferObjectStreamDescriptor&& rhs) = default;
BufferObjectStreamDescriptor& operator=(BufferObjectStreamDescriptor&& rhs) = default;
// --------------------------------------------------------------------------------------------
struct StreamDataDescriptor {
uint32_t offset;
Handle<HwStream> stream;
BufferObjectStreamAssociationType associationType;
};
std::vector<StreamDataDescriptor> mStreams;
};
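/**
 * A hypothetical usage sketch, not part of the original header: recording an
 * association between a stream's transform matrix and a location inside a
 * buffer object. The handle and byte offset are placeholders obtained
 * elsewhere from the driver API.
 */
inline void exampleAssociateTransform(BufferObjectStreamDescriptor& desc,
        Handle<HwStream> stream, uint32_t byteOffset) {
    // StreamDataDescriptor is an aggregate: { offset, stream, associationType }.
    desc.mStreams.push_back({ byteOffset, stream,
            BufferObjectStreamAssociationType::TRANSFORM_MATRIX });
}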
} // namespace filament::backend
#endif // TNT_FILAMENT_BACKEND_BUFFEROBJECTSTREAMDESCRIPTOR_H


@@ -0,0 +1,107 @@
/*
* Copyright (C) 2024 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_FILAMENT_BACKEND_COMMANDSTREAMVECTOR_H
#define TNT_FILAMENT_BACKEND_COMMANDSTREAMVECTOR_H
#include <backend/DriverApiForward.h>
#include <utils/ostream.h>
#include <initializer_list>
#include <memory>
#include <stddef.h>
#include <stdint.h>
namespace filament::backend {
void* allocateFromCommandStream(DriverApi& driver, size_t size, size_t alignment) noexcept;
class DescriptorSetOffsetArray {
public:
using value_type = uint32_t;
using reference = value_type&;
using const_reference = value_type const&;
using size_type = uint32_t;
using difference_type = int32_t;
using pointer = value_type*;
using const_pointer = value_type const*;
using iterator = pointer;
using const_iterator = const_pointer;
DescriptorSetOffsetArray() noexcept = default;
~DescriptorSetOffsetArray() noexcept = default;
DescriptorSetOffsetArray(size_type size, DriverApi& driver) noexcept {
mOffsets = (value_type *)allocateFromCommandStream(driver,
size * sizeof(value_type), alignof(value_type));
std::uninitialized_fill_n(mOffsets, size, 0);
}
DescriptorSetOffsetArray(std::initializer_list<uint32_t> list, DriverApi& driver) noexcept {
mOffsets = (value_type *)allocateFromCommandStream(driver,
list.size() * sizeof(value_type), alignof(value_type));
std::uninitialized_copy(list.begin(), list.end(), mOffsets);
}
DescriptorSetOffsetArray(DescriptorSetOffsetArray const&) = delete;
DescriptorSetOffsetArray& operator=(DescriptorSetOffsetArray const&) = delete;
DescriptorSetOffsetArray(DescriptorSetOffsetArray&& rhs) noexcept
: mOffsets(rhs.mOffsets) {
rhs.mOffsets = nullptr;
}
DescriptorSetOffsetArray& operator=(DescriptorSetOffsetArray&& rhs) noexcept {
if (this != &rhs) {
mOffsets = rhs.mOffsets;
rhs.mOffsets = nullptr;
}
return *this;
}
bool empty() const noexcept { return mOffsets == nullptr; }
value_type* data() noexcept { return mOffsets; }
const value_type* data() const noexcept { return mOffsets; }
reference operator[](size_type n) noexcept {
return *(data() + n);
}
const_reference operator[](size_type n) const noexcept {
return *(data() + n);
}
void clear() noexcept {
mOffsets = nullptr;
}
private:
value_type *mOffsets = nullptr;
};
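/*
 * A hypothetical usage sketch, not part of the original header: building an
 * offset array for a command that takes dynamic uniform-buffer offsets. The
 * storage comes from the command stream, so the array must be created with
 * the same DriverApi that will consume the command.
 */
inline DescriptorSetOffsetArray exampleMakeOffsets(DriverApi& driver) {
    // Two dynamic offsets, e.g. one per dynamic uniform buffer in the set.
    return DescriptorSetOffsetArray({ 0u, 256u }, driver);
}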
} // namespace filament::backend
#if !defined(NDEBUG)
utils::io::ostream& operator<<(utils::io::ostream& out, const filament::backend::DescriptorSetOffsetArray& rhs);
#endif
#endif //TNT_FILAMENT_BACKEND_COMMANDSTREAMVECTOR_H


@@ -19,17 +19,23 @@
#ifndef TNT_FILAMENT_BACKEND_DRIVERENUMS_H
#define TNT_FILAMENT_BACKEND_DRIVERENUMS_H
#include <utils/BitmaskEnum.h>
#include <utils/unwindows.h> // Because we define ERROR in the FenceStatus enum.
#include <backend/Platform.h>
#include <backend/PresentCallable.h>
#include <utils/BitmaskEnum.h>
#include <utils/FixedCapacityVector.h>
#include <utils/Invocable.h>
#include <utils/compiler.h>
#include <utils/debug.h>
#include <utils/ostream.h>
#include <math/vec4.h>
#include <array> // FIXME: STL headers are not allowed in public headers
#include <type_traits> // FIXME: STL headers are not allowed in public headers
#include <array>
#include <type_traits>
#include <variant>
#include <stddef.h>
#include <stdint.h>
@@ -90,11 +96,16 @@ static constexpr uint64_t SWAP_CHAIN_HAS_STENCIL_BUFFER = SWAP_CHAIN_CON
*/
static constexpr uint64_t SWAP_CHAIN_CONFIG_PROTECTED_CONTENT = 0x40;
static constexpr size_t MAX_VERTEX_ATTRIBUTE_COUNT = 16; // This is guaranteed by OpenGL ES.
static constexpr size_t MAX_SAMPLER_COUNT = 62; // Maximum needed at feature level 3.
static constexpr size_t MAX_VERTEX_BUFFER_COUNT = 16; // Max number of bound buffer objects.
static constexpr size_t MAX_SSBO_COUNT = 4; // This is guaranteed by OpenGL ES.
static constexpr size_t MAX_DESCRIPTOR_SET_COUNT = 4; // This is guaranteed by Vulkan.
static constexpr size_t MAX_DESCRIPTOR_COUNT = 64; // per set
static constexpr size_t MAX_PUSH_CONSTANT_COUNT = 32;   // The Vulkan 1.1 spec guarantees at least
                                                         // 128 bytes of push constants (we assume
                                                         // 4-byte types).
// Per feature level caps
// Use (int)FeatureLevel to index this array
@@ -112,7 +123,7 @@ static_assert(MAX_VERTEX_BUFFER_COUNT <= MAX_VERTEX_ATTRIBUTE_COUNT,
"The number of buffer objects that can be attached to a VertexBuffer must be "
"less than or equal to the maximum number of vertex attributes.");
static constexpr size_t CONFIG_UNIFORM_BINDING_COUNT = 10; // This is guaranteed by OpenGL ES.
static constexpr size_t CONFIG_UNIFORM_BINDING_COUNT = 9; // This is guaranteed by OpenGL ES.
static constexpr size_t CONFIG_SAMPLER_BINDING_COUNT = 4; // This is guaranteed by OpenGL ES.
/**
@@ -133,7 +144,8 @@ enum class Backend : uint8_t {
OPENGL = 1, //!< Selects the OpenGL/ES driver (default on Android)
VULKAN = 2, //!< Selects the Vulkan driver if the platform supports it (default on Linux/Windows)
METAL = 3, //!< Selects the Metal driver if the platform supports it (default on MacOS/iOS).
NOOP = 4, //!< Selects the no-op driver for testing purposes.
WEBGPU = 4, //!< Selects the Webgpu driver if the platform supports webgpu.
NOOP = 5, //!< Selects the no-op driver for testing purposes.
};
enum class TimerQueryResult : int8_t {
@@ -152,20 +164,24 @@ static constexpr const char* backendToString(Backend backend) {
return "Vulkan";
case Backend::METAL:
return "Metal";
case Backend::WEBGPU:
return "WebGPU";
default:
return "Unknown";
}
}
/**
* Defines the shader language. Similar to the above backend enum, but the OpenGL backend can select
* between two shader languages: ESSL 1.0 and ESSL 3.0.
* Defines the shader language. Similar to the above backend enum, with some differences:
* - The OpenGL backend can select between two shader languages: ESSL 1.0 and ESSL 3.0.
* - The Metal backend can prefer precompiled Metal libraries, while falling back to MSL.
*/
enum class ShaderLanguage {
ESSL1 = 0,
ESSL3 = 1,
SPIRV = 2,
MSL = 3,
METAL_LIBRARY = 4,
};
static constexpr const char* shaderLanguageToString(ShaderLanguage shaderLanguage) {
@@ -178,9 +194,76 @@ static constexpr const char* shaderLanguageToString(ShaderLanguage shaderLanguag
return "SPIR-V";
case ShaderLanguage::MSL:
return "MSL";
case ShaderLanguage::METAL_LIBRARY:
return "Metal precompiled library";
}
}
enum class ShaderStage : uint8_t {
VERTEX = 0,
FRAGMENT = 1,
COMPUTE = 2
};
static constexpr size_t PIPELINE_STAGE_COUNT = 2;
enum class ShaderStageFlags : uint8_t {
NONE = 0,
VERTEX = 0x1,
FRAGMENT = 0x2,
COMPUTE = 0x4,
ALL_SHADER_STAGE_FLAGS = VERTEX | FRAGMENT | COMPUTE
};
static inline constexpr bool hasShaderType(ShaderStageFlags flags, ShaderStage type) noexcept {
switch (type) {
case ShaderStage::VERTEX:
return bool(uint8_t(flags) & uint8_t(ShaderStageFlags::VERTEX));
case ShaderStage::FRAGMENT:
return bool(uint8_t(flags) & uint8_t(ShaderStageFlags::FRAGMENT));
case ShaderStage::COMPUTE:
return bool(uint8_t(flags) & uint8_t(ShaderStageFlags::COMPUTE));
}
}
enum class DescriptorType : uint8_t {
UNIFORM_BUFFER,
SHADER_STORAGE_BUFFER,
SAMPLER,
INPUT_ATTACHMENT,
SAMPLER_EXTERNAL
};
enum class DescriptorFlags : uint8_t {
NONE = 0x00,
DYNAMIC_OFFSET = 0x01
};
using descriptor_set_t = uint8_t;
using descriptor_binding_t = uint8_t;
struct DescriptorSetLayoutBinding {
DescriptorType type;
ShaderStageFlags stageFlags;
descriptor_binding_t binding;
DescriptorFlags flags = DescriptorFlags::NONE;
uint16_t count = 0;
friend inline bool operator==(
DescriptorSetLayoutBinding const& lhs,
DescriptorSetLayoutBinding const& rhs) noexcept {
return lhs.type == rhs.type &&
lhs.flags == rhs.flags &&
lhs.count == rhs.count &&
lhs.stageFlags == rhs.stageFlags;
}
};
struct DescriptorSetLayout {
utils::FixedCapacityVector<DescriptorSetLayoutBinding> bindings;
};
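/*
 * A hypothetical usage sketch, not part of the original diff: describing a
 * dynamic-offset uniform buffer visible to every shader stage, then checking
 * its visibility with the hasShaderType() helper defined above.
 */
inline bool exampleBindingIsVisibleToVertex() noexcept {
    DescriptorSetLayoutBinding const binding{
            DescriptorType::UNIFORM_BUFFER,
            ShaderStageFlags::ALL_SHADER_STAGE_FLAGS,
            /* binding = */ 0,
            DescriptorFlags::DYNAMIC_OFFSET };
    return hasShaderType(binding.stageFlags, ShaderStage::VERTEX);
}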
/**
* Bitmask for selecting render buffers
*/
@@ -239,8 +322,19 @@ struct Viewport {
int32_t right() const noexcept { return left + int32_t(width); }
//! get the top coordinate in window space of the viewport
int32_t top() const noexcept { return bottom + int32_t(height); }
};
friend bool operator==(Viewport const& lhs, Viewport const& rhs) noexcept {
// clang can do this branchless with xor/or
return lhs.left == rhs.left && lhs.bottom == rhs.bottom &&
lhs.width == rhs.width && lhs.height == rhs.height;
}
friend bool operator!=(Viewport const& lhs, Viewport const& rhs) noexcept {
// clang is being dumb and uses branches
return bool(((lhs.left ^ rhs.left) | (lhs.bottom ^ rhs.bottom)) |
((lhs.width ^ rhs.width) | (lhs.height ^ rhs.height)));
}
};
/**
* Specifies the mapping of the near and far clipping plane to window coordinates.
@@ -260,15 +354,6 @@ enum class FenceStatus : int8_t {
TIMEOUT_EXPIRED = 1, //!< wait()'s timeout expired. The Fence condition is not satisfied.
};
/**
* Status codes for sync objects
*/
enum class SyncStatus : int8_t {
ERROR = -1, //!< An error occurred. The Sync is not signaled.
SIGNALED = 0, //!< The Sync is signaled.
NOT_SIGNALED = 1, //!< The Sync is not signaled yet
};
static constexpr uint64_t FENCE_WAIT_FOR_EVER = uint64_t(-1);
/**
@@ -327,7 +412,7 @@ enum class UniformType : uint8_t {
/**
* Supported constant parameter types
*/
enum class ConstantType : uint8_t {
enum class ConstantType : uint8_t {
INT,
FLOAT,
BOOL
@@ -682,8 +767,9 @@ enum class TextureUsage : uint16_t {
SUBPASS_INPUT = 0x0020, //!< Texture can be used as a subpass input
BLIT_SRC = 0x0040, //!< Texture can be used as the source of a blit()
BLIT_DST = 0x0080, //!< Texture can be used as the destination of a blit()
PROTECTED = 0x0100, //!< Texture can be used the destination of a blit()
DEFAULT = UPLOADABLE | SAMPLEABLE //!< Default texture usage
PROTECTED = 0x0100, //!< Texture can be used for protected content
DEFAULT = UPLOADABLE | SAMPLEABLE, //!< Default texture usage
ALL_ATTACHMENTS = COLOR_ATTACHMENT | DEPTH_ATTACHMENT | STENCIL_ATTACHMENT | SUBPASS_INPUT, //!< Mask of all attachments
};
//! Texture swizzle
@@ -877,6 +963,9 @@ struct SamplerParams { // NOLINT
struct EqualTo {
bool operator()(SamplerParams lhs, SamplerParams rhs) const noexcept {
assert_invariant(lhs.padding0 == 0);
assert_invariant(lhs.padding1 == 0);
assert_invariant(lhs.padding2 == 0);
auto* pLhs = reinterpret_cast<uint32_t const*>(reinterpret_cast<char const*>(&lhs));
auto* pRhs = reinterpret_cast<uint32_t const*>(reinterpret_cast<char const*>(&rhs));
return *pLhs == *pRhs;
@@ -885,6 +974,9 @@ struct SamplerParams { // NOLINT
struct LessThan {
bool operator()(SamplerParams lhs, SamplerParams rhs) const noexcept {
assert_invariant(lhs.padding0 == 0);
assert_invariant(lhs.padding1 == 0);
assert_invariant(lhs.padding2 == 0);
auto* pLhs = reinterpret_cast<uint32_t const*>(reinterpret_cast<char const*>(&lhs));
auto* pRhs = reinterpret_cast<uint32_t const*>(reinterpret_cast<char const*>(&rhs));
return *pLhs == *pRhs;
@@ -892,6 +984,12 @@ struct SamplerParams { // NOLINT
};
private:
friend inline bool operator == (SamplerParams lhs, SamplerParams rhs) noexcept {
return SamplerParams::EqualTo{}(lhs, rhs);
}
friend inline bool operator != (SamplerParams lhs, SamplerParams rhs) noexcept {
return !SamplerParams::EqualTo{}(lhs, rhs);
}
friend inline bool operator < (SamplerParams lhs, SamplerParams rhs) noexcept {
return SamplerParams::LessThan{}(lhs, rhs);
}
@@ -1048,7 +1146,7 @@ struct RasterState {
bool inverseFrontFaces : 1; // 31
//! padding, must be 0
uint8_t padding : 1; // 32
bool depthClamp : 1; // 32
};
uint32_t u = 0;
};
@@ -1059,32 +1157,6 @@ struct RasterState {
* \privatesection
*/
enum class ShaderStage : uint8_t {
VERTEX = 0,
FRAGMENT = 1,
COMPUTE = 2
};
static constexpr size_t PIPELINE_STAGE_COUNT = 2;
enum class ShaderStageFlags : uint8_t {
NONE = 0,
VERTEX = 0x1,
FRAGMENT = 0x2,
COMPUTE = 0x4,
ALL_SHADER_STAGE_FLAGS = VERTEX | FRAGMENT | COMPUTE
};
static inline constexpr bool hasShaderType(ShaderStageFlags flags, ShaderStage type) noexcept {
switch (type) {
case ShaderStage::VERTEX:
return bool(uint8_t(flags) & uint8_t(ShaderStageFlags::VERTEX));
case ShaderStage::FRAGMENT:
return bool(uint8_t(flags) & uint8_t(ShaderStageFlags::FRAGMENT));
case ShaderStage::COMPUTE:
return bool(uint8_t(flags) & uint8_t(ShaderStageFlags::COMPUTE));
}
}
/**
* Selects which buffers to clear at the beginning of the render pass, as well as which buffers
* can be discarded at the beginning and end of the render pass.
@@ -1214,13 +1286,15 @@ struct StencilState {
uint8_t padding = 0;
};
using PushConstantVariant = std::variant<int32_t, float, bool>;
static_assert(sizeof(StencilState::StencilOperations) == 5u,
"StencilOperations size not what was intended");
static_assert(sizeof(StencilState) == 12u,
"StencilState size not what was intended");
using FrameScheduledCallback = void(*)(PresentCallable callable, void* user);
using FrameScheduledCallback = utils::Invocable<void(backend::PresentCallable)>;
enum class Workaround : uint16_t {
// The EASU pass must split because shader compiler flattens early-exit branch
@@ -1231,24 +1305,15 @@ enum class Workaround : uint16_t {
// for some uniform arrays, it's needed to do an initialization to avoid crash on adreno gpu
ADRENO_UNIFORM_ARRAY_CRASH,
// Workaround a Metal pipeline compilation error with the message:
// "Could not statically determine the target of a texture". See light_indirect.fs
A8X_STATIC_TEXTURE_TARGET_ERROR,
// "Could not statically determine the target of a texture". See surface_light_indirect.fs
METAL_STATIC_TEXTURE_TARGET_ERROR,
// Adreno drivers sometimes aren't able to blit into a layer of a texture array.
DISABLE_BLIT_INTO_TEXTURE_ARRAY,
// Multiple workarounds needed for PowerVR GPUs
POWER_VR_SHADER_WORKAROUNDS,
// The driver has some threads pinned, and we can't easily know on which core, it can hurt
// performance more if we end-up pinned on the same one.
DISABLE_THREAD_AFFINITY
};
//! The type of technique for stereoscopic rendering
enum class StereoscopicType : uint8_t {
// Stereoscopic rendering is performed using instanced rendering technique.
INSTANCED,
// Stereoscopic rendering is performed using the multiview feature from the graphics backend.
MULTIVIEW,
};
using StereoscopicType = backend::Platform::StereoscopicType;
} // namespace filament::backend
@@ -1256,6 +1321,8 @@ template<> struct utils::EnableBitMaskOperators<filament::backend::ShaderStageFl
: public std::true_type {};
template<> struct utils::EnableBitMaskOperators<filament::backend::TargetBufferFlags>
: public std::true_type {};
template<> struct utils::EnableBitMaskOperators<filament::backend::DescriptorFlags>
: public std::true_type {};
template<> struct utils::EnableBitMaskOperators<filament::backend::TextureUsage>
: public std::true_type {};
template<> struct utils::EnableBitMaskOperators<filament::backend::StencilFace>
@@ -1288,12 +1355,16 @@ utils::io::ostream& operator<<(utils::io::ostream& out, filament::backend::Textu
utils::io::ostream& operator<<(utils::io::ostream& out, filament::backend::TextureUsage usage);
utils::io::ostream& operator<<(utils::io::ostream& out, filament::backend::BufferObjectBinding binding);
utils::io::ostream& operator<<(utils::io::ostream& out, filament::backend::TextureSwizzle swizzle);
utils::io::ostream& operator<<(utils::io::ostream& out, filament::backend::ShaderStage shaderStage);
utils::io::ostream& operator<<(utils::io::ostream& out, filament::backend::ShaderStageFlags stageFlags);
utils::io::ostream& operator<<(utils::io::ostream& out, filament::backend::CompilerPriorityQueue compilerPriorityQueue);
utils::io::ostream& operator<<(utils::io::ostream& out, filament::backend::PushConstantVariant pushConstantVariant);
utils::io::ostream& operator<<(utils::io::ostream& out, const filament::backend::AttributeArray& type);
utils::io::ostream& operator<<(utils::io::ostream& out, const filament::backend::DescriptorSetLayout& dsl);
utils::io::ostream& operator<<(utils::io::ostream& out, const filament::backend::PolygonOffset& po);
utils::io::ostream& operator<<(utils::io::ostream& out, const filament::backend::RasterState& rs);
utils::io::ostream& operator<<(utils::io::ostream& out, const filament::backend::RenderPassParams& b);
utils::io::ostream& operator<<(utils::io::ostream& out, const filament::backend::Viewport& v);
utils::io::ostream& operator<<(utils::io::ostream& out, filament::backend::ShaderStageFlags stageFlags);
#endif
#endif // TNT_FILAMENT_BACKEND_DRIVERENUMS_H

View File

@@ -23,6 +23,7 @@
#include <utils/debug.h>
#include <type_traits> // FIXME: STL headers are not allowed in public headers
#include <utility>
#include <stdint.h>
@@ -34,13 +35,14 @@ struct HwIndexBuffer;
struct HwProgram;
struct HwRenderPrimitive;
struct HwRenderTarget;
struct HwSamplerGroup;
struct HwStream;
struct HwSwapChain;
struct HwTexture;
struct HwTimerQuery;
struct HwVertexBufferInfo;
struct HwVertexBuffer;
struct HwDescriptorSetLayout;
struct HwDescriptorSet;
/*
* A handle to a backend resource. HandleBase is for internal use only.
@@ -75,6 +77,19 @@ protected:
HandleBase(HandleBase const& rhs) noexcept = default;
HandleBase& operator=(HandleBase const& rhs) noexcept = default;
HandleBase(HandleBase&& rhs) noexcept
: object(rhs.object) {
rhs.object = nullid;
}
HandleBase& operator=(HandleBase&& rhs) noexcept {
if (this != &rhs) {
object = rhs.object;
rhs.object = nullid;
}
return *this;
}
private:
HandleId object;
};
@@ -89,8 +104,20 @@ struct Handle : public HandleBase {
Handle() noexcept = default;
Handle(Handle const& rhs) noexcept = default;
Handle(Handle&& rhs) noexcept = default;
Handle& operator=(Handle const& rhs) noexcept = default;
// Explicitly redefine the copy/move assignment operators rather than just defaulting them here,
// because some compilers (e.g. NDK 25.1.8937393 and below, see b/371980551) do not call the
// parent's assignment operator automatically during a std::move
// (https://en.cppreference.com/w/cpp/algorithm/move).
Handle& operator=(Handle const& rhs) noexcept {
HandleBase::operator=(rhs);
return *this;
}
Handle& operator=(Handle&& rhs) noexcept {
HandleBase::operator=(std::move(rhs));
return *this;
}
explicit Handle(HandleId id) noexcept : HandleBase(id) { }
@@ -103,7 +130,7 @@ struct Handle : public HandleBase {
bool operator>=(const Handle& rhs) const noexcept { return getId() >= rhs.getId(); }
// type-safe Handle cast
template<typename B, typename = std::enable_if_t<std::is_base_of<T, B>::value> >
template<typename B, typename = std::enable_if_t<std::is_base_of_v<T, B>> >
Handle(Handle<B> const& base) noexcept : HandleBase(base) { } // NOLINT(hicpp-explicit-conversions,google-explicit-constructor)
private:
@@ -115,19 +142,20 @@ private:
// Types used by the command stream
// (we use this renaming because the macro-system doesn't deal well with "<" and ">")
using BufferObjectHandle = Handle<HwBufferObject>;
using FenceHandle = Handle<HwFence>;
using IndexBufferHandle = Handle<HwIndexBuffer>;
using ProgramHandle = Handle<HwProgram>;
using RenderPrimitiveHandle = Handle<HwRenderPrimitive>;
using RenderTargetHandle = Handle<HwRenderTarget>;
using SamplerGroupHandle = Handle<HwSamplerGroup>;
using StreamHandle = Handle<HwStream>;
using SwapChainHandle = Handle<HwSwapChain>;
using TextureHandle = Handle<HwTexture>;
using TimerQueryHandle = Handle<HwTimerQuery>;
using VertexBufferHandle = Handle<HwVertexBuffer>;
using VertexBufferInfoHandle = Handle<HwVertexBufferInfo>;
using BufferObjectHandle = Handle<HwBufferObject>;
using FenceHandle = Handle<HwFence>;
using IndexBufferHandle = Handle<HwIndexBuffer>;
using ProgramHandle = Handle<HwProgram>;
using RenderPrimitiveHandle = Handle<HwRenderPrimitive>;
using RenderTargetHandle = Handle<HwRenderTarget>;
using StreamHandle = Handle<HwStream>;
using SwapChainHandle = Handle<HwSwapChain>;
using TextureHandle = Handle<HwTexture>;
using TimerQueryHandle = Handle<HwTimerQuery>;
using VertexBufferHandle = Handle<HwVertexBuffer>;
using VertexBufferInfoHandle = Handle<HwVertexBufferInfo>;
using DescriptorSetLayoutHandle = Handle<HwDescriptorSetLayout>;
using DescriptorSetHandle = Handle<HwDescriptorSet>;
} // namespace filament::backend

View File

@@ -22,15 +22,23 @@
#include <utils/ostream.h>
#include <array>
#include <stdint.h>
namespace filament::backend {
//! \privatesection
struct PipelineLayout {
using SetLayout = std::array<Handle<HwDescriptorSetLayout>, MAX_DESCRIPTOR_SET_COUNT>;
SetLayout setLayout; // 16
};
struct PipelineState {
Handle<HwProgram> program; // 4
Handle<HwVertexBufferInfo> vertexBufferInfo; // 4
PipelineLayout pipelineLayout; // 16
RasterState rasterState; // 4
StencilState stencilState; // 12
PolygonOffset polygonOffset; // 8

View File

@@ -25,6 +25,8 @@
#include <stddef.h>
#include <stdint.h>
#include <atomic>
namespace filament::backend {
class Driver;
@@ -41,6 +43,70 @@ public:
struct Fence {};
struct Stream {};
class ExternalImageHandle;
class ExternalImage {
friend class ExternalImageHandle;
std::atomic_uint32_t mRefCount{0};
protected:
virtual ~ExternalImage() noexcept;
};
class ExternalImageHandle {
ExternalImage* UTILS_NULLABLE mTarget = nullptr;
static void incref(ExternalImage* UTILS_NULLABLE p) noexcept;
static void decref(ExternalImage* UTILS_NULLABLE p) noexcept;
public:
ExternalImageHandle() noexcept;
~ExternalImageHandle() noexcept;
explicit ExternalImageHandle(ExternalImage* UTILS_NULLABLE p) noexcept;
ExternalImageHandle(ExternalImageHandle const& rhs) noexcept;
ExternalImageHandle(ExternalImageHandle&& rhs) noexcept;
ExternalImageHandle& operator=(ExternalImageHandle const& rhs) noexcept;
ExternalImageHandle& operator=(ExternalImageHandle&& rhs) noexcept;
explicit operator bool() const noexcept { return mTarget != nullptr; }
ExternalImage* UTILS_NULLABLE get() noexcept { return mTarget; }
ExternalImage const* UTILS_NULLABLE get() const noexcept { return mTarget; }
ExternalImage* UTILS_NULLABLE operator->() noexcept { return mTarget; }
ExternalImage const* UTILS_NULLABLE operator->() const noexcept { return mTarget; }
ExternalImage& operator*() noexcept { return *mTarget; }
ExternalImage const& operator*() const noexcept { return *mTarget; }
void clear() noexcept;
void reset(ExternalImage* UTILS_NULLABLE p) noexcept;
private:
friend utils::io::ostream& operator<<(utils::io::ostream& out,
ExternalImageHandle const& handle);
};
using ExternalImageHandleRef = ExternalImageHandle const&;
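A minimal sketch of the intended ownership model, assuming <backend/Platform.h> as the include path; PlatformImage below is a hypothetical ExternalImage subclass, and the handle's ref-count keeps it alive until the last handle is destroyed, cleared, or reset:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#include <backend/Platform.h>

// Sketch only: PlatformImage is a hypothetical concrete external image type.
class PlatformImage : public filament::backend::Platform::ExternalImage {
protected:
    ~PlatformImage() noexcept override = default;
};

filament::backend::Platform::ExternalImageHandle wrapImage() {
    // The handle takes a reference; the image is released when the last handle
    // referencing it goes away.
    return filament::backend::Platform::ExternalImageHandle{ new PlatformImage{} };
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~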
/**
* The type of technique for stereoscopic rendering. (Note that the materials used will need to
* be compatible with the chosen technique.)
*/
enum class StereoscopicType : uint8_t {
/**
* No stereoscopic rendering
*/
NONE,
/**
* Stereoscopic rendering is performed using instanced rendering technique.
*/
INSTANCED,
/**
* Stereoscopic rendering is performed using the multiview feature from the graphics
* backend.
*/
MULTIVIEW,
};
struct DriverConfig {
/**
* Size of handle arena in bytes. Setting to 0 indicates default value is to be used.
@@ -48,12 +114,7 @@ public:
*/
size_t handleArenaSize = 0;
/**
* This number of most-recently destroyed textures will be tracked for use-after-free.
* Throws an exception when a texture is freed but still bound to a SamplerGroup and used in
* a draw call. 0 disables completely. Currently only respected by the Metal backend.
*/
size_t textureUseAfterFreePoolSize = 0;
size_t metalUploadBufferSizeBytes = 512 * 1024;
/**
* Set to `true` to forcibly disable parallel shader compilation in the backend.
@@ -65,6 +126,29 @@ public:
* Disable backend handles use-after-free checks.
*/
bool disableHandleUseAfterFreeCheck = false;
/**
* Disable backend handle tags for heap-allocated (fallback) handles
*/
bool disableHeapHandleTags = false;
/**
* Force GLES2 context if supported, or pretend the context is ES2. Only meaningful on
* GLES 3.x backends.
*/
bool forceGLES2Context = false;
/**
* Sets the technique for stereoscopic rendering.
*/
StereoscopicType stereoscopicType = StereoscopicType::NONE;
/**
* Assert the native window associated to a SwapChain is valid when calling makeCurrent().
* This is only supported for:
* - PlatformEGLAndroid
*/
bool assertNativeWindowIsValid = false;
};
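For illustration, a sketch of populating the new configuration fields (makeDriverConfig is a hypothetical helper and the values are arbitrary), assuming <backend/Platform.h> as the include path:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#include <backend/Platform.h>

filament::backend::Platform::DriverConfig makeDriverConfig() {
    filament::backend::Platform::DriverConfig config{};
    config.stereoscopicType = filament::backend::Platform::StereoscopicType::INSTANCED;
    config.disableHeapHandleTags = true;       // skip tagging heap-allocated fallback handles
    config.assertNativeWindowIsValid = true;   // only honored by PlatformEGLAndroid
    return config;
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~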
Platform() noexcept;
@@ -90,7 +174,7 @@ public:
*
* @return nullptr on failure, or a pointer to the newly created driver.
*/
virtual backend::Driver* UTILS_NULLABLE createDriver(void* UTILS_NULLABLE sharedContext,
virtual Driver* UTILS_NULLABLE createDriver(void* UTILS_NULLABLE sharedContext,
const DriverConfig& driverConfig) noexcept = 0;
/**

View File

@@ -48,7 +48,7 @@ namespace filament::backend {
* and optional user data:
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* swapChain->setFrameScheduledCallback(myFrameScheduledCallback, nullptr);
* swapChain->setFrameScheduledCallback(nullptr, myFrameScheduledCallback);
* if (renderer->beginFrame(swapChain)) {
* renderer->render(view);
* renderer->endFrame();
@@ -58,8 +58,6 @@ namespace filament::backend {
* @remark Only Filament's Metal backend supports PresentCallables and frame callbacks. Other
* backends ignore the callback (which will never be called) and proceed normally.
*
* @remark The SwapChain::FrameScheduledCallback is called on an arbitrary thread.
*
* Applications *must* call each PresentCallable they receive. Each PresentCallable represents a
* frame that is waiting to be presented. If an application fails to call a PresentCallable, a
* memory leak could occur. To "cancel" the presentation of a frame, pass false to the
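A minimal sketch of a callback satisfying this contract, assuming (as the documentation above describes) that the PresentCallable's call operator takes a bool deciding whether the frame is presented:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Sketch: matches utils::Invocable<void(backend::PresentCallable)>.
auto myFrameScheduledCallback = [](filament::backend::PresentCallable presentFrame) {
    // Every PresentCallable must be invoked exactly once; pass false to cancel
    // presentation of that frame instead of presenting it.
    presentFrame(true);
};
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~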

View File

@@ -24,9 +24,11 @@
#include <backend/DriverEnums.h>
#include <array> // FIXME: STL headers are not allowed in public headers
#include <utility> // FIXME: STL headers are not allowed in public headers
#include <variant> // FIXME: STL headers are not allowed in public headers
#include <array>
#include <unordered_map>
#include <tuple>
#include <utility>
#include <variant>
#include <stddef.h>
#include <stdint.h>
@@ -40,29 +42,36 @@ public:
static constexpr size_t UNIFORM_BINDING_COUNT = CONFIG_UNIFORM_BINDING_COUNT;
static constexpr size_t SAMPLER_BINDING_COUNT = CONFIG_SAMPLER_BINDING_COUNT;
struct Sampler {
utils::CString name = {}; // name of the sampler in the shader
uint32_t binding = 0; // binding point of the sampler in the shader
struct Descriptor {
utils::CString name;
backend::DescriptorType type;
backend::descriptor_binding_t binding;
};
struct SamplerGroupData {
utils::FixedCapacityVector<Sampler> samplers;
ShaderStageFlags stageFlags = ShaderStageFlags::ALL_SHADER_STAGE_FLAGS;
struct SpecializationConstant {
using Type = std::variant<int32_t, float, bool>;
uint32_t id; // id set in glsl
Type value; // value and type
};
struct Uniform {
struct Uniform { // For ES2 support
utils::CString name; // full qualified name of the uniform field
uint16_t offset; // offset in 'uint32_t' into the uniform buffer
uint8_t size; // >1 for arrays
UniformType type; // uniform type
};
using UniformBlockInfo = std::array<utils::CString, UNIFORM_BINDING_COUNT>;
using UniformInfo = utils::FixedCapacityVector<Uniform>;
using SamplerGroupInfo = std::array<SamplerGroupData, SAMPLER_BINDING_COUNT>;
using DescriptorBindingsInfo = utils::FixedCapacityVector<Descriptor>;
using DescriptorSetInfo = std::array<DescriptorBindingsInfo, MAX_DESCRIPTOR_SET_COUNT>;
using SpecializationConstantsInfo = utils::FixedCapacityVector<SpecializationConstant>;
using ShaderBlob = utils::FixedCapacityVector<uint8_t>;
using ShaderSource = std::array<ShaderBlob, SHADER_TYPE_COUNT>;
using AttributesInfo = utils::FixedCapacityVector<std::pair<utils::CString, uint8_t>>;
using UniformInfo = utils::FixedCapacityVector<Uniform>;
using BindingUniformsInfo = utils::FixedCapacityVector<
std::tuple<uint8_t, utils::CString, Program::UniformInfo>>;
Program() noexcept;
Program(const Program& rhs) = delete;
@@ -79,69 +88,47 @@ public:
Program& diagnostics(utils::CString const& name,
utils::Invocable<utils::io::ostream&(utils::io::ostream& out)>&& logger);
// sets one of the program's shader (e.g. vertex, fragment)
// Sets one of the program's shader (e.g. vertex, fragment)
// string-based shaders are null terminated, consequently the size parameter must include the
// null terminating character.
Program& shader(ShaderStage shader, void const* data, size_t size);
// Note: This is only needed for GLES3.0 backends, because the layout(binding=) syntax is
// not permitted in glsl. The backend needs a way to associate a uniform block
// to a binding point.
Program& uniformBlockBindings(
utils::FixedCapacityVector<std::pair<utils::CString, uint8_t>> const& uniformBlockBindings) noexcept;
// Sets the language of the shader sources provided with shader() (defaults to ESSL3)
Program& shaderLanguage(ShaderLanguage shaderLanguage);
// Note: This is only needed for GLES2.0, this is used to emulate UBO. This function tells
// the program everything it needs to know about the uniforms at a given binding
Program& uniforms(uint32_t index, UniformInfo const& uniforms) noexcept;
// Descriptor binding (set, binding, type -> shader name) info
Program& descriptorBindings(backend::descriptor_set_t set,
DescriptorBindingsInfo descriptorBindings) noexcept;
// Note: This is only needed for GLES2.0.
Program& attributes(
utils::FixedCapacityVector<std::pair<utils::CString, uint8_t>> attributes) noexcept;
Program& specializationConstants(SpecializationConstantsInfo specConstants) noexcept;
// sets the 'bindingPoint' sampler group descriptor for this program.
// 'samplers' can be destroyed after this call.
// This effectively associates a set of (BindingPoints, index) to a texture unit in the shader.
// Or more precisely, what layout(binding=) is set to in GLSL.
Program& setSamplerGroup(size_t bindingPoint, ShaderStageFlags stageFlags,
Sampler const* samplers, size_t count) noexcept;
struct SpecializationConstant {
using Type = std::variant<int32_t, float, bool>;
uint32_t id; // id set in glsl
Type value; // value and type
struct PushConstant {
utils::CString name;
ConstantType type;
};
Program& specializationConstants(
utils::FixedCapacityVector<SpecializationConstant> specConstants) noexcept;
Program& pushConstants(ShaderStage stage,
utils::FixedCapacityVector<PushConstant> constants) noexcept;
Program& cacheId(uint64_t cacheId) noexcept;
Program& multiview(bool multiview) noexcept;
// For ES2 support only...
Program& uniforms(uint32_t index, utils::CString name, UniformInfo uniforms) noexcept;
Program& attributes(AttributesInfo attributes) noexcept;
//
// Getters for program construction...
//
ShaderSource const& getShadersSource() const noexcept { return mShadersSource; }
ShaderSource& getShadersSource() noexcept { return mShadersSource; }
UniformBlockInfo const& getUniformBlockBindings() const noexcept { return mUniformBlocks; }
UniformBlockInfo& getUniformBlockBindings() noexcept { return mUniformBlocks; }
SamplerGroupInfo const& getSamplerGroupInfo() const { return mSamplerGroups; }
SamplerGroupInfo& getSamplerGroupInfo() { return mSamplerGroups; }
auto const& getBindingUniformInfo() const { return mBindingUniformInfo; }
auto& getBindingUniformInfo() { return mBindingUniformInfo; }
auto const& getAttributes() const { return mAttributes; }
auto& getAttributes() { return mAttributes; }
utils::CString const& getName() const noexcept { return mName; }
utils::CString& getName() noexcept { return mName; }
utils::FixedCapacityVector<SpecializationConstant> const& getSpecializationConstants() const noexcept {
return mSpecializationConstants;
}
utils::FixedCapacityVector<SpecializationConstant>& getSpecializationConstants() noexcept {
return mSpecializationConstants;
}
auto const& getShaderLanguage() const { return mShaderLanguage; }
uint64_t getCacheId() const noexcept { return mCacheId; }
@@ -149,19 +136,50 @@ public:
CompilerPriorityQueue getPriorityQueue() const noexcept { return mPriorityQueue; }
SpecializationConstantsInfo const& getSpecializationConstants() const noexcept {
return mSpecializationConstants;
}
SpecializationConstantsInfo& getSpecializationConstants() noexcept {
return mSpecializationConstants;
}
DescriptorSetInfo& getDescriptorBindings() noexcept {
return mDescriptorBindings;
}
utils::FixedCapacityVector<PushConstant> const& getPushConstants(
ShaderStage stage) const noexcept {
return mPushConstants[static_cast<uint8_t>(stage)];
}
utils::FixedCapacityVector<PushConstant>& getPushConstants(ShaderStage stage) noexcept {
return mPushConstants[static_cast<uint8_t>(stage)];
}
auto const& getBindingUniformInfo() const { return mBindingUniformsInfo; }
auto& getBindingUniformInfo() { return mBindingUniformsInfo; }
auto const& getAttributes() const { return mAttributes; }
auto& getAttributes() { return mAttributes; }
private:
friend utils::io::ostream& operator<<(utils::io::ostream& out, const Program& builder);
UniformBlockInfo mUniformBlocks = {};
SamplerGroupInfo mSamplerGroups = {};
ShaderSource mShadersSource;
ShaderLanguage mShaderLanguage = ShaderLanguage::ESSL3;
utils::CString mName;
uint64_t mCacheId{};
utils::Invocable<utils::io::ostream&(utils::io::ostream& out)> mLogger;
utils::FixedCapacityVector<SpecializationConstant> mSpecializationConstants;
utils::FixedCapacityVector<std::pair<utils::CString, uint8_t>> mAttributes;
std::array<UniformInfo, Program::UNIFORM_BINDING_COUNT> mBindingUniformInfo;
CompilerPriorityQueue mPriorityQueue = CompilerPriorityQueue::HIGH;
utils::Invocable<utils::io::ostream&(utils::io::ostream& out)> mLogger;
SpecializationConstantsInfo mSpecializationConstants;
std::array<utils::FixedCapacityVector<PushConstant>, SHADER_TYPE_COUNT> mPushConstants;
DescriptorSetInfo mDescriptorBindings;
// For ES2 support only
AttributesInfo mAttributes;
BindingUniformsInfo mBindingUniformsInfo;
// Indicates the current engine was initialized with multiview stereo, and the variant for this
// program contains STE flag. This will be referred later for the OpenGL shader compiler to
// determine whether shader code replacement for the num_views should be performed.

View File

@@ -29,17 +29,33 @@ namespace filament::backend {
//! \privatesection
struct TargetBufferInfo {
// note: the parameters of this constructor are not in the order of this structure's fields
TargetBufferInfo(Handle<HwTexture> handle, uint8_t level, uint16_t layer) noexcept
: handle(handle), level(level), layer(layer) {
}
TargetBufferInfo(Handle<HwTexture> handle, uint8_t level) noexcept
: handle(handle), level(level) {
}
TargetBufferInfo(Handle<HwTexture> handle) noexcept // NOLINT(*-explicit-constructor)
: handle(handle) {
}
TargetBufferInfo() noexcept = default;
// texture to be used as render target
Handle<HwTexture> handle;
// starting layer index for multiview. This value is only used when the `layerCount` for the
// render target is greater than 1.
uint8_t baseViewIndex = 0;
// level to be used
uint8_t level = 0;
// for cubemaps and 3D textures. See TextureCubemapFace for the face->layer mapping
// - For cubemap textures, this indicates the face of the cubemap. See TextureCubemapFace for
//   the face->layer mapping.
// - For 2d array, cubemap array, and 3d textures, this indicates the index of a single layer.
// - For multiview textures (i.e., layerCount for the RenderTarget is greater than 1), this
// indicates a starting layer index of the current 2d array texture for multiview.
uint16_t layer = 0;
};
@@ -64,7 +80,7 @@ public:
MRT() noexcept = default;
MRT(TargetBufferInfo const& color) noexcept // NOLINT(hicpp-explicit-conversions)
MRT(TargetBufferInfo const& color) noexcept // NOLINT(hicpp-explicit-conversions, *-explicit-constructor)
: mInfos{ color } {
}
@@ -84,7 +100,7 @@ public:
// this is here for backward compatibility
MRT(Handle<HwTexture> handle, uint8_t level, uint16_t layer) noexcept
: mInfos{{ handle, 0, level, layer }} {
: mInfos{{ handle, level, layer }} {
}
};
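A short sketch of the updated constructors (makeColorAttachment is a hypothetical helper and the texture handle is a placeholder created elsewhere):
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
using namespace filament::backend;

// Sketch only: 'color' is a placeholder texture handle.
MRT makeColorAttachment(Handle<HwTexture> color) {
    // Attach mip level 1, layer 0 of 'color' as the single color attachment.
    return MRT{ TargetBufferInfo{ color, /*level=*/1 } };
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~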

View File

@@ -23,9 +23,11 @@
#include <utils/compiler.h>
#include <utils/Invocable.h>
#include <utils/CString.h>
#include <stddef.h>
#include <stdint.h>
#include <math/mat3.h>
namespace filament::backend {
@@ -51,12 +53,23 @@ protected:
~OpenGLPlatform() noexcept override;
public:
struct ExternalTexture {
unsigned int target; // GLenum target
unsigned int id; // GLuint id
unsigned int target; // GLenum target
unsigned int id; // GLuint id
};
/**
* Return the OpenGL vendor string of the specified Driver instance.
* @return The GL_VENDOR string
*/
static utils::CString getVendorString(Driver const* UTILS_NONNULL driver);
/**
* Return the OpenGL renderer string of the specified Driver instance.
* @return The GL_RENDERER string
*/
static utils::CString getRendererString(Driver const* UTILS_NONNULL driver);
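For instance, a hypothetical helper that feeds both strings into a log line or a driver-workaround table ('driver' stands for whatever Driver instance the platform created):
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Sketch only: 'driver' points to an existing Driver instance.
void logGlIdentity(filament::backend::Driver const* driver) {
    utils::CString const vendor   = filament::backend::OpenGLPlatform::getVendorString(driver);
    utils::CString const renderer = filament::backend::OpenGLPlatform::getRendererString(driver);
    // e.g. key driver-specific workarounds off the (vendor, renderer) pair.
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~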
/**
* Called by the driver to destroy the OpenGL context. This should clean up any windows
* or buffers from initialization. This is for instance where `eglDestroyContext` would be
@@ -140,6 +153,23 @@ public:
*/
virtual uint32_t getDefaultFramebufferObject() noexcept;
/**
* Called by the backend when a frame starts.
* @param monotonic_clock_ns vsync time point on the monotonic clock
* @param refreshIntervalNs refresh interval in nanoseconds
* @param frameId a frame id
*/
virtual void beginFrame(
int64_t monotonic_clock_ns,
int64_t refreshIntervalNs,
uint32_t frameId) noexcept;
/**
* Called by the backend when a frame ends.
* @param frameId the frame id used in beginFrame
*/
virtual void endFrame(
uint32_t frameId) noexcept;
/**
* Type of contexts available
@@ -191,6 +221,12 @@ public:
utils::Invocable<void()> preContextChange,
utils::Invocable<void(size_t index)> postContextChange) noexcept;
/**
* Called by the backend just before calling commit()
* @see commit()
*/
virtual void preCommit() noexcept;
/**
* Called by the driver once the current frame finishes drawing. Typically, this should present
* the drawSwapChain. This is for example where `eglMakeCurrent()` would be called.
@@ -284,6 +320,13 @@ public:
virtual void updateTexImage(Stream* UTILS_NONNULL stream,
int64_t* UTILS_NONNULL timestamp) noexcept;
/**
* Returns the transform matrix of the texture attached to the stream.
* @param stream Stream to get the transform matrix from
* @return Transform matrix of the image bound to the texture. Returns identity if not supported.
*/
virtual math::mat3f getTransformMatrix(Stream* UTILS_NONNULL stream) noexcept;
// --------------------------------------------------------------------------------------------
// External Image support
@@ -301,36 +344,45 @@ public:
* Destroys an external texture handle and associated data.
* @param texture a pointer to the handle to destroy.
*/
virtual void destroyExternalImage(ExternalTexture* UTILS_NONNULL texture) noexcept;
virtual void destroyExternalImageTexture(ExternalTexture* UTILS_NONNULL texture) noexcept;
// called on the application thread to allow Filament to take ownership of the image
/**
* Takes ownership of the externalImage. The externalImage parameter depends on the Platform's
* concrete implementation. Ownership is released when destroyExternalImage() is called.
* concrete implementation. Ownership is released when destroyExternalImageTexture() is called.
*
* WARNING: This is called synchronously from the application thread (NOT the Driver thread)
*
* @param externalImage A token representing the platform's external image.
* @see destroyExternalImageTexture
* @{
*/
virtual void retainExternalImage(void* UTILS_NONNULL externalImage) noexcept;
virtual void retainExternalImage(ExternalImageHandleRef externalImage) noexcept;
/** @}*/
/**
* Called to bind the platform-specific externalImage to an ExternalTexture.
* ExternalTexture::id is guaranteed to be bound when this method is called and ExternalTexture
* is updated with new values for id/target if necessary.
*
* WARNING: this method is not allowed to change the bound texture, or must restore the previous
* binding upon return. This is to avoid problem with a backend doing state caching.
* binding upon return. This is to avoid a problem with a backend doing state caching.
*
* @param externalImage The platform-specific external image.
* @param texture an in/out pointer to ExternalTexture, id and target can be updated if necessary.
* @return true on success, false on error.
* @{
*/
virtual bool setExternalImage(void* UTILS_NONNULL externalImage,
ExternalTexture* UTILS_NONNULL texture) noexcept;
virtual bool setExternalImage(ExternalImageHandleRef externalImage,
ExternalTexture* UTILS_NONNULL texture) noexcept;
/** @}*/
/**
* The method allows platforms to convert a user-supplied external image object into a new type
* (e.g. HardwareBuffer => EGLImage). The default implementation returns source.

View File

@@ -17,7 +17,6 @@
#ifndef TNT_FILAMENT_BACKEND_OPENGL_OPENGL_PLATFORM_COCOA_GL_H
#define TNT_FILAMENT_BACKEND_OPENGL_OPENGL_PLATFORM_COCOA_GL_H
#include <backend/DriverEnums.h>
#include <backend/platforms/OpenGLPlatform.h>
#include <stdint.h>
@@ -34,12 +33,14 @@ public:
PlatformCocoaGL();
~PlatformCocoaGL() noexcept override;
ExternalImageHandle createExternalImage(void* cvPixelBuffer) noexcept;
protected:
// --------------------------------------------------------------------------------------------
// Platform Interface
Driver* createDriver(void* sharedContext,
const Platform::DriverConfig& driverConfig) noexcept override;
const DriverConfig& driverConfig) noexcept override;
// Currently returns 0
int getOSVersion() const noexcept override;
@@ -59,10 +60,12 @@ protected:
void destroySwapChain(SwapChain* swapChain) noexcept override;
bool makeCurrent(ContextType type, SwapChain* drawSwapChain, SwapChain* readSwapChain) noexcept override;
void commit(SwapChain* swapChain) noexcept override;
OpenGLPlatform::ExternalTexture* createExternalImageTexture() noexcept override;
void destroyExternalImage(ExternalTexture* texture) noexcept override;
ExternalTexture* createExternalImageTexture() noexcept override;
void destroyExternalImageTexture(ExternalTexture* texture) noexcept override;
void retainExternalImage(void* externalImage) noexcept override;
bool setExternalImage(void* externalImage, ExternalTexture* texture) noexcept override;
void retainExternalImage(ExternalImageHandleRef externalImage) noexcept override;
bool setExternalImage(ExternalImageHandleRef externalImage, ExternalTexture* texture) noexcept override;
private:
PlatformCocoaGLImpl* pImpl = nullptr;

View File

@@ -32,11 +32,13 @@ public:
PlatformCocoaTouchGL();
~PlatformCocoaTouchGL() noexcept override;
ExternalImageHandle createExternalImage(void* cvPixelBuffer) noexcept;
// --------------------------------------------------------------------------------------------
// Platform Interface
Driver* createDriver(void* sharedGLContext,
const Platform::DriverConfig& driverConfig) noexcept override;
const DriverConfig& driverConfig) noexcept override;
int getOSVersion() const noexcept final { return 0; }
@@ -56,10 +58,12 @@ public:
bool makeCurrent(ContextType type, SwapChain* drawSwapChain, SwapChain* readSwapChain) noexcept override;
void commit(SwapChain* swapChain) noexcept override;
OpenGLPlatform::ExternalTexture* createExternalImageTexture() noexcept override;
void destroyExternalImage(ExternalTexture* texture) noexcept override;
ExternalTexture* createExternalImageTexture() noexcept override;
void destroyExternalImageTexture(ExternalTexture* texture) noexcept override;
void retainExternalImage(void* externalImage) noexcept override;
bool setExternalImage(void* externalImage, ExternalTexture* texture) noexcept override;
void retainExternalImage(ExternalImageHandleRef externalImage) noexcept override;
bool setExternalImage(ExternalImageHandleRef externalImage, ExternalTexture* texture) noexcept override;
private:
PlatformCocoaTouchGLImpl* pImpl = nullptr;

View File

@@ -47,6 +47,11 @@ public:
// Return true if we're on an OpenGL platform (as opposed to OpenGL ES). false by default.
virtual bool isOpenGL() const noexcept;
/**
* Creates an ExternalImage from a EGLImageKHR
*/
ExternalImageHandle createExternalImage(EGLImageKHR eglImage) noexcept;
protected:
// --------------------------------------------------------------------------------------------
// Helper for EGL configs and attributes parameters
@@ -75,8 +80,7 @@ protected:
* Initializes EGL, creates the OpenGL context and returns a concrete Driver implementation
* that supports OpenGL/OpenGL ES.
*/
Driver* createDriver(void* sharedContext,
const Platform::DriverConfig& driverConfig) noexcept override;
Driver* createDriver(void* sharedContext, const DriverConfig& driverConfig) noexcept override;
/**
* This returns zero. This method can be overridden to return something more useful.
@@ -118,9 +122,10 @@ protected:
void destroyFence(Fence* fence) noexcept override;
FenceStatus waitFence(Fence* fence, uint64_t timeout) noexcept override;
OpenGLPlatform::ExternalTexture* createExternalImageTexture() noexcept override;
void destroyExternalImage(ExternalTexture* texture) noexcept override;
ExternalTexture* createExternalImageTexture() noexcept override;
void destroyExternalImageTexture(ExternalTexture* texture) noexcept override;
bool setExternalImage(void* externalImage, ExternalTexture* texture) noexcept override;
bool setExternalImage(ExternalImageHandleRef externalImage, ExternalTexture* texture) noexcept override;
/**
* Logs glGetError() to slog.e
@@ -188,6 +193,12 @@ protected:
void initializeGlExtensions() noexcept;
struct ExternalImageEGL : public ExternalImage {
EGLImageKHR eglImage = EGL_NO_IMAGE;
protected:
~ExternalImageEGL() override;
};
protected:
EGLConfig findSwapChainConfig(uint64_t flags, bool window, bool pbuffer) const;

View File

@@ -22,6 +22,10 @@
#include <backend/platforms/OpenGLPlatform.h>
#include <backend/platforms/PlatformEGL.h>
#include <utils/android/PerformanceHintManager.h>
#include <chrono>
#include <stddef.h>
#include <stdint.h>
@@ -53,11 +57,23 @@ protected:
Driver* createDriver(void* sharedContext,
const Platform::DriverConfig& driverConfig) noexcept override;
/**
* Creates an ExternalImage from a EGLImageKHR
*/
ExternalImageHandle createExternalImage(AHardwareBuffer const *buffer, bool sRGB) noexcept;
// --------------------------------------------------------------------------------------------
// OpenGLPlatform Interface
void terminate() noexcept override;
void beginFrame(
int64_t monotonic_clock_ns,
int64_t refreshIntervalNs,
uint32_t frameId) noexcept override;
void preCommit() noexcept override;
/**
* Set the presentation time using `eglPresentationTimeANDROID`
* @param presentationTimeInNanosecond
@@ -70,6 +86,7 @@ protected:
void attach(Stream* stream, intptr_t tname) noexcept override;
void detach(Stream* stream) noexcept override;
void updateTexImage(Stream* stream, int64_t* timestamp) noexcept override;
math::mat3f getTransformMatrix(Stream* stream) noexcept override;
/**
* Converts a AHardwareBuffer to EGLImage
@@ -78,9 +95,37 @@ protected:
*/
AcquiredImage transformAcquiredImage(AcquiredImage source) noexcept override;
bool setExternalImage(ExternalImageHandleRef externalImage, ExternalTexture* texture) noexcept override;
struct ExternalImageEGLAndroid : public ExternalImageEGL {
AHardwareBuffer* aHardwareBuffer = nullptr;
bool sRGB = false;
protected:
~ExternalImageEGLAndroid() override;
};
protected:
bool makeCurrent(ContextType type,
SwapChain* drawSwapChain,
SwapChain* readSwapChain) noexcept override;
private:
struct InitializeJvmForPerformanceManagerIfNeeded {
InitializeJvmForPerformanceManagerIfNeeded();
};
int mOSVersion;
ExternalStreamManagerAndroid& mExternalStreamManager;
InitializeJvmForPerformanceManagerIfNeeded const mInitializeJvmForPerformanceManagerIfNeeded;
utils::PerformanceHintManager mPerformanceHintManager;
utils::PerformanceHintManager::Session mPerformanceHintSession;
using clock = std::chrono::high_resolution_clock;
clock::time_point mStartTimeOfActualWork;
void* mNativeWindowLib = nullptr;
int32_t (*ANativeWindow_getBuffersDefaultDataSpace)(ANativeWindow* window) = nullptr;
bool mAssertNativeWindowIsValid = false;
};
} // namespace filament::backend

View File

@@ -0,0 +1,38 @@
/*
* Copyright (C) 2025 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_FILAMENT_BACKEND_PRIVATE_PLATFORMMETAL_OBJC_H
#define TNT_FILAMENT_BACKEND_PRIVATE_PLATFORMMETAL_OBJC_H
#import <Metal/Metal.h>
namespace filament::backend {
struct MetalDevice {
id<MTLDevice> device;
};
struct MetalCommandQueue {
id<MTLCommandQueue> commandQueue;
};
struct MetalCommandBuffer {
id<MTLCommandBuffer> commandBuffer;
};
} // namespace filament::backend
#endif // TNT_FILAMENT_BACKEND_PRIVATE_PLATFORMMETAL_OBJC_H

View File

@@ -0,0 +1,92 @@
/*
* Copyright (C) 2021 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_FILAMENT_BACKEND_PRIVATE_PLATFORMMETAL_H
#define TNT_FILAMENT_BACKEND_PRIVATE_PLATFORMMETAL_H
#include <backend/DriverEnums.h>
#include <backend/Platform.h>
namespace filament::backend {
struct PlatformMetalImpl;
// In order for this header to be compatible with Objective-C and C++, we use these wrappers around
// id<MTL*> objects.
// See PlatformMetal-Objc.h.
struct MetalDevice;
struct MetalCommandQueue;
struct MetalCommandBuffer;
class PlatformMetal final : public Platform {
public:
PlatformMetal();
~PlatformMetal() noexcept override;
Driver* createDriver(void* sharedContext, const Platform::DriverConfig& driverConfig) noexcept override;
int getOSVersion() const noexcept override { return 0; }
/**
* Obtain the preferred Metal device object for the backend to use.
*
* On desktop platforms, there may be multiple GPUs suitable for rendering, and this method is
* free to decide which one to use. On mobile systems with a single GPU, implementations should
* simply return the result of MTLCreateSystemDefaultDevice();
*/
virtual void createDevice(MetalDevice& outDevice) noexcept;
/**
* Create a command submission queue on the Metal device object.
*
* @param device The device which was returned from createDevice()
*/
virtual void createCommandQueue(
MetalDevice& device, MetalCommandQueue& outCommandQueue) noexcept;
/**
* Obtain a MTLCommandBuffer enqueued on this Platform's MTLCommandQueue. The command buffer is
* guaranteed to execute before all subsequent command buffers created either by Filament, or
* further calls to this method.
*/
void createAndEnqueueCommandBuffer(MetalCommandBuffer& outCommandBuffer) noexcept;
/**
* The action to take if a Drawable cannot be acquired.
*
* Each frame rendered requires a CAMetalDrawable texture, which is presented on-screen at the
* completion of each frame. These are limited and provided round-robin style by the system.
*/
enum class DrawableFailureBehavior : uint8_t {
/**
* Terminates the application and reports an error message (default).
*/
PANIC,
/**
* Aborts execution of the current frame. The Metal backend will attempt to acquire a new
* drawable at the next frame.
*/
ABORT_FRAME
};
void setDrawableFailureBehavior(DrawableFailureBehavior behavior) noexcept;
DrawableFailureBehavior getDrawableFailureBehavior() const noexcept;
private:
PlatformMetalImpl* pImpl = nullptr;
};
} // namespace filament::backend
#endif // TNT_FILAMENT_BACKEND_PRIVATE_PLATFORMMETAL_H
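A small usage sketch: the client creates the platform, opts into the non-panicking drawable behavior, and hands the platform to the engine. createMetalPlatform is a hypothetical helper, the include path is assumed to follow the backend/platforms/ convention of the other platform headers in this change, and the Engine::Builder step appears only as a comment:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#include <backend/platforms/PlatformMetal.h>

filament::backend::PlatformMetal* createMetalPlatform() {
    auto* platform = new filament::backend::PlatformMetal();
    // Skip the frame instead of aborting when no CAMetalDrawable is available.
    platform->setDrawableFailureBehavior(
            filament::backend::PlatformMetal::DrawableFailureBehavior::ABORT_FRAME);
    return platform;   // e.g. pass to Engine::Builder().platform(platform)
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~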

View File

@@ -0,0 +1,64 @@
/*
* Copyright (C) 2024 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_FILAMENT_BACKEND_OPENGL_OPENGL_PLATFORM_OSMESA_H
#define TNT_FILAMENT_BACKEND_OPENGL_OPENGL_PLATFORM_OSMESA_H
#include <stdint.h>
#include "bluegl/BlueGL.h"
#include <osmesa.h>
#include <backend/platforms/OpenGLPlatform.h>
#include <backend/DriverEnums.h>
namespace filament::backend {
/**
* A concrete implementation of OpenGLPlatform that uses OSMesa, which is an offscreen
* context that can be used in conjunction with Mesa for software rasterization.
* See https://docs.mesa3d.org/osmesa.html for more information.
*/
class PlatformOSMesa : public OpenGLPlatform {
protected:
// --------------------------------------------------------------------------------------------
// Platform Interface
Driver* createDriver(void* sharedGLContext, const DriverConfig& driverConfig) noexcept override;
int getOSVersion() const noexcept final override { return 0; }
// --------------------------------------------------------------------------------------------
// OpenGLPlatform Interface
void terminate() noexcept override;
SwapChain* createSwapChain(void* nativewindow, uint64_t flags) noexcept override;
SwapChain* createSwapChain(uint32_t width, uint32_t height, uint64_t flags) noexcept override;
void destroySwapChain(SwapChain* swapChain) noexcept override;
bool makeCurrent(ContextType type, SwapChain* drawSwapChain,
SwapChain* readSwapChain) noexcept override;
void commit(SwapChain* swapChain) noexcept override;
private:
OSMesaContext mContext;
void* mOsMesaApi = nullptr;
};
} // namespace filament::backend
#endif // TNT_FILAMENT_BACKEND_OPENGL_OPENGL_PLATFORM_OSMESA_H

View File

@@ -23,9 +23,9 @@
#include <utils/CString.h>
#include <utils/FixedCapacityVector.h>
#include <utils/Hash.h>
#include <utils/PrivateImplementation.h>
#include <string_view>
#include <tuple>
#include <unordered_set>
@@ -47,6 +47,14 @@ struct VulkanPlatformPrivate;
class VulkanPlatform : public Platform, utils::PrivateImplementation<VulkanPlatformPrivate> {
public:
struct ExtensionHashFn {
std::size_t operator()(utils::CString const& s) const noexcept {
return std::hash<std::string>{}(s.data());
}
};
// Utility for managing device or instance extensions during initialization.
using ExtensionSet = std::unordered_set<utils::CString, ExtensionHashFn>;
/**
* A collection of handles to objects and metadata that comprises a Vulkan context. The client
can instantiate this struct and pass it to Engine::Builder::sharedContext if they wish to
@@ -63,6 +71,9 @@ public:
// where the gpu only has one graphics queue. Then the client needs to ensure that no
// concurrent access can occur.
uint32_t graphicsQueueIndex = 0xFFFFFFFF;
bool debugUtilsSupported = false;
bool debugMarkersSupported = false;
bool multiviewSupported = false;
};
/**
@@ -80,6 +91,22 @@ public:
VkFormat colorFormat = VK_FORMAT_UNDEFINED;
VkFormat depthFormat = VK_FORMAT_UNDEFINED;
VkExtent2D extent = {0, 0};
uint32_t layerCount = 1;
bool isProtected = false;
};
struct ImageSyncData {
static constexpr uint32_t INVALID_IMAGE_INDEX = UINT32_MAX;
// The index of the next image as returned by vkAcquireNextImage or equivalent.
uint32_t imageIndex = INVALID_IMAGE_INDEX;
// Semaphore to be signaled once the image is available.
VkSemaphore imageReadySemaphore = VK_NULL_HANDLE;
// A function called right before vkQueueSubmit. After this call, the image must be
// available. This pointer can be null if imageReadySemaphore is not VK_NULL_HANDLE.
std::function<void(SwapChainPtr handle)> explicitImageReadyWait = nullptr;
};
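A sketch of how a platform implementation might fill this struct on the semaphore-based path, assuming the caller has already acquired an image index (e.g. via vkAcquireNextImageKHR or an equivalent mechanism); makeImageSyncData is a hypothetical helper:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
using namespace filament::backend;

// Sketch only: packages an already-acquired image index and its semaphore.
VulkanPlatform::ImageSyncData makeImageSyncData(uint32_t imageIndex,
        VkSemaphore imageReady) {
    VulkanPlatform::ImageSyncData sync{};
    sync.imageIndex = imageIndex;
    sync.imageReadySemaphore = imageReady;   // the backend waits on this before vkQueueSubmit
    // explicitImageReadyWait stays null because a semaphore is provided.
    return sync;
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~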
VulkanPlatform();
@@ -119,6 +146,12 @@ public:
* before recreating the swapchain. Default is true.
*/
bool flushAndWaitOnWindowResize = true;
/**
* Whether the swapchain image should be transitioned to a layout suitable for
* presentation. Default is true.
*/
bool transitionSwapChainImageLayoutForPresent = true;
};
/**
@@ -147,13 +180,10 @@ public:
* corresponding VkImage will be used as the output color attachment. The client should signal
* the `clientSignal` semaphore when the image is ready to be used by the backend.
* @param handle The handle returned by createSwapChain()
* @param clientSignal The semaphore that the client will signal to indicate that the backend
* may render into the image.
* @param index Pointer to memory that will be filled with the index that corresponding
* to an image in the `SwapChainBundle.colors` array.
* @param outImageSyncData The synchronization data used for image readiness
* @return Result of acquire
*/
virtual VkResult acquire(SwapChainPtr handle, VkSemaphore clientSignal, uint32_t* index);
virtual VkResult acquire(SwapChainPtr handle, ImageSyncData* outImageSyncData);
/**
* Present the image corresponding to `index` to the display. The client should wait on
@@ -174,6 +204,13 @@ public:
*/
virtual bool hasResized(SwapChainPtr handle);
/**
* Check if the surface is protected.
* @param handle The handle returned by createSwapChain()
* @return Whether the swapchain is protected
*/
virtual bool isProtected(SwapChainPtr handle);
/**
* Carry out a recreation of the swapchain.
* @param handle The handle returned by createSwapChain()
@@ -192,6 +229,13 @@ public:
virtual SwapChainPtr createSwapChain(void* nativeWindow, uint64_t flags = 0,
VkExtent2D extent = {0, 0});
/**
* Allows implementers to provide instance extensions that they'd like to include in the
* instance creation.
* @return A set of extensions to enable for the instance.
*/
virtual ExtensionSet getRequiredInstanceExtensions() { return {}; }
/**
* Destroy the swapchain.
* @param handle The handle returned by createSwapChain()
@@ -235,11 +279,88 @@ public:
*/
VkQueue getGraphicsQueue() const noexcept;
private:
// Platform dependent helper methods
using ExtensionSet = std::unordered_set<std::string_view>;
static ExtensionSet getRequiredInstanceExtensions();
/**
* @return The family index of the protected graphics queue selected for the
* Vulkan backend.
*/
uint32_t getProtectedGraphicsQueueFamilyIndex() const noexcept;
/**
* @return The index of the protected graphics queue (if there are multiple
* graphics queues) selected for the Vulkan backend.
*/
uint32_t getProtectedGraphicsQueueIndex() const noexcept;
/**
* @return The protected queue that was selected for the Vulkan backend.
*/
VkQueue getProtectedGraphicsQueue() const noexcept;
struct ExternalImageMetadata {
/**
* The width of the external image
*/
uint32_t width;
/**
* The height of the external image
*/
uint32_t height;
/**
* The layerCount of the external image
*/
uint32_t layerCount;
/**
* The layer count of the external image
*/
uint32_t layers;
/**
* The format of the external image
*/
VkFormat format;
/**
* An external buffer can be protected. This tells you if it is.
*/
bool isProtected;
/**
* The type of external format (opaque int) if used.
*/
uint64_t externalFormat;
/**
* Image usage
*/
VkImageUsageFlags usage;
/**
* Allocation size
*/
VkDeviceSize allocationSize;
/**
* Heap information
*/
uint32_t memoryTypeBits;
};
virtual ExternalImageMetadata getExternalImageMetadata(void* externalImage);
using ImageData = std::pair<VkImage, VkDeviceMemory>;
virtual ImageData createExternalImage(void* externalImage,
const ExternalImageMetadata& metadata);
private:
static ExtensionSet getSwapchainInstanceExtensions();
static ExternalImageMetadata getExternalImageMetadataImpl(void* externalImage,
VkDevice device);
static ImageData createExternalImageImpl(void* externalImage, VkDevice device,
const VkAllocationCallbacks* allocator, const ExternalImageMetadata& metadata);
// Platform dependent helper methods
using SurfaceBundle = std::tuple<VkSurfaceKHR, VkExtent2D>;
static SurfaceBundle createVkSurfaceKHR(void* nativeWindow, VkInstance instance,
uint64_t flags) noexcept;

View File

@@ -1,96 +0,0 @@
/*
* Copyright (C) 2023 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**********************************************************************************************
* Generated by bluegl/bluegl-gen.py
* DO NOT EDIT
**********************************************************************************************/
#ifndef TNT_FILAMENT_BLUEGL__H
#define TNT_FILAMENT_BLUEGL__H
// MSVC includes .../Windows Kits\10\Include\10.0.17763.0\um\GL/gl.h, with gl APIs conflicting with
// bluegl\include\GL/glcorearb.h, causing errors for OpenGL APIs such as:
// error C2375: 'glBindTexture': redefinition; different linkage
#ifndef FILAMENT_PLATFORM_WGL
#define GL_GLEXT_PROTOTYPES 1
#endif
#include <GL/glcorearb.h>
#include <GL/glext.h>
#if defined(WIN32)
#ifdef max
#undef max
#endif
#ifdef min
#undef min
#endif
#ifdef far
#undef far
#endif
#ifdef near
#undef near
#endif
#ifdef ERROR
#undef ERROR
#endif
#ifdef OPAQUE
#undef OPAQUE
#endif
#ifdef TRANSPARENT
#undef TRANSPARENT
#endif
#ifdef PURE
#undef PURE
#endif
#endif
namespace bluegl {
/**
* Looks up and binds all available OpenGL Core functions.
* Every call to this function will increase an internal reference
* counter that can be decreased by calling unbind().
*
* @return 0 on success or -1 if an error occurred.
*/
int bind();
/**
* Unbinds all available OpenGL Core functions.
* Every call to this function will decrease an internal reference
* counter and unbind all OpenGL functions when the counter reaches 0.
* As such you should assume that no OpenGL calls can be made after
* calling this function.
*/
void unbind();
} // namespace bluegl
#endif // TNT_FILAMENT_BLUEGL__H

View File

@@ -40,7 +40,7 @@
#if defined(__ANDROID__)
#define VK_USE_PLATFORM_ANDROID_KHR 1
#elif defined(IOS)
#elif defined(FILAMENT_IOS)
#define VK_USE_PLATFORM_IOS_MVK 1
#elif defined(__linux__)
#if defined(FILAMENT_SUPPORTS_XCB)

View File

@@ -109,6 +109,7 @@ public:
vec4 groundPlane;
RayCallback raycastCallback;
void* raycastUserdata;
bool panning = true;
};
struct Builder {
@@ -143,6 +144,8 @@ public:
Builder& groundPlane(FLOAT a, FLOAT b, FLOAT c, FLOAT d); //! Plane equation used as a raycast fallback
Builder& raycastCallback(RayCallback cb, void* userdata); //! Raycast function for accurate grab-and-pan
Builder& panning(bool enabled); //! Sets whether panning is enabled
/**
* Creates a new camera manipulator, either ORBIT, MAP, or FREE_FLIGHT.
*

View File

@@ -80,7 +80,8 @@ public:
OPENGL = 0x01u,
VULKAN = 0x02u,
METAL = 0x04u,
ALL = OPENGL | VULKAN | METAL
WEBGPU = 0x08u,
ALL = OPENGL | VULKAN | METAL | WEBGPU
};
/*
@@ -134,6 +135,7 @@ protected:
TargetApi mTargetApi = (TargetApi) 0;
Optimization mOptimization = Optimization::PERFORMANCE;
bool mPrintShaders = false;
bool mSaveRawVariants = false;
bool mGenerateDebugInfo = false;
bool mIncludeEssl1 = true;
utils::bitset32 mShaderModels;
@@ -162,6 +164,7 @@ inline constexpr MaterialBuilderBase::TargetApi targetApiFromBackend(
case Backend::OPENGL: return TargetApi::OPENGL;
case Backend::VULKAN: return TargetApi::VULKAN;
case Backend::METAL: return TargetApi::METAL;
case Backend::WEBGPU: return TargetApi::WEBGPU;
case Backend::NOOP: return TargetApi::OPENGL;
}
}
@@ -227,6 +230,7 @@ public:
using ShaderQuality = filament::ShaderQuality;
using BlendingMode = filament::BlendingMode;
using BlendFunction = filament::backend::BlendFunction;
using Shading = filament::Shading;
using Interpolation = filament::Interpolation;
using VertexDomain = filament::VertexDomain;
@@ -244,6 +248,7 @@ public:
using CullingMode = filament::backend::CullingMode;
using FeatureLevel = filament::backend::FeatureLevel;
using StereoscopicType = filament::backend::StereoscopicType;
using ShaderStage = filament::backend::ShaderStage;
enum class VariableQualifier : uint8_t {
OUT
@@ -313,13 +318,17 @@ public:
MaterialBuilder& parameter(const char* name, SamplerType samplerType,
SamplerFormat format = SamplerFormat::FLOAT,
ParameterPrecision precision = ParameterPrecision::DEFAULT,
bool multisample = false) noexcept;
bool multisample = false,
const char* transformName = "") noexcept;
MaterialBuilder& buffer(filament::BufferInterfaceBlock bib) noexcept;
//! Custom variables (all float4).
MaterialBuilder& variable(Variable v, const char* name) noexcept;
MaterialBuilder& variable(Variable v, const char* name,
ParameterPrecision precision) noexcept;
/**
* Require a specified attribute.
*
@@ -407,6 +416,15 @@ public:
*/
MaterialBuilder& blending(BlendingMode blending) noexcept;
/**
* Set the blend function for this material. blending must be set to CUSTOM.
*/
MaterialBuilder& customBlendFunctions(
BlendFunction srcRGB,
BlendFunction srcA,
BlendFunction dstRGB,
BlendFunction dstA) noexcept;
/**
* Set the blending mode of the post-lighting color for this material.
* Only OPAQUE, TRANSPARENT and ADD are supported, the default is TRANSPARENT.
@@ -570,11 +588,18 @@ public:
*/
MaterialBuilder& optimization(Optimization optimization) noexcept;
// TODO: this is present here for matc's "--print" flag, but ideally does not belong inside
// MaterialBuilder.
// TODO: this is present here for matc's "--print" flag, but ideally does not belong inside MaterialBuilder.
//! If true, will output the generated GLSL shader code to stdout.
MaterialBuilder& printShaders(bool printShaders) noexcept;
/**
* If true, this will write the raw generated GLSL for each variant to a text file in the
* current directory. The file will be named after the material name and the variant name. Its
* extension will be derived from the shader stage. For example, mymaterial_0x0e.frag,
* mymaterial_0x18.vert, etc.
*/
MaterialBuilder& saveRawVariants(bool saveVariants) noexcept;
//! If true, will include debugging information in generated SPIRV.
MaterialBuilder& generateDebugInfo(bool generateDebugInfo) noexcept;
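A sketch tying the new builder entry points together, assuming the filamat namespace and the filament BlendFunction/BlendingMode values exposed through the using-declarations above; configure() is a hypothetical helper:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Sketch only: configures a builder for custom blending and raw-variant dumps.
void configure(filamat::MaterialBuilder& builder) {
    builder.blending(filamat::MaterialBuilder::BlendingMode::CUSTOM)
           .customBlendFunctions(
                   filamat::MaterialBuilder::BlendFunction::SRC_ALPHA,            // srcRGB
                   filamat::MaterialBuilder::BlendFunction::ONE,                  // srcA
                   filamat::MaterialBuilder::BlendFunction::ONE_MINUS_SRC_ALPHA,  // dstRGB
                   filamat::MaterialBuilder::BlendFunction::ONE_MINUS_SRC_ALPHA)  // dstA
           .saveRawVariants(true)    // writes e.g. <material>_0x0e.frag to the current directory
           .generateDebugInfo(true);
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~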
@@ -626,8 +651,8 @@ public:
Parameter() noexcept: parameterType(INVALID) {}
// Sampler
Parameter(const char* paramName, SamplerType t, SamplerFormat f, ParameterPrecision p, bool ms)
: name(paramName), size(1), precision(p), samplerType(t), format(f), parameterType(SAMPLER), multisample(ms) { }
Parameter(const char* paramName, SamplerType t, SamplerFormat f, ParameterPrecision p, bool ms, const char* tn)
: name(paramName), size(1), precision(p), samplerType(t), format(f), parameterType(SAMPLER), multisample(ms), transformName(tn) { }
// Uniform
Parameter(const char* paramName, UniformType t, size_t typeSize, ParameterPrecision p)
@@ -645,6 +670,7 @@ public:
SubpassType subpassType;
SamplerFormat format;
bool multisample;
utils::CString transformName;
enum {
INVALID,
UNIFORM,
@@ -682,18 +708,30 @@ public:
} defaultValue;
};
struct PushConstant {
utils::CString name;
ConstantType type;
ShaderStage stage;
};
struct CustomVariable {
utils::CString name;
Precision precision = Precision::DEFAULT;
bool hasPrecision = false;
};
static constexpr size_t MATERIAL_PROPERTIES_COUNT = filament::MATERIAL_PROPERTIES_COUNT;
using Property = filament::Property;
using PropertyList = bool[MATERIAL_PROPERTIES_COUNT];
using VariableList = utils::CString[MATERIAL_VARIABLES_COUNT];
using VariableList = CustomVariable[MATERIAL_VARIABLES_COUNT];
using OutputList = std::vector<Output>;
static constexpr size_t MAX_COLOR_OUTPUT = filament::backend::MRT::MAX_SUPPORTED_RENDER_TARGET_COUNT;
static constexpr size_t MAX_DEPTH_OUTPUT = 1;
static_assert(MAX_COLOR_OUTPUT == 8,
"When updating MRT::MAX_SUPPORTED_RENDER_TARGET_COUNT, manually update post_process_inputs.fs"
" and post_process.fs");
" and post_process_main.fs");
// Preview the first shader generated by the given CodeGenParams.
// This is used to run Static Code Analysis before generating a package.
@@ -710,6 +748,7 @@ public:
using SubpassList = Parameter[MAX_SUBPASS_COUNT];
using BufferList = std::vector<std::unique_ptr<filament::BufferInterfaceBlock>>;
using ConstantList = std::vector<Constant>;
using PushConstantList = std::vector<PushConstant>;
// returns the number of parameters declared in this material
uint8_t getParameterCount() const noexcept { return mParameterCount; }
@@ -753,6 +792,10 @@ private:
void prepareToBuild(MaterialInfo& info) noexcept;
// Initialize internal push constants that will both be written to the shaders and material
// chunks (like user-defined spec constants).
void initPushConstants() noexcept;
// Return true if the shader is syntactically and semantically valid.
// This method finds all the properties defined in the fragment and
// vertex shaders of the material.
@@ -819,6 +862,7 @@ private:
PropertyList mProperties;
ParameterList mParameters;
ConstantList mConstants;
PushConstantList mPushConstants;
SubpassList mSubpasses;
VariableList mVariables;
OutputList mOutputs;
@@ -828,6 +872,7 @@ private:
FeatureLevel mFeatureLevel = FeatureLevel::FEATURE_LEVEL_1;
BlendingMode mBlendingMode = BlendingMode::OPAQUE;
BlendingMode mPostLightingBlendingMode = BlendingMode::TRANSPARENT;
std::array<BlendFunction, 4> mCustomBlendFunctions = {};
CullingMode mCullingMode = CullingMode::BACK;
Shading mShading = Shading::LIT;
MaterialDomain mMaterialDomain = MaterialDomain::SURFACE;

View File

@@ -183,7 +183,7 @@ struct UTILS_PUBLIC Aabb {
* Returns the 8 corner vertices of the AABB.
*/
Corners getCorners() const {
return Aabb::Corners{ .vertices = {
return Corners{ .vertices = {
{ min.x, min.y, min.z },
{ max.x, min.y, min.z },
{ min.x, max.y, min.z },

View File

@@ -54,7 +54,7 @@ public:
using BufferDescriptor = backend::BufferDescriptor;
using BindingType = backend::BufferObjectBinding;
class Builder : public BuilderBase<BuilderDetails> {
class Builder : public BuilderBase<BuilderDetails>, public BuilderNameMixin<Builder> {
friend struct BuilderDetails;
public:
Builder() noexcept;
@@ -73,11 +73,26 @@ public:
/**
* The binding type for this buffer object. (defaults to VERTEX)
* @param BindingType Distinguishes between SSBO, VBO, etc. For now this must be VERTEX.
* @param bindingType Distinguishes between SSBO, VBO, etc. For now this must be VERTEX.
* @return A reference to this Builder for chaining calls.
*/
Builder& bindingType(BindingType bindingType) noexcept;
/**
* Associate an optional name with this BufferObject for debugging purposes.
*
* name will show in error messages and should be kept as short as possible. The name is
* truncated to a maximum of 128 characters.
*
* The name string is copied during this method so clients may free its memory after
* the function returns.
*
* @param name A string to identify this BufferObject
* @param len Length of name, should be less than or equal to 128
* @return This Builder, for chaining calls.
*/
Builder& name(const char* UTILS_NONNULL name, size_t len) noexcept;
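A minimal usage sketch of the new naming API (the size, binding type, and name below are illustrative only, and `engine` is assumed to be an existing `filament::Engine*`):
~~~~~~~~~~~
filament::BufferObject* bo = filament::BufferObject::Builder()
        .size(1024)  // byte count; the buffer stays uninitialized until setBuffer()
        .bindingType(filament::BufferObject::BindingType::VERTEX)
        .name("particle-positions", 18)  // copied by the builder, truncated past 128 chars
        .build(*engine);
~~~~~~~~~~~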
/**
* Creates the BufferObject and returns a pointer to it. After creation, the buffer
* object is uninitialized. Use BufferObject::setBuffer() to initialize it.

View File

@@ -61,7 +61,7 @@ namespace filament {
* filament::Camera* myCamera = engine->createCamera(myCameraEntity);
* myCamera->setProjection(45, 16.0/9.0, 0.1, 1.0);
* myCamera->lookAt({0, 1.60, 1}, {0, 0, 0});
* engine->destroyCameraComponent(myCamera);
* engine->destroyCameraComponent(myCameraEntity);
* ~~~~~~~~~~~
*
*
@@ -400,14 +400,14 @@ public:
* engine.getTransformManager().getInstance(camera->getEntity()), model);
* ~~~~~~~~~~~
*
* @param model The camera position and orientation provided as a rigid transform matrix.
* @param modelMatrix The camera position and orientation provided as a rigid transform matrix.
*
* @note The Camera "looks" towards its -z axis
*
* @warning \p model must be a rigid transform
*/
void setModelMatrix(const math::mat4& model) noexcept;
void setModelMatrix(const math::mat4f& model) noexcept; //!< @overload
void setModelMatrix(const math::mat4& modelMatrix) noexcept;
void setModelMatrix(const math::mat4f& modelMatrix) noexcept; //!< @overload
/** Set the position of an eye relative to this Camera (head).
*

View File

@@ -194,7 +194,7 @@ inline sRGBColorA Color::toSRGB<ACCURATE>(LinearColorA const& color) {
}
inline LinearColor Color::toLinear(RgbType type, math::float3 color) {
return (type == RgbType::LINEAR) ? color : Color::toLinear<ACCURATE>(color);
return (type == RgbType::LINEAR) ? color : toLinear<ACCURATE>(color);
}
// converts an RGBA color to linear space
@@ -202,11 +202,11 @@ inline LinearColor Color::toLinear(RgbType type, math::float3 color) {
inline LinearColorA Color::toLinear(RgbaType type, math::float4 color) {
switch (type) {
case RgbaType::sRGB:
return Color::toLinear<ACCURATE>(color) * math::float4{color.a, color.a, color.a, 1.0f};
return toLinear<ACCURATE>(color) * math::float4{color.a, color.a, color.a, 1.0f};
case RgbaType::LINEAR:
return color * math::float4{color.a, color.a, color.a, 1.0f};
case RgbaType::PREMULTIPLIED_sRGB:
return Color::toLinear<ACCURATE>(color);
return toLinear<ACCURATE>(color);
case RgbaType::PREMULTIPLIED_LINEAR:
return color;
}

View File

@@ -17,6 +17,7 @@
#ifndef TNT_FILAMENT_ENGINE_H
#define TNT_FILAMENT_ENGINE_H
#include <filament/FilamentAPI.h>
#include <backend/DriverEnums.h>
@@ -24,10 +25,15 @@
#include <utils/compiler.h>
#include <utils/Invocable.h>
#include <utils/Slice.h>
#include <initializer_list>
#include <optional>
#include <stdint.h>
#include <stddef.h>
namespace utils {
class Entity;
class EntityManager;
@@ -36,6 +42,10 @@ class JobSystem;
namespace filament {
namespace backend {
class Driver;
} // backend
class BufferObject;
class Camera;
class ColorGrading;
@@ -179,6 +189,7 @@ public:
using DriverConfig = backend::Platform::DriverConfig;
using FeatureLevel = backend::FeatureLevel;
using StereoscopicType = backend::StereoscopicType;
using Driver = backend::Driver;
/**
* Config is used to define the memory footprint used by the engine, such as the
@@ -288,19 +299,28 @@ public:
*/
uint32_t jobSystemThreadCount = 0;
/*
* Number of most-recently destroyed textures to track for use-after-free.
/**
* When uploading vertex or index data, the Filament Metal backend copies data
* into a shared staging area before transferring it to the GPU. This setting controls
* the total size of the buffer used to perform these allocations.
*
* This will cause the backend to throw an exception when a texture is freed but still bound
* to a SamplerGroup and used in a draw call. 0 disables completely.
* Higher values can improve performance when performing many uploads across a small
* number of frames.
*
* Currently only respected by the Metal backend.
* This buffer remains alive throughout the lifetime of the Engine, so this size adds to the
memory footprint of the app and should be set as conservatively as possible.
*
* A value of 0 disables the shared staging buffer entirely; uploads will acquire an
* individual buffer from a pool of shared buffers.
*
* Only respected by the Metal backend.
*/
size_t textureUseAfterFreePoolSize = 0;
size_t metalUploadBufferSizeBytes = 512 * 1024;
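A hedged sketch of how this setting might be applied; the 2 MiB value is arbitrary, and the Builder usage mirrors the helpers shown later in this header:
~~~~~~~~~~~
filament::Engine::Config config = {};
config.metalUploadBufferSizeBytes = 2 * 1024 * 1024;  // larger staging area for upload-heavy apps
filament::Engine* engine = filament::Engine::Builder()
        .backend(filament::backend::Backend::METAL)
        .config(&config)
        .build();
~~~~~~~~~~~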
/**
* Set to `true` to forcibly disable parallel shader compilation in the backend.
* Currently only honored by the GL and Metal backends.
* @deprecated use "backend.disable_parallel_shader_compile" feature flag instead
*/
bool disableParallelShaderCompile = false;
@@ -315,7 +335,7 @@ public:
*
* @see View::setStereoscopicOptions
*/
StereoscopicType stereoscopicType = StereoscopicType::INSTANCED;
StereoscopicType stereoscopicType = StereoscopicType::NONE;
/*
* The number of eyes to render when stereoscopic rendering is enabled. Supported values are
@@ -332,17 +352,78 @@ public:
uint32_t resourceAllocatorCacheSizeMB = 64;
/*
* This value determines for how many frames are texture entries kept in the cache.
* This value determines how many frames texture entries are kept in the cache. This
* is a soft limit, meaning some textures older than this are allowed to stay in the cache.
* Typically only one texture is evicted per frame.
* The default is 1.
*/
uint32_t resourceAllocatorCacheMaxAge = 2;
uint32_t resourceAllocatorCacheMaxAge = 1;
/*
* Disable backend handles use-after-free checks.
* @deprecated use "backend.disable_handle_use_after_free_check" feature flag instead
*/
bool disableHandleUseAfterFreeCheck = false;
/*
* Sets a preferred shader language for Filament to use.
*
* The Metal backend supports two shader languages: MSL (Metal Shading Language) and
* METAL_LIBRARY (precompiled .metallib). This option controls which shader language is
* used when materials contain both.
*
* By default, when preferredShaderLanguage is unset, Filament will prefer METAL_LIBRARY
* shaders if present within a material, falling back to MSL. Setting
* preferredShaderLanguage to ShaderLanguage::MSL will instead instruct Filament to check
* for the presence of MSL in a material first, falling back to METAL_LIBRARY if MSL is not
* present.
*
* When using a non-Metal backend, setting this has no effect.
*/
enum class ShaderLanguage {
DEFAULT = 0,
MSL = 1,
METAL_LIBRARY = 2,
};
ShaderLanguage preferredShaderLanguage = ShaderLanguage::DEFAULT;
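For instance, an app shipping materials that contain both MSL and METAL_LIBRARY variants could opt into MSL like this (sketch only; `config` is then passed to the Engine builder as above):
~~~~~~~~~~~
filament::Engine::Config config = {};
config.preferredShaderLanguage = filament::Engine::Config::ShaderLanguage::MSL;
~~~~~~~~~~~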
/*
* When the OpenGL ES backend is used, setting this value to true will force a GLES2.0
* context if supported by the Platform, or if not, will have the backend pretend
* it's a GLES2 context. Ignored on other backends.
*/
bool forceGLES2Context = false;
/**
* Assert that the native window associated with a SwapChain is valid when calling makeCurrent().
* This is only supported for:
* - PlatformEGLAndroid
* @deprecated use "backend.opengl.assert_native_window_is_valid" feature flag instead
*/
bool assertNativeWindowIsValid = false;
};
/**
* Feature flags can be enabled or disabled when the Engine is built. Some Feature flags can
also be toggled at any time. Feature flags should always use their default value unless
the feature enabled by the flag is faulty. Feature flags provide a last-resort way to
* disable problematic features.
* Feature flags are intended to have a short life-time and are regularly removed as features
* mature.
*/
struct FeatureFlag {
char const* UTILS_NONNULL name; //!< name of the feature flag
char const* UTILS_NONNULL description; //!< short description
bool const* UTILS_NONNULL value; //!< pointer to the value of the flag
bool constant; //!< whether the flag is constant after construction
};
/**
* Returns the list of available feature flags
*/
utils::Slice<const FeatureFlag> getFeatureFlags() const noexcept;
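A possible way to dump the available flags for diagnostics (a sketch, assuming a valid `engine` and that the returned `utils::Slice` is range-iterable):
~~~~~~~~~~~
for (filament::Engine::FeatureFlag const& flag : engine->getFeatureFlags()) {
    printf("%s [%s] = %s\n    %s\n",
            flag.name,
            flag.constant ? "constant" : "mutable",
            *flag.value ? "true" : "false",
            flag.description);
}
~~~~~~~~~~~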
#if UTILS_HAS_THREADING
using CreateCallback = void(void* UTILS_NULLABLE user, void* UTILS_NONNULL token);
#endif
@@ -415,6 +496,21 @@ public:
*/
Builder& paused(bool paused) noexcept;
/**
* Set a feature flag value. This is the only way to set constant feature flags.
* @param name feature name
* @param value true to enable, false to disable
* @return A reference to this Builder for chaining calls.
*/
Builder& feature(char const* UTILS_NONNULL name, bool value) noexcept;
/**
* Enables a list of features.
* @param list list of feature names to enable.
* @return A reference to this Builder for chaining calls.
*/
Builder& features(std::initializer_list<char const *> list) noexcept;
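A sketch combining both calls; the flag names are the ones mentioned elsewhere in this header, and the backend choice is arbitrary:
~~~~~~~~~~~
filament::Engine* engine = filament::Engine::Builder()
        .backend(filament::backend::Backend::OPENGL)
        .feature("backend.disable_parallel_shader_compile", true)
        .features({ "backend.opengl.assert_native_window_is_valid" })
        .build();
~~~~~~~~~~~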
#if UTILS_HAS_THREADING
/**
* Creates the filament Engine asynchronously.
@@ -448,7 +544,7 @@ public:
Platform* UTILS_NULLABLE platform = nullptr,
void* UTILS_NULLABLE sharedContext = nullptr,
const Config* UTILS_NULLABLE config = nullptr) {
return Engine::Builder()
return Builder()
.backend(backend)
.platform(platform)
.sharedContext(sharedContext)
@@ -468,7 +564,7 @@ public:
Platform* UTILS_NULLABLE platform = nullptr,
void* UTILS_NULLABLE sharedContext = nullptr,
const Config* UTILS_NULLABLE config = nullptr) {
Engine::Builder()
Builder()
.backend(backend)
.platform(platform)
.sharedContext(sharedContext)
@@ -493,6 +589,11 @@ public:
static Engine* UTILS_NULLABLE getEngine(void* UTILS_NONNULL token);
#endif
/**
* @return the Driver instance used by this Engine.
* @see OpenGLPlatform
*/
backend::Driver const* UTILS_NONNULL getDriver() const noexcept;
/**
* Destroy the Engine instance and all associated resources.
@@ -800,24 +901,74 @@ public:
bool destroy(const InstanceBuffer* UTILS_NULLABLE p); //!< Destroys an InstanceBuffer object.
void destroy(utils::Entity e); //!< Destroys all filament-known components from this entity
bool isValid(const BufferObject* UTILS_NULLABLE p); //!< Tells whether a BufferObject object is valid
bool isValid(const VertexBuffer* UTILS_NULLABLE p); //!< Tells whether an VertexBuffer object is valid
bool isValid(const Fence* UTILS_NULLABLE p); //!< Tells whether a Fence object is valid
bool isValid(const IndexBuffer* UTILS_NULLABLE p); //!< Tells whether an IndexBuffer object is valid
bool isValid(const SkinningBuffer* UTILS_NULLABLE p); //!< Tells whether a SkinningBuffer object is valid
bool isValid(const MorphTargetBuffer* UTILS_NULLABLE p); //!< Tells whether a MorphTargetBuffer object is valid
bool isValid(const IndirectLight* UTILS_NULLABLE p); //!< Tells whether an IndirectLight object is valid
bool isValid(const Material* UTILS_NULLABLE p); //!< Tells whether an IndirectLight object is valid
bool isValid(const Renderer* UTILS_NULLABLE p); //!< Tells whether a Renderer object is valid
bool isValid(const Scene* UTILS_NULLABLE p); //!< Tells whether a Scene object is valid
bool isValid(const Skybox* UTILS_NULLABLE p); //!< Tells whether a SkyBox object is valid
bool isValid(const ColorGrading* UTILS_NULLABLE p); //!< Tells whether a ColorGrading object is valid
bool isValid(const SwapChain* UTILS_NULLABLE p); //!< Tells whether a SwapChain object is valid
bool isValid(const Stream* UTILS_NULLABLE p); //!< Tells whether a Stream object is valid
bool isValid(const Texture* UTILS_NULLABLE p); //!< Tells whether a Texture object is valid
bool isValid(const RenderTarget* UTILS_NULLABLE p); //!< Tells whether a RenderTarget object is valid
bool isValid(const View* UTILS_NULLABLE p); //!< Tells whether a View object is valid
bool isValid(const InstanceBuffer* UTILS_NULLABLE p); //!< Tells whether an InstanceBuffer object is valid
/** Tells whether a BufferObject object is valid */
bool isValid(const BufferObject* UTILS_NULLABLE p) const;
/** Tells whether a VertexBuffer object is valid */
bool isValid(const VertexBuffer* UTILS_NULLABLE p) const;
/** Tells whether a Fence object is valid */
bool isValid(const Fence* UTILS_NULLABLE p) const;
/** Tells whether an IndexBuffer object is valid */
bool isValid(const IndexBuffer* UTILS_NULLABLE p) const;
/** Tells whether a SkinningBuffer object is valid */
bool isValid(const SkinningBuffer* UTILS_NULLABLE p) const;
/** Tells whether a MorphTargetBuffer object is valid */
bool isValid(const MorphTargetBuffer* UTILS_NULLABLE p) const;
/** Tells whether an IndirectLight object is valid */
bool isValid(const IndirectLight* UTILS_NULLABLE p) const;
/** Tells whether a Material object is valid */
bool isValid(const Material* UTILS_NULLABLE p) const;
/** Tells whether a MaterialInstance object is valid. Use this if you already know
* which Material this MaterialInstance belongs to. DO NOT USE getMaterial(); this would
* defeat the purpose of validating the MaterialInstance.
*/
bool isValid(const Material* UTILS_NONNULL m, const MaterialInstance* UTILS_NULLABLE p) const;
/** Tells whether a MaterialInstance object is valid. Use this if the Material the
* MaterialInstance belongs to is not known. This method can be expensive.
*/
bool isValidExpensive(const MaterialInstance* UTILS_NULLABLE p) const;
/** Tells whether a Renderer object is valid */
bool isValid(const Renderer* UTILS_NULLABLE p) const;
/** Tells whether a Scene object is valid */
bool isValid(const Scene* UTILS_NULLABLE p) const;
/** Tells whether a Skybox object is valid */
bool isValid(const Skybox* UTILS_NULLABLE p) const;
/** Tells whether a ColorGrading object is valid */
bool isValid(const ColorGrading* UTILS_NULLABLE p) const;
/** Tells whether a SwapChain object is valid */
bool isValid(const SwapChain* UTILS_NULLABLE p) const;
/** Tells whether a Stream object is valid */
bool isValid(const Stream* UTILS_NULLABLE p) const;
/** Tells whether a Texture object is valid */
bool isValid(const Texture* UTILS_NULLABLE p) const;
/** Tells whether a RenderTarget object is valid */
bool isValid(const RenderTarget* UTILS_NULLABLE p) const;
/** Tells whether a View object is valid */
bool isValid(const View* UTILS_NULLABLE p) const;
/** Tells whether an InstanceBuffer object is valid */
bool isValid(const InstanceBuffer* UTILS_NULLABLE p) const;
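A short sketch of the MaterialInstance validation paths described above (`material` and `materialInstance` are assumed to exist):
~~~~~~~~~~~
// Prefer the two-argument form when the owning Material is already known.
if (engine->isValid(material, materialInstance)) { /* safe to use */ }
// Fall back to the slower variant only when the owning Material is unknown.
bool const stillAlive = engine->isValidExpensive(materialInstance);
~~~~~~~~~~~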
/**
* Retrieve the count of each resource tracked by Engine.
* This is intended for debugging.
* @{
*/
size_t getBufferObjectCount() const noexcept;
size_t getViewCount() const noexcept;
size_t getSceneCount() const noexcept;
size_t getSwapChainCount() const noexcept;
size_t getStreamCount() const noexcept;
size_t getIndexBufferCount() const noexcept;
size_t getSkinningBufferCount() const noexcept;
size_t getMorphTargetBufferCount() const noexcept;
size_t getInstanceBufferCount() const noexcept;
size_t getVertexBufferCount() const noexcept;
size_t getIndirectLightCount() const noexcept;
size_t getMaterialCount() const noexcept;
size_t getTextureCount() const noexcept;
size_t getSkyboxeCount() const noexcept;
size_t getColorGradingCount() const noexcept;
size_t getRenderTargetCount() const noexcept;
/** @} */
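These getters might be used, for example, to verify that teardown code released everything it created (a debugging sketch, assuming a valid `engine`):
~~~~~~~~~~~
printf("leak check: textures=%zu materials=%zu views=%zu renderTargets=%zu\n",
        engine->getTextureCount(), engine->getMaterialCount(),
        engine->getViewCount(), engine->getRenderTargetCount());
~~~~~~~~~~~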
/**
* Kicks the hardware thread (e.g. the OpenGL, Vulkan or Metal thread) and blocks until
@@ -831,6 +982,25 @@ public:
*/
void flushAndWait();
/**
* Kicks the hardware thread (e.g. the OpenGL, Vulkan or Metal thread) and blocks until
* all commands to this point are executed. Note that this doesn't guarantee that the
* hardware is actually finished.
*
* A timeout can be specified; if for some reason this flushAndWait() doesn't complete before the
* timeout, it returns false, and true otherwise.
*
* <p>This is typically used right after destroying the <code>SwapChain</code>,
* in cases where a guarantee about the <code>SwapChain</code> destruction is needed in a
* timely fashion, such as when responding to Android's
* <code>android.view.SurfaceHolder.Callback.surfaceDestroyed</code></p>
*
* @param timeout A timeout in nanoseconds
* @return true if successful, false if flushAndWait() timed out, in which case commands
* might still be executing on both the CPU and GPU sides.
*/
bool flushAndWait(uint64_t timeout);
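A sketch of the Android scenario described above (the 100 ms budget is an arbitrary choice):
~~~~~~~~~~~
engine->destroy(swapChain);
// Give the backend up to 100 ms to process the destruction before the surface goes away.
if (!engine->flushAndWait(100'000'000ull)) {
    // Timed out: commands may still be in flight on the CPU and GPU sides.
}
~~~~~~~~~~~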
/**
* Kicks the hardware thread (e.g. the OpenGL, Vulkan or Metal thread) but does not wait
* for commands to be either executed or the hardware finished.
@@ -840,6 +1010,15 @@ public:
*/
void flush();
/**
* Get paused state of rendering thread.
*
* <p>Warning: This is an experimental API.
*
* @see setPaused
*/
bool isPaused() const noexcept;
/**
* Pause or resume rendering thread.
*
@@ -865,6 +1044,14 @@ public:
*/
void pumpMessageQueues();
/**
* Switch the command queue to unprotected mode. Protected mode can be activated via
* Renderer::beginFrame() using a protected SwapChain.
* @see Renderer
* @see SwapChain
*/
void unprotected() noexcept;
/**
* Returns the default Material.
*
@@ -948,8 +1135,54 @@ public:
void resetBackendState() noexcept;
#endif
/**
* Get the current time. This is a convenience function that simply returns the
* time in nanoseconds since the epoch of std::chrono::steady_clock.
* A possible implementation is:
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* return std::chrono::steady_clock::now().time_since_epoch().count();
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* @return current time in nanoseconds since the epoch of std::chrono::steady_clock.
* @see Renderer::beginFrame()
*/
static uint64_t getSteadyClockTimeNano() noexcept;
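For example, the returned time can be fed to Renderer::beginFrame() as the frame timestamp (sketch; `renderer`, `swapChain`, and `view` are assumed to exist):
~~~~~~~~~~~
uint64_t const frameTimeNanos = filament::Engine::getSteadyClockTimeNano();
if (renderer->beginFrame(swapChain, frameTimeNanos)) {
    renderer->render(view);
    renderer->endFrame();
}
~~~~~~~~~~~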
DebugRegistry& getDebugRegistry() noexcept;
/**
* Check if a feature flag exists
* @param name name of the feature flag to check
* @return true if the feature flag exists, false otherwise
*/
inline bool hasFeatureFlag(char const* UTILS_NONNULL name) noexcept {
return getFeatureFlag(name).has_value();
}
/**
* Set the value of a non-constant feature flag.
* @param name name of the feature flag to set
* @param value value to set
* @return true if the value was set, false if the feature flag is constant or doesn't exist.
*/
bool setFeatureFlag(char const* UTILS_NONNULL name, bool value) noexcept;
/**
* Retrieves the value of any feature flag.
* @param name name of the feature flag
* @return the value of the flag if it exists
*/
std::optional<bool> getFeatureFlag(char const* UTILS_NONNULL name) const noexcept;
/**
* Returns a pointer to a non-constant feature flag value.
* @param name name of the feature flag
* @return a pointer to the feature flag value, or nullptr if the feature flag is constant or doesn't exist
*/
bool* UTILS_NULLABLE getFeatureFlagPtr(char const* UTILS_NONNULL name) const noexcept;
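Putting the runtime flag API together (a sketch; the flag name is one of the deprecated-Config replacements listed earlier in this header):
~~~~~~~~~~~
char const* const kFlag = "backend.disable_handle_use_after_free_check";
if (engine->hasFeatureFlag(kFlag)) {
    bool const ok = engine->setFeatureFlag(kFlag, true);  // fails for constant flags
    std::optional<bool> const value = engine->getFeatureFlag(kFlag);
    (void) ok; (void) value;
}
~~~~~~~~~~~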
protected:
//! \privatesection
Engine() noexcept = default;

View File

@@ -19,6 +19,7 @@
#include <utils/compiler.h>
#include <utils/PrivateImplementation.h>
#include <utils/CString.h>
#include <stddef.h>
@@ -54,6 +55,23 @@ public:
template<typename T>
using BuilderBase = utils::PrivateImplementation<T>;
// This needs to be public because it is used in the following template.
UTILS_PUBLIC void builderMakeName(utils::CString& outName, const char* name, size_t len) noexcept;
template <typename Builder>
class UTILS_PUBLIC BuilderNameMixin {
public:
Builder& name(const char* name, size_t len) noexcept {
builderMakeName(mName, name, len);
return static_cast<Builder&>(*this);
}
utils::CString const& getName() const noexcept { return mName; }
private:
utils::CString mName;
};
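A self-contained illustration of the same CRTP mixin pattern, outside Filament's types (all names here are placeholders, and std::string stands in for utils::CString):
~~~~~~~~~~~
#include <cstddef>
#include <string>

template <typename Builder>
class NameMixin {                             // same shape as BuilderNameMixin above
public:
    Builder& name(const char* name, size_t len) {
        mName.assign(name, len);              // Filament additionally truncates to 128 chars
        return static_cast<Builder&>(*this);  // CRTP: keep chaining on the derived builder
    }
    std::string const& getName() const { return mName; }
private:
    std::string mName;
};

class WidgetBuilder : public NameMixin<WidgetBuilder> {
public:
    WidgetBuilder& size(size_t s) { mSize = s; return *this; }
private:
    size_t mSize = 0;
};

int main() {
    WidgetBuilder b;
    b.name("staging", 7).size(1024);          // name() returns WidgetBuilder&, so chaining works
    return b.getName() == "staging" ? 0 : 1;
}
~~~~~~~~~~~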
} // namespace filament
#endif // TNT_FILAMENT_FILAMENTAPI_H

View File

@@ -59,7 +59,7 @@ public:
UINT = uint8_t(backend::ElementType::UINT), //!< 32-bit indices
};
class Builder : public BuilderBase<BuilderDetails> {
class Builder : public BuilderBase<BuilderDetails>, public BuilderNameMixin<Builder> {
friend struct BuilderDetails;
public:
Builder() noexcept;
@@ -83,6 +83,21 @@ public:
*/
Builder& bufferType(IndexType indexType) noexcept;
/**
* Associate an optional name with this IndexBuffer for debugging purposes.
*
* name will show in error messages and should be kept as short as possible. The name is
* truncated to a maximum of 128 characters.
*
* The name string is copied during this method so clients may free its memory after
* the function returns.
*
* @param name A string to identify this IndexBuffer
* @param len Length of name, should be less than or equal to 128
* @return This Builder, for chaining calls.
*/
Builder& name(const char* UTILS_NONNULL name, size_t len) noexcept;
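Analogous to the BufferObject case, a minimal sketch (counts and name are illustrative; `engine` is assumed to exist):
~~~~~~~~~~~
filament::IndexBuffer* ib = filament::IndexBuffer::Builder()
        .indexCount(36)
        .bufferType(filament::IndexBuffer::IndexType::USHORT)
        .name("cube-indices", 12)  // copied by the builder, truncated past 128 chars
        .build(*engine);
~~~~~~~~~~~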
/**
* Creates the IndexBuffer object and returns a pointer to it. After creation, the index
* buffer is uninitialized. Use IndexBuffer::setBuffer() to initialize the IndexBuffer.

View File

@@ -158,6 +158,8 @@ public:
*
* @return This Builder, for chaining calls.
*
* @see Material::Builder::sphericalHarmonicsBandCount()
*
* @note
* Because the coefficients are pre-scaled, `sh[0]` is the environment's
* average irradiance.

Some files were not shown because too many files have changed in this diff.