From f2bac60be4459036cd088fa1f41bedfd5e7012b2 Mon Sep 17 00:00:00 2001 From: aerofly Date: Sun, 9 Dec 2018 16:34:30 +0100 Subject: [PATCH 01/14] mvk_vector --- MoltenVK/MoltenVK/Commands/MVKCmdDraw.h | 5 +- MoltenVK/MoltenVK/Commands/MVKCmdPipeline.h | 8 +- MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm | 9 +- MoltenVK/MoltenVK/Commands/MVKCmdRenderPass.h | 7 +- MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h | 3 +- .../MoltenVK/Commands/MVKCommandBuffer.mm | 10 +- .../Commands/MVKCommandEncoderState.h | 44 +- .../Commands/MVKCommandEncoderState.mm | 12 +- .../MoltenVK/GPUObjects/MVKDescriptorSet.h | 7 +- .../MoltenVK/GPUObjects/MVKDescriptorSet.mm | 4 +- MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h | 9 +- MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm | 5 +- MoltenVK/MoltenVK/GPUObjects/MVKQueue.h | 9 +- MoltenVK/MoltenVK/Utility/MVKVector.h | 504 ++++++++++++++++ .../MoltenVK/Utility/MVKVectorAllocator.h | 549 ++++++++++++++++++ 15 files changed, 1129 insertions(+), 56 deletions(-) create mode 100755 MoltenVK/MoltenVK/Utility/MVKVector.h create mode 100755 MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdDraw.h b/MoltenVK/MoltenVK/Commands/MVKCmdDraw.h index 91bd8b6f..dd1e4b73 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCmdDraw.h +++ b/MoltenVK/MoltenVK/Commands/MVKCmdDraw.h @@ -20,8 +20,7 @@ #include "MVKCommand.h" #include "MVKMTLResourceBindings.h" -#include - +#include "MVKVector.h" #import class MVKDevice; @@ -44,7 +43,7 @@ public: MVKCmdBindVertexBuffers(MVKCommandTypePool* pool); protected: - std::vector _bindings; + MVKVector _bindings; }; diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.h b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.h index a6002f7a..bbef5174 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.h +++ b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.h @@ -19,8 +19,8 @@ #pragma once #include "MVKCommand.h" +#include "MVKVector.h" #include - class MVKCommandBuffer; class MVKPipeline; class 
MVKPipelineLayout; @@ -101,8 +101,8 @@ public: private: VkPipelineBindPoint _pipelineBindPoint; MVKPipelineLayout* _pipelineLayout; - std::vector _descriptorSets; - std::vector _dynamicOffsets; + MVKVector _descriptorSets; + MVKVector _dynamicOffsets; uint32_t _firstSet; }; @@ -128,7 +128,7 @@ private: MVKPipelineLayout* _pipelineLayout; VkShaderStageFlags _stageFlags; uint32_t _offset; - std::vector _pushConstants; + MVKVector _pushConstants; }; diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm index 57d1ac83..b8dd2686 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm +++ b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm @@ -159,8 +159,13 @@ void MVKCmdPushConstants::setContent(VkPipelineLayout layout, _stageFlags = stageFlags; _offset = offset; - _pushConstants.resize(size); - copy_n((char*)pValues, size, _pushConstants.begin()); + _pushConstants.clear(); + for( uint32_t i=0; i( pValues)[i] ); + } + //_pushConstants.resize(size); + //copy_n((char*)pValues, size, _pushConstants.begin()); } void MVKCmdPushConstants::encode(MVKCommandEncoder* cmdEncoder) { diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdRenderPass.h b/MoltenVK/MoltenVK/Commands/MVKCmdRenderPass.h index db870243..9562d6b2 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCmdRenderPass.h +++ b/MoltenVK/MoltenVK/Commands/MVKCmdRenderPass.h @@ -19,6 +19,7 @@ #pragma once #include "MVKCommand.h" +#include "MVKVector.h" #include #import @@ -47,7 +48,7 @@ private: VkSubpassContents _contents; MVKRenderPass* _renderPass; MVKFramebuffer* _framebuffer; - std::vector _clearValues; + MVKVector _clearValues; }; @@ -114,7 +115,7 @@ public: private: uint32_t _firstViewport; - std::vector _mtlViewports; + MVKVector _mtlViewports; }; @@ -133,7 +134,7 @@ public: private: uint32_t _firstScissor; - std::vector _mtlScissors; + MVKVector _mtlScissors; }; diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h index 
7560d4c6..b2f975a7 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h +++ b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h @@ -23,6 +23,7 @@ #include "MVKCommandEncoderState.h" #include "MVKMTLBufferAllocation.h" #include "MVKCmdPipeline.h" +#include "MVKVector.h" #include #include @@ -239,7 +240,7 @@ public: MVKRenderPass* renderPass, MVKFramebuffer* framebuffer, VkRect2D& renderArea, - std::vector* clearValues); + MVKVector* clearValues); /** Begins the next render subpass. */ void beginNextSubpass(VkSubpassContents renderpassContents); diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm index 35ebe11a..c0d61a3a 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm +++ b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm @@ -219,13 +219,19 @@ void MVKCommandEncoder::beginRenderpass(VkSubpassContents subpassContents, MVKRenderPass* renderPass, MVKFramebuffer* framebuffer, VkRect2D& renderArea, - vector* clearValues) { + MVKVector* clearValues) { _renderPass = renderPass; _framebuffer = framebuffer; _renderArea = renderArea; _isRenderingEntireAttachment = (mvkVkOffset2DsAreEqual(_renderArea.offset, {0,0}) && mvkVkExtent2DsAreEqual(_renderArea.extent, _framebuffer->getExtent2D())); - _clearValues.assign(clearValues->begin(), clearValues->end()); + + _clearValues.clear(); + for( auto cv : *clearValues ) + { + _clearValues.push_back( cv ); + } + //_clearValues.assign(clearValues->begin(), clearValues->end()); setSubpass(subpassContents, 0); } diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.h b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.h index 4981e655..a87a7e0c 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.h +++ b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.h @@ -18,9 +18,9 @@ #pragma once -#include "MVKMTLResourceBindings.h" +#include "MVKMTLResourceBindings.h" #include "MVKCommandResourceFactory.h" -#include +#include "MVKVector.h" class MVKCommandEncoder; 
class MVKOcclusionQueryPool; @@ -135,7 +135,7 @@ public: * The isSettingDynamically indicates that the scissor is being changed dynamically, * which is only allowed if the pipeline was created as VK_DYNAMIC_STATE_SCISSOR. */ - void setViewports(std::vector mtlViewports, + void setViewports(const MVKVector &mtlViewports, uint32_t firstViewport, bool isSettingDynamically); @@ -147,7 +147,7 @@ protected: void encodeImpl() override; void resetImpl() override; - std::vector _mtlViewports; + MVKVector _mtlViewports; }; @@ -164,7 +164,7 @@ public: * The isSettingDynamically indicates that the scissor is being changed dynamically, * which is only allowed if the pipeline was created as VK_DYNAMIC_STATE_SCISSOR. */ - void setScissors(std::vector mtlScissors, + void setScissors(const MVKVector &mtlScissors, uint32_t firstScissor, bool isSettingDynamically); @@ -176,7 +176,7 @@ protected: void encodeImpl() override; void resetImpl() override; - std::vector _mtlScissors; + MVKVector _mtlScissors; }; @@ -189,7 +189,7 @@ class MVKPushConstantsCommandEncoderState : public MVKCommandEncoderState { public: /** Sets the specified push constants. */ - void setPushConstants(uint32_t offset, std::vector& pushConstants); + void setPushConstants(uint32_t offset, MVKVector& pushConstants); /** Sets the index of the Metal buffer used to hold the push constants. */ void setMTLBufferIndex(uint32_t mtlBufferIndex); @@ -203,7 +203,7 @@ protected: void encodeImpl() override; void resetImpl() override; - std::vector _pushConstants; + MVKVector _pushConstants; VkShaderStageFlagBits _shaderStage; uint32_t _mtlBufferIndex = 0; }; @@ -348,15 +348,15 @@ protected: // Template function that marks both the vector and all binding elements in the vector as dirty. 
template - void markDirty(std::vector& bindings, bool& bindingsDirtyFlag) { + void markDirty(T& bindings, bool& bindingsDirtyFlag) { for (auto& b : bindings) { b.isDirty = true; } bindingsDirtyFlag = true; } // Template function that updates an existing binding or adds a new binding to a vector // of bindings, and marks the binding, the vector, and this instance as dirty - template - void bind(const T& b, std::vector& bindings, bool& bindingsDirtyFlag) { + template + void bind(const T& b, U& bindings, bool& bindingsDirtyFlag) { if ( !b.mtlResource ) { return; } @@ -365,7 +365,7 @@ protected: bindingsDirtyFlag = true; db.isDirty = true; - for (auto iter = bindings.begin(), end = bindings.end(); iter != end; iter++) { + for (auto iter = bindings.begin(), end = bindings.end(); iter != end; ++iter) { if( iter->index == db.index ) { *iter = db; return; @@ -377,7 +377,7 @@ protected: // Template function that executes a lambda expression on each dirty element of // a vector of bindings, and marks the bindings and the vector as no longer dirty. 
template - void encodeBinding(std::vector& bindings, + void encodeBinding(MVKVector& bindings, bool& bindingsDirtyFlag, std::function mtlOperation) { if (bindingsDirtyFlag) { @@ -451,12 +451,12 @@ protected: void resetImpl() override; void markDirty() override; - std::vector _vertexBufferBindings; - std::vector _fragmentBufferBindings; - std::vector _vertexTextureBindings; - std::vector _fragmentTextureBindings; - std::vector _vertexSamplerStateBindings; - std::vector _fragmentSamplerStateBindings; + MVKVector _vertexBufferBindings; + MVKVector _fragmentBufferBindings; + MVKVector _vertexTextureBindings; + MVKVector _fragmentTextureBindings; + MVKVector _vertexSamplerStateBindings; + MVKVector _fragmentSamplerStateBindings; MVKMTLBufferBinding _vertexAuxBufferBinding; MVKMTLBufferBinding _fragmentAuxBufferBinding; @@ -499,9 +499,9 @@ protected: void resetImpl() override; void markDirty() override; - std::vector _bufferBindings; - std::vector _textureBindings; - std::vector _samplerStateBindings; + MVKVector _bufferBindings; + MVKVector _textureBindings; + MVKVector _samplerStateBindings; MVKMTLBufferBinding _auxBufferBinding; bool _areBufferBindingsDirty = false; diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm index b1ed8f34..718653f0 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm +++ b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm @@ -50,7 +50,7 @@ void MVKPipelineCommandEncoderState::resetImpl() { #pragma mark - #pragma mark MVKViewportCommandEncoderState -void MVKViewportCommandEncoderState::setViewports(vector mtlViewports, +void MVKViewportCommandEncoderState::setViewports(const MVKVector &mtlViewports, uint32_t firstViewport, bool isSettingDynamically) { @@ -91,7 +91,7 @@ void MVKViewportCommandEncoderState::resetImpl() { #pragma mark - #pragma mark MVKScissorCommandEncoderState -void MVKScissorCommandEncoderState::setScissors(vector mtlScissors, +void 
MVKScissorCommandEncoderState::setScissors(const MVKVector &mtlScissors, uint32_t firstScissor, bool isSettingDynamically) { @@ -113,7 +113,11 @@ void MVKScissorCommandEncoderState::setScissors(vector mtlScisso void MVKScissorCommandEncoderState::encodeImpl() { MVKAssert(!_mtlScissors.empty(), "Must specify at least one scissor rect"); - std::vector clippedScissors(_mtlScissors); + MVKVector clippedScissors; + for ( const auto &scissor : _mtlScissors) + { + clippedScissors.emplace_back( scissor ); + } std::for_each(clippedScissors.begin(), clippedScissors.end(), [this](MTLScissorRect& scissor) { scissor = _cmdEncoder->clipToRenderArea(scissor); }); @@ -136,7 +140,7 @@ void MVKScissorCommandEncoderState::resetImpl() { #pragma mark - #pragma mark MVKPushConstantsCommandEncoderState -void MVKPushConstantsCommandEncoderState:: setPushConstants(uint32_t offset, vector& pushConstants) { +void MVKPushConstantsCommandEncoderState:: setPushConstants(uint32_t offset, MVKVector& pushConstants) { uint32_t pcCnt = (uint32_t)pushConstants.size(); mvkEnsureSize(_pushConstants, offset + pcCnt); copy(pushConstants.begin(), pushConstants.end(), _pushConstants.begin() + offset); diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h index ec6323da..703e3fd8 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h +++ b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h @@ -20,6 +20,7 @@ #include "MVKDevice.h" #include "MVKImage.h" +#include "MVKVector.h" #include #include #include @@ -80,7 +81,7 @@ public: void bind(MVKCommandEncoder* cmdEncoder, MVKDescriptorBinding& descBinding, MVKShaderResourceBinding& dslMTLRezIdxOffsets, - std::vector& dynamicOffsets, + MVKVector& dynamicOffsets, uint32_t* pDynamicOffsetIndex); /** Encodes this binding layout and the specified descriptor binding on the specified command encoder immediately. 
*/ @@ -131,7 +132,7 @@ public: void bindDescriptorSet(MVKCommandEncoder* cmdEncoder, MVKDescriptorSet* descSet, MVKShaderResourceBinding& dslMTLRezIdxOffsets, - std::vector& dynamicOffsets, + MVKVector& dynamicOffsets, uint32_t* pDynamicOffsetIndex); @@ -165,7 +166,7 @@ protected: friend class MVKPipelineLayout; friend class MVKDescriptorSet; - std::vector _bindings; + MVKVector _bindings; MVKShaderResourceBinding _mtlResourceCounts; bool _isPushDescriptorLayout : 1; }; diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm index fa777529..7890672f 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm @@ -80,7 +80,7 @@ MVK_PUBLIC_SYMBOL MVKShaderResourceBinding& MVKShaderResourceBinding::operator+= void MVKDescriptorSetLayoutBinding::bind(MVKCommandEncoder* cmdEncoder, MVKDescriptorBinding& descBinding, MVKShaderResourceBinding& dslMTLRezIdxOffsets, - vector& dynamicOffsets, + MVKVector& dynamicOffsets, uint32_t* pDynamicOffsetIndex) { MVKMTLBufferBinding bb; MVKMTLTextureBinding tb; @@ -494,7 +494,7 @@ VkResult MVKDescriptorSetLayoutBinding::initMetalResourceIndexOffsets(MVKShaderS void MVKDescriptorSetLayout::bindDescriptorSet(MVKCommandEncoder* cmdEncoder, MVKDescriptorSet* descSet, MVKShaderResourceBinding& dslMTLRezIdxOffsets, - vector& dynamicOffsets, + MVKVector& dynamicOffsets, uint32_t* pDynamicOffsetIndex) { if (_isPushDescriptorLayout) return; diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h index 3e3f2466..9158e6c0 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h +++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h @@ -22,6 +22,7 @@ #include "MVKDescriptorSet.h" #include "MVKShaderModule.h" #include "MVKSync.h" +#include "MVKVector.h" #include #include #include @@ -53,9 +54,9 @@ public: /** Binds descriptor sets to a command encoder. 
*/ void bindDescriptorSets(MVKCommandEncoder* cmdEncoder, - std::vector& descriptorSets, + MVKVector& descriptorSets, uint32_t firstSet, - std::vector& dynamicOffsets); + MVKVector& dynamicOffsets); /** Populates the specified shader converter context. */ void populateShaderConverterContext(SPIRVToMSLConverterContext& context); @@ -157,8 +158,8 @@ protected: VkPipelineRasterizationStateCreateInfo _rasterInfo; VkPipelineDepthStencilStateCreateInfo _depthStencilInfo; - std::vector _mtlViewports; - std::vector _mtlScissors; + MVKVector _mtlViewports; + MVKVector _mtlScissors; id _mtlPipelineState; MTLCullMode _mtlCullMode; diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm index 02d13ac9..3a75a87a 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm @@ -36,9 +36,9 @@ using namespace std; #pragma mark MVKPipelineLayout void MVKPipelineLayout::bindDescriptorSets(MVKCommandEncoder* cmdEncoder, - vector& descriptorSets, + MVKVector& descriptorSets, uint32_t firstSet, - vector& dynamicOffsets) { + MVKVector& dynamicOffsets) { uint32_t pDynamicOffsetIndex = 0; uint32_t dsCnt = (uint32_t)descriptorSets.size(); @@ -343,6 +343,7 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::getMTLRenderPipelineDescriptor } } + _needsFragmentAuxBuffer = false; // Fragment shader if (mvkAreFlagsEnabled(pSS->stage, VK_SHADER_STAGE_FRAGMENT_BIT)) { shaderContext.options.entryPointStage = spv::ExecutionModelFragment; diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKQueue.h b/MoltenVK/MoltenVK/GPUObjects/MVKQueue.h index c27d7d21..8680d65b 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKQueue.h +++ b/MoltenVK/MoltenVK/GPUObjects/MVKQueue.h @@ -22,6 +22,7 @@ #include "MVKCommandBuffer.h" #include "MVKImage.h" #include "MVKSync.h" +#include "MVKVector.h" #include #include @@ -169,7 +170,7 @@ protected: MVKQueueSubmission* _prev; MVKQueueSubmission* _next; VkResult _submissionResult; - std::vector 
_waitSemaphores; + MVKVector _waitSemaphores; bool _isAwaitingSemaphores; }; @@ -204,8 +205,8 @@ protected: void commitActiveMTLCommandBuffer(bool signalCompletion = false); void finish(); - std::vector _cmdBuffers; - std::vector _signalSemaphores; + MVKVector _cmdBuffers; + MVKVector _signalSemaphores; MVKFence* _fence; MVKCommandUse _cmdBuffUse; id _activeMTLCommandBuffer; @@ -227,6 +228,6 @@ public: const VkPresentInfoKHR* pPresentInfo); protected: - std::vector _surfaceImages; + MVKVector _surfaceImages; }; diff --git a/MoltenVK/MoltenVK/Utility/MVKVector.h b/MoltenVK/MoltenVK/Utility/MVKVector.h new file mode 100755 index 00000000..0981d60f --- /dev/null +++ b/MoltenVK/MoltenVK/Utility/MVKVector.h @@ -0,0 +1,504 @@ +/////////////////////////////////////////////////////////////////////////////////////////////////// +// +// MVKVector.h - similar to std::vector +// +// 2017/01/26 - th/mb +// +// --------------------------------------------------------------------------- +// +// copyright (C) 2005-2017, Dr. Torsten Hans / Dr. Marc Borchers +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// - Redistributions of source code must retain the above copyright notice, +// this vector of conditions and the disclaimer below. +// - Redistributions in binary form must reproduce the above copyright notice, +// this vector of conditions and the disclaimer (as noted below) in the +// documentation and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. +// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +#ifndef MVK_VECTOR_H +#define MVK_VECTOR_H + +#include "MVKVectorAllocator.h" +#include +#include +#include + +template> class MVKVector +{ + Allocator alc; + +public: + class iterator + { + const MVKVector *vector; + size_t index; + + public: + iterator() = delete; + iterator( const size_t _index, const MVKVector &_vector ) : vector{ &_vector }, index{ _index } { } + + iterator &operator=( const iterator &it ) + { + vector = it.vector; + index = it.index; + return *this; + } + + Type *operator->() const + { + return &vector->alc.ptr[index]; + } + + operator Type*( ) const + { + return &vector->alc.ptr[index]; + } + + bool operator==( const iterator &it ) const + { + return ( vector == it.vector ) && ( index == it.index ); + } + + bool operator!=( const iterator &it ) const + { + return ( vector != it.vector ) || ( index != it.index ); + } + + iterator& operator++() { ++index; return *this; } + + bool is_valid() const { return index < vector->alc.num_elements_used; } + size_t get_position() const { return index; } + }; + + class reverse_iterator + { + const MVKVector *vector; + size_t index; + + public: + reverse_iterator() = delete; + reverse_iterator( const size_t _index, const MVKVector &_vector ) : vector{ &_vector }, index{ _index } { } + reverse_iterator &operator=( const reverse_iterator & ) = delete; + + Type *operator->() const + { + return &vector->alc.ptr[index]; + } + + operator Type*( ) const + { + return &vector->alc.ptr[index]; + } + + bool operator==( const reverse_iterator &it ) const + { + return vector == it.vector && index == it.index; + } + + bool operator!=( const reverse_iterator 
&it ) const + { + return vector != it.vector || index != it.index; + } + + reverse_iterator& operator++() { --index; return *this; } + + bool is_valid() const { return index < vector->alc.num_elements_used; } + size_t get_position() const { return index; } + }; + +private: + size_t vector_GetNextCapacity() const + { + constexpr auto ELEMENTS_FOR_64_BYTES = 64 / sizeof( Type ); + constexpr auto MINIMUM_CAPACITY = ELEMENTS_FOR_64_BYTES > 4 ? ELEMENTS_FOR_64_BYTES : 4; + const auto current_capacity = capacity(); + //if( current_capacity < 256 ) + // return MINIMUM_CAPACITY + 2 * current_capacity; + return MINIMUM_CAPACITY + ( 3 * current_capacity ) / 2; + } + + void vector_Allocate( const size_t s ) + { + const auto new_reserved_size = tm_max( s, alc.num_elements_used ); + + alc.allocate( new_reserved_size ); + } + + void vector_ReAllocate( const size_t s ) + { + alc.re_allocate( s ); + } + +public: + MVKVector() + { + } + + MVKVector( const size_t n, const Type t ) + { + if( n > 0 ) + { + alc.allocate( n ); + + for( size_t i = 0; i < n; ++i ) + { + alc.construct( &alc.ptr[i], t ); + } + + alc.num_elements_used = n; + } + } + + MVKVector( const MVKVector &a ) + { + const size_t n = a.size(); + + if( n > 0 ) + { + alc.allocate( n ); + + for( size_t i = 0; i < n; ++i ) + { + alc.construct( &alc.ptr[i], a.alc.ptr[i] ); + } + + alc.num_elements_used = n; + } + } + + MVKVector( MVKVector &&a ) : alc{ std::move( a.alc ) } + { + } + + MVKVector( std::initializer_list vector ) + { + if( vector.size() > capacity() ) + { + vector_Allocate( vector.size() ); + } + + // std::initializer_list does not yet support std::move, we use it anyway but it has no effect + for( auto &&element : vector ) + { + alc.construct( &alc.ptr[alc.num_elements_used], std::move( element ) ); + ++alc.num_elements_used; + } + } + + ~MVKVector() + { + } + + MVKVector& operator=( const MVKVector &a ) + { + if( this != &a ) + { + const auto n = a.alc.num_elements_used; + + if( alc.num_elements_used == n ) + 
{ + for( size_t i = 0; i < n; ++i ) + { + alc.ptr[i] = a.alc.ptr[i]; + } + } + else + { + if( n > capacity() ) + { + vector_ReAllocate( n ); + } + else + { + alc.destruct_all(); + } + + for( size_t i = 0; i < n; ++i ) + { + alc.construct( &alc.ptr[i], a.alc.ptr[i] ); + } + + alc.num_elements_used = n; + } + } + + return *this; + } + + MVKVector& operator=( MVKVector &&a ) + { + alc.swap( a.alc ); + return *this; + } + + bool operator==( const MVKVector &a ) const + { + if( alc.num_elements_used != a.alc.num_elements_used ) + return false; + for( size_t i = 0; i < alc.num_elements_used; ++i ) + { + if( alc.ptr[i] != a.alc.ptr[i] ) + return false; + } + return true; + } + + bool operator!=( const MVKVector &a ) const + { + if( alc.num_elements_used != a.alc.num_elements_used ) + return true; + for( size_t i = 0; i < alc.num_elements_used; ++i ) + { + if( alc.ptr[i] != a.alc.ptr[i] ) + return true; + } + return false; + } + + void swap( MVKVector &a ) + { + alc.swap( a.alc ); + } + + void clear() + { + alc.template destruct_all(); + } + + void reset() + { + alc.deallocate(); + } + + iterator begin() const { return iterator( 0, *this ); } + iterator end() const { return iterator( alc.num_elements_used, *this ); } + reverse_iterator rbegin() const { return reverse_iterator( alc.num_elements_used - 1, *this ); } + reverse_iterator rend() const { return reverse_iterator( size_t( -1 ), *this ); } + size_t size() const { return alc.num_elements_used; } + bool empty() const { return alc.num_elements_used == 0; } + + Type &at( const size_t i ) const + { + return alc.ptr[i]; + } + + const Type &operator[]( const size_t i ) const + { + return alc.ptr[i]; + } + + Type &operator[]( const size_t i ) + { + return alc.ptr[i]; + } + + const Type *data() const + { + return alc.ptr; + } + + Type *data() + { + return alc.ptr; + } + + size_t capacity() const + { + return alc.get_capacity(); + } + + const Type &front() const + { + return alc.ptr[0]; + } + + Type &front() + { + return 
alc.ptr[0]; + } + + const Type &back() const + { + return alc.ptr[alc.num_elements_used - 1]; + } + + Type &back() + { + return alc.ptr[alc.num_elements_used - 1]; + } + + void pop_back() + { + if( alc.num_elements_used > 0 ) + { + --alc.num_elements_used; + alc.destruct( &alc.ptr[alc.num_elements_used] ); + } + } + + void reserve( const size_t new_size ) + { + if( new_size > capacity() ) + { + vector_ReAllocate( new_size ); + } + } + + void assign( const size_t new_size, const Type &t ) + { + if( new_size <= capacity() ) + { + clear(); + } + else + { + vector_Allocate( new_size ); + } + + for( size_t i = 0; i < new_size; ++i ) + { + alc.construct( &alc.ptr[i], t ); + } + + alc.num_elements_used = new_size; + } + + void resize( const size_t new_size, const Type t = { } ) + { + if( new_size == alc.num_elements_used ) + { + return; + } + + if( new_size == 0 ) + { + clear(); + return; + } + + if( new_size > alc.num_elements_used ) + { + if( new_size > capacity() ) + { + vector_ReAllocate( new_size ); + } + + while( alc.num_elements_used < new_size ) + { + alc.construct( &alc.ptr[alc.num_elements_used], t ); + ++alc.num_elements_used; + } + } + else + { + //if constexpr( !std::is_trivially_destructible::value ) + { + while( alc.num_elements_used > new_size ) + { + --alc.num_elements_used; + alc.destruct( &alc.ptr[alc.num_elements_used] ); + } + } + //else + //{ + // alc.num_elements_used = new_size; + //} + } + } + + // trims the capacity of the slist to the number of alc.ptr + void shrink_to_fit() + { + alc.shrink_to_fit(); + } + + void erase( const iterator it ) + { + if( it.is_valid() ) + { + --alc.num_elements_used; + + for( size_t i = it.GetIndex(); i < alc.num_elements_used; ++i ) + { + alc.ptr[i] = std::move( alc.ptr[i + 1] ); + } + + // this is required for types with a destructor + alc.destruct( &alc.ptr[alc.num_elements_used] ); + } + } + + // adds t before it and automatically resizes vector if necessary + void insert( const iterator it, Type t ) + { + if( 
!it.is_valid() || alc.num_elements_used == 0 ) + { + push_back( std::move( t ) ); + } + else + { + if( alc.num_elements_used == capacity() ) + vector_ReAllocate( vector_GetNextCapacity() ); + + // move construct last element + alc.construct( &alc.ptr[alc.num_elements_used], std::move( alc.ptr[alc.num_elements_used - 1] ) ); + + // move the remaining elements + const size_t it_position = it.get_position(); + for( size_t i = alc.num_elements_used - 1; i > it_position; --i ) + { + alc.ptr[i] = std::move( alc.ptr[i - 1] ); + } + + alc.ptr[it_position] = std::move( t ); + ++alc.num_elements_used; + } + } + + void push_back( const Type &t ) + { + if( alc.num_elements_used == capacity() ) + vector_ReAllocate( vector_GetNextCapacity() ); + + alc.construct( &alc.ptr[alc.num_elements_used], t ); + ++alc.num_elements_used; + } + + void push_back( Type &&t ) + { + if( alc.num_elements_used == capacity() ) + vector_ReAllocate( vector_GetNextCapacity() ); + + alc.construct( &alc.ptr[alc.num_elements_used], std::forward( t ) ); + ++alc.num_elements_used; + } + + template + Type &emplace_back( Args&&... args ) + { + if( alc.num_elements_used == capacity() ) + vector_ReAllocate( vector_GetNextCapacity() ); + + alc.construct( &alc.ptr[alc.num_elements_used], std::forward( args )... ); + ++alc.num_elements_used; + + return alc.ptr[alc.num_elements_used - 1]; + } +}; + + +#endif // MVK_VECTOR_H + diff --git a/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h b/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h new file mode 100755 index 00000000..18cac4ec --- /dev/null +++ b/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h @@ -0,0 +1,549 @@ +/////////////////////////////////////////////////////////////////////////////////////////////////// +// +// mvk_vector_allocator.h - allocator classes for slist's +// +// 2017/01/26 - th/mb +// +// --------------------------------------------------------------------------- +// +// copyright (C) 2005-2017, Dr. Torsten Hans / Dr. 
Marc Borchers +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the disclaimer below. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the disclaimer (as noted below) in the +// documentation and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. +// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +#ifndef MVK_VECTOR_ALLOCATOR_H +#define MVK_VECTOR_ALLOCATOR_H + +#include +#include + +namespace mvk_memory_allocator +{ + inline char *alloc( size_t num_bytes ) + { + return new char[num_bytes]; + } + + inline void free( void *ptr ) + { + delete[] (char*)ptr; + } +}; + + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// +// mvk_vector_allocator_default -> malloc based allocator for tm_vector +// +/////////////////////////////////////////////////////////////////////////////////////////////////// +template +class mvk_vector_allocator_default final +{ +public: + T *ptr; + size_t num_elements_used; + +private: + size_t num_elements_reserved; + +public: + template typename std::enable_if< !std::is_trivially_constructible::value >::type + construct( S *_ptr, Args&&... 
_args ) + { + new ( _ptr ) S( std::forward( _args )... ); + } + + template typename std::enable_if< std::is_trivially_constructible::value >::type + construct( S *_ptr, Args&&... _args ) + { + *_ptr = S( std::forward( _args )... ); + } + + template typename std::enable_if< !std::is_trivially_destructible::value >::type + destruct( S *_ptr ) + { + _ptr->~S(); + } + + template typename std::enable_if< std::is_trivially_destructible::value >::type + destruct( S *_ptr ) + { + } + + + template typename std::enable_if< !std::is_trivially_destructible::value >::type + destruct_all() + { + for( size_t i = 0; i < num_elements_used; ++i ) + { + ptr[i].~S(); + } + + num_elements_used = 0; + } + + template typename std::enable_if< std::is_trivially_destructible::value >::type + destruct_all() + { + num_elements_used = 0; + } + +public: + constexpr mvk_vector_allocator_default() : ptr{ nullptr }, num_elements_used{ 0 }, num_elements_reserved{ 0 } + { + } + + mvk_vector_allocator_default( mvk_vector_allocator_default &&a ) : ptr{ a.ptr }, num_elements_used{ a.num_elements_used }, num_elements_reserved{ a.num_elements_reserved } + { + a.ptr = nullptr; + a.num_elements_used = 0; + a.num_elements_reserved = 0; + } + + ~mvk_vector_allocator_default() + { + deallocate(); + } + + size_t get_capacity() const + { + return num_elements_reserved; + } + + void swap( mvk_vector_allocator_default &a ) + { + const auto copy_ptr = a.ptr; + const auto copy_num_elements_used = a.num_elements_used; + const auto copy_num_elements_reserved = a.num_elements_reserved; + + a.ptr = ptr; + a.num_elements_used = num_elements_used; + a.num_elements_reserved = num_elements_reserved; + + ptr = copy_ptr; + num_elements_used = copy_num_elements_used; + num_elements_reserved = copy_num_elements_reserved; + } + + void allocate( const size_t num_elements_to_reserve ) + { + deallocate(); + + ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( num_elements_to_reserve * sizeof( T ) ) ); + num_elements_used = 
0; + num_elements_reserved = num_elements_to_reserve; + } + + void re_allocate( const size_t num_elements_to_reserve ) + { + //if constexpr( std::is_trivially_copyable::value ) + //{ + // ptr = reinterpret_cast< T* >( mvk_memory_allocator::tm_memrealloc( ptr, num_elements_to_reserve * sizeof( T ) ); + //} + //else + { + auto *new_ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( num_elements_to_reserve * sizeof( T ) ) ); + + for( size_t i = 0; i < num_elements_used; ++i ) + { + construct( &new_ptr[i], std::move( ptr[i] ) ); + destruct( &ptr[i] ); + } + + //if ( ptr != nullptr ) + { + mvk_memory_allocator::free( ptr ); + } + + ptr = new_ptr; + } + + num_elements_reserved = num_elements_to_reserve; + } + + void shrink_to_fit() + { + if( num_elements_used == 0 ) + { + deallocate(); + } + else + { + auto *new_ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( num_elements_used * sizeof( T ) ) ); + + for( size_t i = 0; i < num_elements_used; ++i ) + { + construct( &new_ptr[i], std::move( ptr[i] ) ); + destruct( &ptr[i] ); + } + + mvk_memory_allocator::free( ptr ); + + ptr = new_ptr; + num_elements_reserved = num_elements_used; + } + } + + void deallocate() + { + destruct_all(); + + mvk_memory_allocator::free( ptr ); + + ptr = nullptr; + num_elements_reserved = 0; + } +}; + + + + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// +// mvk_vector_allocator_with_stack -> malloc based slist allocator with extra stack storage +// +/////////////////////////////////////////////////////////////////////////////////////////////////// +template +class mvk_vector_allocator_with_stack +{ +public: + T *ptr; + size_t num_elements_used; + +private: + //size_t num_elements_reserved; // uhh, num_elements_reserved is mapped onto the stack elements, let the fun begin + alignas( alignof( T ) ) unsigned char elements_stack[N * sizeof( T )]; + + static_assert( N * sizeof( T ) >= sizeof( size_t ), "Bummer, TH's nasty 
optimization doesn't work" ); + + void set_num_elements_reserved( const size_t num_elements_reserved ) + { + *reinterpret_cast< size_t* >( &elements_stack[0] ) = num_elements_reserved; + } + + + // + // faster element construction and destruction using type traits + // +public: + // + // element creation and destruction + // + template typename std::enable_if< !std::is_trivially_constructible::value >::type + construct( S *_ptr, Args&&... _args ) + { + new ( _ptr ) S( std::forward( _args )... ); + } + + template typename std::enable_if< std::is_trivially_constructible::value >::type + construct( S *_ptr, Args&&... _args ) + { + *_ptr = S( std::forward( _args )... ); + } + + template typename std::enable_if< !std::is_trivially_destructible::value >::type + destruct( S *_ptr ) + { + _ptr->~S(); + } + + template typename std::enable_if< std::is_trivially_destructible::value >::type + destruct( S *_ptr ) + { + } + + template typename std::enable_if< !std::is_trivially_destructible::value >::type + destruct_all() + { + for( size_t i = 0; i < num_elements_used; ++i ) + { + ptr[i].~S(); + } + + num_elements_used = 0; + } + + template typename std::enable_if< std::is_trivially_destructible::value >::type + destruct_all() + { + num_elements_used = 0; + } + + template typename std::enable_if< !std::is_trivially_destructible::value >::type + swap_stack( mvk_vector_allocator_with_stack &a ) + { + T stack_copy[N]; + + for( size_t i = 0; i < num_elements_used; ++i ) + { + construct( &stack_copy[i], std::move( ptr[i] ) ); + destruct( &ptr[i] ); + } + + for( size_t i = 0; i < a.num_elements_used; ++i ) + { + construct( &ptr[i], std::move( a.ptr[i] ) ); + destruct( &ptr[i] ); + } + + for( size_t i = 0; i < num_elements_used; ++i ) + { + construct( &a.ptr[i], std::move( stack_copy[i] ) ); + destruct( &stack_copy[i] ); + } + } + + template typename std::enable_if< std::is_trivially_destructible::value >::type + swap_stack( mvk_vector_allocator_with_stack &a ) + { + constexpr int 
STACK_SIZE = N * sizeof( T ); + for( int i = 0; i < STACK_SIZE; ++i ) + { + const auto v = elements_stack[i]; + elements_stack[i] = a.elements_stack[i]; + a.elements_stack[i] = v; + } + } + + +public: + mvk_vector_allocator_with_stack() : ptr{ reinterpret_cast< T* >( &elements_stack[0] ) }, num_elements_used{ 0 } + { + } + + mvk_vector_allocator_with_stack( mvk_vector_allocator_with_stack &&a ) : num_elements_used{ a.num_elements_used } + { + // is a heap based -> steal ptr from a + if( !a.get_data_on_stack() ) + { + ptr = a.ptr; + set_num_elements_reserved( a.get_capacity() ); + + a.ptr = a.get_default_ptr(); + } + else + { + ptr = get_default_ptr(); + for( size_t i = 0; i < a.num_elements_used; ++i ) + { + construct( &ptr[i], std::move( a.ptr[i] ) ); + destruct( &a.ptr[i] ); + } + } + + a.num_elements_used = 0; + } + + ~mvk_vector_allocator_with_stack() + { + deallocate(); + } + + size_t get_capacity() const + { + return get_data_on_stack() ? N : *reinterpret_cast< const size_t* >( &elements_stack[0] ); + } + + constexpr T *get_default_ptr() const + { + return reinterpret_cast< T* >( const_cast< unsigned char * >( &elements_stack[0] ) ); + } + + bool get_data_on_stack() const + { + return ptr == get_default_ptr(); + } + + void swap( mvk_vector_allocator_with_stack &a ) + { + // both allocators on heap -> easy case + if( !get_data_on_stack() && !a.get_data_on_stack() ) + { + auto copy_ptr = ptr; + auto copy_num_elements_reserved = get_capacity(); + ptr = a.ptr; + set_num_elements_reserved( a.get_capacity() ); + a.ptr = copy_ptr; + a.set_num_elements_reserved( copy_num_elements_reserved ); + } + // both allocators on stack -> just switch the stack contents + else if( get_data_on_stack() && a.get_data_on_stack() ) + { + swap_stack( a ); + } + else if( get_data_on_stack() && !a.get_data_on_stack() ) + { + auto copy_ptr = a.ptr; + auto copy_num_elements_reserved = a.get_capacity(); + + a.ptr = a.get_default_ptr(); + for( size_t i = 0; i < num_elements_used; ++i ) + { 
+ construct( &a.ptr[i], std::move( ptr[i] ) ); + destruct( &ptr[i] ); + } + + ptr = copy_ptr; + set_num_elements_reserved( copy_num_elements_reserved ); + } + else if( !get_data_on_stack() && a.get_data_on_stack() ) + { + auto copy_ptr = ptr; + auto copy_num_elements_reserved = get_capacity(); + + ptr = get_default_ptr(); + for( size_t i = 0; i < a.num_elements_used; ++i ) + { + construct( &ptr[i], std::move( a.ptr[i] ) ); + destruct( &a.ptr[i] ); + } + + a.ptr = copy_ptr; + a.set_num_elements_reserved( copy_num_elements_reserved ); + } + + auto copy_num_elements_used = num_elements_used; + num_elements_used = a.num_elements_used; + a.num_elements_used = copy_num_elements_used; + } + + // + // allocates rounded up to the defined alignment the number of bytes / if the system cannot allocate the specified amount of memory then a null block is returned + // + void allocate( const size_t num_elements_to_reserve ) + { + deallocate(); + + // check if enough memory on stack space is left + if( num_elements_to_reserve <= N ) + { + return; + } + + ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( num_elements_to_reserve * sizeof( T ) ) ); + num_elements_used = 0; + set_num_elements_reserved( num_elements_to_reserve ); + } + + //template typename std::enable_if< !std::is_trivially_copyable::value >::type + void _re_allocate( const size_t num_elements_to_reserve ) + { + auto *new_ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( num_elements_to_reserve * sizeof( T ) ) ); + + for( size_t i = 0; i < num_elements_used; ++i ) + { + construct( &new_ptr[i], std::move( ptr[i] ) ); + destruct( &ptr[i] ); + } + + if( ptr != get_default_ptr() ) + { + mvk_memory_allocator::free( ptr ); + } + + ptr = new_ptr; + set_num_elements_reserved( num_elements_to_reserve ); + } + + //template typename std::enable_if< std::is_trivially_copyable::value >::type + // _re_allocate( const size_t num_elements_to_reserve ) + //{ + // const bool data_is_on_stack = get_data_on_stack(); + 
// + // auto *new_ptr = reinterpret_cast< S* >( mvk_memory_allocator::tm_memrealloc( data_is_on_stack ? nullptr : ptr, num_elements_to_reserve * sizeof( S ) ) ); + // if( data_is_on_stack ) + // { + // for( int i = 0; i < N; ++i ) + // { + // new_ptr[i] = ptr[i]; + // } + // } + // + // ptr = new_ptr; + // set_num_elements_reserved( num_elements_to_reserve ); + //} + + void re_allocate( const size_t num_elements_to_reserve ) + { + //TM_ASSERT( num_elements_to_reserve > get_capacity() ); + + if( num_elements_to_reserve > N ) + { + _re_allocate( num_elements_to_reserve ); + } + } + + void shrink_to_fit() + { + // nothing to do if data is on stack already + if( get_data_on_stack() ) + return; + + // move elements to stack space + if( num_elements_used <= N ) + { + const auto num_elements_reserved = get_capacity(); + + auto *stack_ptr = get_default_ptr(); + for( size_t i = 0; i < num_elements_used; ++i ) + { + construct( &stack_ptr[i], std::move( ptr[i] ) ); + destruct( &ptr[i] ); + } + + mvk_memory_allocator::free( ptr ); + + ptr = stack_ptr; + } + else + { + auto *new_ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( ptr, num_elements_used * sizeof( T ) ) ); + + for( size_t i = 0; i < num_elements_used; ++i ) + { + construct( &new_ptr[i], std::move( ptr[i] ) ); + destruct( &ptr[i] ); + } + + mvk_memory_allocator::free( ptr ); + + ptr = new_ptr; + set_num_elements_reserved( num_elements_used ); + } + } + + void deallocate() + { + destruct_all(); + + if( !get_data_on_stack() ) + { + mvk_memory_allocator::free( ptr ); + } + + ptr = get_default_ptr(); + num_elements_used = 0; + } +}; + + +#endif // MVK_VECTOR_ALLOCATOR_H + + From 0c0afd4c687e0ff79ef8307b151c315348f7c79b Mon Sep 17 00:00:00 2001 From: aerofly Date: Sun, 9 Dec 2018 16:41:48 +0100 Subject: [PATCH 02/14] mvk_vector --- MoltenVK/MoltenVK/Utility/MVKVector.h | 56 +++++++---------- .../MoltenVK/Utility/MVKVectorAllocator.h | 60 +++++++------------ 2 files changed, 45 insertions(+), 71 deletions(-) diff 
--git a/MoltenVK/MoltenVK/Utility/MVKVector.h b/MoltenVK/MoltenVK/Utility/MVKVector.h index 0981d60f..67eed898 100755 --- a/MoltenVK/MoltenVK/Utility/MVKVector.h +++ b/MoltenVK/MoltenVK/Utility/MVKVector.h @@ -1,36 +1,28 @@ -/////////////////////////////////////////////////////////////////////////////////////////////////// -// -// MVKVector.h - similar to std::vector -// -// 2017/01/26 - th/mb -// -// --------------------------------------------------------------------------- -// -// copyright (C) 2005-2017, Dr. Torsten Hans / Dr. Marc Borchers -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// - Redistributions of source code must retain the above copyright notice, -// this vector of conditions and the disclaimer below. -// - Redistributions in binary form must reproduce the above copyright notice, -// this vector of conditions and the disclaimer (as noted below) in the -// documentation and/or other materials provided with the distribution. -// - Neither the name of the copyright holder nor the names of its contributors -// may be used to endorse or promote products derived from this software -// without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -// ARE DISCLAIMED. -// -/////////////////////////////////////////////////////////////////////////////////////////////////// +/* + * MVKVectorAllocator.h + * + * Copyright (c) 2012-2018 Dr. Torsten Hans (hans@ipacs.de) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ -#ifndef MVK_VECTOR_H -#define MVK_VECTOR_H +#pragma once +// +// a simple std::vector like container with a configurable extra stack space +// this class supports just the necessary members to be compatible with MoltenVK +// if C++17 is used, code can be simplified further +// #include "MVKVectorAllocator.h" #include #include @@ -500,5 +492,3 @@ public: }; -#endif // MVK_VECTOR_H - diff --git a/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h b/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h index 18cac4ec..743e4c33 100755 --- a/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h +++ b/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h @@ -1,42 +1,29 @@ -/////////////////////////////////////////////////////////////////////////////////////////////////// -// -// mvk_vector_allocator.h - allocator classes for slist's -// -// 2017/01/26 - th/mb -// -// --------------------------------------------------------------------------- -// -// copyright (C) 2005-2017, Dr. Torsten Hans / Dr. Marc Borchers -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// - Redistributions of source code must retain the above copyright notice, -// this list of conditions and the disclaimer below. -// - Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the disclaimer (as noted below) in the -// documentation and/or other materials provided with the distribution. 
-// - Neither the name of the copyright holder nor the names of its contributors -// may be used to endorse or promote products derived from this software -// without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -// ARE DISCLAIMED. -// -/////////////////////////////////////////////////////////////////////////////////////////////////// +/* + * MVKVectorAllocator.h + * + * Copyright (c) 2012-2018 Dr. Torsten Hans (hans@ipacs.de) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ -#ifndef MVK_VECTOR_ALLOCATOR_H -#define MVK_VECTOR_ALLOCATOR_H +#pragma once #include #include namespace mvk_memory_allocator { - inline char *alloc( size_t num_bytes ) + inline char *alloc( const size_t num_bytes ) { return new char[num_bytes]; } @@ -50,7 +37,7 @@ namespace mvk_memory_allocator /////////////////////////////////////////////////////////////////////////////////////////////////// // -// mvk_vector_allocator_default -> malloc based allocator for tm_vector +// mvk_vector_allocator_default -> malloc based allocator for MVKVector // /////////////////////////////////////////////////////////////////////////////////////////////////// template @@ -217,7 +204,7 @@ public: /////////////////////////////////////////////////////////////////////////////////////////////////// // -// mvk_vector_allocator_with_stack -> malloc based slist allocator with extra stack storage +// mvk_vector_allocator_with_stack -> malloc based MVKVector allocator with extra stack storage // /////////////////////////////////////////////////////////////////////////////////////////////////// template @@ -231,7 +218,7 @@ private: //size_t num_elements_reserved; // uhh, num_elements_reserved is mapped onto the stack elements, let the fun begin alignas( alignof( T ) ) unsigned char elements_stack[N * sizeof( T )]; - static_assert( N * sizeof( T ) >= sizeof( size_t ), "Bummer, TH's nasty optimization doesn't work" ); + static_assert( N * sizeof( T ) >= sizeof( size_t ), "Bummer, nasty optimization doesn't work" ); void set_num_elements_reserved( const size_t num_elements_reserved ) { @@ -544,6 +531,3 @@ public: }; -#endif // MVK_VECTOR_ALLOCATOR_H - - From 8d868926bfd08852e90b4f02e2b53ae5cbdf6030 Mon Sep 17 00:00:00 2001 From: aerofly Date: Sun, 9 Dec 2018 16:58:15 +0100 Subject: [PATCH 03/14] mvk_vector --- MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm | 2 ++ MoltenVK/MoltenVK/Utility/MVKVector.h | 1 + MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h | 18 ++++++------------ 3 files changed, 9 
insertions(+), 12 deletions(-) diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm index 3a75a87a..ba4ce43a 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm @@ -343,6 +343,8 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::getMTLRenderPipelineDescriptor } } + // bug fix by aerofly -> if no fragment shader is used and _needsFragmentAuxBuffer was true newBufferWithLength was trying to allocate zero bytes + // please verify this fix _needsFragmentAuxBuffer = false; // Fragment shader if (mvkAreFlagsEnabled(pSS->stage, VK_SHADER_STAGE_FRAGMENT_BIT)) { diff --git a/MoltenVK/MoltenVK/Utility/MVKVector.h b/MoltenVK/MoltenVK/Utility/MVKVector.h index 67eed898..c15448ea 100755 --- a/MoltenVK/MoltenVK/Utility/MVKVector.h +++ b/MoltenVK/MoltenVK/Utility/MVKVector.h @@ -22,6 +22,7 @@ // a simple std::vector like container with a configurable extra stack space // this class supports just the necessary members to be compatible with MoltenVK // if C++17 is used, code can be simplified further +// by default MVKVector used 8 elements from the stack before getting memory from heap // #include "MVKVectorAllocator.h" #include diff --git a/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h b/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h index 743e4c33..aaa59d29 100755 --- a/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h +++ b/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h @@ -35,11 +35,11 @@ namespace mvk_memory_allocator }; -/////////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////// // // mvk_vector_allocator_default -> malloc based allocator for MVKVector // -/////////////////////////////////////////////////////////////////////////////////////////////////// 
+////////////////////////////////////////////////////////////////////////////////////////// template class mvk_vector_allocator_default final { @@ -200,13 +200,11 @@ public: }; - - -/////////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////// // -// mvk_vector_allocator_with_stack -> malloc based MVKVector allocator with extra stack storage +// mvk_vector_allocator_with_stack -> malloc based MVKVector allocator with stack storage // -/////////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////// template class mvk_vector_allocator_with_stack { @@ -226,12 +224,9 @@ private: } - // - // faster element construction and destruction using type traits - // public: // - // element creation and destruction + // faster element construction and destruction using type traits // template typename std::enable_if< !std::is_trivially_constructible::value >::type construct( S *_ptr, Args&&... 
_args ) @@ -309,7 +304,6 @@ public: } } - public: mvk_vector_allocator_with_stack() : ptr{ reinterpret_cast< T* >( &elements_stack[0] ) }, num_elements_used{ 0 } { From 3587bcf02f7393f1c3cf0945c0bd03bc1014840f Mon Sep 17 00:00:00 2001 From: aerofly Date: Sun, 9 Dec 2018 17:18:40 +0100 Subject: [PATCH 04/14] mvk_vector --- MoltenVK/MoltenVK.xcodeproj/project.pbxproj | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/MoltenVK/MoltenVK.xcodeproj/project.pbxproj b/MoltenVK/MoltenVK.xcodeproj/project.pbxproj index d7ee478f..d25f6cd8 100644 --- a/MoltenVK/MoltenVK.xcodeproj/project.pbxproj +++ b/MoltenVK/MoltenVK.xcodeproj/project.pbxproj @@ -9,6 +9,10 @@ /* Begin PBXBuildFile section */ 45003E73214AD4E500E989CB /* MVKExtensions.def in Headers */ = {isa = PBXBuildFile; fileRef = 45003E6F214AD4C900E989CB /* MVKExtensions.def */; }; 45003E74214AD4E600E989CB /* MVKExtensions.def in Headers */ = {isa = PBXBuildFile; fileRef = 45003E6F214AD4C900E989CB /* MVKExtensions.def */; }; + 83A4AD2A21BD75570006C935 /* MVKVector.h in Headers */ = {isa = PBXBuildFile; fileRef = 83A4AD2521BD75570006C935 /* MVKVector.h */; }; + 83A4AD2B21BD75570006C935 /* MVKVector.h in Headers */ = {isa = PBXBuildFile; fileRef = 83A4AD2521BD75570006C935 /* MVKVector.h */; }; + 83A4AD2C21BD75570006C935 /* MVKVectorAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = 83A4AD2921BD75570006C935 /* MVKVectorAllocator.h */; }; + 83A4AD2D21BD75570006C935 /* MVKVectorAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = 83A4AD2921BD75570006C935 /* MVKVectorAllocator.h */; }; A9096E5E1F81E16300DFBEA6 /* MVKCmdDispatch.mm in Sources */ = {isa = PBXBuildFile; fileRef = A9096E5D1F81E16300DFBEA6 /* MVKCmdDispatch.mm */; }; A9096E5F1F81E16300DFBEA6 /* MVKCmdDispatch.mm in Sources */ = {isa = PBXBuildFile; fileRef = A9096E5D1F81E16300DFBEA6 /* MVKCmdDispatch.mm */; }; A909F65F213B190700FCD6BE /* MVKExtensions.h in Headers */ = {isa = PBXBuildFile; fileRef = A909F65A213B190600FCD6BE /* 
MVKExtensions.h */; }; @@ -261,6 +265,8 @@ /* Begin PBXFileReference section */ 45003E6F214AD4C900E989CB /* MVKExtensions.def */ = {isa = PBXFileReference; explicitFileType = sourcecode.cpp.h; fileEncoding = 4; path = MVKExtensions.def; sourceTree = ""; }; + 83A4AD2521BD75570006C935 /* MVKVector.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MVKVector.h; sourceTree = ""; }; + 83A4AD2921BD75570006C935 /* MVKVectorAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MVKVectorAllocator.h; sourceTree = ""; }; A9096E5C1F81E16300DFBEA6 /* MVKCmdDispatch.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = MVKCmdDispatch.h; sourceTree = ""; }; A9096E5D1F81E16300DFBEA6 /* MVKCmdDispatch.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MVKCmdDispatch.mm; sourceTree = ""; }; A909F65A213B190600FCD6BE /* MVKExtensions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MVKExtensions.h; sourceTree = ""; }; @@ -491,6 +497,8 @@ A98149401FB6A3F7005F00B4 /* Utility */ = { isa = PBXGroup; children = ( + 83A4AD2521BD75570006C935 /* MVKVector.h */, + 83A4AD2921BD75570006C935 /* MVKVectorAllocator.h */, A98149411FB6A3F7005F00B4 /* MVKBaseObject.cpp */, A98149421FB6A3F7005F00B4 /* MVKBaseObject.h */, A98149431FB6A3F7005F00B4 /* MVKEnvironment.h */, @@ -607,10 +615,12 @@ A9F042A61FB4CF83009FCCB8 /* MVKLogging.h in Headers */, A94FB8001C7DFB4800632CA3 /* MVKQueue.h in Headers */, A94FB7EC1C7DFB4800632CA3 /* MVKFramebuffer.h in Headers */, + 83A4AD2C21BD75570006C935 /* MVKVectorAllocator.h in Headers */, A98149611FB6A3F7005F00B4 /* MVKWatermarkShaderSource.h in Headers */, A9E53DE32100B197002781DD /* MTLSamplerDescriptor+MoltenVK.h in Headers */, A94FB8181C7DFB4800632CA3 /* MVKSync.h in Headers */, A94FB7E41C7DFB4800632CA3 /* MVKDevice.h in Headers */, + 83A4AD2A21BD75570006C935 /* MVKVector.h in 
Headers */, A94FB7D41C7DFB4800632CA3 /* MVKCommandPool.h in Headers */, A94FB80C1C7DFB4800632CA3 /* MVKShaderModule.h in Headers */, A94FB7C01C7DFB4800632CA3 /* MVKCmdQueries.h in Headers */, @@ -668,10 +678,12 @@ A9F042A71FB4CF83009FCCB8 /* MVKLogging.h in Headers */, A94FB8011C7DFB4800632CA3 /* MVKQueue.h in Headers */, A94FB7ED1C7DFB4800632CA3 /* MVKFramebuffer.h in Headers */, + 83A4AD2D21BD75570006C935 /* MVKVectorAllocator.h in Headers */, A98149621FB6A3F7005F00B4 /* MVKWatermarkShaderSource.h in Headers */, A9E53DE42100B197002781DD /* MTLSamplerDescriptor+MoltenVK.h in Headers */, A94FB8191C7DFB4800632CA3 /* MVKSync.h in Headers */, A94FB7E51C7DFB4800632CA3 /* MVKDevice.h in Headers */, + 83A4AD2B21BD75570006C935 /* MVKVector.h in Headers */, A94FB7D51C7DFB4800632CA3 /* MVKCommandPool.h in Headers */, A94FB80D1C7DFB4800632CA3 /* MVKShaderModule.h in Headers */, A94FB7C11C7DFB4800632CA3 /* MVKCmdQueries.h in Headers */, From b8d12f58a58d872889c07dae4a65cfe11ec32482 Mon Sep 17 00:00:00 2001 From: aerofly Date: Sun, 9 Dec 2018 17:24:29 +0100 Subject: [PATCH 05/14] Add MVKVector a std::vector compatible container with configurable stack space --- MoltenVK/MoltenVK/Utility/MVKVector.h | 1 + 1 file changed, 1 insertion(+) diff --git a/MoltenVK/MoltenVK/Utility/MVKVector.h b/MoltenVK/MoltenVK/Utility/MVKVector.h index c15448ea..93afc744 100755 --- a/MoltenVK/MoltenVK/Utility/MVKVector.h +++ b/MoltenVK/MoltenVK/Utility/MVKVector.h @@ -493,3 +493,4 @@ public: }; + From 8c1581f24716781db0beeffe33e6651479d0dd7d Mon Sep 17 00:00:00 2001 From: aerofly Date: Wed, 12 Dec 2018 16:08:22 +0100 Subject: [PATCH 06/14] Update fork to latest MoltenVK --- MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h | 2 +- MoltenVK/MoltenVK/Commands/MVKCmdTransfer.mm | 4 +- .../Commands/MVKCommandResourceFactory.mm | 4 +- MoltenVK/MoltenVK/GPUObjects/MVKBuffer.mm | 5 +- MoltenVK/MoltenVK/GPUObjects/MVKDevice.h | 11 +- MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm | 31 +- 
.../MoltenVK/GPUObjects/MVKDeviceMemory.mm | 2 +- MoltenVK/MoltenVK/GPUObjects/MVKImage.mm | 23 +- MoltenVK/MoltenVK/GPUObjects/MVKInstance.mm | 9 +- MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm | 42 +- MoltenVK/MoltenVK/GPUObjects/MVKRenderPass.mm | 2 +- MoltenVK/MoltenVK/GPUObjects/MVKSwapchain.mm | 2 +- MoltenVK/MoltenVK/GPUObjects/MVKSync.h | 21 +- MoltenVK/MoltenVK/GPUObjects/MVKSync.mm | 87 +--- MoltenVK/MoltenVK/OS/MVKOSExtensions.h | 23 +- MoltenVK/MoltenVK/OS/MVKOSExtensions.mm | 9 +- MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm | 485 ++++++++++-------- MoltenVK/MoltenVK/Vulkan/vulkan.mm | 6 +- MoltenVK/scripts/create_dylib.sh | 8 +- 19 files changed, 429 insertions(+), 347 deletions(-) diff --git a/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h b/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h index 5dc90805..ccd9b6a8 100644 --- a/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h +++ b/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h @@ -48,7 +48,7 @@ extern "C" { */ #define MVK_VERSION_MAJOR 1 #define MVK_VERSION_MINOR 0 -#define MVK_VERSION_PATCH 28 +#define MVK_VERSION_PATCH 29 #define MVK_MAKE_VERSION(major, minor, patch) (((major) * 10000) + ((minor) * 100) + (patch)) #define MVK_VERSION MVK_MAKE_VERSION(MVK_VERSION_MAJOR, MVK_VERSION_MINOR, MVK_VERSION_PATCH) diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdTransfer.mm b/MoltenVK/MoltenVK/Commands/MVKCmdTransfer.mm index 723cfd01..18728923 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCmdTransfer.mm +++ b/MoltenVK/MoltenVK/Commands/MVKCmdTransfer.mm @@ -839,13 +839,13 @@ void MVKCmdClearAttachments::encode(MVKCommandEncoder* cmdEncoder) { uint32_t caCnt = subpass->getColorAttachmentCount(); for (uint32_t caIdx = 0; caIdx < caCnt; caIdx++) { VkFormat vkAttFmt = subpass->getColorAttachmentFormat(caIdx); - _rpsKey.attachmentMTLPixelFormats[caIdx] = cmdPool->mtlPixelFormatFromVkFormat(vkAttFmt); + _rpsKey.attachmentMTLPixelFormats[caIdx] = cmdPool->getMTLPixelFormatFromVkFormat(vkAttFmt); MTLClearColor mtlCC = 
mvkMTLClearColorFromVkClearValue(_vkClearValues[caIdx], vkAttFmt); _clearColors[caIdx] = { (float)mtlCC.red, (float)mtlCC.green, (float)mtlCC.blue, (float)mtlCC.alpha}; } VkFormat vkAttFmt = subpass->getDepthStencilFormat(); - MTLPixelFormat mtlAttFmt = cmdPool->mtlPixelFormatFromVkFormat(vkAttFmt); + MTLPixelFormat mtlAttFmt = cmdPool->getMTLPixelFormatFromVkFormat(vkAttFmt); _rpsKey.attachmentMTLPixelFormats[kMVKAttachmentFormatDepthStencilIndex] = mtlAttFmt; bool isClearingDepth = _isClearingDepth && mvkMTLPixelFormatIsDepthFormat(mtlAttFmt); bool isClearingStencil = _isClearingStencil && mvkMTLPixelFormatIsStencilFormat(mtlAttFmt); diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.mm b/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.mm index 17bae0fa..bfbe8e4e 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.mm +++ b/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.mm @@ -226,9 +226,9 @@ NSString* MVKCommandResourceFactory::getMTLFormatTypeString(MTLPixelFormat mtlPi switch (mvkFormatTypeFromMTLPixelFormat(mtlPixFmt)) { case kMVKFormatColorHalf: return @"half"; case kMVKFormatColorFloat: return @"float"; - case kMVKFormatColorInt8: return @"char"; - case kMVKFormatColorUInt8: return @"uchar"; + case kMVKFormatColorInt8: case kMVKFormatColorInt16: return @"short"; + case kMVKFormatColorUInt8: case kMVKFormatColorUInt16: return @"ushort"; case kMVKFormatColorInt32: return @"int"; case kMVKFormatColorUInt32: return @"uint"; diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKBuffer.mm b/MoltenVK/MoltenVK/GPUObjects/MVKBuffer.mm index c346ba32..9d5ed4b3 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKBuffer.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKBuffer.mm @@ -155,7 +155,7 @@ id MVKBufferView::getMTLTexture() { MVKBufferView::MVKBufferView(MVKDevice* device, const VkBufferViewCreateInfo* pCreateInfo) : MVKRefCountedDeviceObject(device) { _buffer = (MVKBuffer*)pCreateInfo->buffer; _mtlBufferOffset = _buffer->getMTLBufferOffset() + 
pCreateInfo->offset; - _mtlPixelFormat = mtlPixelFormatFromVkFormat(pCreateInfo->format); + _mtlPixelFormat = getMTLPixelFormatFromVkFormat(pCreateInfo->format); VkExtent2D fmtBlockSize = mvkVkFormatBlockTexelSize(pCreateInfo->format); // Pixel size of format size_t bytesPerBlock = mvkVkFormatBytesPerBlock(pCreateInfo->format); _mtlTexture = nil; @@ -166,9 +166,10 @@ MVKBufferView::MVKBufferView(MVKDevice* device, const VkBufferViewCreateInfo* pC size_t blockCount = byteCount / bytesPerBlock; // But Metal requires the texture to be a 2D texture. Determine the number of 2D rows we need and their width. + // Multiple rows will automatically align with PoT max texture dimension, but need to align upwards if less than full single row. size_t maxBlocksPerRow = _device->_pMetalFeatures->maxTextureDimension / fmtBlockSize.width; size_t blocksPerRow = min(blockCount, maxBlocksPerRow); - _mtlBytesPerRow = mvkAlignByteOffset(blocksPerRow * bytesPerBlock, _device->_pProperties->limits.minTexelBufferOffsetAlignment); + _mtlBytesPerRow = mvkAlignByteOffset(blocksPerRow * bytesPerBlock, _device->getVkFormatTexelBufferAlignment(pCreateInfo->format)); size_t rowCount = blockCount / blocksPerRow; if (blockCount % blocksPerRow) { rowCount++; } diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.h b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.h index 433810a6..070202a2 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.h +++ b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.h @@ -506,7 +506,10 @@ public: * * All other pixel formats are returned unchanged. */ - MTLPixelFormat mtlPixelFormatFromVkFormat(VkFormat vkFormat); + MTLPixelFormat getMTLPixelFormatFromVkFormat(VkFormat vkFormat); + + /** Returns the memory alignment required for the format when used in a texel buffer. 
*/ + VkDeviceSize getVkFormatTexelBufferAlignment(VkFormat format); /** * Returns the MTLBuffer used to hold occlusion query results, @@ -612,12 +615,12 @@ public: * Returns the Metal MTLPixelFormat corresponding to the specified Vulkan VkFormat, * or returns MTLPixelFormatInvalid if no corresponding MTLPixelFormat exists. * - * This function delegates to the MVKDevice::mtlPixelFormatFromVkFormat() function. + * This function delegates to the MVKDevice::getMTLPixelFormatFromVkFormat() function. * See the notes for that function for more information about how MTLPixelFormats * are managed for each platform device. */ - inline MTLPixelFormat mtlPixelFormatFromVkFormat(VkFormat vkFormat) { - return _device->mtlPixelFormatFromVkFormat(vkFormat); + inline MTLPixelFormat getMTLPixelFormatFromVkFormat(VkFormat vkFormat) { + return _device->getMTLPixelFormatFromVkFormat(vkFormat); } /** Constructs an instance for the specified device. */ diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm index cd9d29de..9aba1e73 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm @@ -524,15 +524,21 @@ void MVKPhysicalDevice::initMetalFeatures() { _metalFeatures.dynamicMTLBuffers = true; _metalFeatures.maxTextureDimension = (8 * KIBI); } + if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily1_v3] ) { _metalFeatures.mslVersion = SPIRVToMSLConverterOptions::makeMSLVersion(1, 2); _metalFeatures.shaderSpecialization = true; _metalFeatures.stencilViews = true; } + if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily1_v4] ) { _metalFeatures.mslVersion = SPIRVToMSLConverterOptions::makeMSLVersion(2); } + if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily1_v5] ) { + _metalFeatures.mslVersion = SPIRVToMSLConverterOptions::makeMSLVersion(2, 1); + } + if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily3_v1] ) { _metalFeatures.indirectDrawing = true; 
_metalFeatures.baseVertexInstanceDrawing = true; @@ -566,6 +572,7 @@ void MVKPhysicalDevice::initMetalFeatures() { } if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_macOS_GPUFamily1_v4] ) { + _metalFeatures.mslVersion = SPIRVToMSLConverterOptions::makeMSLVersion(2, 1); _metalFeatures.multisampleArrayTextures = true; } @@ -1204,20 +1211,26 @@ void MVKPhysicalDevice::logGPUInfo() { logMsg += "\n\t\tvendorID: %#06x"; logMsg += "\n\t\tdeviceID: %#06x"; logMsg += "\n\t\tpipelineCacheUUID: %s"; - logMsg += "\n\tsupports the following Metal Feature Sets:"; + logMsg += "\n\tsupports Metal Shading Language version %s and the following Metal Feature Sets:"; #if MVK_IOS + if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily5_v1] ) { logMsg += "\n\t\tiOS GPU Family 5 v1"; } + + if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily4_v2] ) { logMsg += "\n\t\tiOS GPU Family 4 v2"; } if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily4_v1] ) { logMsg += "\n\t\tiOS GPU Family 4 v1"; } + if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily3_v4] ) { logMsg += "\n\t\tiOS GPU Family 3 v4"; } if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily3_v3] ) { logMsg += "\n\t\tiOS GPU Family 3 v3"; } if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily3_v2] ) { logMsg += "\n\t\tiOS GPU Family 3 v2"; } if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily3_v1] ) { logMsg += "\n\t\tiOS GPU Family 3 v1"; } + if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily2_v5] ) { logMsg += "\n\t\tiOS GPU Family 2 v5"; } if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily2_v4] ) { logMsg += "\n\t\tiOS GPU Family 2 v4"; } if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily2_v3] ) { logMsg += "\n\t\tiOS GPU Family 2 v3"; } if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily2_v2] ) { logMsg += "\n\t\tiOS GPU Family 2 v2"; } if ( [_mtlDevice supportsFeatureSet: 
MTLFeatureSet_iOS_GPUFamily2_v1] ) { logMsg += "\n\t\tiOS GPU Family 2 v1"; } + if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily1_v5] ) { logMsg += "\n\t\tiOS GPU Family 1 v5"; } if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily1_v4] ) { logMsg += "\n\t\tiOS GPU Family 1 v4"; } if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily1_v3] ) { logMsg += "\n\t\tiOS GPU Family 1 v3"; } if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily1_v2] ) { logMsg += "\n\t\tiOS GPU Family 1 v2"; } @@ -1225,13 +1238,20 @@ void MVKPhysicalDevice::logGPUInfo() { #endif #if MVK_MACOS + if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_macOS_GPUFamily2_v1] ) { logMsg += "\n\t\tmacOS GPU Family 2 v1"; } + + if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_macOS_GPUFamily1_v4] ) { logMsg += "\n\t\tmacOS GPU Family 1 v4"; } if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_macOS_GPUFamily1_v3] ) { logMsg += "\n\t\tmacOS GPU Family 1 v3"; } if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_macOS_GPUFamily1_v2] ) { logMsg += "\n\t\tmacOS GPU Family 1 v2"; } if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_macOS_GPUFamily1_v1] ) { logMsg += "\n\t\tmacOS GPU Family 1 v1"; } + + if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_macOS_ReadWriteTextureTier2] ) { logMsg += "\n\t\tmacOS Read-Write Texture Tier 2"; } + #endif MVKLogInfo(logMsg.c_str(), _properties.deviceName, devTypeStr.c_str(), _properties.vendorID, _properties.deviceID, - [[[NSUUID alloc] initWithUUIDBytes: _properties.pipelineCacheUUID] autorelease].UUIDString.UTF8String); + [[[NSUUID alloc] initWithUUIDBytes: _properties.pipelineCacheUUID] autorelease].UUIDString.UTF8String, + SPIRVToMSLConverterOptions::printMSLVersion(_metalFeatures.mslVersion).c_str()); } // Initializes the queue families supported by this instance. 
@@ -1684,7 +1704,7 @@ uint32_t MVKDevice::getMetalBufferIndexForVertexAttributeBinding(uint32_t bindin return ((_pMetalFeatures->maxPerStageBufferCount - 1) - binding); } -MTLPixelFormat MVKDevice::mtlPixelFormatFromVkFormat(VkFormat vkFormat) { +MTLPixelFormat MVKDevice::getMTLPixelFormatFromVkFormat(VkFormat vkFormat) { MTLPixelFormat mtlPixFmt = mvkMTLPixelFormatFromVkFormat(vkFormat); #if MVK_MACOS if (mtlPixFmt == MTLPixelFormatDepth24Unorm_Stencil8 && @@ -1695,6 +1715,11 @@ MTLPixelFormat MVKDevice::mtlPixelFormatFromVkFormat(VkFormat vkFormat) { return mtlPixFmt; } +VkDeviceSize MVKDevice::getVkFormatTexelBufferAlignment(VkFormat format) { + VkDeviceSize deviceAlignment = mvkMTLPixelFormatLinearTextureAlignment(getMTLPixelFormatFromVkFormat(format), getMTLDevice()); + return deviceAlignment ? deviceAlignment : _pProperties->limits.minTexelBufferOffsetAlignment; +} + id MVKDevice::getGlobalVisibilityResultMTLBuffer() { lock_guard lock(_vizLock); return _globalVisibilityResultMTLBuffer; diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.mm index d1aac446..99ec0e65 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.mm @@ -38,7 +38,7 @@ VkResult MVKDeviceMemory::map(VkDeviceSize offset, VkDeviceSize size, VkMemoryMa return mvkNotifyErrorWithText(VK_ERROR_MEMORY_MAP_FAILED, "Memory is already mapped. 
Call vkUnmapMemory() first."); } - if ( !ensureHostMemory() ) { + if ( !ensureMTLBuffer() && !ensureHostMemory() ) { return mvkNotifyErrorWithText(VK_ERROR_OUT_OF_HOST_MEMORY, "Could not allocate %llu bytes of host-accessible device memory.", _allocationSize); } diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm index b4a3f56b..5d3b1285 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm @@ -365,14 +365,27 @@ VkResult MVKImage::useIOSurface(IOSurfaceRef ioSurface) { MTLTextureUsage MVKImage::getMTLTextureUsage() { MTLTextureUsage usage = mvkMTLTextureUsageFromVkImageUsageFlags(_usage); + // If this is a depth/stencil texture, and the device supports it, tell // Metal we may create texture views of this, too. if ((_mtlPixelFormat == MTLPixelFormatDepth32Float_Stencil8 #if MVK_MACOS || _mtlPixelFormat == MTLPixelFormatDepth24Unorm_Stencil8 #endif - ) && _device->_pMetalFeatures->stencilViews) + ) && _device->_pMetalFeatures->stencilViews) { mvkEnableFlag(usage, MTLTextureUsagePixelFormatView); + } + + // If this format doesn't support being blitted to, and the usage + // doesn't specify use as an attachment, turn off + // MTLTextureUsageRenderTarget. + VkFormatProperties props; + _device->getPhysicalDevice()->getFormatProperties(getVkFormat(), &props); + if (!mvkAreFlagsEnabled(_isLinear ? 
props.linearTilingFeatures : props.optimalTilingFeatures, VK_FORMAT_FEATURE_BLIT_DST_BIT) && + !mvkIsAnyFlagEnabled(_usage, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT|VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) { + mvkDisableFlag(usage, MTLTextureUsageRenderTarget); + } + return usage; } @@ -481,8 +494,6 @@ void MVKImage::getMTLTextureContent(MVKImageSubresource& subresource, MVKImage::MVKImage(MVKDevice* device, const VkImageCreateInfo* pCreateInfo) : MVKResource(device) { - _byteAlignment = _device->_pProperties->limits.minTexelBufferOffsetAlignment; - if (pCreateInfo->flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT) { mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : Metal may not allow uncompressed views of compressed images."); } @@ -509,7 +520,7 @@ MVKImage::MVKImage(MVKDevice* device, const VkImageCreateInfo* pCreateInfo) : MV _mtlTexture = nil; _ioSurface = nil; - _mtlPixelFormat = mtlPixelFormatFromVkFormat(pCreateInfo->format); + _mtlPixelFormat = getMTLPixelFormatFromVkFormat(pCreateInfo->format); _mtlTextureType = mvkMTLTextureTypeFromVkImageType(pCreateInfo->imageType, _arrayLayers, (pCreateInfo->samples > 1)); @@ -538,6 +549,8 @@ MVKImage::MVKImage(MVKDevice* device, const VkImageCreateInfo* pCreateInfo) : MV _isLinear = validateLinear(pCreateInfo); _usesTexelBuffer = false; + _byteAlignment = _isLinear ? _device->getVkFormatTexelBufferAlignment(pCreateInfo->format) : mvkEnsurePowerOfTwo(mvkVkFormatBytesPerBlock(pCreateInfo->format)); + // Calc _byteCount after _mtlTexture & _byteAlignment for (uint32_t mipLvl = 0; mipLvl < _mipLevels; mipLvl++) { _byteCount += getBytesPerLayer(mipLvl) * _extent.depth * _arrayLayers; @@ -763,7 +776,7 @@ void MVKImageView::validateImageViewConfig(const VkImageViewCreateInfo* pCreateI // alignments of existing MTLPixelFormats of the same structure. If swizzling is not possible for a // particular combination of format and swizzle spec, the original MTLPixelFormat is returned. 
MTLPixelFormat MVKImageView::getSwizzledMTLPixelFormat(VkFormat format, VkComponentMapping components, bool& useSwizzle) { - MTLPixelFormat mtlPF = mtlPixelFormatFromVkFormat(format); + MTLPixelFormat mtlPF = getMTLPixelFormatFromVkFormat(format); useSwizzle = false; switch (mtlPF) { diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKInstance.mm b/MoltenVK/MoltenVK/GPUObjects/MVKInstance.mm index 3acd3d47..462fe40f 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKInstance.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKInstance.mm @@ -137,6 +137,10 @@ MVKInstance::MVKInstance(const VkInstanceCreateInfo* pCreateInfo) { if (_physicalDevices.empty()) { setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_INCOMPATIBLE_DRIVER, "Vulkan is not supported on this device. MoltenVK requires Metal, which is not available on this device.")); } + + string logMsg = "Created VkInstance with the following Vulkan extensions enabled:"; + logMsg += _enabledExtensions.enabledNamesString("\n\t\t", true); + MVKLogInfo("%s", logMsg.c_str()); } #define ADD_PROC_ADDR(entrypoint) _procAddrMap[""#entrypoint] = (PFN_vkVoidFunction)&entrypoint; @@ -339,12 +343,9 @@ void MVKInstance::logVersions() { char vkVer[buffLen]; vkGetVersionStringsMVK(mvkVer, buffLen, vkVer, buffLen); - const char* indent = "\n\t\t"; string logMsg = "MoltenVK version %s. 
Vulkan version %s."; logMsg += "\n\tThe following Vulkan extensions are supported:"; - logMsg += getDriverLayer()->getSupportedExtensions()->enabledNamesString(indent, true); - logMsg += "\n\tCreated VkInstance with the following Vulkan extensions enabled:"; - logMsg += _enabledExtensions.enabledNamesString(indent, true); + logMsg += getDriverLayer()->getSupportedExtensions()->enabledNamesString("\n\t\t", true); MVKLogInfo(logMsg.c_str(), mvkVer, vkVer); } diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm index ba4ce43a..ef6f522e 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm @@ -304,6 +304,7 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::getMTLRenderPipelineDescriptor SPIRVToMSLConverterContext shaderContext; initMVKShaderConverterContext(shaderContext, pCreateInfo); + auto* mvkLayout = (MVKPipelineLayout*)pCreateInfo->layout; _auxBufferIndex = mvkLayout->getAuxBufferIndex(); uint32_t auxBufferSize = sizeof(uint32_t) * mvkLayout->getTextureCount(); @@ -316,18 +317,14 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::getMTLRenderPipelineDescriptor uint32_t vbCnt = pCreateInfo->pVertexInputState->vertexBindingDescriptionCount; - // Add shader stages + // Add shader stages. Compile vertex shader before others just in case conversion changes anything...like rasterization disable. 
for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) { const VkPipelineShaderStageCreateInfo* pSS = &pCreateInfo->pStages[i]; - shaderContext.options.entryPointName = pSS->pName; - - MVKShaderModule* mvkShdrMod = (MVKShaderModule*)pSS->module; - - // Vertex shader if (mvkAreFlagsEnabled(pSS->stage, VK_SHADER_STAGE_VERTEX_BIT)) { shaderContext.options.entryPointStage = spv::ExecutionModelVertex; shaderContext.options.auxBufferIndex = _auxBufferIndex.vertex; - id mtlFunction = mvkShdrMod->getMTLFunction(&shaderContext, pSS->pSpecializationInfo, _pipelineCache).mtlFunction; + shaderContext.options.entryPointName = pSS->pName; + id mtlFunction = ((MVKShaderModule*)pSS->module)->getMTLFunction(&shaderContext, pSS->pSpecializationInfo, _pipelineCache).mtlFunction; if ( !mtlFunction ) { setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_INITIALIZATION_FAILED, "Vertex shader function could not be compiled into pipeline. See previous error.")); return nil; @@ -335,22 +332,25 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::getMTLRenderPipelineDescriptor plDesc.vertexFunction = mtlFunction; plDesc.rasterizationEnabled = !shaderContext.options.isRasterizationDisabled; _needsVertexAuxBuffer = shaderContext.options.needsAuxBuffer; - // If we need the auxiliary buffer and there's no place to put it, - // we're in serious trouble. + // If we need the auxiliary buffer and there's no place to put it, we're in serious trouble. 
if (_needsVertexAuxBuffer && (_auxBufferIndex.vertex == ~0u || _auxBufferIndex.vertex >= _device->_pMetalFeatures->maxPerStageBufferCount - vbCnt) ) { setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_INITIALIZATION_FAILED, "Vertex shader requires auxiliary buffer, but there is no free slot to pass it.")); return nil; } } + } // bug fix by aerofly -> if no fragment shader is used and _needsFragmentAuxBuffer was true newBufferWithLength was trying to allocate zero bytes // please verify this fix _needsFragmentAuxBuffer = false; - // Fragment shader - if (mvkAreFlagsEnabled(pSS->stage, VK_SHADER_STAGE_FRAGMENT_BIT)) { + // Fragment shader - only add if rasterization is enabled + for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) { + const VkPipelineShaderStageCreateInfo* pSS = &pCreateInfo->pStages[i]; + if (mvkAreFlagsEnabled(pSS->stage, VK_SHADER_STAGE_FRAGMENT_BIT) && !shaderContext.options.isRasterizationDisabled) { shaderContext.options.entryPointStage = spv::ExecutionModelFragment; shaderContext.options.auxBufferIndex = _auxBufferIndex.fragment; - id mtlFunction = mvkShdrMod->getMTLFunction(&shaderContext, pSS->pSpecializationInfo, _pipelineCache).mtlFunction; + shaderContext.options.entryPointName = pSS->pName; + id mtlFunction = ((MVKShaderModule*)pSS->module)->getMTLFunction(&shaderContext, pSS->pSpecializationInfo, _pipelineCache).mtlFunction; if ( !mtlFunction ) { setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_INITIALIZATION_FAILED, "Fragment shader function could not be compiled into pipeline. 
See previous error.")); } @@ -360,8 +360,8 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::getMTLRenderPipelineDescriptor setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_INITIALIZATION_FAILED, "Fragment shader requires auxiliary buffer, but there is no free slot to pass it.")); return nil; } - } - } + } + } if (_needsVertexAuxBuffer || _needsFragmentAuxBuffer) { _auxBuffer = [_device->getMTLDevice() newBufferWithLength: auxBufferSize options: MTLResourceStorageModeShared]; // retained @@ -415,7 +415,7 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::getMTLRenderPipelineDescriptor const VkPipelineColorBlendAttachmentState* pCA = &pCreateInfo->pColorBlendState->pAttachments[caIdx]; MTLRenderPipelineColorAttachmentDescriptor* colorDesc = plDesc.colorAttachments[caIdx]; - colorDesc.pixelFormat = mtlPixelFormatFromVkFormat(mvkRenderSubpass->getColorAttachmentFormat(caIdx)); + colorDesc.pixelFormat = getMTLPixelFormatFromVkFormat(mvkRenderSubpass->getColorAttachmentFormat(caIdx)); colorDesc.writeMask = mvkMTLColorWriteMaskFromVkChannelFlags(pCA->colorWriteMask); // Don't set the blend state if we're not using this attachment. // The pixel format will be MTLPixelFormatInvalid in that case, and @@ -434,16 +434,14 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::getMTLRenderPipelineDescriptor } // Depth & stencil attachments - MTLPixelFormat mtlDSFormat = mtlPixelFormatFromVkFormat(mvkRenderSubpass->getDepthStencilFormat()); + MTLPixelFormat mtlDSFormat = getMTLPixelFormatFromVkFormat(mvkRenderSubpass->getDepthStencilFormat()); if (mvkMTLPixelFormatIsDepthFormat(mtlDSFormat)) { plDesc.depthAttachmentPixelFormat = mtlDSFormat; } if (mvkMTLPixelFormatIsStencilFormat(mtlDSFormat)) { plDesc.stencilAttachmentPixelFormat = mtlDSFormat; } - // In Vulkan, it's perfectly valid to do rasterization with no attachments. - // Not so in Metal. If we have no attachments and rasterization is enabled, - // then we'll have to add a dummy attachment. 
- if (plDesc.rasterizationEnabled && !caCnt && - !mvkMTLPixelFormatIsDepthFormat(mtlDSFormat) && - !mvkMTLPixelFormatIsStencilFormat(mtlDSFormat)) { + // In Vulkan, it's perfectly valid to render with no attachments. Not so + // in Metal. If we have no attachments, then we'll have to add a dummy + // attachment. + if (!caCnt && !mvkMTLPixelFormatIsDepthFormat(mtlDSFormat) && !mvkMTLPixelFormatIsStencilFormat(mtlDSFormat)) { MTLRenderPipelineColorAttachmentDescriptor* colorDesc = plDesc.colorAttachments[0]; colorDesc.pixelFormat = MTLPixelFormatR8Unorm; colorDesc.writeMask = MTLColorWriteMaskNone; diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKRenderPass.mm b/MoltenVK/MoltenVK/GPUObjects/MVKRenderPass.mm index 3dde4c89..0a63201c 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKRenderPass.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKRenderPass.mm @@ -178,7 +178,7 @@ void MVKRenderSubpass::populateClearAttachments(vector& clear cAtt.colorAttachment = 0; cAtt.clearValue = clearValues[attIdx]; - MTLPixelFormat mtlDSFmt = _renderPass->mtlPixelFormatFromVkFormat(getDepthStencilFormat()); + MTLPixelFormat mtlDSFmt = _renderPass->getMTLPixelFormatFromVkFormat(getDepthStencilFormat()); if (mvkMTLPixelFormatIsDepthFormat(mtlDSFmt)) { cAtt.aspectMask |= VK_IMAGE_ASPECT_DEPTH_BIT; } if (mvkMTLPixelFormatIsStencilFormat(mtlDSFmt)) { cAtt.aspectMask |= VK_IMAGE_ASPECT_STENCIL_BIT; } if (cAtt.aspectMask) { clearAtts.push_back(cAtt); } diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKSwapchain.mm b/MoltenVK/MoltenVK/GPUObjects/MVKSwapchain.mm index 970e32ae..5bc4f23e 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKSwapchain.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKSwapchain.mm @@ -192,7 +192,7 @@ void MVKSwapchain::initCAMetalLayer(const VkSwapchainCreateInfoKHR* pCreateInfo) MVKSurface* mvkSrfc = (MVKSurface*)pCreateInfo->surface; _mtlLayer = mvkSrfc->getCAMetalLayer(); _mtlLayer.device = getMTLDevice(); - _mtlLayer.pixelFormat = mtlPixelFormatFromVkFormat(pCreateInfo->imageFormat); + 
_mtlLayer.pixelFormat = getMTLPixelFormatFromVkFormat(pCreateInfo->imageFormat); _mtlLayer.displaySyncEnabledMVK = (pCreateInfo->presentMode != VK_PRESENT_MODE_IMMEDIATE_KHR); _mtlLayer.magnificationFilter = _device->_pMVKConfig->swapchainMagFilterUseNearest ? kCAFilterNearest : kCAFilterLinear; _mtlLayer.framebufferOnly = !mvkIsAnyFlagEnabled(pCreateInfo->imageUsage, (VK_IMAGE_USAGE_TRANSFER_SRC_BIT | diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKSync.h b/MoltenVK/MoltenVK/GPUObjects/MVKSync.h index 14a01c1a..07b8b9d9 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKSync.h +++ b/MoltenVK/MoltenVK/GPUObjects/MVKSync.h @@ -87,7 +87,6 @@ public: private: bool operator()(); - inline void reserveImpl() { _reservationCount++; } // Not thread-safe inline bool isClear() { return _reservationCount == 0; } // Not thread-safe std::mutex _lock; @@ -166,10 +165,8 @@ public: #pragma mark Construction - MVKFence(MVKDevice* device, const VkFenceCreateInfo* pCreateInfo) : MVKRefCountedDeviceObject(device), - _isSignaled(mvkAreFlagsEnabled(pCreateInfo->flags, VK_FENCE_CREATE_SIGNALED_BIT)) {} - - ~MVKFence() override; + MVKFence(MVKDevice* device, const VkFenceCreateInfo* pCreateInfo) : + MVKRefCountedDeviceObject(device), _isSignaled(mvkAreFlagsEnabled(pCreateInfo->flags, VK_FENCE_CREATE_SIGNALED_BIT)) {} protected: void notifySitters(); @@ -198,25 +195,19 @@ public: * * Returns true if the required fences were triggered, or false if the timeout interval expired. */ - bool wait(uint64_t timeout = UINT64_MAX); + bool wait(uint64_t timeout = UINT64_MAX) { return _blocker.wait(timeout); } #pragma mark Construction - /** Constructs an instance with the specified type of waiting. 
*/ - MVKFenceSitter(bool waitAll = true) : _blocker(waitAll, 0) {} - - ~MVKFenceSitter() override; + MVKFenceSitter(bool waitAll) : _blocker(waitAll, 0) {} private: friend class MVKFence; - void addUnsignaledFence(MVKFence* fence); - void fenceSignaled(MVKFence* fence); - void getUnsignaledFences(std::vector& fences); + void awaitFence(MVKFence* fence) { _blocker.reserve(); } + void fenceSignaled(MVKFence* fence) { _blocker.release(); } - std::mutex _lock; - std::unordered_set _unsignaledFences; MVKSemaphoreImpl _blocker; }; diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKSync.mm b/MoltenVK/MoltenVK/GPUObjects/MVKSync.mm index a4f1a1b9..9cc01105 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKSync.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKSync.mm @@ -41,7 +41,7 @@ void MVKSemaphoreImpl::release() { void MVKSemaphoreImpl::reserve() { lock_guard lock(_lock); - reserveImpl(); + _reservationCount++; } bool MVKSemaphoreImpl::wait(uint64_t timeout, bool reserveAgain) { @@ -60,7 +60,7 @@ bool MVKSemaphoreImpl::wait(uint64_t timeout, bool reserveAgain) { isDone = _blocker.wait_for(lock, nanos, [this]{ return isClear(); }); } - if (reserveAgain) { reserveImpl(); } + if (reserveAgain) { _reservationCount++; } return isDone; } @@ -85,17 +85,18 @@ void MVKSemaphore::signal() { void MVKFence::addSitter(MVKFenceSitter* fenceSitter) { lock_guard lock(_lock); - // Sitters only care about unsignaled fences. If already signaled, - // don't add myself to the sitter and don't notify the sitter. + // We only care about unsignaled fences. If already signaled, + // don't add myself to the sitter and don't signal the sitter. 
if (_isSignaled) { return; } // Ensure each fence only added once to each fence sitter auto addRslt = _fenceSitters.insert(fenceSitter); // pair with second element true if was added - if (addRslt.second) { fenceSitter->addUnsignaledFence(this); } + if (addRslt.second) { fenceSitter->awaitFence(this); } } void MVKFence::removeSitter(MVKFenceSitter* fenceSitter) { lock_guard lock(_lock); + _fenceSitters.erase(fenceSitter); } @@ -114,72 +115,18 @@ void MVKFence::signal() { void MVKFence::reset() { lock_guard lock(_lock); + _isSignaled = false; _fenceSitters.clear(); } bool MVKFence::getIsSignaled() { lock_guard lock(_lock); + return _isSignaled; } -#pragma mark Construction - -MVKFence::~MVKFence() { - lock_guard lock(_lock); - for (auto& fs : _fenceSitters) { - fs->fenceSignaled(this); - } -} - - -#pragma mark - -#pragma mark MVKFenceSitter - -void MVKFenceSitter::addUnsignaledFence(MVKFence* fence) { - lock_guard lock(_lock); - // Only reserve semaphore once per fence - auto addRslt = _unsignaledFences.insert(fence); // pair with second element true if was added - if (addRslt.second) { _blocker.reserve(); } -} - -void MVKFenceSitter::fenceSignaled(MVKFence* fence) { - lock_guard lock(_lock); - // Only release semaphore if actually waiting for this fence - if (_unsignaledFences.erase(fence)) { _blocker.release(); } -} - -bool MVKFenceSitter::wait(uint64_t timeout) { - bool isDone = _blocker.wait(timeout); - if ( !isDone && timeout > 0 ) { mvkNotifyErrorWithText(VK_TIMEOUT, "Vulkan fence timeout after %llu nanoseconds.", timeout); } - return isDone; -} - - -#pragma mark Construction - -MVKFenceSitter::~MVKFenceSitter() { - // Use copy of collection to avoid deadlocks with the fences if lock in place here when removing sitters - vector ufsCopy; - getUnsignaledFences(ufsCopy); - for (auto& uf : ufsCopy) { - uf->removeSitter(this); - } -} - -// Fills the vector with the collection of unsignaled fences -void MVKFenceSitter::getUnsignaledFences(vector& fences) { - 
fences.clear(); - - lock_guard lock(_lock); - fences.reserve(_unsignaledFences.size()); - for (auto& uf : _unsignaledFences) { - fences.push_back(uf); - } -} - - #pragma mark - #pragma mark Support functions @@ -190,18 +137,28 @@ VkResult mvkResetFences(uint32_t fenceCount, const VkFence* pFences) { return VK_SUCCESS; } +// Create a blocking fence sitter, add it to each fence, wait, then remove it. VkResult mvkWaitForFences(uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout) { - // Create a blocking fence sitter and add it to each fence + VkResult rslt = VK_SUCCESS; MVKFenceSitter fenceSitter(waitAll); + for (uint32_t i = 0; i < fenceCount; i++) { - MVKFence* mvkFence = (MVKFence*)pFences[i]; - mvkFence->addSitter(&fenceSitter); + ((MVKFence*)pFences[i])->addSitter(&fenceSitter); } - return fenceSitter.wait(timeout) ? VK_SUCCESS : VK_TIMEOUT; + + if ( !fenceSitter.wait(timeout) && timeout > 0 ) { + rslt = mvkNotifyErrorWithText(VK_TIMEOUT, "Vulkan fence timeout after %llu nanoseconds.", timeout); + } + + for (uint32_t i = 0; i < fenceCount; i++) { + ((MVKFence*)pFences[i])->removeSitter(&fenceSitter); + } + + return rslt; } diff --git a/MoltenVK/MoltenVK/OS/MVKOSExtensions.h b/MoltenVK/MoltenVK/OS/MVKOSExtensions.h index f2e520b1..58cd4ecb 100644 --- a/MoltenVK/MoltenVK/OS/MVKOSExtensions.h +++ b/MoltenVK/MoltenVK/OS/MVKOSExtensions.h @@ -57,6 +57,15 @@ double mvkGetTimestampPeriod(); */ double mvkGetElapsedMilliseconds(uint64_t startTimestamp = 0, uint64_t endTimestamp = 0); +/** Ensures the block is executed on the main thread. */ +inline void mvkDispatchToMainAndWait(dispatch_block_t block) { + if (NSThread.isMainThread) { + block(); + } else { + dispatch_sync(dispatch_get_main_queue(), block); + } +} + #pragma mark - #pragma mark MTLDevice @@ -67,11 +76,9 @@ uint64_t mvkRecommendedMaxWorkingSetSize(id mtlDevice); /** Populate the propertes with info about the GPU represented by the MTLDevice. 
*/ void mvkPopulateGPUInfo(VkPhysicalDeviceProperties& devProps, id mtlDevice); -/** Ensures the block is executed on the main thread. */ -inline void mvkDispatchToMainAndWait(dispatch_block_t block) { - if (NSThread.isMainThread) { - block(); - } else { - dispatch_sync(dispatch_get_main_queue(), block); - } -} +/** + * If the MTLDevice defines a texture memory alignment for the format, it is retrieved from + * the MTLDevice and returned, or returns zero if the MTLDevice does not define an alignment. + * The format must support linear texture memory (must not be depth, stencil, or compressed). + */ +VkDeviceSize mvkMTLPixelFormatLinearTextureAlignment(MTLPixelFormat mtlPixelFormat, id mtlDevice); diff --git a/MoltenVK/MoltenVK/OS/MVKOSExtensions.mm b/MoltenVK/MoltenVK/OS/MVKOSExtensions.mm index 94092f78..603558e4 100644 --- a/MoltenVK/MoltenVK/OS/MVKOSExtensions.mm +++ b/MoltenVK/MoltenVK/OS/MVKOSExtensions.mm @@ -188,4 +188,11 @@ void mvkPopulateGPUInfo(VkPhysicalDeviceProperties& devProps, id mtlD } #endif //MVK_IOS - +VkDeviceSize mvkMTLPixelFormatLinearTextureAlignment(MTLPixelFormat mtlPixelFormat, + id mtlDevice) { + if ([mtlDevice respondsToSelector: @selector(minimumLinearTextureAlignmentForPixelFormat:)]) { + return [mtlDevice minimumLinearTextureAlignmentForPixelFormat: mtlPixelFormat]; + } else { + return 0; + } +} diff --git a/MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm b/MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm index 764ac4bc..d6505385 100644 --- a/MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm +++ b/MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm @@ -34,13 +34,13 @@ using namespace std; #define MVK_FMT_IMAGE_FEATS (VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT \ | VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT \ | VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT \ + | VK_FORMAT_FEATURE_BLIT_SRC_BIT \ | VK_FORMAT_FEATURE_TRANSFER_SRC_BIT \ | VK_FORMAT_FEATURE_TRANSFER_DST_BIT) #define MVK_FMT_COLOR_INTEGER_FEATS (MVK_FMT_IMAGE_FEATS \ | VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT \ | 
VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT \ - | VK_FORMAT_FEATURE_BLIT_SRC_BIT \ | VK_FORMAT_FEATURE_BLIT_DST_BIT) #define MVK_FMT_COLOR_FEATS (MVK_FMT_COLOR_INTEGER_FEATS | VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT) @@ -61,13 +61,21 @@ using namespace std; # define MVK_FMT_DEPTH_FEATS (MVK_FMT_STENCIL_FEATS | VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT) #endif +#define MVK_FMT_COMPRESSED_FEATS (VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT \ + | VK_FORMAT_FEATURE_TRANSFER_SRC_BIT \ + | VK_FORMAT_FEATURE_TRANSFER_DST_BIT \ + | VK_FORMAT_FEATURE_BLIT_SRC_BIT \ + | VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT) + #if MVK_MACOS // macOS does not support linear images as framebuffer attachments. -#define MVK_FMT_LINEAR_TILING_FEATS (MVK_FMT_IMAGE_FEATS \ - | VK_FORMAT_FEATURE_BLIT_SRC_BIT \ - | VK_FORMAT_FEATURE_BLIT_DST_BIT) +#define MVK_FMT_LINEAR_TILING_FEATS MVK_FMT_IMAGE_FEATS + +// macOS also does not support E5B9G9R9 for anything but filtering. +#define MVK_FMT_E5B9G9R9_FEATS MVK_FMT_COMPRESSED_FEATS #else #define MVK_FMT_LINEAR_TILING_FEATS MVK_FMT_COLOR_FEATS +#define MVK_FMT_E5B9G9R9_FEATS MVK_FMT_COLOR_FEATS #endif #define MVK_FMT_BUFFER_FEATS (VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT \ @@ -76,10 +84,18 @@ using namespace std; #define MVK_FMT_BUFFER_VTX_FEATS (MVK_FMT_BUFFER_FEATS | VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) +#define MVK_FMT_BUFFER_RDONLY_FEATS (VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT) + +#if MVK_MACOS +#define MVK_FMT_E5B9G9R9_BUFFER_FEATS MVK_FMT_BUFFER_RDONLY_FEATS +#else +#define MVK_FMT_E5B9G9R9_BUFFER_FEATS MVK_FMT_BUFFER_FEATS +#endif + #define MVK_FMT_NO_FEATS 0 -#define MVK_MAKE_FMT_STRUCT(VK_FMT, MTL_FMT, MTL_FMT_ALT, IOS_SINCE, MACOS_SINCE, BLK_W, BLK_H, BLK_BYTE_CNT, MTL_VTX_FMT, CLR_TYPE, PIXEL_FEATS, BUFFER_FEATS) \ - { VK_FMT, MTL_FMT, MTL_FMT_ALT, IOS_SINCE, MACOS_SINCE, { BLK_W, BLK_H }, BLK_BYTE_CNT, MTL_VTX_FMT, CLR_TYPE, { (PIXEL_FEATS & MVK_FMT_LINEAR_TILING_FEATS), PIXEL_FEATS, BUFFER_FEATS }, #VK_FMT, 
#MTL_FMT } +#define MVK_MAKE_FMT_STRUCT(VK_FMT, MTL_FMT, MTL_FMT_ALT, IOS_SINCE, MACOS_SINCE, BLK_W, BLK_H, BLK_BYTE_CNT, MTL_VTX_FMT, MTL_VTX_FMT_ALT, VTX_IOS_SINCE, VTX_MACOS_SINCE, CLR_TYPE, PIXEL_FEATS, BUFFER_FEATS) \ + { VK_FMT, MTL_FMT, MTL_FMT_ALT, IOS_SINCE, MACOS_SINCE, { BLK_W, BLK_H }, BLK_BYTE_CNT, MTL_VTX_FMT, MTL_VTX_FMT_ALT, VTX_IOS_SINCE, VTX_MACOS_SINCE, CLR_TYPE, { (PIXEL_FEATS & MVK_FMT_LINEAR_TILING_FEATS), PIXEL_FEATS, BUFFER_FEATS }, #VK_FMT, #MTL_FMT } #pragma mark Texture formats @@ -95,6 +111,9 @@ typedef struct { VkExtent2D blockTexelSize; uint32_t bytesPerBlock; MTLVertexFormat mtlVertexFormat; + MTLVertexFormat mtlVertexFormatSubstitute; + MVKOSVersion vertexSinceIOSVersion; + MVKOSVersion vertexSinceMacOSVersion; MVKFormatType formatType; VkFormatProperties properties; const char* vkName; @@ -112,6 +131,17 @@ typedef struct { } inline bool isSupported() const { return (mtl != MTLPixelFormatInvalid) && (mvkOSVersion() >= sinceOSVersion()); }; inline bool isSupportedOrSubstitutable() const { return isSupported() || (mtlSubstitute != MTLPixelFormatInvalid); }; + + inline MVKOSVersion vertexSinceOSVersion() const { +#if MVK_IOS + return vertexSinceIOSVersion; +#endif +#if MVK_MACOS + return vertexSinceMacOSVersion; +#endif + } + inline bool vertexIsSupported() const { return (mtlVertexFormat != MTLVertexFormatInvalid) && (mvkOSVersion() >= vertexSinceOSVersion()); }; + inline bool vertexIsSupportedOrSubstitutable() const { return vertexIsSupported() || (mtlVertexFormatSubstitute != MTLVertexFormatInvalid); }; } MVKFormatDesc; /** Mapping between Vulkan and Metal pixel formats. 
*/ @@ -198,249 +228,250 @@ typedef struct { static const MVKFormatDesc _formatDescriptions[] { - MVK_MAKE_FMT_STRUCT( VK_FORMAT_UNDEFINED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 0, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_UNDEFINED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 0, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R4G4_UNORM_PACK8, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 1, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R4G4B4A4_UNORM_PACK16, MTLPixelFormatABGR4Unorm, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), // Vulkan packed is reversed - MVK_MAKE_FMT_STRUCT( VK_FORMAT_B4G4R4A4_UNORM_PACK16, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R4G4_UNORM_PACK8, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 1, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R4G4B4A4_UNORM_PACK16, MTLPixelFormatABGR4Unorm, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), // Vulkan packed is reversed + MVK_MAKE_FMT_STRUCT( VK_FORMAT_B4G4R4A4_UNORM_PACK16, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, 
MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R5G6B5_UNORM_PACK16, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_B5G6R5_UNORM_PACK16, MTLPixelFormatB5G6R5Unorm, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R5G5B5A1_UNORM_PACK16, MTLPixelFormatA1BGR5Unorm, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_B5G5R5A1_UNORM_PACK16, MTLPixelFormatBGR5A1Unorm, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_A1R5G5B5_UNORM_PACK16, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R5G6B5_UNORM_PACK16, MTLPixelFormatB5G6R5Unorm, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), // Vulkan packed is reversed + MVK_MAKE_FMT_STRUCT( VK_FORMAT_B5G6R5_UNORM_PACK16, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R5G5B5A1_UNORM_PACK16, MTLPixelFormatA1BGR5Unorm, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), // Vulkan packed is reversed + 
MVK_MAKE_FMT_STRUCT( VK_FORMAT_B5G5R5A1_UNORM_PACK16, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_A1R5G5B5_UNORM_PACK16, MTLPixelFormatBGR5A1Unorm, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), // Vulkan packed is reversed - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8_UNORM, MTLPixelFormatR8Unorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 1, MTLVertexFormatUChar2Normalized, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8_SNORM, MTLPixelFormatR8Snorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 1, MTLVertexFormatChar2Normalized, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8_USCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 1, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8_SSCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 1, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8_UINT, MTLPixelFormatR8Uint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 1, MTLVertexFormatUChar2, kMVKFormatColorUInt8, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8_SINT, MTLPixelFormatR8Sint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 1, MTLVertexFormatChar2, kMVKFormatColorInt8, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8_SRGB, MTLPixelFormatR8Unorm_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 1, 1, 1, MTLVertexFormatUChar2, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, 
MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8_UNORM, MTLPixelFormatR8Unorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 1, MTLVertexFormatUCharNormalized, MTLVertexFormatUChar2Normalized, 11.0, 10.13, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8_SNORM, MTLPixelFormatR8Snorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 1, MTLVertexFormatCharNormalized, MTLVertexFormatChar2Normalized, 11.0, 10.13, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8_USCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 1, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8_SSCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 1, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8_UINT, MTLPixelFormatR8Uint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 1, MTLVertexFormatUChar, MTLVertexFormatUChar2, 11.0, 10.13, kMVKFormatColorUInt8, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8_SINT, MTLPixelFormatR8Sint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 1, MTLVertexFormatChar, MTLVertexFormatChar2, 11.0, 10.13, kMVKFormatColorInt8, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8_SRGB, MTLPixelFormatR8Unorm_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 1, 1, 1, MTLVertexFormatUCharNormalized, MTLVertexFormatUChar2Normalized, 11.0, 10.13, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8_UNORM, MTLPixelFormatRG8Unorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 2, MTLVertexFormatUChar2Normalized, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, 
MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8_SNORM, MTLPixelFormatRG8Snorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 2, MTLVertexFormatChar2Normalized, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8_USCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8_SSCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8_UINT, MTLPixelFormatRG8Uint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 2, MTLVertexFormatUChar2, kMVKFormatColorUInt8, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8_SINT, MTLPixelFormatRG8Sint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 2, MTLVertexFormatChar2, kMVKFormatColorInt8, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8_SRGB, MTLPixelFormatRG8Unorm_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 1, 1, 2, MTLVertexFormatUChar2, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8_UNORM, MTLPixelFormatRG8Unorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 2, MTLVertexFormatUChar2Normalized, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8_SNORM, MTLPixelFormatRG8Snorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 2, MTLVertexFormatChar2Normalized, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8_USCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, 
kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8_SSCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8_UINT, MTLPixelFormatRG8Uint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 2, MTLVertexFormatUChar2, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorUInt8, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8_SINT, MTLPixelFormatRG8Sint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 2, MTLVertexFormatChar2, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorInt8, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8_SRGB, MTLPixelFormatRG8Unorm_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 1, 1, 2, MTLVertexFormatUChar2Normalized, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8_UNORM, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatUChar3Normalized, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8_SNORM, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatChar3Normalized, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8_USCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8_SSCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8_UINT, 
MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatUChar3, kMVKFormatColorUInt8, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8_SINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatChar3, kMVKFormatColorInt8, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8_SRGB, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatUChar3, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8_UNORM, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatUChar3Normalized, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8_SNORM, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatChar3Normalized, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8_USCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8_SSCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8_UINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatUChar3, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorUInt8, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8_SINT, MTLPixelFormatInvalid, 
MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatChar3, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorInt8, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8_SRGB, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatUChar3Normalized, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8_UNORM, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8_SNORM, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8_USCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8_SSCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8_UINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatInvalid, kMVKFormatColorUInt8, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8_SINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatInvalid, kMVKFormatColorInt8, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8_SRGB, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8_UNORM, MTLPixelFormatInvalid, 
MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8_SNORM, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8_USCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8_SSCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8_UINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorUInt8, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8_SINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorInt8, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8_SRGB, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8A8_UNORM, MTLPixelFormatRGBA8Unorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatUChar4Normalized, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8A8_SNORM, 
MTLPixelFormatRGBA8Snorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatChar4Normalized, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8A8_USCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8A8_SSCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8A8_UINT, MTLPixelFormatRGBA8Uint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatUChar4, kMVKFormatColorUInt8, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8A8_SINT, MTLPixelFormatRGBA8Sint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatChar4, kMVKFormatColorInt8, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8A8_SRGB, MTLPixelFormatRGBA8Unorm_sRGB, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatUChar4, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8A8_UNORM, MTLPixelFormatRGBA8Unorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatUChar4Normalized, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8A8_SNORM, MTLPixelFormatRGBA8Snorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatChar4Normalized, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8A8_USCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, 
MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8A8_SSCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8A8_UINT, MTLPixelFormatRGBA8Uint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatUChar4, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorUInt8, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8A8_SINT, MTLPixelFormatRGBA8Sint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatChar4, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorInt8, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R8G8B8A8_SRGB, MTLPixelFormatRGBA8Unorm_sRGB, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatUChar4Normalized, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8A8_UNORM, MTLPixelFormatBGRA8Unorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8A8_SNORM, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8A8_USCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8A8_SSCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8A8_UINT, MTLPixelFormatInvalid, 
MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorUInt8, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8A8_SINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorInt8, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8A8_SRGB, MTLPixelFormatBGRA8Unorm_sRGB, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8A8_UNORM, MTLPixelFormatBGRA8Unorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatUChar4Normalized_BGRA, MTLVertexFormatInvalid, 11.0, 10.13, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8A8_SNORM, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8A8_USCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8A8_SSCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8A8_UINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorUInt8, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8A8_SINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, 
MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorInt8, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_B8G8R8A8_SRGB, MTLPixelFormatBGRA8Unorm_sRGB, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_A8B8G8R8_UNORM_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_A8B8G8R8_SNORM_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_A8B8G8R8_USCALED_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_A8B8G8R8_SSCALED_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_A8B8G8R8_UINT_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorUInt8, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_A8B8G8R8_SINT_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorInt8, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_A8B8G8R8_SRGB_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_A8B8G8R8_UNORM_PACK32, MTLPixelFormatRGBA8Unorm, 
MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatUChar4Normalized, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_A8B8G8R8_SNORM_PACK32, MTLPixelFormatRGBA8Snorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatChar4Normalized, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_A8B8G8R8_USCALED_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_A8B8G8R8_SSCALED_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_A8B8G8R8_UINT_PACK32, MTLPixelFormatRGBA8Uint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatUChar4, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorUInt8, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_A8B8G8R8_SINT_PACK32, MTLPixelFormatRGBA8Sint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatChar4, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorInt8, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_A8B8G8R8_SRGB_PACK32, MTLPixelFormatRGBA8Unorm_sRGB, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatUChar4Normalized, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2R10G10B10_UNORM_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - 
MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2R10G10B10_SNORM_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInt1010102Normalized, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2R10G10B10_USCALED_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2R10G10B10_SSCALED_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2R10G10B10_UINT_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorUInt16, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2R10G10B10_SINT_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorInt16, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2R10G10B10_UNORM_PACK32, MTLPixelFormatBGR10A2Unorm, MTLPixelFormatInvalid, 11.0, 10.13, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_NO_FEATS ), // Vulkan packed is reversed + MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2R10G10B10_SNORM_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2R10G10B10_USCALED_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( 
VK_FORMAT_A2R10G10B10_SSCALED_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2R10G10B10_UINT_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorUInt16, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2R10G10B10_SINT_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorInt16, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2B10G10R10_UNORM_PACK32, MTLPixelFormatRGB10A2Unorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatUInt1010102Normalized, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), // Vulkan packed is reversed - MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2B10G10R10_SNORM_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInt1010102Normalized, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2B10G10R10_USCALED_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2B10G10R10_SSCALED_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2B10G10R10_UINT_PACK32, MTLPixelFormatRGB10A2Uint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorUInt16, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_FEATS ), // Vulkan packed is reversed - 
MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2B10G10R10_SINT_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorInt16, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2B10G10R10_UNORM_PACK32, MTLPixelFormatRGB10A2Unorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatUInt1010102Normalized, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), // Vulkan packed is reversed + MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2B10G10R10_SNORM_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInt1010102Normalized, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2B10G10R10_USCALED_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2B10G10R10_SSCALED_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2B10G10R10_UINT_PACK32, MTLPixelFormatRGB10A2Uint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorUInt16, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_FEATS ), // Vulkan packed is reversed + MVK_MAKE_FMT_STRUCT( VK_FORMAT_A2B10G10R10_SINT_PACK32, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorInt16, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16_UNORM, MTLPixelFormatR16Unorm, MTLPixelFormatInvalid, 8.0, 
10.11, 1, 1, 2, MTLVertexFormatUShort2Normalized, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16_SNORM, MTLPixelFormatR16Snorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 2, MTLVertexFormatShort2Normalized, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16_USCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16_SSCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16_UINT, MTLPixelFormatR16Uint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 2, MTLVertexFormatUShort2, kMVKFormatColorUInt16, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16_SINT, MTLPixelFormatR16Sint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 2, MTLVertexFormatShort2, kMVKFormatColorInt16, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16_SFLOAT, MTLPixelFormatR16Float, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 2, MTLVertexFormatHalf2, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16_UNORM, MTLPixelFormatR16Unorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 2, MTLVertexFormatUShortNormalized, MTLVertexFormatUShort2Normalized, 11.0, 10.13, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16_SNORM, MTLPixelFormatR16Snorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 2, MTLVertexFormatShortNormalized, MTLVertexFormatShort2Normalized, 11.0, 10.13, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16_USCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, 
kMTLFmtNA, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16_SSCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 2, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16_UINT, MTLPixelFormatR16Uint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 2, MTLVertexFormatUShort, MTLVertexFormatUShort2, 11.0, 10.13, kMVKFormatColorUInt16, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16_SINT, MTLPixelFormatR16Sint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 2, MTLVertexFormatShort, MTLVertexFormatShort2, 11.0, 10.13, kMVKFormatColorInt16, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16_SFLOAT, MTLPixelFormatR16Float, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 2, MTLVertexFormatHalf, MTLVertexFormatHalf2, 11.0, 10.13, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16_UNORM, MTLPixelFormatRG16Unorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatUShort2Normalized, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16_SNORM, MTLPixelFormatRG16Snorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatShort2Normalized, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16_USCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16_SSCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, 
MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16_UINT, MTLPixelFormatRG16Uint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatUShort2, kMVKFormatColorUInt16, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16_SINT, MTLPixelFormatRG16Sint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatShort2, kMVKFormatColorInt16, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16_SFLOAT, MTLPixelFormatRG16Float, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatHalf2, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16_UNORM, MTLPixelFormatRG16Unorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatUShort2Normalized, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16_SNORM, MTLPixelFormatRG16Snorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatShort2Normalized, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16_USCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16_SSCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16_UINT, MTLPixelFormatRG16Uint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatUShort2, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorUInt16, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16_SINT, 
MTLPixelFormatRG16Sint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatShort2, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorInt16, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16_SFLOAT, MTLPixelFormatRG16Float, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatHalf2, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16_UNORM, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 6, MTLVertexFormatUShort3Normalized, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16_SNORM, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 6, MTLVertexFormatShort3Normalized, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16_USCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 6, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16_SSCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 6, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16_UINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 6, MTLVertexFormatUShort3, kMVKFormatColorUInt16, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16_SINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 6, MTLVertexFormatShort3, kMVKFormatColorInt16, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16_SFLOAT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 6, MTLVertexFormatHalf3, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, 
MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16_UNORM, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 6, MTLVertexFormatUShort3Normalized, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16_SNORM, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 6, MTLVertexFormatShort3Normalized, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16_USCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 6, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16_SSCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 6, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16_UINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 6, MTLVertexFormatUShort3, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorUInt16, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16_SINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 6, MTLVertexFormatShort3, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorInt16, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16_SFLOAT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 6, MTLVertexFormatHalf3, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16A16_UNORM, MTLPixelFormatRGBA16Unorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 8, 
MTLVertexFormatUShort4Normalized, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16A16_SNORM, MTLPixelFormatRGBA16Snorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 8, MTLVertexFormatShort4Normalized, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16A16_USCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 8, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16A16_SSCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 8, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16A16_UINT, MTLPixelFormatRGBA16Uint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 8, MTLVertexFormatUShort4, kMVKFormatColorUInt16, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16A16_SINT, MTLPixelFormatRGBA16Sint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 8, MTLVertexFormatShort4, kMVKFormatColorInt16, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16A16_SFLOAT, MTLPixelFormatRGBA16Float, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 8, MTLVertexFormatHalf4, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16A16_UNORM, MTLPixelFormatRGBA16Unorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 8, MTLVertexFormatUShort4Normalized, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16A16_SNORM, MTLPixelFormatRGBA16Snorm, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 8, MTLVertexFormatShort4Normalized, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( 
VK_FORMAT_R16G16B16A16_USCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16A16_SSCALED, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16A16_UINT, MTLPixelFormatRGBA16Uint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 8, MTLVertexFormatUShort4, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorUInt16, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16A16_SINT, MTLPixelFormatRGBA16Sint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 8, MTLVertexFormatShort4, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorInt16, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R16G16B16A16_SFLOAT, MTLPixelFormatRGBA16Float, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 8, MTLVertexFormatHalf4, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32_UINT, MTLPixelFormatR32Uint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatUInt, kMVKFormatColorUInt32, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32_SINT, MTLPixelFormatR32Sint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatInt, kMVKFormatColorInt32, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32_SFLOAT, MTLPixelFormatR32Float, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatFloat, kMVKFormatColorFloat, MVK_FMT_COLOR_FLOAT32_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32_UINT, MTLPixelFormatR32Uint, MTLPixelFormatInvalid, 
8.0, 10.11, 1, 1, 4, MTLVertexFormatUInt, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorUInt32, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32_SINT, MTLPixelFormatR32Sint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatInt, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorInt32, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32_SFLOAT, MTLPixelFormatR32Float, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatFloat, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FLOAT32_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32G32_UINT, MTLPixelFormatRG32Uint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 8, MTLVertexFormatUInt2, kMVKFormatColorUInt32, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32G32_SINT, MTLPixelFormatRG32Sint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 8, MTLVertexFormatInt2, kMVKFormatColorInt32, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32G32_SFLOAT, MTLPixelFormatRG32Float, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 8, MTLVertexFormatFloat2, kMVKFormatColorFloat, MVK_FMT_COLOR_FLOAT32_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32G32_UINT, MTLPixelFormatRG32Uint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 8, MTLVertexFormatUInt2, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorUInt32, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32G32_SINT, MTLPixelFormatRG32Sint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 8, MTLVertexFormatInt2, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorInt32, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32G32_SFLOAT, MTLPixelFormatRG32Float, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 8, MTLVertexFormatFloat2, MTLVertexFormatInvalid, 8.0, 10.11, 
kMVKFormatColorFloat, MVK_FMT_COLOR_FLOAT32_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32G32B32_UINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 12, MTLVertexFormatUInt3, kMVKFormatColorUInt32, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32G32B32_SINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 12, MTLVertexFormatInt3, kMVKFormatColorInt32, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32G32B32_SFLOAT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 12, MTLVertexFormatFloat3, kMVKFormatColorFloat, MVK_FMT_COLOR_FLOAT32_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32G32B32_UINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 12, MTLVertexFormatUInt3, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorUInt32, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32G32B32_SINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 12, MTLVertexFormatInt3, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorInt32, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32G32B32_SFLOAT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 12, MTLVertexFormatFloat3, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FLOAT32_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32G32B32A32_UINT, MTLPixelFormatRGBA32Uint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 16, MTLVertexFormatUInt4, kMVKFormatColorUInt32, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32G32B32A32_SINT, MTLPixelFormatRGBA32Sint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 16, MTLVertexFormatInt4, kMVKFormatColorInt32, MVK_FMT_COLOR_INTEGER_FEATS, 
MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32G32B32A32_SFLOAT, MTLPixelFormatRGBA32Float, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 16, MTLVertexFormatFloat4, kMVKFormatColorFloat, MVK_FMT_COLOR_FLOAT32_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32G32B32A32_UINT, MTLPixelFormatRGBA32Uint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 16, MTLVertexFormatUInt4, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorUInt32, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32G32B32A32_SINT, MTLPixelFormatRGBA32Sint, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 16, MTLVertexFormatInt4, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorInt32, MVK_FMT_COLOR_INTEGER_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R32G32B32A32_SFLOAT, MTLPixelFormatRGBA32Float, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 16, MTLVertexFormatFloat4, MTLVertexFormatInvalid, 8.0, 10.11, kMVKFormatColorFloat, MVK_FMT_COLOR_FLOAT32_FEATS, MVK_FMT_BUFFER_VTX_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64_UINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 8, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64_SINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 8, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64_SFLOAT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 8, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64_UINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64_SINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, 
kMTLFmtNA, kMTLFmtNA, 1, 1, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64_SFLOAT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64G64_UINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 16, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64G64_SINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 16, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64G64_SFLOAT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 16, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64G64_UINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64G64_SINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64G64_SFLOAT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64G64B64_UINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 24, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, 
MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64G64B64_SINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 24, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64G64B64_SFLOAT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 24, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64G64B64_UINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 24, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64G64B64_SINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 24, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64G64B64_SFLOAT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 24, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64G64B64A64_UINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 32, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64G64B64A64_SINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 32, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64G64B64A64_SFLOAT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 32, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64G64B64A64_UINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 32, 
MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64G64B64A64_SINT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 32, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_R64G64B64A64_SFLOAT, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 1, 1, 32, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_B10G11R11_UFLOAT_PACK32, MTLPixelFormatRG11B10Float, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), // Vulkan packed is reversed - MVK_MAKE_FMT_STRUCT( VK_FORMAT_E5B9G9R9_UFLOAT_PACK32, MTLPixelFormatRGB9E5Float, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), // Vulkan packed is reversed + MVK_MAKE_FMT_STRUCT( VK_FORMAT_B10G11R11_UFLOAT_PACK32, MTLPixelFormatRG11B10Float, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), // Vulkan packed is reversed + MVK_MAKE_FMT_STRUCT( VK_FORMAT_E5B9G9R9_UFLOAT_PACK32, MTLPixelFormatRGB9E5Float, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_E5B9G9R9_FEATS, MVK_FMT_E5B9G9R9_BUFFER_FEATS ), // Vulkan packed is reversed - MVK_MAKE_FMT_STRUCT( VK_FORMAT_D32_SFLOAT, MTLPixelFormatDepth32Float, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatDepthStencil, MVK_FMT_DEPTH_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( 
VK_FORMAT_D32_SFLOAT_S8_UINT, MTLPixelFormatDepth32Float_Stencil8, MTLPixelFormatInvalid, 9.0, 10.11, 1, 1, 5, MTLVertexFormatInvalid, kMVKFormatDepthStencil, MVK_FMT_DEPTH_FEATS, MVK_FMT_BUFFER_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_D32_SFLOAT, MTLPixelFormatDepth32Float, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatDepthStencil, MVK_FMT_DEPTH_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_D32_SFLOAT_S8_UINT, MTLPixelFormatDepth32Float_Stencil8, MTLPixelFormatInvalid, 9.0, 10.11, 1, 1, 5, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatDepthStencil, MVK_FMT_DEPTH_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_S8_UINT, MTLPixelFormatStencil8, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 1, MTLVertexFormatInvalid, kMVKFormatDepthStencil, MVK_FMT_STENCIL_FEATS, MVK_FMT_BUFFER_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_S8_UINT, MTLPixelFormatStencil8, MTLPixelFormatInvalid, 8.0, 10.11, 1, 1, 1, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatDepthStencil, MVK_FMT_STENCIL_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_D16_UNORM, MTLPixelFormatDepth16Unorm, MTLPixelFormatDepth32Float, kMTLFmtNA, 10.12, 1, 1, 2, MTLVertexFormatInvalid, kMVKFormatDepthStencil, MVK_FMT_DEPTH_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_D16_UNORM_S8_UINT, MTLPixelFormatInvalid, MTLPixelFormatDepth16Unorm_Stencil8, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatInvalid, kMVKFormatDepthStencil, MVK_FMT_DEPTH_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_D24_UNORM_S8_UINT, MTLPixelFormatDepth24Unorm_Stencil8, MTLPixelFormatDepth32Float_Stencil8, kMTLFmtNA, 10.11, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatDepthStencil, MVK_FMT_DEPTH_FEATS, MVK_FMT_BUFFER_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_D16_UNORM, MTLPixelFormatDepth16Unorm, MTLPixelFormatDepth32Float, kMTLFmtNA, 10.12, 1, 
1, 2, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatDepthStencil, MVK_FMT_DEPTH_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_D16_UNORM_S8_UINT, MTLPixelFormatInvalid, MTLPixelFormatDepth16Unorm_Stencil8, kMTLFmtNA, kMTLFmtNA, 1, 1, 3, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatDepthStencil, MVK_FMT_DEPTH_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_D24_UNORM_S8_UINT, MTLPixelFormatDepth24Unorm_Stencil8, MTLPixelFormatDepth32Float_Stencil8, kMTLFmtNA, 10.11, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatDepthStencil, MVK_FMT_DEPTH_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_X8_D24_UNORM_PACK32, MTLPixelFormatInvalid, MTLPixelFormatDepth24Unorm_Stencil8, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, kMVKFormatDepthStencil, MVK_FMT_DEPTH_FEATS, MVK_FMT_BUFFER_FEATS ), // Vulkan packed is reversed + MVK_MAKE_FMT_STRUCT( VK_FORMAT_X8_D24_UNORM_PACK32, MTLPixelFormatInvalid, MTLPixelFormatDepth24Unorm_Stencil8, kMTLFmtNA, kMTLFmtNA, 1, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatDepthStencil, MVK_FMT_DEPTH_FEATS, MVK_FMT_NO_FEATS ), // Vulkan packed is reversed - MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC1_RGB_UNORM_BLOCK, MTLPixelFormatBC1_RGBA, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 8, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC1_RGB_SRGB_BLOCK, MTLPixelFormatBC1_RGBA_sRGB, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 8, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC1_RGBA_UNORM_BLOCK, MTLPixelFormatBC1_RGBA, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 8, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC1_RGBA_SRGB_BLOCK, 
MTLPixelFormatBC1_RGBA_sRGB, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 8, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC1_RGB_UNORM_BLOCK, MTLPixelFormatBC1_RGBA, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC1_RGB_SRGB_BLOCK, MTLPixelFormatBC1_RGBA_sRGB, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC1_RGBA_UNORM_BLOCK, MTLPixelFormatBC1_RGBA, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC1_RGBA_SRGB_BLOCK, MTLPixelFormatBC1_RGBA_sRGB, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC2_UNORM_BLOCK, MTLPixelFormatBC2_RGBA, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC2_SRGB_BLOCK, MTLPixelFormatBC2_RGBA_sRGB, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC2_UNORM_BLOCK, MTLPixelFormatBC2_RGBA, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC2_SRGB_BLOCK, MTLPixelFormatBC2_RGBA_sRGB, MTLPixelFormatInvalid, 
kMTLFmtNA, 10.11, 4, 4, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC3_UNORM_BLOCK, MTLPixelFormatBC3_RGBA, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC3_SRGB_BLOCK, MTLPixelFormatBC3_RGBA_sRGB, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC3_UNORM_BLOCK, MTLPixelFormatBC3_RGBA, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC3_SRGB_BLOCK, MTLPixelFormatBC3_RGBA_sRGB, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC4_UNORM_BLOCK, MTLPixelFormatBC4_RUnorm, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 8, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC4_SNORM_BLOCK, MTLPixelFormatBC4_RSnorm, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 8, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC4_UNORM_BLOCK, MTLPixelFormatBC4_RUnorm, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC4_SNORM_BLOCK, MTLPixelFormatBC4_RSnorm, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, 
MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC5_UNORM_BLOCK, MTLPixelFormatBC5_RGUnorm, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC5_SNORM_BLOCK, MTLPixelFormatBC5_RGSnorm, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC5_UNORM_BLOCK, MTLPixelFormatBC5_RGUnorm, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC5_SNORM_BLOCK, MTLPixelFormatBC5_RGSnorm, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC6H_UFLOAT_BLOCK, MTLPixelFormatBC6H_RGBUfloat, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC6H_SFLOAT_BLOCK, MTLPixelFormatBC6H_RGBFloat, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC6H_UFLOAT_BLOCK, MTLPixelFormatBC6H_RGBUfloat, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC6H_SFLOAT_BLOCK, MTLPixelFormatBC6H_RGBFloat, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( 
VK_FORMAT_BC7_UNORM_BLOCK, MTLPixelFormatBC7_RGBAUnorm, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC7_SRGB_BLOCK, MTLPixelFormatBC7_RGBAUnorm_sRGB, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC7_UNORM_BLOCK, MTLPixelFormatBC7_RGBAUnorm, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_BC7_SRGB_BLOCK, MTLPixelFormatBC7_RGBAUnorm_sRGB, MTLPixelFormatInvalid, kMTLFmtNA, 10.11, 4, 4, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK, MTLPixelFormatETC2_RGB8, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 8, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK, MTLPixelFormatETC2_RGB8_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 8, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK, MTLPixelFormatETC2_RGB8A1, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 8, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK, MTLPixelFormatETC2_RGB8A1_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 8, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK, MTLPixelFormatETC2_RGB8, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, 
kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK, MTLPixelFormatETC2_RGB8_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK, MTLPixelFormatETC2_RGB8A1, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK, MTLPixelFormatETC2_RGB8A1_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK, MTLPixelFormatEAC_RGBA8, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK, MTLPixelFormatEAC_RGBA8_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK, MTLPixelFormatEAC_RGBA8, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK, MTLPixelFormatEAC_RGBA8_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_EAC_R11_UNORM_BLOCK, MTLPixelFormatEAC_R11Unorm, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 8, 
MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_EAC_R11_SNORM_BLOCK, MTLPixelFormatEAC_R11Snorm, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 8, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_EAC_R11_UNORM_BLOCK, MTLPixelFormatEAC_R11Unorm, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_EAC_R11_SNORM_BLOCK, MTLPixelFormatEAC_R11Snorm, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_EAC_R11G11_UNORM_BLOCK, MTLPixelFormatEAC_RG11Unorm, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_EAC_R11G11_SNORM_BLOCK, MTLPixelFormatEAC_RG11Snorm, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_EAC_R11G11_UNORM_BLOCK, MTLPixelFormatEAC_RG11Unorm, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_EAC_R11G11_SNORM_BLOCK, MTLPixelFormatEAC_RG11Snorm, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_4x4_UNORM_BLOCK, MTLPixelFormatASTC_4x4_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, 
MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_4x4_SRGB_BLOCK, MTLPixelFormatASTC_4x4_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_5x4_UNORM_BLOCK, MTLPixelFormatASTC_5x4_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 5, 4, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_5x4_SRGB_BLOCK, MTLPixelFormatASTC_5x4_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 5, 4, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_5x5_UNORM_BLOCK, MTLPixelFormatASTC_5x5_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 5, 5, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_5x5_SRGB_BLOCK, MTLPixelFormatASTC_5x5_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 5, 5, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_6x5_UNORM_BLOCK, MTLPixelFormatASTC_6x5_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 6, 5, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_6x5_SRGB_BLOCK, MTLPixelFormatASTC_6x5_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 6, 5, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_6x6_UNORM_BLOCK, MTLPixelFormatASTC_6x6_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 6, 6, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_6x6_SRGB_BLOCK, MTLPixelFormatASTC_6x6_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 6, 6, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( 
VK_FORMAT_ASTC_8x5_UNORM_BLOCK, MTLPixelFormatASTC_8x5_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 8, 5, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_8x5_SRGB_BLOCK, MTLPixelFormatASTC_8x5_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 8, 5, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_8x6_UNORM_BLOCK, MTLPixelFormatASTC_8x6_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 8, 6, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_8x6_SRGB_BLOCK, MTLPixelFormatASTC_8x6_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 8, 6, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_8x8_UNORM_BLOCK, MTLPixelFormatASTC_8x8_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 8, 8, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_8x8_SRGB_BLOCK, MTLPixelFormatASTC_8x8_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 8, 8, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_10x5_UNORM_BLOCK, MTLPixelFormatASTC_10x5_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 10, 5, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_10x5_SRGB_BLOCK, MTLPixelFormatASTC_10x5_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 10, 5, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_10x6_UNORM_BLOCK, MTLPixelFormatASTC_10x6_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 10, 6, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_10x6_SRGB_BLOCK, 
MTLPixelFormatASTC_10x6_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 10, 6, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_10x8_UNORM_BLOCK, MTLPixelFormatASTC_10x8_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 10, 8, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_10x8_SRGB_BLOCK, MTLPixelFormatASTC_10x8_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 10, 8, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_10x10_UNORM_BLOCK, MTLPixelFormatASTC_10x10_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 10, 10, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_10x10_SRGB_BLOCK, MTLPixelFormatASTC_10x10_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 10, 10, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_12x10_UNORM_BLOCK, MTLPixelFormatASTC_12x10_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 12, 10, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_12x10_SRGB_BLOCK, MTLPixelFormatASTC_12x10_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 12, 10, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_12x12_UNORM_BLOCK, MTLPixelFormatASTC_12x12_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 12, 12, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_12x12_SRGB_BLOCK, MTLPixelFormatASTC_12x12_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 12, 12, 16, MTLVertexFormatInvalid, kMVKFormatNone, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_4x4_UNORM_BLOCK, 
MTLPixelFormatASTC_4x4_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_4x4_SRGB_BLOCK, MTLPixelFormatASTC_4x4_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 4, 4, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_5x4_UNORM_BLOCK, MTLPixelFormatASTC_5x4_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 5, 4, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_5x4_SRGB_BLOCK, MTLPixelFormatASTC_5x4_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 5, 4, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_5x5_UNORM_BLOCK, MTLPixelFormatASTC_5x5_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 5, 5, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_5x5_SRGB_BLOCK, MTLPixelFormatASTC_5x5_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 5, 5, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_6x5_UNORM_BLOCK, MTLPixelFormatASTC_6x5_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 6, 5, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_6x5_SRGB_BLOCK, MTLPixelFormatASTC_6x5_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 6, 5, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, 
kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_6x6_UNORM_BLOCK, MTLPixelFormatASTC_6x6_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 6, 6, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_6x6_SRGB_BLOCK, MTLPixelFormatASTC_6x6_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 6, 6, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_8x5_UNORM_BLOCK, MTLPixelFormatASTC_8x5_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 8, 5, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_8x5_SRGB_BLOCK, MTLPixelFormatASTC_8x5_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 8, 5, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_8x6_UNORM_BLOCK, MTLPixelFormatASTC_8x6_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 8, 6, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_8x6_SRGB_BLOCK, MTLPixelFormatASTC_8x6_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 8, 6, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_8x8_UNORM_BLOCK, MTLPixelFormatASTC_8x8_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 8, 8, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_8x8_SRGB_BLOCK, MTLPixelFormatASTC_8x8_sRGB, 
MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 8, 8, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_10x5_UNORM_BLOCK, MTLPixelFormatASTC_10x5_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 10, 5, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_10x5_SRGB_BLOCK, MTLPixelFormatASTC_10x5_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 10, 5, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_10x6_UNORM_BLOCK, MTLPixelFormatASTC_10x6_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 10, 6, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_10x6_SRGB_BLOCK, MTLPixelFormatASTC_10x6_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 10, 6, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_10x8_UNORM_BLOCK, MTLPixelFormatASTC_10x8_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 10, 8, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_10x8_SRGB_BLOCK, MTLPixelFormatASTC_10x8_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 10, 8, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_10x10_UNORM_BLOCK, MTLPixelFormatASTC_10x10_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 10, 10, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, 
kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_10x10_SRGB_BLOCK, MTLPixelFormatASTC_10x10_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 10, 10, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_12x10_UNORM_BLOCK, MTLPixelFormatASTC_12x10_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 12, 10, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_12x10_SRGB_BLOCK, MTLPixelFormatASTC_12x10_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 12, 10, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_12x12_UNORM_BLOCK, MTLPixelFormatASTC_12x12_LDR, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 12, 12, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_ASTC_12x12_SRGB_BLOCK, MTLPixelFormatASTC_12x12_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 12, 12, 16, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), // Extension VK_IMG_format_pvrtc - MVK_MAKE_FMT_STRUCT( VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG, MTLPixelFormatPVRTC_RGBA_2BPP, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 8, 4, 8, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG, MTLPixelFormatPVRTC_RGBA_4BPP, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 8, 4, 8, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG, MTLPixelFormatInvalid, 
MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 4, 4, 8, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 4, 4, 8, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG, MTLPixelFormatPVRTC_RGBA_2BPP_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 8, 4, 8, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG, MTLPixelFormatPVRTC_RGBA_4BPP_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 8, 4, 8, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 4, 4, 8, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 4, 4, 8, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG, MTLPixelFormatPVRTC_RGBA_2BPP, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 8, 4, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG, MTLPixelFormatPVRTC_RGBA_4BPP, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 8, 4, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 4, 4, 8, MTLVertexFormatInvalid, 
MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 4, 4, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG, MTLPixelFormatPVRTC_RGBA_2BPP_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 8, 4, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG, MTLPixelFormatPVRTC_RGBA_4BPP_sRGB, MTLPixelFormatInvalid, 8.0, kMTLFmtNA, 8, 4, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_COMPRESSED_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 4, 4, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG, MTLPixelFormatInvalid, MTLPixelFormatInvalid, kMTLFmtNA, kMTLFmtNA, 4, 4, 8, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatNone, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS ), // Future extension VK_KHX_color_conversion and Vulkan 1.1. 
- MVK_MAKE_FMT_STRUCT( VK_FORMAT_UNDEFINED, MTLPixelFormatGBGR422, MTLPixelFormatInvalid, 8.0, 10.11, 2, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), - MVK_MAKE_FMT_STRUCT( VK_FORMAT_UNDEFINED, MTLPixelFormatBGRG422, MTLPixelFormatInvalid, 8.0, 10.11, 2, 1, 4, MTLVertexFormatInvalid, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_UNDEFINED, MTLPixelFormatGBGR422, MTLPixelFormatInvalid, 8.0, 10.11, 2, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), + MVK_MAKE_FMT_STRUCT( VK_FORMAT_UNDEFINED, MTLPixelFormatBGRG422, MTLPixelFormatInvalid, 8.0, 10.11, 2, 1, 4, MTLVertexFormatInvalid, MTLVertexFormatInvalid, kMTLFmtNA, kMTLFmtNA, kMVKFormatColorFloat, MVK_FMT_COLOR_FEATS, MVK_FMT_BUFFER_FEATS ), }; static const uint32_t _vkFormatCoreCount = VK_FORMAT_ASTC_12x12_SRGB_BLOCK + 1; static const uint32_t _mtlFormatCount = MTLPixelFormatX32_Stencil8 + 2; // The actual last enum value is not available on iOS +static const uint32_t _mtlVertexFormatCount = MTLVertexFormatHalf + 1; // Map for mapping large VkFormat values to an index. typedef unordered_map MVKFormatIndexByVkFormatMap; @@ -453,6 +484,7 @@ static MVKFormatIndexByVkFormatMap* _pFmtDescIndicesByVkFormatsExt; // Metal formats have small values and are mapped by simple lookup array. static uint16_t _fmtDescIndicesByMTLPixelFormats[_mtlFormatCount]; +static uint16_t _fmtDescIndicesByMTLVertexFormats[_mtlVertexFormatCount]; /** * Populates the lookup maps that map Vulkan and Metal pixel formats to one-another. 
@@ -469,6 +501,7 @@ static void MVKInitFormatMaps() { // Set all VkFormats and MTLPixelFormats to undefined/invalid memset(_fmtDescIndicesByVkFormatsCore, 0, sizeof(_fmtDescIndicesByVkFormatsCore)); memset(_fmtDescIndicesByMTLPixelFormats, 0, sizeof(_fmtDescIndicesByMTLPixelFormats)); + memset(_fmtDescIndicesByMTLVertexFormats, 0, sizeof(_fmtDescIndicesByMTLVertexFormats)); _pFmtDescIndicesByVkFormatsExt = new MVKFormatIndexByVkFormatMap(); @@ -493,6 +526,7 @@ static void MVKInitFormatMaps() { // If the Metal format is defined, create a lookup between the Metal format and an // index to the format info. Metal formats are small, so use a simple lookup array. if (tfm.mtl != MTLPixelFormatInvalid) { _fmtDescIndicesByMTLPixelFormats[tfm.mtl] = fmtIdx; } + if (tfm.mtlVertexFormat != MTLVertexFormatInvalid) { _fmtDescIndicesByMTLVertexFormats[tfm.mtlVertexFormat] = fmtIdx; } } } @@ -508,6 +542,12 @@ inline const MVKFormatDesc& formatDescForMTLPixelFormat(MTLPixelFormat mtlFormat return _formatDescriptions[fmtIdx]; } +// Return a reference to the format description corresponding to the MTLVertexFormat. +inline const MVKFormatDesc& formatDescForMTLVertexFormat(MTLVertexFormat mtlFormat) { + uint16_t fmtIdx = (mtlFormat < _mtlVertexFormatCount) ? 
_fmtDescIndicesByMTLVertexFormats[mtlFormat] : 0; + return _formatDescriptions[fmtIdx]; +} + MVK_PUBLIC_SYMBOL bool mvkVkFormatIsSupported(VkFormat vkFormat) { return formatDescForVkFormat(vkFormat).isSupported(); } @@ -599,15 +639,18 @@ MVK_PUBLIC_SYMBOL size_t mvkMTLPixelFormatBytesPerLayer(MTLPixelFormat mtlFormat } MVK_PUBLIC_SYMBOL VkFormatProperties mvkVkFormatProperties(VkFormat vkFormat, bool assumeGPUSupportsDefault) { + VkFormatProperties fmtProps = {MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS}; const MVKFormatDesc& fmtDesc = formatDescForVkFormat(vkFormat); if (assumeGPUSupportsDefault && fmtDesc.isSupported()) { - return fmtDesc.properties; + fmtProps = fmtDesc.properties; + if (!fmtDesc.vertexIsSupportedOrSubstitutable()) { + fmtProps.bufferFeatures &= ~VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT; + } } else { // If texture format is unsupported, vertex buffer format may still be. - VkFormatProperties fmtProps = {MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS, MVK_FMT_NO_FEATS}; fmtProps.bufferFeatures |= fmtDesc.properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT; - return fmtProps; } + return fmtProps; } MVK_PUBLIC_SYMBOL const char* mvkVkFormatName(VkFormat vkFormat) { @@ -619,7 +662,31 @@ MVK_PUBLIC_SYMBOL const char* mvkMTLPixelFormatName(MTLPixelFormat mtlFormat) { } MVK_PUBLIC_SYMBOL MTLVertexFormat mvkMTLVertexFormatFromVkFormat(VkFormat vkFormat) { - return formatDescForVkFormat(vkFormat).mtlVertexFormat; + MTLVertexFormat mtlVtxFmt = MTLVertexFormatInvalid; + + const MVKFormatDesc& fmtDesc = formatDescForVkFormat(vkFormat); + if (fmtDesc.vertexIsSupported()) { + mtlVtxFmt = fmtDesc.mtlVertexFormat; + } else if (vkFormat != VK_FORMAT_UNDEFINED) { + // If the MTLVertexFormat is not supported but VkFormat is valid, + // report an error, and possibly substitute a different MTLVertexFormat. + string errMsg; + errMsg += "VkFormat "; + errMsg += (fmtDesc.vkName) ? 
fmtDesc.vkName : to_string(fmtDesc.vk); + errMsg += " is not supported for vertex buffers on this platform."; + + if (fmtDesc.vertexIsSupportedOrSubstitutable()) { + mtlVtxFmt = fmtDesc.mtlVertexFormatSubstitute; + + const MVKFormatDesc& fmtDescSubs = formatDescForMTLVertexFormat(mtlVtxFmt); + errMsg += " Using VkFormat "; + errMsg += (fmtDescSubs.vkName) ? fmtDescSubs.vkName : to_string(fmtDescSubs.vk); + errMsg += " instead."; + } + mvkNotifyErrorWithText(VK_ERROR_FORMAT_NOT_SUPPORTED, "%s", errMsg.c_str()); + } + + return mtlVtxFmt; } MVK_PUBLIC_SYMBOL MTLClearColor mvkMTLClearColorFromVkClearValue(VkClearValue vkClearValue, @@ -729,7 +796,13 @@ MVK_PUBLIC_SYMBOL MTLTextureType mvkMTLTextureTypeFromVkImageViewType(VkImageVie case VK_IMAGE_VIEW_TYPE_1D: return MTLTextureType1D; case VK_IMAGE_VIEW_TYPE_1D_ARRAY: return MTLTextureType1DArray; case VK_IMAGE_VIEW_TYPE_2D: return (isMultisample ? MTLTextureType2DMultisample : MTLTextureType2D); - case VK_IMAGE_VIEW_TYPE_2D_ARRAY: return MTLTextureType2DArray; + case VK_IMAGE_VIEW_TYPE_2D_ARRAY: +#if MVK_MACOS + if (isMultisample) { + return MTLTextureType2DMultisampleArray; + } +#endif + return MTLTextureType2DArray; case VK_IMAGE_VIEW_TYPE_3D: return MTLTextureType3D; case VK_IMAGE_VIEW_TYPE_CUBE: return MTLTextureTypeCube; #if MVK_MACOS diff --git a/MoltenVK/MoltenVK/Vulkan/vulkan.mm b/MoltenVK/MoltenVK/Vulkan/vulkan.mm index 869f8a0e..2d00950d 100644 --- a/MoltenVK/MoltenVK/Vulkan/vulkan.mm +++ b/MoltenVK/MoltenVK/Vulkan/vulkan.mm @@ -369,6 +369,7 @@ MVK_PUBLIC_SYMBOL void vkGetImageSparseMemoryRequirements( VkSparseImageMemoryRequirements* pSparseMemoryRequirements) { MVKLogUnimplemented("vkGetImageSparseMemoryRequirements"); + *pNumRequirements = 0; } MVK_PUBLIC_SYMBOL void vkGetPhysicalDeviceSparseImageFormatProperties( @@ -382,6 +383,7 @@ MVK_PUBLIC_SYMBOL void vkGetPhysicalDeviceSparseImageFormatProperties( VkSparseImageFormatProperties* pProperties) { 
MVKLogUnimplemented("vkGetPhysicalDeviceSparseImageFormatProperties"); + *pPropertyCount = 0; } MVK_PUBLIC_SYMBOL VkResult vkQueueBindSparse( @@ -1572,6 +1574,7 @@ MVK_PUBLIC_SYMBOL void vkGetImageSparseMemoryRequirements2KHR( VkSparseImageMemoryRequirements2KHR* pSparseMemoryRequirements) { MVKLogUnimplemented("vkGetImageSparseMemoryRequirements2KHR"); + *pSparseMemoryRequirementCount = 0; } @@ -1636,6 +1639,7 @@ MVK_PUBLIC_SYMBOL void vkGetPhysicalDeviceSparseImageFormatProperties2KHR( VkSparseImageFormatProperties2KHR* pProperties) { MVKLogUnimplemented("vkGetPhysicalDeviceSparseImageFormatProperties"); + *pPropertyCount = 0; } @@ -1658,7 +1662,7 @@ MVK_PUBLIC_SYMBOL void vkGetDescriptorSetLayoutSupportKHR( VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupportKHR* pSupport) { - MVKDevice* mvkDevice = (MVKDevice*)device; + MVKDevice* mvkDevice = MVKDevice::getMVKDevice(device); mvkDevice->getDescriptorSetLayoutSupport(pCreateInfo, pSupport); } diff --git a/MoltenVK/scripts/create_dylib.sh b/MoltenVK/scripts/create_dylib.sh index 34291d40..09f5bbaf 100755 --- a/MoltenVK/scripts/create_dylib.sh +++ b/MoltenVK/scripts/create_dylib.sh @@ -11,7 +11,9 @@ if test x"${ENABLE_BITCODE}" = xYES; then fi if test x"${ENABLE_THREAD_SANITIZER}" = xYES; then - MVK_TSAN="-fsanitize=thread" + MVK_SAN="-fsanitize=thread" +elif test x"${ENABLE_ADDRESS_SANITIZER}" = xYES; then + MVK_SAN="-fsanitize=address" fi clang++ \ @@ -23,7 +25,7 @@ $(printf -- "-arch %s " ${ARCHS}) \ -install_name "@rpath/${MVK_DYLIB_NAME}" \ -Wno-incompatible-sysroot \ ${MVK_EMBED_BITCODE} \ -${MVK_TSAN} \ +${MVK_SAN} \ -isysroot ${SDK_DIR} \ -iframework ${MVK_SYS_FWK_DIR} \ -framework Metal ${MVK_IOSURFACE_FWK} -framework ${MVK_UX_FWK} -framework QuartzCore -framework IOKit -framework Foundation \ @@ -32,5 +34,5 @@ ${MVK_TSAN} \ -force_load "${BUILT_PRODUCTS_DIR}/lib${PRODUCT_NAME}.a" if test "$CONFIGURATION" = Debug; then - dsymutil 
"${BUILT_PRODUCTS_DIR}/${MVK_DYLIB_NAME}" -o "${DWARF_DSYM_FOLDER_PATH}/${MVK_DYLIB_NAME}${DWARF_DSYM_FILE_NAME}" + dsymutil "${BUILT_PRODUCTS_DIR}/${MVK_DYLIB_NAME}" -o "${DWARF_DSYM_FOLDER_PATH}/${MVK_DYLIB_NAME}.dSYM" fi From 4cae69542d3fcd4e4f961cde4d1db59380058b81 Mon Sep 17 00:00:00 2001 From: aerofly Date: Wed, 12 Dec 2018 16:24:57 +0100 Subject: [PATCH 07/14] Update fork to latest MoltenVK --- ExternalRevisions/SPIRV-Cross_repo_revision | 2 +- .../SPIRVToMSLConverter.cpp | 19 +++++++++++++++++++ .../SPIRVToMSLConverter.h | 2 ++ 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/ExternalRevisions/SPIRV-Cross_repo_revision b/ExternalRevisions/SPIRV-Cross_repo_revision index bb8c3d06..511b7a38 100644 --- a/ExternalRevisions/SPIRV-Cross_repo_revision +++ b/ExternalRevisions/SPIRV-Cross_repo_revision @@ -1 +1 @@ -52f26ee73648a25e9465035a55b276898f453830 +ed16b3e69985feaf565efbecea70a1cc2fca2a58 diff --git a/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.cpp b/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.cpp index 8752a727..ee6be256 100644 --- a/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.cpp +++ b/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.cpp @@ -49,6 +49,25 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverterOptions::matches(const SPIRVToMSLConve return true; } +MVK_PUBLIC_SYMBOL std::string SPIRVToMSLConverterOptions::printMSLVersion(uint32_t mslVersion, bool includePatch) { + string verStr; + + uint32_t major = mslVersion / 10000; + verStr += to_string(major); + + uint32_t minor = (mslVersion - makeMSLVersion(major)) / 100; + verStr += "."; + verStr += to_string(minor); + + if (includePatch) { + uint32_t patch = mslVersion - makeMSLVersion(major, minor); + verStr += "."; + verStr += to_string(patch); + } + + return verStr; +} + MVK_PUBLIC_SYMBOL bool MSLVertexAttribute::matches(const MSLVertexAttribute& other) const { if (location != 
other.location) { return false; } if (mslBuffer != other.mslBuffer) { return false; } diff --git a/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.h b/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.h index f9dff22a..07bcb2ef 100644 --- a/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.h +++ b/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.h @@ -66,6 +66,8 @@ namespace mvk { return (major * 10000) + (minor * 100) + patch; } + static std::string printMSLVersion(uint32_t mslVersion, bool includePatch = false); + } SPIRVToMSLConverterOptions; /** From a4c6f64ecf2af2b7e11f7cb2cc31da14d4f29fd6 Mon Sep 17 00:00:00 2001 From: aerofly Date: Wed, 12 Dec 2018 17:38:42 +0100 Subject: [PATCH 08/14] Default to swizzle_texture_samples=false --- .../MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.cpp b/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.cpp index ee6be256..dfe09659 100644 --- a/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.cpp +++ b/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.cpp @@ -237,7 +237,7 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverter::convert(SPIRVToMSLConverterContext& mslOpts.aux_buffer_index = context.options.auxBufferIndex; mslOpts.enable_point_size_builtin = context.options.isRenderingPoints; mslOpts.disable_rasterization = context.options.isRasterizationDisabled; - mslOpts.swizzle_texture_samples = true; + mslOpts.swizzle_texture_samples = false; pMSLCompiler->set_msl_options(mslOpts); auto scOpts = pMSLCompiler->get_common_options(); From 5811ae5811d2f7ea987d4a7b396f40a5f4958db4 Mon Sep 17 00:00:00 2001 From: aerofly Date: Wed, 12 Dec 2018 20:09:24 +0100 Subject: [PATCH 09/14] fix conflicts --- MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm | 
10 +++++----- MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm | 2 +- MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm | 14 +++++++------- MoltenVK/MoltenVK/GPUObjects/MVKSync.h | 2 +- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm index b8dd2686..9e688a85 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm +++ b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm @@ -159,11 +159,11 @@ void MVKCmdPushConstants::setContent(VkPipelineLayout layout, _stageFlags = stageFlags; _offset = offset; - _pushConstants.clear(); - for( uint32_t i=0; i( pValues)[i] ); - } + _pushConstants.clear(); + for( uint32_t i=0; i( pValues)[i] ); + } //_pushConstants.resize(size); //copy_n((char*)pValues, size, _pushConstants.begin()); } diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm index 9aba1e73..384bff85 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm @@ -1217,7 +1217,7 @@ void MVKPhysicalDevice::logGPUInfo() { if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily5_v1] ) { logMsg += "\n\t\tiOS GPU Family 5 v1"; } if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily4_v2] ) { logMsg += "\n\t\tiOS GPU Family 4 v2"; } - if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily4_v1] ) { logMsg += "\n\t\tiOS GPU Family 4 v1"; } + if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily4_v1] ) { logMsg += "\n\t\tiOS GPU Family 4 v1"; } if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily3_v4] ) { logMsg += "\n\t\tiOS GPU Family 3 v4"; } if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily3_v3] ) { logMsg += "\n\t\tiOS GPU Family 3 v3"; } diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm index ef6f522e..0896c6f4 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm +++ 
b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm @@ -318,9 +318,9 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::getMTLRenderPipelineDescriptor uint32_t vbCnt = pCreateInfo->pVertexInputState->vertexBindingDescriptionCount; // Add shader stages. Compile vertex shder before others just in case conversion changes anything...like rasterizaion disable. - for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) { - const VkPipelineShaderStageCreateInfo* pSS = &pCreateInfo->pStages[i]; - if (mvkAreFlagsEnabled(pSS->stage, VK_SHADER_STAGE_VERTEX_BIT)) { + for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) { + const VkPipelineShaderStageCreateInfo* pSS = &pCreateInfo->pStages[i]; + if (mvkAreFlagsEnabled(pSS->stage, VK_SHADER_STAGE_VERTEX_BIT)) { shaderContext.options.entryPointStage = spv::ExecutionModelVertex; shaderContext.options.auxBufferIndex = _auxBufferIndex.vertex; shaderContext.options.entryPointName = pSS->pName; @@ -337,12 +337,12 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::getMTLRenderPipelineDescriptor setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_INITIALIZATION_FAILED, "Vertex shader requires auxiliary buffer, but there is no free slot to pass it.")); return nil; } - } + } } - // bug fix by aerofly -> if no fragment shader is used and _needsFragmentAuxBuffer was true newBufferWithLength was trying to allocate zero bytes - // please verify this fix - _needsFragmentAuxBuffer = false; + // bug fix by aerofly -> if no fragment shader is used and _needsFragmentAuxBuffer was true newBufferWithLength was trying to allocate zero bytes + // please verify this fix + needsFragmentAuxBuffer = false; // Fragment shader - only add if rasterization is enabled for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) { const VkPipelineShaderStageCreateInfo* pSS = &pCreateInfo->pStages[i]; diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKSync.h b/MoltenVK/MoltenVK/GPUObjects/MVKSync.h index 07b8b9d9..6f2b4308 100644 --- 
a/MoltenVK/MoltenVK/GPUObjects/MVKSync.h +++ b/MoltenVK/MoltenVK/GPUObjects/MVKSync.h @@ -186,7 +186,7 @@ class MVKFenceSitter : public MVKBaseObject { public: /** - * If this instance has been configured to wait for fences, blocks processing on the + * If this instance has been configured to wait for fences, blocks processing on the * current thread until any or all of the fences that this instance is waiting for are * signaled, or until the specified timeout in nanoseconds expires. If this instance * has not been configured to wait for fences, this function immediately returns true. From 6aad9dc0baa6d57ade1b750f834139179e19c0dd Mon Sep 17 00:00:00 2001 From: aerofly Date: Wed, 12 Dec 2018 20:57:11 +0100 Subject: [PATCH 10/14] fix conflicts --- MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm index e1f7bcb0..2851e1c4 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm @@ -342,7 +342,7 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::getMTLRenderPipelineDescriptor // bug fix by aerofly -> if no fragment shader is used and _needsFragmentAuxBuffer was true newBufferWithLength was trying to allocate zero bytes // please verify this fix - needsFragmentAuxBuffer = false; + _needsFragmentAuxBuffer = false; // Fragment shader - only add if rasterization is enabled for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) { From fba2eefa5e67359ba0989ea6ca04b414120617a3 Mon Sep 17 00:00:00 2001 From: aerofly Date: Fri, 14 Dec 2018 14:19:30 +0100 Subject: [PATCH 11/14] Modify MVKVector with feedback from cdavis5e --- MoltenVK/MoltenVK/API/mvk_datatypes.h | 8 ++++ MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h | 3 +- MoltenVK/MoltenVK/Commands/MVKCmdDraw.h | 1 + MoltenVK/MoltenVK/Commands/MVKCmdPipeline.h | 1 + MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm | 9 +--- 
MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h | 2 +- .../MoltenVK/Commands/MVKCommandBuffer.mm | 8 +--- .../Commands/MVKCommandEncoderState.mm | 6 +-- ...KCommandPipelineStateFactoryShaderSource.h | 29 ------------- .../Commands/MVKCommandResourceFactory.h | 1 + .../Commands/MVKCommandResourceFactory.mm | 41 +++++++++++++++++- MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm | 1 + MoltenVK/MoltenVK/GPUObjects/MVKImage.mm | 14 +++++++ MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm | 8 ++-- MoltenVK/MoltenVK/GPUObjects/MVKRenderPass.h | 5 ++- MoltenVK/MoltenVK/GPUObjects/MVKRenderPass.mm | 4 +- .../OS/MTLSamplerDescriptor+MoltenVK.h | 9 ++++ .../OS/MTLSamplerDescriptor+MoltenVK.m | 13 ++++++ MoltenVK/MoltenVK/Utility/MVKVector.h | 42 ++++++++++++++++--- .../MoltenVK/Utility/MVKVectorAllocator.h | 1 - MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm | 14 +++++++ 21 files changed, 155 insertions(+), 65 deletions(-) diff --git a/MoltenVK/MoltenVK/API/mvk_datatypes.h b/MoltenVK/MoltenVK/API/mvk_datatypes.h index b21ba9ae..b638b02b 100644 --- a/MoltenVK/MoltenVK/API/mvk_datatypes.h +++ b/MoltenVK/MoltenVK/API/mvk_datatypes.h @@ -295,6 +295,14 @@ VkExtent3D mvkMipmapBaseSizeFromLevelSize3D(VkExtent3D levelSize, uint32_t level */ MTLSamplerAddressMode mvkMTLSamplerAddressModeFromVkSamplerAddressMode(VkSamplerAddressMode vkMode); +#if MVK_MACOS +/** + * Returns the Metal MTLSamplerBorderColor corresponding to the specified Vulkan VkBorderColor, + * or returns MTLSamplerBorderColorTransparentBlack if no corresponding MTLSamplerBorderColor exists. + */ +MTLSamplerBorderColor mvkMTLSamplerBorderColorFromVkBorderColor(VkBorderColor vkColor); +#endif + /** * Returns the Metal MTLSamplerMinMagFilter corresponding to the specified Vulkan VkFilter, * or returns MTLSamplerMinMagFilterNearest if no corresponding MTLSamplerMinMagFilter exists. 
diff --git a/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h b/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h index ccd9b6a8..467012c8 100644 --- a/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h +++ b/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h @@ -54,7 +54,7 @@ extern "C" { #define MVK_VERSION MVK_MAKE_VERSION(MVK_VERSION_MAJOR, MVK_VERSION_MINOR, MVK_VERSION_PATCH) -#define VK_MVK_MOLTENVK_SPEC_VERSION 12 +#define VK_MVK_MOLTENVK_SPEC_VERSION 13 #define VK_MVK_MOLTENVK_EXTENSION_NAME "VK_MVK_moltenvk" /** @@ -261,6 +261,7 @@ typedef struct { VkBool32 presentModeImmediate; /**< If true, immediate surface present mode (VK_PRESENT_MODE_IMMEDIATE_KHR), allowing a swapchain image to be presented immediately, without waiting for the vertical sync period of the display, is supported. */ VkBool32 stencilViews; /**< If true, stencil aspect views are supported through the MTLPixelFormatX24_Stencil8 and MTLPixelFormatX32_Stencil8 formats. */ VkBool32 multisampleArrayTextures; /**< If true, MTLTextureType2DMultisampleArray is supported. */ + VkBool32 samplerClampToBorder; /**< If true, the border color set when creating a sampler will be respected. */ uint32_t maxTextureDimension; /**< The maximum size of each texture dimension (width, height, or depth). */ uint32_t maxPerStageBufferCount; /**< The total number of per-stage Metal buffers available for shader uniform content and attributes. */ uint32_t maxPerStageTextureCount; /**< The total number of per-stage Metal textures available for shader uniform content. 
*/ diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdDraw.h b/MoltenVK/MoltenVK/Commands/MVKCmdDraw.h index dd1e4b73..7e585cdb 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCmdDraw.h +++ b/MoltenVK/MoltenVK/Commands/MVKCmdDraw.h @@ -21,6 +21,7 @@ #include "MVKCommand.h" #include "MVKMTLResourceBindings.h" #include "MVKVector.h" + #import class MVKDevice; diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.h b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.h index bbef5174..5689133f 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.h +++ b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.h @@ -21,6 +21,7 @@ #include "MVKCommand.h" #include "MVKVector.h" #include + class MVKCommandBuffer; class MVKPipeline; class MVKPipelineLayout; diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm index 9e688a85..5f6fd4c0 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm +++ b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm @@ -159,13 +159,8 @@ void MVKCmdPushConstants::setContent(VkPipelineLayout layout, _stageFlags = stageFlags; _offset = offset; - _pushConstants.clear(); - for( uint32_t i=0; i( pValues)[i] ); - } - //_pushConstants.resize(size); - //copy_n((char*)pValues, size, _pushConstants.begin()); + _pushConstants.resize(size); + std::copy_n((char*)pValues, size, _pushConstants.begin()); } void MVKCmdPushConstants::encode(MVKCommandEncoder* cmdEncoder) { diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h index b2f975a7..d970f0e0 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h +++ b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h @@ -403,7 +403,7 @@ protected: uint32_t _renderSubpassIndex; VkRect2D _renderArea; MVKActivatedQueries* _pActivatedQueries; - std::vector _clearValues; + MVKVector _clearValues; id _mtlComputeEncoder; MVKCommandUse _mtlComputeEncoderUse; id _mtlBlitEncoder; diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm 
b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm index c0d61a3a..d36819b6 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm +++ b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm @@ -225,13 +225,7 @@ void MVKCommandEncoder::beginRenderpass(VkSubpassContents subpassContents, _renderArea = renderArea; _isRenderingEntireAttachment = (mvkVkOffset2DsAreEqual(_renderArea.offset, {0,0}) && mvkVkExtent2DsAreEqual(_renderArea.extent, _framebuffer->getExtent2D())); - - _clearValues.clear(); - for( auto cv : *clearValues ) - { - _clearValues.push_back( cv ); - } - //_clearValues.assign(clearValues->begin(), clearValues->end()); + _clearValues.assign(clearValues->begin(), clearValues->end()); setSubpass(subpassContents, 0); } diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm index 718653f0..f50843fb 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm +++ b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm @@ -113,11 +113,7 @@ void MVKScissorCommandEncoderState::setScissors(const MVKVector void MVKScissorCommandEncoderState::encodeImpl() { MVKAssert(!_mtlScissors.empty(), "Must specify at least one scissor rect"); - MVKVector clippedScissors; - for ( const auto &scissor : _mtlScissors) - { - clippedScissors.emplace_back( scissor ); - } + auto clippedScissors(_mtlScissors); std::for_each(clippedScissors.begin(), clippedScissors.end(), [this](MTLScissorRect& scissor) { scissor = _cmdEncoder->clipToRenderArea(scissor); }); diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandPipelineStateFactoryShaderSource.h b/MoltenVK/MoltenVK/Commands/MVKCommandPipelineStateFactoryShaderSource.h index 9da1788d..46ac6b68 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCommandPipelineStateFactoryShaderSource.h +++ b/MoltenVK/MoltenVK/Commands/MVKCommandPipelineStateFactoryShaderSource.h @@ -27,15 +27,6 @@ static NSString* _MVKStaticCmdShaderSource = @" #include \n\ using namespace metal; \n\ \n\ -typedef 
struct { \n\ - float4 a_position [[attribute(0)]]; \n\ -} AttributesPos; \n\ - \n\ -typedef struct { \n\ - float4 v_position [[position]]; \n\ - uint layer%s; \n\ -} VaryingsPos; \n\ - \n\ typedef struct { \n\ float2 a_position [[attribute(0)]]; \n\ float2 a_texCoord [[attribute(1)]]; \n\ @@ -53,18 +44,6 @@ vertex VaryingsPosTex vtxCmdBlitImage(AttributesPosTex attributes [[stage_in]]) return varyings; \n\ } \n\ \n\ -typedef struct { \n\ - float4 colors[9]; \n\ -} ClearColorsIn; \n\ - \n\ -vertex VaryingsPos vtxCmdClearAttachments(AttributesPos attributes [[stage_in]], \n\ - constant ClearColorsIn& ccIn [[buffer(0)]]) { \n\ - VaryingsPos varyings; \n\ - varyings.v_position = float4(attributes.a_position.x, -attributes.a_position.y, ccIn.colors[8].r, 1.0); \n\ - varyings.layer = uint(attributes.a_position.w); \n\ - return varyings; \n\ -} \n\ - \n\ typedef struct { \n\ uint32_t srcOffset; \n\ uint32_t dstOffset; \n\ @@ -93,11 +72,3 @@ kernel void cmdFillBuffer(device uint32_t* dst [[ buffer(0) ]], \n\ "; -/** Returns MSL shader source code containing static functions to be used for various Vulkan commands. */ -static inline NSString* mvkStaticCmdShaderSource(MVKDevice* device) { - const char* rtaiStr = device->_pMetalFeatures->layeredRendering ? 
" [[render_target_array_index]]" : ""; - return [NSString stringWithFormat: _MVKStaticCmdShaderSource, rtaiStr]; -} - - - diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.h b/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.h index 67cfb6bb..274f2c36 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.h +++ b/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.h @@ -324,6 +324,7 @@ protected: void initMTLLibrary(); void initImageDeviceMemory(); id getBlitFragFunction(MVKRPSKeyBlitImg& blitKey); + id getClearVertFunction(MVKRPSKeyClearAtt& attKey); id getClearFragFunction(MVKRPSKeyClearAtt& attKey); NSString* getMTLFormatTypeString(MTLPixelFormat mtlPixFmt); id getFunctionNamed(const char* funcName); diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.mm b/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.mm index bfbe8e4e..91602a2c 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.mm +++ b/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.mm @@ -87,7 +87,7 @@ id MVKCommandResourceFactory::newCmdBlitImageMTLSamplerState(MT id MVKCommandResourceFactory::newCmdClearMTLRenderPipelineState(MVKRPSKeyClearAtt& attKey) { MTLRenderPipelineDescriptor* plDesc = [[[MTLRenderPipelineDescriptor alloc] init] autorelease]; plDesc.label = @"CmdClearAttachments"; - plDesc.vertexFunction = getFunctionNamed("vtxCmdClearAttachments"); + plDesc.vertexFunction = getClearVertFunction(attKey); plDesc.fragmentFunction = getClearFragFunction(attKey); plDesc.sampleCount = attKey.mtlSampleCount; plDesc.inputPrimitiveTopologyMVK = MTLPrimitiveTopologyClassTriangle; @@ -176,6 +176,43 @@ id MVKCommandResourceFactory::getBlitFragFunction(MVKRPSKeyBlitImg& return [mtlFunc autorelease]; } +id MVKCommandResourceFactory::getClearVertFunction(MVKRPSKeyClearAtt& attKey) { + id mtlFunc = nil; + bool allowLayers = _device->_pMetalFeatures->layeredRendering && (attKey.mtlSampleCount == 1 || 
_device->_pMetalFeatures->multisampleArrayTextures); + @autoreleasepool { + NSMutableString* msl = [NSMutableString stringWithCapacity: (2 * KIBI) ]; + [msl appendLineMVK: @"#include "]; + [msl appendLineMVK: @"using namespace metal;"]; + [msl appendLineMVK]; + [msl appendLineMVK: @"typedef struct {"]; + [msl appendLineMVK: @" float4 a_position [[attribute(0)]];"]; + [msl appendLineMVK: @"} AttributesPos;"]; + [msl appendLineMVK]; + [msl appendLineMVK: @"typedef struct {"]; + [msl appendLineMVK: @" float4 colors[9];"]; + [msl appendLineMVK: @"} ClearColorsIn;"]; + [msl appendLineMVK]; + [msl appendLineMVK: @"typedef struct {"]; + [msl appendLineMVK: @" float4 v_position [[position]];"]; + [msl appendFormat: @" uint layer%s;", allowLayers ? " [[render_target_array_index]]" : ""]; + [msl appendLineMVK: @"} VaryingsPos;"]; + [msl appendLineMVK]; + + NSString* funcName = @"vertClear"; + [msl appendFormat: @"vertex VaryingsPos %@(AttributesPos attributes [[stage_in]], constant ClearColorsIn& ccIn [[buffer(0)]]) {", funcName]; + [msl appendLineMVK]; + [msl appendLineMVK: @" VaryingsPos varyings;"]; + [msl appendLineMVK: @" varyings.v_position = float4(attributes.a_position.x, -attributes.a_position.y, ccIn.colors[8].r, 1.0);"]; + [msl appendLineMVK: @" varyings.layer = uint(attributes.a_position.w);"]; + [msl appendLineMVK: @" return varyings;"]; + [msl appendLineMVK: @"}"]; + + mtlFunc = newMTLFunction(msl, funcName); +// MVKLogDebug("\n%s", msl.UTF8String); + } + return [mtlFunc autorelease]; +} + id MVKCommandResourceFactory::getClearFragFunction(MVKRPSKeyClearAtt& attKey) { id mtlFunc = nil; @autoreleasepool { @@ -368,7 +405,7 @@ void MVKCommandResourceFactory::initMTLLibrary() { uint64_t startTime = _device->getPerformanceTimestamp(); @autoreleasepool { NSError* err = nil; - _mtlLibrary = [getMTLDevice() newLibraryWithSource: mvkStaticCmdShaderSource(_device) + _mtlLibrary = [getMTLDevice() newLibraryWithSource: _MVKStaticCmdShaderSource options: 
getDevice()->getMTLCompileOptions() error: &err]; // retained MVKAssert( !err, "Could not compile command shaders %s (code %li) %s", err.localizedDescription.UTF8String, (long)err.code, err.localizedFailureReason.UTF8String); diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm index 384bff85..625dda8a 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm @@ -562,6 +562,7 @@ void MVKPhysicalDevice::initMetalFeatures() { _metalFeatures.dynamicMTLBuffers = true; _metalFeatures.shaderSpecialization = true; _metalFeatures.stencilViews = true; + _metalFeatures.samplerClampToBorder = true; _metalFeatures.maxMTLBufferSize = (1 * GIBI); } diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm index 5d3b1285..5c311299 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm @@ -931,6 +931,20 @@ MTLSamplerDescriptor* MVKSampler::getMTLSamplerDescriptor(const VkSamplerCreateI mtlSampDesc.compareFunctionMVK = (pCreateInfo->compareEnable ? 
mvkMTLCompareFunctionFromVkCompareOp(pCreateInfo->compareOp) : MTLCompareFunctionNever); +#if MVK_MACOS + mtlSampDesc.borderColorMVK = mvkMTLSamplerBorderColorFromVkBorderColor(pCreateInfo->borderColor); + if (_device->getPhysicalDevice()->getMetalFeatures()->samplerClampToBorder) { + if (pCreateInfo->addressModeU == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER) { + mtlSampDesc.sAddressMode = MTLSamplerAddressModeClampToBorderColor; + } + if (pCreateInfo->addressModeV == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER) { + mtlSampDesc.tAddressMode = MTLSamplerAddressModeClampToBorderColor; + } + if (pCreateInfo->addressModeW == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER) { + mtlSampDesc.rAddressMode = MTLSamplerAddressModeClampToBorderColor; + } + } +#endif return [mtlSampDesc autorelease]; } diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm index 2851e1c4..49692a25 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm @@ -108,9 +108,6 @@ void MVKPipelineLayout::populateShaderConverterContext(SPIRVToMSLConverterContex kPushConstDescSet, kPushConstBinding); - _auxBufferIndex.vertex = _pushConstantsMTLResourceIndexes.vertexStage.bufferIndex + 1; - _auxBufferIndex.fragment = _pushConstantsMTLResourceIndexes.fragmentStage.bufferIndex + 1; - _auxBufferIndex.compute = _pushConstantsMTLResourceIndexes.computeStage.bufferIndex + 1; } MVKPipelineLayout::MVKPipelineLayout(MVKDevice* device, @@ -140,6 +137,11 @@ MVKPipelineLayout::MVKPipelineLayout(MVKDevice* device, for (uint32_t i = 0; i < pCreateInfo->pushConstantRangeCount; i++) { _pushConstants.push_back(pCreateInfo->pPushConstantRanges[i]); } + + // Set auxiliary buffer offsets + _auxBufferIndex.vertex = _pushConstantsMTLResourceIndexes.vertexStage.bufferIndex + 1; + _auxBufferIndex.fragment = _pushConstantsMTLResourceIndexes.fragmentStage.bufferIndex + 1; + _auxBufferIndex.compute = 
_pushConstantsMTLResourceIndexes.computeStage.bufferIndex + 1; } diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKRenderPass.h b/MoltenVK/MoltenVK/GPUObjects/MVKRenderPass.h index 74495507..34cbea6e 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKRenderPass.h +++ b/MoltenVK/MoltenVK/GPUObjects/MVKRenderPass.h @@ -19,6 +19,7 @@ #pragma once #include "MVKDevice.h" +#include "MVKVector.h" #include #import @@ -56,7 +57,7 @@ public: */ void populateMTLRenderPassDescriptor(MTLRenderPassDescriptor* mtlRPDesc, MVKFramebuffer* framebuffer, - std::vector& clearValues, + MVKVector& clearValues, bool isRenderingEntireAttachment); /** @@ -64,7 +65,7 @@ public: * when the render area is smaller than the full framebuffer size. */ void populateClearAttachments(std::vector& clearAtts, - std::vector& clearValues); + MVKVector& clearValues); /** Constructs an instance for the specified parent renderpass. */ MVKRenderSubpass(MVKRenderPass* renderPass, const VkSubpassDescription* pCreateInfo); diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKRenderPass.mm b/MoltenVK/MoltenVK/GPUObjects/MVKRenderPass.mm index 0a63201c..b695ba5a 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKRenderPass.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKRenderPass.mm @@ -67,7 +67,7 @@ VkSampleCountFlagBits MVKRenderSubpass::getSampleCount() { void MVKRenderSubpass::populateMTLRenderPassDescriptor(MTLRenderPassDescriptor* mtlRPDesc, MVKFramebuffer* framebuffer, - vector& clearValues, + MVKVector& clearValues, bool isRenderingEntireAttachment) { // Populate the Metal color attachments uint32_t caCnt = getColorAttachmentCount(); @@ -157,7 +157,7 @@ void MVKRenderSubpass::populateMTLRenderPassDescriptor(MTLRenderPassDescriptor* } void MVKRenderSubpass::populateClearAttachments(vector& clearAtts, - vector& clearValues) { + MVKVector& clearValues) { VkClearAttachment cAtt; uint32_t attIdx; diff --git a/MoltenVK/MoltenVK/OS/MTLSamplerDescriptor+MoltenVK.h b/MoltenVK/MoltenVK/OS/MTLSamplerDescriptor+MoltenVK.h index ee55dcd8..01feefea 
100644 --- a/MoltenVK/MoltenVK/OS/MTLSamplerDescriptor+MoltenVK.h +++ b/MoltenVK/MoltenVK/OS/MTLSamplerDescriptor+MoltenVK.h @@ -32,4 +32,13 @@ */ @property(nonatomic, readwrite) MTLCompareFunction compareFunctionMVK; +/** + * Replacement for the borderColor property. + * + * This property allows support under all OS versions. Delegates to the borderColor + * property if it is available. Otherwise, returns MTLSamplerBorderColorTransparentBlack when read and + * does nothing when set. + */ +@property(nonatomic, readwrite) /*MTLSamplerBorderColor*/ NSUInteger borderColorMVK; + @end diff --git a/MoltenVK/MoltenVK/OS/MTLSamplerDescriptor+MoltenVK.m b/MoltenVK/MoltenVK/OS/MTLSamplerDescriptor+MoltenVK.m index 87c6ea08..4f02de6d 100644 --- a/MoltenVK/MoltenVK/OS/MTLSamplerDescriptor+MoltenVK.m +++ b/MoltenVK/MoltenVK/OS/MTLSamplerDescriptor+MoltenVK.m @@ -30,4 +30,17 @@ if ( [self respondsToSelector: @selector(setCompareFunction:)] ) { self.compareFunction = cmpFunc; } } +-(NSUInteger) borderColorMVK { +#if MVK_MACOS + if ( [self respondsToSelector: @selector(borderColor)] ) { return self.borderColor; } +#endif + return /*MTLSamplerBorderColorTransparentBlack*/ 0; +} + +-(void) setBorderColorMVK: (NSUInteger) color { +#if MVK_MACOS + if ( [self respondsToSelector: @selector(setBorderColor:)] ) { self.borderColor = (MTLSamplerBorderColor) color; } +#endif +} + @end diff --git a/MoltenVK/MoltenVK/Utility/MVKVector.h b/MoltenVK/MoltenVK/Utility/MVKVector.h index 93afc744..df0b8a99 100755 --- a/MoltenVK/MoltenVK/Utility/MVKVector.h +++ b/MoltenVK/MoltenVK/Utility/MVKVector.h @@ -18,6 +18,16 @@ #pragma once +// +// in case MVKVector should use std::vector +// +#if 0 + +template +using MVKVector = std::vector; + +#else + // // a simple std::vector like container with a configurable extra stack space // this class supports just the necessary members to be compatible with MoltenVK @@ -55,7 +65,12 @@ public: return &vector->alc.ptr[index]; } - operator Type*( ) const
&operator*() const + { + return vector->alc.ptr[index]; + } + + operator Type*() const { return &vector->alc.ptr[index]; } @@ -91,11 +106,16 @@ public: return &vector->alc.ptr[index]; } - operator Type*( ) const + Type &operator*() const + { + return vector->alc.ptr[index]; + } + + operator Type*() const { return &vector->alc.ptr[index]; } - + bool operator==( const reverse_iterator &it ) const { return vector == it.vector && index == it.index; @@ -216,7 +236,7 @@ public: } else { - alc.destruct_all(); + alc.template destruct_all(); } for( size_t i = 0; i < n; ++i ) @@ -369,6 +389,18 @@ public: alc.num_elements_used = new_size; } + template + void assign( InputIterator first, InputIterator last ) + { + clear(); + + while( first != last ) + { + emplace_back( *first ); + ++first; + } + } + void resize( const size_t new_size, const Type t = { } ) { if( new_size == alc.num_elements_used ) @@ -492,5 +524,5 @@ public: } }; - +#endif diff --git a/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h b/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h index aaa59d29..442e0aca 100755 --- a/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h +++ b/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h @@ -74,7 +74,6 @@ public: { } - template typename std::enable_if< !std::is_trivially_destructible::value >::type destruct_all() { diff --git a/MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm b/MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm index d6505385..b844848a 100644 --- a/MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm +++ b/MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm @@ -941,6 +941,20 @@ MVK_PUBLIC_SYMBOL MTLSamplerAddressMode mvkMTLSamplerAddressModeFromVkSamplerAdd } } +#if MVK_MACOS +MVK_PUBLIC_SYMBOL MTLSamplerBorderColor mvkMTLSamplerBorderColorFromVkBorderColor(VkBorderColor vkColor) { + switch (vkColor) { + case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK: return MTLSamplerBorderColorTransparentBlack; + case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK: return MTLSamplerBorderColorTransparentBlack; + case 
VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK: return MTLSamplerBorderColorOpaqueBlack; + case VK_BORDER_COLOR_INT_OPAQUE_BLACK: return MTLSamplerBorderColorOpaqueBlack; + case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE: return MTLSamplerBorderColorOpaqueWhite; + case VK_BORDER_COLOR_INT_OPAQUE_WHITE: return MTLSamplerBorderColorOpaqueWhite; + default: return MTLSamplerBorderColorTransparentBlack; + } +} +#endif + MVK_PUBLIC_SYMBOL MTLSamplerMinMagFilter mvkMTLSamplerMinMagFilterFromVkFilter(VkFilter vkFilter) { switch (vkFilter) { case VK_FILTER_NEAREST: return MTLSamplerMinMagFilterNearest; From ffc74f3673d5c3fd2168ea07e2c3739e90d587fb Mon Sep 17 00:00:00 2001 From: aerofly Date: Sat, 15 Dec 2018 18:21:13 +0100 Subject: [PATCH 12/14] Update SPIRVToMSLConverter.cpp --- .../MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.cpp b/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.cpp index dfe09659..ee6be256 100644 --- a/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.cpp +++ b/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.cpp @@ -237,7 +237,7 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverter::convert(SPIRVToMSLConverterContext& mslOpts.aux_buffer_index = context.options.auxBufferIndex; mslOpts.enable_point_size_builtin = context.options.isRenderingPoints; mslOpts.disable_rasterization = context.options.isRasterizationDisabled; - mslOpts.swizzle_texture_samples = false; + mslOpts.swizzle_texture_samples = true; pMSLCompiler->set_msl_options(mslOpts); auto scOpts = pMSLCompiler->get_common_options(); From ef21f2488a2a21f0373ce8db6d7a97b2d15865b7 Mon Sep 17 00:00:00 2001 From: aerofly Date: Sat, 15 Dec 2018 18:36:07 +0100 Subject: [PATCH 13/14] Merge fork with master MoltenVK again --- MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm | 2 +- .../Commands/MVKCommandEncodingPool.mm | 2 +- 
.../MoltenVK/GPUObjects/MVKDescriptorSet.h | 1 + .../MoltenVK/GPUObjects/MVKDescriptorSet.mm | 15 +++++++++++---- MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm | 19 +++++++++++-------- MoltenVK/MoltenVK/GPUObjects/MVKImage.mm | 4 ++++ MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h | 4 ++-- MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm | 7 +++++++ 8 files changed, 38 insertions(+), 16 deletions(-) diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm index 5f6fd4c0..01f141c9 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm +++ b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm @@ -160,7 +160,7 @@ void MVKCmdPushConstants::setContent(VkPipelineLayout layout, _offset = offset; _pushConstants.resize(size); - std::copy_n((char*)pValues, size, _pushConstants.begin()); + std::copy_n((char*)pValues, size, _pushConstants.begin()); } void MVKCmdPushConstants::encode(MVKCommandEncoder* cmdEncoder) { diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandEncodingPool.mm b/MoltenVK/MoltenVK/Commands/MVKCommandEncodingPool.mm index b1d94620..f3447d43 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCommandEncodingPool.mm +++ b/MoltenVK/MoltenVK/Commands/MVKCommandEncodingPool.mm @@ -132,7 +132,7 @@ void MVKCommandEncodingPool::destroyMetalResources() { for (auto& pair : _mtlDepthStencilStates) { [pair.second release]; } _mtlDepthStencilStates.clear(); - for (auto& pair : _transferImages) { pair.second->destroy(); } + for (auto& pair : _transferImages) { _device->destroyImage(pair.second, nullptr); } _transferImages.clear(); [_cmdBlitImageLinearMTLSamplerState release]; diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h index 703e3fd8..47ce94a8 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h +++ b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h @@ -167,6 +167,7 @@ protected: friend class MVKDescriptorSet; MVKVector _bindings; + std::unordered_map _bindingToIndex; 
MVKShaderResourceBinding _mtlResourceCounts; bool _isPushDescriptorLayout : 1; }; diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm index 7890672f..0cd71f3d 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm @@ -547,19 +547,22 @@ void MVKDescriptorSetLayout::pushDescriptorSet(MVKCommandEncoder* cmdEncoder, if (!_isPushDescriptorLayout) return; for (const VkWriteDescriptorSet& descWrite : descriptorWrites) { - uint32_t bindIdx = descWrite.dstBinding; + uint32_t dstBinding = descWrite.dstBinding; uint32_t dstArrayElement = descWrite.dstArrayElement; uint32_t descriptorCount = descWrite.descriptorCount; const VkDescriptorImageInfo* pImageInfo = descWrite.pImageInfo; const VkDescriptorBufferInfo* pBufferInfo = descWrite.pBufferInfo; const VkBufferView* pTexelBufferView = descWrite.pTexelBufferView; + if (!_bindingToIndex.count(dstBinding)) continue; // Note: This will result in us walking off the end of the array // in case there are too many updates... but that's ill-defined anyway. 
- for (; descriptorCount; bindIdx++) { + for (; descriptorCount; dstBinding++) { + if (!_bindingToIndex.count(dstBinding)) continue; size_t stride; const void* pData = getWriteParameters(descWrite.descriptorType, pImageInfo, pBufferInfo, pTexelBufferView, stride); uint32_t descriptorsPushed = 0; + uint32_t bindIdx = _bindingToIndex[dstBinding]; _bindings[bindIdx].push(cmdEncoder, dstArrayElement, descriptorCount, descriptorsPushed, descWrite.descriptorType, stride, pData, dslMTLRezIdxOffsets); @@ -580,14 +583,17 @@ void MVKDescriptorSetLayout::pushDescriptorSet(MVKCommandEncoder* cmdEncoder, return; for (uint32_t i = 0; i < descUpdateTemplate->getNumberOfEntries(); i++) { const VkDescriptorUpdateTemplateEntryKHR* pEntry = descUpdateTemplate->getEntry(i); - uint32_t bindIdx = pEntry->dstBinding; + uint32_t dstBinding = pEntry->dstBinding; uint32_t dstArrayElement = pEntry->dstArrayElement; uint32_t descriptorCount = pEntry->descriptorCount; const void* pCurData = (const char*)pData + pEntry->offset; + if (!_bindingToIndex.count(dstBinding)) continue; // Note: This will result in us walking off the end of the array // in case there are too many updates... but that's ill-defined anyway. 
- for (; descriptorCount; bindIdx++) { + for (; descriptorCount; dstBinding++) { + if (!_bindingToIndex.count(dstBinding)) continue; uint32_t descriptorsPushed = 0; + uint32_t bindIdx = _bindingToIndex[dstBinding]; _bindings[bindIdx].push(cmdEncoder, dstArrayElement, descriptorCount, descriptorsPushed, pEntry->descriptorType, pEntry->stride, pCurData, dslMTLRezIdxOffsets); @@ -612,6 +618,7 @@ MVKDescriptorSetLayout::MVKDescriptorSetLayout(MVKDevice* device, _bindings.reserve(pCreateInfo->bindingCount); for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) { _bindings.emplace_back(this, &pCreateInfo->pBindings[i]); + _bindingToIndex[pCreateInfo->pBindings[i].binding] = i; setConfigurationResult(_bindings.back().getConfigurationResult()); } } diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm index 625dda8a..eca29db2 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm @@ -208,8 +208,9 @@ VkResult MVKPhysicalDevice::getImageFormatProperties(VkFormat format, if (tiling == VK_IMAGE_TILING_LINEAR) { return VK_ERROR_FORMAT_NOT_SUPPORTED; } - // Metal does not allow compressed formats on 1D textures - if (mvkFormatTypeFromVkFormat(format) == kMVKFormatNone) { + // Metal does not allow compressed or depth/stencil formats on 1D textures + if (mvkFormatTypeFromVkFormat(format) == kMVKFormatDepthStencil || + mvkFormatTypeFromVkFormat(format) == kMVKFormatNone) { return VK_ERROR_FORMAT_NOT_SUPPORTED; } maxExt.width = pLimits->maxImageDimension1D; @@ -255,10 +256,11 @@ VkResult MVKPhysicalDevice::getImageFormatProperties(VkFormat format, if (tiling == VK_IMAGE_TILING_LINEAR) { return VK_ERROR_FORMAT_NOT_SUPPORTED; } - // Metal does not allow compressed formats on 3D textures - if (mvkFormatTypeFromVkFormat(format) == kMVKFormatNone) { - return VK_ERROR_FORMAT_NOT_SUPPORTED; - } + // Metal does not allow compressed or depth/stencil formats on 3D textures + if 
(mvkFormatTypeFromVkFormat(format) == kMVKFormatDepthStencil || + mvkFormatTypeFromVkFormat(format) == kMVKFormatNone) { + return VK_ERROR_FORMAT_NOT_SUPPORTED; + } maxExt.width = pLimits->maxImageDimension3D; maxExt.height = pLimits->maxImageDimension3D; maxExt.depth = pLimits->maxImageDimension3D; @@ -270,8 +272,9 @@ VkResult MVKPhysicalDevice::getImageFormatProperties(VkFormat format, if (tiling == VK_IMAGE_TILING_LINEAR) { return VK_ERROR_FORMAT_NOT_SUPPORTED; } - // Metal does not allow compressed formats on anything but 2D textures - if (mvkFormatTypeFromVkFormat(format) == kMVKFormatNone) { + // Metal does not allow compressed or depth/stencil formats on anything but 2D textures + if (mvkFormatTypeFromVkFormat(format) == kMVKFormatDepthStencil || + mvkFormatTypeFromVkFormat(format) == kMVKFormatNone) { return VK_ERROR_FORMAT_NOT_SUPPORTED; } maxExt = { 1, 1, 1}; diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm index 5c311299..6c2db246 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm @@ -21,6 +21,7 @@ #include "MVKCommandBuffer.h" #include "mvk_datatypes.h" #include "MVKFoundation.h" +#include "MVKEnvironment.h" #include "MVKLogging.h" #import "MTLTextureDescriptor+MoltenVK.h" #import "MTLSamplerDescriptor+MoltenVK.h" @@ -501,6 +502,9 @@ MVKImage::MVKImage(MVKDevice* device, const VkImageCreateInfo* pCreateInfo) : MV if ( (pCreateInfo->imageType != VK_IMAGE_TYPE_2D) && (mvkFormatTypeFromVkFormat(pCreateInfo->format) == kMVKFormatNone) ) { setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : Under Metal, compressed formats may only be used with 2D images.")); } + if ( (pCreateInfo->imageType != VK_IMAGE_TYPE_2D) && (mvkFormatTypeFromVkFormat(pCreateInfo->format) == kMVKFormatDepthStencil) ) { + setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : Under Metal, depth/stencil 
formats may only be used with 2D images.")); + } // Adjust the info components to be compatible with Metal, then use the modified versions // to set other config info. Vulkan allows unused extent dimensions to be zero, but Metal diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h index 9158e6c0..47618250 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h +++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h @@ -158,8 +158,8 @@ protected: VkPipelineRasterizationStateCreateInfo _rasterInfo; VkPipelineDepthStencilStateCreateInfo _depthStencilInfo; - MVKVector _mtlViewports; - MVKVector _mtlScissors; + MVKVector _mtlViewports; + MVKVector _mtlScissors; id _mtlPipelineState; MTLCullMode _mtlCullMode; diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm index 49692a25..74c496bc 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm @@ -388,6 +388,13 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::getMTLRenderPipelineDescriptor uint32_t vbIdx = _device->getMetalBufferIndexForVertexAttributeBinding(pVKVB->binding); if (shaderContext.isVertexBufferUsed(vbIdx)) { MTLVertexBufferLayoutDescriptor* vbDesc = plDesc.vertexDescriptor.layouts[vbIdx]; + // Vulkan allows any stride, but Metal only allows multiples of 4. + // TODO: We should try to expand the buffer to the required alignment + // in that case. + if ((pVKVB->stride % 4) != 0) { + setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_INITIALIZATION_FAILED, "Under Metal, vertex buffer strides must be aligned to four bytes.")); + return nil; + } vbDesc.stride = (pVKVB->stride == 0) ? sizeof(simd::float4) : pVKVB->stride; // Vulkan allows zero stride but Metal doesn't. 
Default to float4 vbDesc.stepFunction = mvkMTLVertexStepFunctionFromVkVertexInputRate(pVKVB->inputRate); vbDesc.stepRate = 1; From febeb9d73802f2f0016d8ce8d1c20439e3a8dc94 Mon Sep 17 00:00:00 2001 From: aerofly Date: Sat, 15 Dec 2018 18:59:58 +0100 Subject: [PATCH 14/14] Merge fork with master MoltenVK again --- MoltenVK/MoltenVK/GPUObjects/MVKQueue.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKQueue.h b/MoltenVK/MoltenVK/GPUObjects/MVKQueue.h index 8680d65b..d3e6a2f8 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKQueue.h +++ b/MoltenVK/MoltenVK/GPUObjects/MVKQueue.h @@ -206,7 +206,7 @@ protected: void finish(); MVKVector _cmdBuffers; - MVKVector _signalSemaphores; + MVKVector _signalSemaphores; MVKFence* _fence; MVKCommandUse _cmdBuffUse; id _activeMTLCommandBuffer;