diff --git a/Docs/Whats_New.md b/Docs/Whats_New.md
index 250455a3..7f7015d3 100644
--- a/Docs/Whats_New.md
+++ b/Docs/Whats_New.md
@@ -16,8 +16,11 @@ Copyright (c) 2015-2021 [The Brenwill Workshop Ltd.](http://www.brenwill.com)
 MoltenVK 1.1.3
 --------------
 
-Released TBD
+Released 2021/04/26
 
+- Add beta support for using Metal argument buffers for shader resources on _macOS_, by setting
+  the `MVK_CONFIG_USE_METAL_ARGUMENT_BUFFERS` environment variable (disabled by default).
+  Available on _macOS 10.16 (Big Sur)_ or later, and on earlier _macOS_ versions on _Intel_ GPUs.
 - Add support for `HDR10` colorspace via `VK_COLOR_SPACE_HDR10_HLG_EXT` and `VK_COLOR_SPACE_HDR10_ST2084_EXT`.
 - Always explicitly set `CAMetalLayer` colorspace property based on _Vulkan_ parameters, and don't rely on _Metal_ default values.
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm
index 00a2f450..f0ab0ea5 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm
@@ -498,12 +498,12 @@ void MVKResourcesCommandEncoderState::encodeMetalArgumentBuffer(MVKShaderStage s
         auto* descSet = _boundDescriptorSets[dsIdx];
         if ( !descSet ) { continue; }
 
-        auto* dsLayout = descSet->getLayout();
-
         id<MTLArgumentEncoder> mtlArgEncoder = nil;
         id<MTLBuffer> mtlArgBuffer = nil;
         NSUInteger metalArgBufferOffset = 0;
-        if (_cmdEncoder->isUsingDescriptorSetMetalArgumentBuffers()) {
+
+        auto* dsLayout = descSet->getLayout();
+        if (dsLayout->isUsingDescriptorSetMetalArgumentBuffers()) {
             mtlArgEncoder = dsLayout->getMTLArgumentEncoder().getMTLArgumentEncoder();
             mtlArgBuffer = descSet->getMetalArgumentBuffer();
             metalArgBufferOffset = descSet->getMetalArgumentBufferOffset();
@@ -518,13 +518,14 @@ void MVKResourcesCommandEncoderState::encodeMetalArgumentBuffer(MVKShaderStage s
         auto& argBuffDirtyDescs = descSet->getMetalArgumentBufferDirtyDescriptors();
         auto& resourceUsageDirtyDescs = _metalUsageDirtyDescriptors[dsIdx];
+        auto& shaderBindingUsage = pipeline->getDescriptorBindingUse(dsIdx, stage);
 
         bool mtlArgEncAttached = false;
         bool shouldBindArgBuffToStage = false;
         uint32_t dslBindCnt = dsLayout->getBindingCount();
         for (uint32_t dslBindIdx = 0; dslBindIdx < dslBindCnt; dslBindIdx++) {
             auto* dslBind = dsLayout->getBindingAt(dslBindIdx);
-            if (dslBind->getApplyToStage(stage)) {
+            if (dslBind->getApplyToStage(stage) && shaderBindingUsage.getBit(dslBindIdx)) {
                 shouldBindArgBuffToStage = true;
                 uint32_t elemCnt = dslBind->getDescriptorCount(descSet);
                 for (uint32_t elemIdx = 0; elemIdx < elemCnt; elemIdx++) {
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h
index 0af15ca1..c8ec7671 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h
@@ -36,18 +36,18 @@ class MVKResourcesCommandEncoderState;
 
 /** Holds and manages the lifecycle of a MTLArgumentEncoder. The encoder can only be set once. */
 struct MVKMTLArgumentEncoder {
+    NSUInteger mtlArgumentEncoderSize = 0;
+
     id<MTLArgumentEncoder> getMTLArgumentEncoder() { return _mtlArgumentEncoder; }
-    NSUInteger getMTLArgumentEncoderSize() { return _mtlArgumentEncoderSize; }
 
     void init(id<MTLArgumentEncoder> mtlArgEnc) {
         if (_mtlArgumentEncoder) { return; }
         _mtlArgumentEncoder = mtlArgEnc;    // takes ownership
-        _mtlArgumentEncoderSize = mtlArgEnc.encodedLength;
+        mtlArgumentEncoderSize = mtlArgEnc.encodedLength;
     }
 
     ~MVKMTLArgumentEncoder() { [_mtlArgumentEncoder release]; }
 
 private:
     id<MTLArgumentEncoder> _mtlArgumentEncoder = nil;
-    NSUInteger _mtlArgumentEncoderSize = 0;
 };
 
 /** Represents a Vulkan descriptor set layout. */
@@ -86,7 +86,16 @@ public:
     /** Populates the specified shader converter context, at the specified DSL index. */
     void populateShaderConverterContext(mvk::SPIRVToMSLConversionConfiguration& context,
                                         MVKShaderResourceBinding& dslMTLRezIdxOffsets,
-                                        uint32_t dslIndex);
+                                        uint32_t descSetIndex);
+
+    /**
+     * Populates the bindings in this descriptor set layout used by the shader.
+     * Returns false if the shader does not use the descriptor set at all.
+     */
+    bool populateBindingUse(MVKBitArray& bindingUse,
+                            mvk::SPIRVToMSLConversionConfiguration& context,
+                            MVKShaderStage stage,
+                            uint32_t descSetIndex);
 
     /** Returns the number of bindings. */
     uint32_t getBindingCount() { return (uint32_t)_bindings.size(); }
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm
index 8afe563b..929c5a45 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm
@@ -171,18 +171,43 @@ void MVKDescriptorSetLayout::pushDescriptorSet(MVKCommandEncoder* cmdEncoder,
 
 void MVKDescriptorSetLayout::populateShaderConverterContext(mvk::SPIRVToMSLConversionConfiguration& context,
                                                             MVKShaderResourceBinding& dslMTLRezIdxOffsets,
-                                                            uint32_t dslIndex) {
+                                                            uint32_t descSetIndex) {
     uint32_t bindCnt = (uint32_t)_bindings.size();
     for (uint32_t bindIdx = 0; bindIdx < bindCnt; bindIdx++) {
-        _bindings[bindIdx].populateShaderConverterContext(context, dslMTLRezIdxOffsets, dslIndex);
+        _bindings[bindIdx].populateShaderConverterContext(context, dslMTLRezIdxOffsets, descSetIndex);
     }
 
     // Mark if Metal argument buffers are in use, but this descriptor set layout is not using them.
     if (isUsingMetalArgumentBuffers() && !isUsingMetalArgumentBuffer()) {
-        context.discreteDescriptorSets.push_back(dslIndex);
+        context.discreteDescriptorSets.push_back(descSetIndex);
     }
 }
 
+bool MVKDescriptorSetLayout::populateBindingUse(MVKBitArray& bindingUse,
+                                                SPIRVToMSLConversionConfiguration& context,
+                                                MVKShaderStage stage,
+                                                uint32_t descSetIndex) {
+    static const spv::ExecutionModel svpExecModels[] = {
+        spv::ExecutionModelVertex,
+        spv::ExecutionModelTessellationControl,
+        spv::ExecutionModelTessellationEvaluation,
+        spv::ExecutionModelFragment,
+        spv::ExecutionModelGLCompute
+    };
+
+    bool descSetIsUsed = false;
+    uint32_t bindCnt = (uint32_t)_bindings.size();
+    bindingUse.resize(bindCnt);
+    for (uint32_t bindIdx = 0; bindIdx < bindCnt; bindIdx++) {
+        auto& dslBind = _bindings[bindIdx];
+        if (context.isResourceUsed(svpExecModels[stage], descSetIndex, dslBind.getBinding())) {
+            bindingUse.setBit(bindIdx);
+            descSetIsUsed = true;
+        }
+    }
+    return descSetIsUsed;
+}
+
 MVKDescriptorSetLayout::MVKDescriptorSetLayout(MVKDevice* device,
                                                const VkDescriptorSetLayoutCreateInfo* pCreateInfo) : MVKVulkanAPIDeviceObject(device) {
@@ -460,7 +485,7 @@ VkResult MVKDescriptorPool::allocateDescriptorSet(MVKDescriptorSetLayout* mvkDSL
                                                   uint32_t variableDescriptorCount,
                                                   VkDescriptorSet* pVKDS) {
     VkResult rslt = VK_ERROR_OUT_OF_POOL_MEMORY;
-    NSUInteger mtlArgBuffAllocSize = mvkDSL->getMTLArgumentEncoder().getMTLArgumentEncoderSize();
+    NSUInteger mtlArgBuffAllocSize = mvkDSL->getMTLArgumentEncoder().mtlArgumentEncoderSize;
     NSUInteger mtlArgBuffAlignedSize = mvkAlignByteCount(mtlArgBuffAllocSize, getDevice()->_pMetalFeatures->mtlBufferAlignment);
 
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h
index 36ef9b17..913ae015 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h
@@ -109,7 +109,10 @@ public:
     uint32_t getDescriptorSetCount() { return (uint32_t)_descriptorSetLayouts.size(); }
 
     /** Returns the number of descriptors in the descriptor set layout. */
-    uint32_t getDescriptorCount(uint32_t descSetIndex) { return _descriptorSetLayouts[descSetIndex]->getDescriptorCount(); }
+    uint32_t getDescriptorCount(uint32_t descSetIndex) { return getDescriptorSetLayout(descSetIndex)->getDescriptorCount(); }
+
+    /** Returns the descriptor set layout. */
+    MVKDescriptorSetLayout* getDescriptorSetLayout(uint32_t descSetIndex) { return _descriptorSetLayouts[descSetIndex]; }
 
     /** Returns the push constant binding info. */
     const MVKShaderResourceBinding& getPushConstantBindings() { return _pushConstantsMTLResourceIndexes; }
@@ -179,6 +182,9 @@ public:
     /** Returns the MTLArgumentEncoder for the descriptor set. */
     virtual MVKMTLArgumentEncoder& getMTLArgumentEncoder(uint32_t descSetIndex, MVKShaderStage stage) = 0;
 
+    /** Returns the array of descriptor binding use for the descriptor set. */
+    virtual MVKBitArray& getDescriptorBindingUse(uint32_t descSetIndex, MVKShaderStage stage) = 0;
+
     /** Returns the number of descriptor sets in this pipeline layout. */
     uint32_t getDescriptorSetCount() { return _descriptorSetCount; }
 
@@ -190,6 +196,10 @@ public:
 protected:
     void propagateDebugName() override {}
 
+    template<typename CreateInfo> void addMTLArgumentEncoders(MVKMTLFunction& mvkMTLFunc,
+                                                               const CreateInfo* pCreateInfo,
+                                                               SPIRVToMSLConversionConfiguration& context,
+                                                               MVKShaderStage stage);
 
     MVKPipelineCache* _pipelineCache;
     MVKShaderImplicitRezBinding _swizzleBufferIndex;
@@ -220,7 +230,11 @@ typedef std::pair<uint32_t, uint32_t> MVKZeroDivisorVertexBinding;
 
 typedef MVKSmallVector<MVKGraphicsStage, 4> MVKPiplineStages;
 
 struct MVKStagedMTLArgumentEncoders {
-    MVKMTLArgumentEncoder stages[4];
+    MVKMTLArgumentEncoder stages[4] = {};
+};
+
+struct MVKStagedDescriptorBindingUse {
+    MVKBitArray stages[4] = {};
 };
 
 /** The number of dynamic states possible in Vulkan. */
@@ -291,6 +305,9 @@ public:
     /** Returns the MTLArgumentEncoder for the descriptor set. */
     MVKMTLArgumentEncoder& getMTLArgumentEncoder(uint32_t descSetIndex, MVKShaderStage stage) override { return _mtlArgumentEncoders[descSetIndex].stages[stage]; }
 
+    /** Returns the array of descriptor binding use for the descriptor set. */
+    MVKBitArray& getDescriptorBindingUse(uint32_t descSetIndex, MVKShaderStage stage) override { return _descriptorBindingUse[descSetIndex].stages[stage]; }
+
     /** Constructs an instance for the device and parent (which may be NULL). */
     MVKGraphicsPipeline(MVKDevice* device,
                         MVKPipelineCache* pipelineCache,
@@ -326,7 +343,6 @@ protected:
     bool isRasterizationDisabled(const VkGraphicsPipelineCreateInfo* pCreateInfo);
     bool verifyImplicitBuffer(bool needsBuffer, MVKShaderImplicitRezBinding& index, MVKShaderStage stage, const char* name, uint32_t reservedBuffers);
     uint32_t getTranslatedVertexBinding(uint32_t binding, uint32_t translationOffset, uint32_t maxBinding);
-    void addMTLArgumentEncoders(MVKMTLFunction& mvkMTLFunc, MVKShaderStage stage);
 
     const VkPipelineShaderStageCreateInfo* _pVertexSS = nullptr;
     const VkPipelineShaderStageCreateInfo* _pTessCtlSS = nullptr;
@@ -342,6 +358,7 @@ protected:
     MVKSmallVector<MVKTranslatedVertexBinding> _translatedVertexBindings;
     MVKSmallVector<MVKZeroDivisorVertexBinding> _zeroDivisorVertexBindings;
     MVKSmallVector<MVKStagedMTLArgumentEncoders> _mtlArgumentEncoders;
+    MVKSmallVector<MVKStagedDescriptorBindingUse> _descriptorBindingUse;
 
     MTLComputePipelineDescriptor* _mtlTessVertexStageDesc = nil;
     id<MTLFunction> _mtlTessVertexFunctions[3] = {nil, nil, nil};
@@ -404,6 +421,9 @@ public:
     /** Returns the MTLArgumentEncoder for the descriptor set. */
     MVKMTLArgumentEncoder& getMTLArgumentEncoder(uint32_t descSetIndex, MVKShaderStage stage) override { return _mtlArgumentEncoders[descSetIndex]; }
 
+    /** Returns the array of descriptor binding use for the descriptor set. */
+    MVKBitArray& getDescriptorBindingUse(uint32_t descSetIndex, MVKShaderStage stage) override { return _descriptorBindingUse[descSetIndex]; }
+
     /** Constructs an instance for the device and parent (which may be NULL). */
     MVKComputePipeline(MVKDevice* device,
                        MVKPipelineCache* pipelineCache,
@@ -414,10 +434,10 @@ public:
 protected:
     MVKMTLFunction getMTLFunction(const VkComputePipelineCreateInfo* pCreateInfo);
-    void addMTLArgumentEncoders(MVKMTLFunction& mvkMTLFunc);
 
     id<MTLComputePipelineState> _mtlPipelineState;
     MVKSmallVector<MVKMTLArgumentEncoder> _mtlArgumentEncoders;
+    MVKSmallVector<MVKBitArray> _descriptorBindingUse;
     MTLSize _mtlThreadgroupSize;
     bool _needsSwizzleBuffer = false;
     bool _needsBufferSizeBuffer = false;
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
index c6a2ab3c..eedc8cba 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
@@ -182,6 +182,26 @@ void MVKPipeline::bindPushConstants(MVKCommandEncoder* cmdEncoder) {
     }
 }
 
+// For each descriptor set, populate the descriptor bindings used by the shader for this stage,
+// and if Metal argument encoders must be dedicated to a pipeline stage, create the encoder here.
+template<typename CreateInfo>
+void MVKPipeline::addMTLArgumentEncoders(MVKMTLFunction& mvkMTLFunc,
+                                         const CreateInfo* pCreateInfo,
+                                         SPIRVToMSLConversionConfiguration& context,
+                                         MVKShaderStage stage) {
+    if ( !isUsingMetalArgumentBuffers() ) { return; }
+
+    bool needMTLArgEnc = isUsingPipelineStageMetalArgumentBuffers();
+    auto mtlFunc = mvkMTLFunc.getMTLFunction();
+    for (uint32_t dsIdx = 0; dsIdx < _descriptorSetCount; dsIdx++) {
+        auto* dsLayout = ((MVKPipelineLayout*)pCreateInfo->layout)->getDescriptorSetLayout(dsIdx);
+        bool descSetIsUsed = dsLayout->populateBindingUse(getDescriptorBindingUse(dsIdx, stage), context, stage, dsIdx);
+        if (descSetIsUsed && needMTLArgEnc) {
+            getMTLArgumentEncoder(dsIdx, stage).init([mtlFunc newArgumentEncoderWithBufferIndex: dsIdx]);
+        }
+    }
+}
+
 MVKPipeline::MVKPipeline(MVKDevice* device, MVKPipelineCache* pipelineCache, MVKPipelineLayout* layout, MVKPipeline* parent) :
     MVKVulkanAPIDeviceObject(device),
     _pipelineCache(pipelineCache),
@@ -488,6 +508,7 @@ void MVKGraphicsPipeline::initMTLRenderPipelineState(const VkGraphicsPipelineCre
     _mtlTessVertexStageDesc = nil;
     for (uint32_t i = 0; i < 3; i++) { _mtlTessVertexFunctions[i] = nil; }
 
+    if (isUsingMetalArgumentBuffers()) { _descriptorBindingUse.resize(_descriptorSetCount); }
     if (isUsingPipelineStageMetalArgumentBuffers()) { _mtlArgumentEncoders.resize(_descriptorSetCount); }
 
     if (!isTessellationPipeline()) {
@@ -918,7 +939,7 @@ bool MVKGraphicsPipeline::addVertexShaderToPipeline(MTLRenderPipelineDescriptor*
     _needsVertexViewRangeBuffer = funcRslts.needsViewRangeBuffer;
     _needsVertexOutputBuffer = funcRslts.needsOutputBuffer;
 
-    addMTLArgumentEncoders(func, kMVKShaderStageVertex);
+    addMTLArgumentEncoders(func, pCreateInfo, shaderContext, kMVKShaderStageVertex);
 
     if (funcRslts.isRasterizationDisabled) {
         _pFragmentSS = nullptr;
@@ -990,7 +1011,7 @@ bool MVKGraphicsPipeline::addVertexShaderToPipeline(MTLComputePipelineDescriptor
         _needsVertexOutputBuffer = funcRslts.needsOutputBuffer;
     }
 
-    addMTLArgumentEncoders(func, kMVKShaderStageVertex);
+    addMTLArgumentEncoders(func, pCreateInfo, shaderContext, kMVKShaderStageVertex);
 
     // If we need the swizzle buffer and there's no place to put it, we're in serious trouble.
     if (!verifyImplicitBuffer(_needsVertexSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageVertex, "swizzle", vbCnt)) {
@@ -1049,7 +1070,7 @@ bool MVKGraphicsPipeline::addTessCtlShaderToPipeline(MTLComputePipelineDescripto
     _needsTessCtlPatchOutputBuffer = funcRslts.needsPatchOutputBuffer;
     _needsTessCtlInputBuffer = funcRslts.needsInputThreadgroupMem;
 
-    addMTLArgumentEncoders(func, kMVKShaderStageTessCtl);
+    addMTLArgumentEncoders(func, pCreateInfo, shaderContext, kMVKShaderStageTessCtl);
 
     if (!verifyImplicitBuffer(_needsTessCtlSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageTessCtl, "swizzle", kMVKTessCtlNumReservedBuffers)) {
         return false;
@@ -1105,7 +1126,7 @@ bool MVKGraphicsPipeline::addTessEvalShaderToPipeline(MTLRenderPipelineDescripto
     _needsTessEvalBufferSizeBuffer = funcRslts.needsBufferSizeBuffer;
     _needsTessEvalDynamicOffsetBuffer = funcRslts.needsDynamicOffsetBuffer;
 
-    addMTLArgumentEncoders(func, kMVKShaderStageTessEval);
+    addMTLArgumentEncoders(func, pCreateInfo, shaderContext, kMVKShaderStageTessEval);
 
     if (funcRslts.isRasterizationDisabled) {
         _pFragmentSS = nullptr;
@@ -1161,7 +1182,7 @@ bool MVKGraphicsPipeline::addFragmentShaderToPipeline(MTLRenderPipelineDescripto
     _needsFragmentDynamicOffsetBuffer = funcRslts.needsDynamicOffsetBuffer;
    _needsFragmentViewRangeBuffer = funcRslts.needsViewRangeBuffer;
 
-    addMTLArgumentEncoders(func, kMVKShaderStageFragment);
+    addMTLArgumentEncoders(func, pCreateInfo, shaderContext, kMVKShaderStageFragment);
 
     if (!verifyImplicitBuffer(_needsFragmentSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageFragment, "swizzle", 0)) {
         return false;
@@ -1664,17 +1685,6 @@ bool MVKGraphicsPipeline::isRasterizationDisabled(const VkGraphicsPipelineCreate
               (mvkMTLPrimitiveTopologyClassFromVkPrimitiveTopology(pCreateInfo->pInputAssemblyState->topology) == MTLPrimitiveTopologyClassTriangle))));
 }
 
-void MVKGraphicsPipeline::addMTLArgumentEncoders(MVKMTLFunction& mvkMTLFunc, MVKShaderStage stage) {
-    if ( !isUsingPipelineStageMetalArgumentBuffers() ) { return; }
-
-    auto mtlFunc = mvkMTLFunc.getMTLFunction();
-    for (uint32_t dsIdx = 0; dsIdx < _descriptorSetCount; dsIdx++) {
-        if (mvkMTLFunc.shaderConversionResults.isDescriptorSetUsed(dsIdx)) {
-            _mtlArgumentEncoders[dsIdx].stages[stage].init([mtlFunc newArgumentEncoderWithBufferIndex: dsIdx]);
-        }
-    }
-}
-
 MVKGraphicsPipeline::~MVKGraphicsPipeline() {
     @synchronized (getMTLDevice()) {
         [_mtlTessVertexStageDesc release];
@@ -1715,13 +1725,11 @@ MVKComputePipeline::MVKComputePipeline(MVKDevice* device,
     _mtlThreadgroupSize = func.threadGroupSize;
     _mtlPipelineState = nil;
 
+    if (isUsingMetalArgumentBuffers()) { _descriptorBindingUse.resize(_descriptorSetCount); }
     if (isUsingPipelineStageMetalArgumentBuffers()) { _mtlArgumentEncoders.resize(_descriptorSetCount); }
 
     id<MTLFunction> mtlFunc = func.getMTLFunction();
     if (mtlFunc) {
-
-        addMTLArgumentEncoders(func);
-
         MTLComputePipelineDescriptor* plDesc = [MTLComputePipelineDescriptor new];    // temp retain
         plDesc.computeFunction = mtlFunc;
         plDesc.maxTotalThreadsPerThreadgroup = _mtlThreadgroupSize.width * _mtlThreadgroupSize.height * _mtlThreadgroupSize.depth;
@@ -1805,20 +1813,11 @@ MVKMTLFunction MVKComputePipeline::getMTLFunction(const VkComputePipelineCreateI
     _needsDynamicOffsetBuffer = funcRslts.needsDynamicOffsetBuffer;
     _needsDispatchBaseBuffer = funcRslts.needsDispatchBaseBuffer;
 
+    addMTLArgumentEncoders(func, pCreateInfo, shaderContext, kMVKShaderStageCompute);
+
     return func;
 }
 
-void MVKComputePipeline::addMTLArgumentEncoders(MVKMTLFunction& mvkMTLFunc) {
-    if ( !isUsingPipelineStageMetalArgumentBuffers() ) { return; }
-
-    auto mtlFunc = mvkMTLFunc.getMTLFunction();
-    for (uint32_t dsIdx = 0; dsIdx < _descriptorSetCount; dsIdx++) {
-        if (mvkMTLFunc.shaderConversionResults.isDescriptorSetUsed(dsIdx)) {
-            _mtlArgumentEncoders[dsIdx].init([mtlFunc newArgumentEncoderWithBufferIndex: dsIdx]);
-        }
-    }
-}
-
 MVKComputePipeline::~MVKComputePipeline() {
     @synchronized (getMTLDevice()) {
         [_mtlPipelineState release];
@@ -2225,7 +2224,6 @@ namespace mvk {
     template<class Archive>
     void serialize(Archive & archive, SPIRVToMSLConversionResults& scr) {
         archive(scr.entryPoint,
-                scr.activeDescriptorSets,
                 scr.isRasterizationDisabled,
                 scr.isPositionInvariant,
                 scr.needsSwizzleBuffer,
diff --git a/MoltenVK/MoltenVK/Utility/MVKBitArray.h b/MoltenVK/MoltenVK/Utility/MVKBitArray.h
index 1730d9a3..35367a01 100755
--- a/MoltenVK/MoltenVK/Utility/MVKBitArray.h
+++ b/MoltenVK/MoltenVK/Utility/MVKBitArray.h
@@ -34,8 +34,12 @@ class MVKBitArray {
 
 public:
 
-    /** Returns the value of the bit, and optionally clears that bit if it was set. */
+    /**
+     * Returns the value of the bit, and optionally clears that bit if it was set.
+     * Returns false if bitIndex is beyond the size of this array.
+     */
     inline bool getBit(size_t bitIndex, bool shouldClear = false) {
+        if (bitIndex >= _bitCount) { return false; }
         bool val = mvkIsAnyFlagEnabled(_pSections[getIndexOfSection(bitIndex)], getSectionSetMask(bitIndex));
         if (shouldClear && val) { clearBit(bitIndex); }
         return val;
@@ -174,8 +178,11 @@ public:
     }
 
     /** Constructs an instance for the specified number of bits, and sets the initial value of all the bits. */
-    MVKBitArray(size_t size = 0, bool val = false) {
-        resize(size, val);
+    MVKBitArray(size_t size = 0, bool val = false) { resize(size, val); }
+
+    MVKBitArray(const MVKBitArray& other) {
+        resize(other._bitCount);
+        memcpy(_pSections, other._pSections, getSectionCount() * SectionByteCount);
     }
 
     ~MVKBitArray() { free(_pSections); }
diff --git a/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVReflection.h b/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVReflection.h
index 915ef522..a9305ee8 100644
--- a/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVReflection.h
+++ b/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVReflection.h
@@ -265,20 +265,5 @@ namespace mvk {
 #endif
     }
 
-    /** Given the compiler, updates a bit array with the indexes of the descriptor sets in use by the shader. */
-    template<typename C, typename Bm>
-    void getActiveDescriptorSets(C* pCompiler, Bm& activeDescSets) {
-        Bm setBit = 1;
-        activeDescSets = 0;
-        if (pCompiler) {
-            for (auto varID : pCompiler->get_active_interface_variables()) {
-                if (pCompiler->has_decoration(varID, spv::DecorationDescriptorSet)) {
-                    uint32_t descSet = pCompiler->get_decoration(varID, spv::DecorationDescriptorSet);
-                    activeDescSets |= (setBit << descSet);
-                }
-            }
-        }
-    }
-
 }
 #endif
diff --git a/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.h b/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.h
index 0def16d9..062510c0 100644
--- a/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.h
+++ b/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.h
@@ -160,6 +160,9 @@ namespace mvk {
         /** Returns whether the vertex buffer at the specified Vulkan binding is used by the shader. */
         bool isVertexBufferUsed(uint32_t binding) const { return countShaderInputsAt(binding) > 0; }
 
+        /** Returns whether the resource at the specified descriptor set binding is used by the shader. */
+        bool isResourceUsed(spv::ExecutionModel stage, uint32_t descSet, uint32_t binding) const;
+
         /** Marks all input variables and resources as being used by the shader. */
         void markAllInputsAndResourcesUsed();
@@ -221,7 +224,6 @@
     */
     typedef struct SPIRVToMSLConversionResults {
         SPIRVEntryPoint entryPoint;
-        uint32_t activeDescriptorSets = 0;
         bool isRasterizationDisabled = false;
         bool isPositionInvariant = false;
         bool needsSwizzleBuffer = false;
@@ -233,9 +235,6 @@ namespace mvk {
         bool needsDispatchBaseBuffer = false;
         bool needsViewRangeBuffer = false;
 
-        bool isDescriptorSetUsed(uint32_t descSet) {
-            return (activeDescriptorSets & ((uint32_t)1U << descSet)) != 0;
-        }
         void reset() { *this = SPIRVToMSLConversionResults(); }
 
     } SPIRVToMSLConversionResults;
diff --git a/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.mm b/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.mm
index bfe00712..2cc91379 100644
--- a/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.mm
+++ b/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.mm
@@ -181,6 +181,16 @@ MVK_PUBLIC_SYMBOL uint32_t SPIRVToMSLConversionConfiguration::countShaderInputsA
     return siCnt;
 }
 
+MVK_PUBLIC_SYMBOL bool SPIRVToMSLConversionConfiguration::isResourceUsed(ExecutionModel stage, uint32_t descSet, uint32_t binding) const {
+    for (auto& rb : resourceBindings) {
+        auto& rbb = rb.resourceBinding;
+        if (rbb.stage == stage && rbb.desc_set == descSet && rbb.binding == binding) {
+            return rb.outIsUsedByShader;
+        }
+    }
+    return false;
+}
+
 MVK_PUBLIC_SYMBOL void SPIRVToMSLConversionConfiguration::markAllInputsAndResourcesUsed() {
     for (auto& si : shaderInputs) { si.outIsUsedByShader = true; }
     for (auto& rb : resourceBindings) { rb.outIsUsedByShader = true; }
@@ -337,7 +347,6 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverter::convert(SPIRVToMSLConversionConfigur
     // Populate the shader conversion results with info from the compilation run,
     // and mark which vertex attributes and resource bindings are used by the shader
     populateEntryPoint(pMSLCompiler, context.options);
-    getActiveDescriptorSets(pMSLCompiler, _shaderConversionResults.activeDescriptorSets);
     _shaderConversionResults.isRasterizationDisabled = pMSLCompiler && pMSLCompiler->get_is_rasterization_disabled();
     _shaderConversionResults.isPositionInvariant = pMSLCompiler && pMSLCompiler->is_position_invariant();
     _shaderConversionResults.needsSwizzleBuffer = pMSLCompiler && pMSLCompiler->needs_swizzle_buffer();
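Editor's note on the pattern above (not part of the patch): the change replaces the old 32-bit `activeDescriptorSets` mask with a per-stage, per-binding `MVKBitArray`, so pipeline creation can record exactly which bindings each shader stage references, and command encoding can skip layout bindings the shader never uses. The standalone C++ sketch below illustrates that idea only; `BitArray`, `LayoutBinding`, `shaderUsesBinding`, `populateBindingUse`, and `encodeStageResources` are simplified, hypothetical stand-ins, not MoltenVK API.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Minimal stand-in for MVKBitArray: one bit per descriptor binding.
class BitArray {
public:
    void resize(size_t bitCount) { _bits.assign(bitCount, false); }
    void setBit(size_t idx) { if (idx < _bits.size()) { _bits[idx] = true; } }
    // Out-of-range reads return false, mirroring the bounds check added in the diff.
    bool getBit(size_t idx) const { return idx < _bits.size() && _bits[idx]; }
private:
    std::vector<bool> _bits;
};

struct LayoutBinding {
    uint32_t binding;         // Vulkan binding number within the set
    bool     appliesToStage;  // does the layout expose this binding to the stage?
};

// Hypothetical reflection query: does the compiled shader reference (set, binding)?
// In MoltenVK this role is played by SPIRVToMSLConversionConfiguration::isResourceUsed().
bool shaderUsesBinding(uint32_t descSet, uint32_t binding) {
    return descSet == 0 && binding == 0;  // demo: only binding 0 of set 0 is used
}

// Pipeline-creation time: record per-binding usage for one stage,
// analogous in spirit to MVKDescriptorSetLayout::populateBindingUse().
bool populateBindingUse(BitArray& bindingUse, const std::vector<LayoutBinding>& layout, uint32_t descSet) {
    bool setIsUsed = false;
    bindingUse.resize(layout.size());
    for (uint32_t i = 0; i < layout.size(); i++) {
        if (shaderUsesBinding(descSet, layout[i].binding)) {
            bindingUse.setBit(i);
            setIsUsed = true;
        }
    }
    return setIsUsed;
}

// Draw-encoding time: only touch bindings the layout applies to this stage
// AND the shader actually uses, as in encodeMetalArgumentBuffer() above.
void encodeStageResources(const std::vector<LayoutBinding>& layout, const BitArray& bindingUse) {
    for (uint32_t i = 0; i < layout.size(); i++) {
        if (layout[i].appliesToStage && bindingUse.getBit(i)) {
            std::printf("binding %u: encode resource usage\n", layout[i].binding);
        } else {
            std::printf("binding %u: skipped (unused by this stage's shader)\n", layout[i].binding);
        }
    }
}

int main() {
    std::vector<LayoutBinding> set0 = { {0, true}, {1, true}, {2, false} };
    BitArray use;
    if (populateBindingUse(use, set0, 0)) {   // set is used, so encoders would be created
        encodeStageResources(set0, use);
    }
    return 0;
}
```

Compared with a single bitmask of "sets used anywhere in the shader", the per-stage bit array lets the encoder avoid declaring Metal resource usage for bindings a given stage never touches, which is the point of the `getApplyToStage(stage) && shaderBindingUsage.getBit(dslBindIdx)` test in `MVKCommandEncoderState.mm`.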