With Metal argument buffers, track descriptor bindings used by

pipelines and only encode bindings that are in use for each stage.

Add SPIRVToMSLConversionConfiguration::isResourceUsed() and
remove SPIRVToMSLConversionResults::isDescriptorSetUsed().
Add a copy constructor to MVKBitArray and allow queries beyond its range.
This commit is contained in:
Bill Hollings 2021-04-19 22:05:06 -04:00
parent ca89d258c9
commit f35922507a
10 changed files with 127 additions and 71 deletions

View File

@ -16,8 +16,11 @@ Copyright (c) 2015-2021 [The Brenwill Workshop Ltd.](http://www.brenwill.com)
MoltenVK 1.1.3
--------------
Released TBD
Released 2021/04/26
- Add beta support for using Metal argument buffers for shader resources on _macOS_, by setting
`MVK_CONFIG_USE_METAL_ARGUMENT_BUFFERS` environment variable (disabled by default).
Available on _macOS 10.16 (Big Sur)_ or later, and on earlier _macOS_ versions on _Intel_ GPUs.
- Add support for `HDR10` colorspace via `VK_COLOR_SPACE_HDR10_HLG_EXT` and `VK_COLOR_SPACE_HDR10_ST2084_EXT`.
- Always explicitly set `CAMetalLayer` colorspace property based on _Vulkan_ parameters,
and don't rely on _Metal_ default values.

View File

@ -498,12 +498,12 @@ void MVKResourcesCommandEncoderState::encodeMetalArgumentBuffer(MVKShaderStage s
auto* descSet = _boundDescriptorSets[dsIdx];
if ( !descSet ) { continue; }
auto* dsLayout = descSet->getLayout();
id<MTLArgumentEncoder> mtlArgEncoder = nil;
id<MTLBuffer> mtlArgBuffer = nil;
NSUInteger metalArgBufferOffset = 0;
if (_cmdEncoder->isUsingDescriptorSetMetalArgumentBuffers()) {
auto* dsLayout = descSet->getLayout();
if (dsLayout->isUsingDescriptorSetMetalArgumentBuffers()) {
mtlArgEncoder = dsLayout->getMTLArgumentEncoder().getMTLArgumentEncoder();
mtlArgBuffer = descSet->getMetalArgumentBuffer();
metalArgBufferOffset = descSet->getMetalArgumentBufferOffset();
@ -518,13 +518,14 @@ void MVKResourcesCommandEncoderState::encodeMetalArgumentBuffer(MVKShaderStage s
auto& argBuffDirtyDescs = descSet->getMetalArgumentBufferDirtyDescriptors();
auto& resourceUsageDirtyDescs = _metalUsageDirtyDescriptors[dsIdx];
auto& shaderBindingUsage = pipeline->getDescriptorBindingUse(dsIdx, stage);
bool mtlArgEncAttached = false;
bool shouldBindArgBuffToStage = false;
uint32_t dslBindCnt = dsLayout->getBindingCount();
for (uint32_t dslBindIdx = 0; dslBindIdx < dslBindCnt; dslBindIdx++) {
auto* dslBind = dsLayout->getBindingAt(dslBindIdx);
if (dslBind->getApplyToStage(stage)) {
if (dslBind->getApplyToStage(stage) && shaderBindingUsage.getBit(dslBindIdx)) {
shouldBindArgBuffToStage = true;
uint32_t elemCnt = dslBind->getDescriptorCount(descSet);
for (uint32_t elemIdx = 0; elemIdx < elemCnt; elemIdx++) {

View File

@ -36,18 +36,18 @@ class MVKResourcesCommandEncoderState;
/** Holds and manages the lifecycle of a MTLArgumentEncoder. The encoder can only be set once. */
struct MVKMTLArgumentEncoder {
NSUInteger mtlArgumentEncoderSize = 0;
id<MTLArgumentEncoder> getMTLArgumentEncoder() { return _mtlArgumentEncoder; }
NSUInteger getMTLArgumentEncoderSize() { return _mtlArgumentEncoderSize; }
void init(id<MTLArgumentEncoder> mtlArgEnc) {
if (_mtlArgumentEncoder) { return; }
_mtlArgumentEncoder = mtlArgEnc; // takes ownership
_mtlArgumentEncoderSize = mtlArgEnc.encodedLength;
mtlArgumentEncoderSize = mtlArgEnc.encodedLength;
}
~MVKMTLArgumentEncoder() { [_mtlArgumentEncoder release]; }
private:
id<MTLArgumentEncoder> _mtlArgumentEncoder = nil;
NSUInteger _mtlArgumentEncoderSize = 0;
};
/** Represents a Vulkan descriptor set layout. */
@ -86,7 +86,16 @@ public:
/** Populates the specified shader converter context, at the specified DSL index. */
void populateShaderConverterContext(mvk::SPIRVToMSLConversionConfiguration& context,
MVKShaderResourceBinding& dslMTLRezIdxOffsets,
uint32_t dslIndex);
uint32_t descSetIndex);
/**
* Populates the bindings in this descriptor set layout used by the shader.
* Returns false if the shader does not use the descriptor set at all.
*/
bool populateBindingUse(MVKBitArray& bindingUse,
mvk::SPIRVToMSLConversionConfiguration& context,
MVKShaderStage stage,
uint32_t descSetIndex);
/** Returns the number of bindings. */
uint32_t getBindingCount() { return (uint32_t)_bindings.size(); }

View File

@ -171,18 +171,43 @@ void MVKDescriptorSetLayout::pushDescriptorSet(MVKCommandEncoder* cmdEncoder,
void MVKDescriptorSetLayout::populateShaderConverterContext(mvk::SPIRVToMSLConversionConfiguration& context,
MVKShaderResourceBinding& dslMTLRezIdxOffsets,
uint32_t dslIndex) {
uint32_t descSetIndex) {
uint32_t bindCnt = (uint32_t)_bindings.size();
for (uint32_t bindIdx = 0; bindIdx < bindCnt; bindIdx++) {
_bindings[bindIdx].populateShaderConverterContext(context, dslMTLRezIdxOffsets, dslIndex);
_bindings[bindIdx].populateShaderConverterContext(context, dslMTLRezIdxOffsets, descSetIndex);
}
// Mark if Metal argument buffers are in use, but this descriptor set layout is not using them.
if (isUsingMetalArgumentBuffers() && !isUsingMetalArgumentBuffer()) {
context.discreteDescriptorSets.push_back(dslIndex);
context.discreteDescriptorSets.push_back(descSetIndex);
}
}
// Populates bindingUse with one bit per binding in this layout, set when the
// shader stage actually uses that binding according to the SPIR-V conversion
// configuration. Returns true if the stage uses any binding in this descriptor
// set, false if the descriptor set is entirely unused by the stage.
bool MVKDescriptorSetLayout::populateBindingUse(MVKBitArray& bindingUse,
												SPIRVToMSLConversionConfiguration& context,
												MVKShaderStage stage,
												uint32_t descSetIndex) {
	// Maps each MVKShaderStage enum value to its SPIR-V execution model,
	// indexed by stage (vertex, tess-control, tess-eval, fragment, compute).
	// Renamed from svpExecModels to match the spv:: namespace spelling.
	static const spv::ExecutionModel spvExecModels[] = {
		spv::ExecutionModelVertex,
		spv::ExecutionModelTessellationControl,
		spv::ExecutionModelTessellationEvaluation,
		spv::ExecutionModelFragment,
		spv::ExecutionModelGLCompute
	};

	bool descSetIsUsed = false;
	uint32_t bindCnt = (uint32_t)_bindings.size();
	bindingUse.resize(bindCnt);		// One bit per layout binding.
	for (uint32_t bindIdx = 0; bindIdx < bindCnt; bindIdx++) {
		auto& dslBind = _bindings[bindIdx];
		if (context.isResourceUsed(spvExecModels[stage], descSetIndex, dslBind.getBinding())) {
			bindingUse.setBit(bindIdx);
			descSetIsUsed = true;
		}
	}
	return descSetIsUsed;
}
MVKDescriptorSetLayout::MVKDescriptorSetLayout(MVKDevice* device,
const VkDescriptorSetLayoutCreateInfo* pCreateInfo) : MVKVulkanAPIDeviceObject(device) {
@ -460,7 +485,7 @@ VkResult MVKDescriptorPool::allocateDescriptorSet(MVKDescriptorSetLayout* mvkDSL
uint32_t variableDescriptorCount,
VkDescriptorSet* pVKDS) {
VkResult rslt = VK_ERROR_OUT_OF_POOL_MEMORY;
NSUInteger mtlArgBuffAllocSize = mvkDSL->getMTLArgumentEncoder().getMTLArgumentEncoderSize();
NSUInteger mtlArgBuffAllocSize = mvkDSL->getMTLArgumentEncoder().mtlArgumentEncoderSize;
NSUInteger mtlArgBuffAlignedSize = mvkAlignByteCount(mtlArgBuffAllocSize,
getDevice()->_pMetalFeatures->mtlBufferAlignment);

View File

@ -109,7 +109,10 @@ public:
uint32_t getDescriptorSetCount() { return (uint32_t)_descriptorSetLayouts.size(); }
/** Returns the number of descriptors in the descriptor set layout. */
uint32_t getDescriptorCount(uint32_t descSetIndex) { return _descriptorSetLayouts[descSetIndex]->getDescriptorCount(); }
uint32_t getDescriptorCount(uint32_t descSetIndex) { return getDescriptorSetLayout(descSetIndex)->getDescriptorCount(); }
/** Returns the descriptor set layout. */
MVKDescriptorSetLayout* getDescriptorSetLayout(uint32_t descSetIndex) { return _descriptorSetLayouts[descSetIndex]; }
/** Returns the push constant binding info. */
const MVKShaderResourceBinding& getPushConstantBindings() { return _pushConstantsMTLResourceIndexes; }
@ -179,6 +182,9 @@ public:
/** Returns the MTLArgumentEncoder for the descriptor set. */
virtual MVKMTLArgumentEncoder& getMTLArgumentEncoder(uint32_t descSetIndex, MVKShaderStage stage) = 0;
/** Returns the array of descriptor binding use for the descriptor set. */
virtual MVKBitArray& getDescriptorBindingUse(uint32_t descSetIndex, MVKShaderStage stage) = 0;
/** Returns the number of descriptor sets in this pipeline layout. */
uint32_t getDescriptorSetCount() { return _descriptorSetCount; }
@ -190,6 +196,10 @@ public:
protected:
void propagateDebugName() override {}
template<typename CreateInfo> void addMTLArgumentEncoders(MVKMTLFunction& mvkMTLFunc,
const CreateInfo* pCreateInfo,
SPIRVToMSLConversionConfiguration& context,
MVKShaderStage stage);
MVKPipelineCache* _pipelineCache;
MVKShaderImplicitRezBinding _swizzleBufferIndex;
@ -220,7 +230,11 @@ typedef std::pair<uint32_t, uint32_t> MVKZeroDivisorVertexBinding;
typedef MVKSmallVector<MVKGraphicsStage, 4> MVKPiplineStages;
struct MVKStagedMTLArgumentEncoders {
MVKMTLArgumentEncoder stages[4];
MVKMTLArgumentEncoder stages[4] = {};
};
struct MVKStagedDescriptorBindingUse {
MVKBitArray stages[4] = {};
};
/** The number of dynamic states possible in Vulkan. */
@ -291,6 +305,9 @@ public:
/** Returns the MTLArgumentEncoder for the descriptor set. */
MVKMTLArgumentEncoder& getMTLArgumentEncoder(uint32_t descSetIndex, MVKShaderStage stage) override { return _mtlArgumentEncoders[descSetIndex].stages[stage]; }
/** Returns the array of descriptor binding use for the descriptor set. */
MVKBitArray& getDescriptorBindingUse(uint32_t descSetIndex, MVKShaderStage stage) override { return _descriptorBindingUse[descSetIndex].stages[stage]; }
/** Constructs an instance for the device and parent (which may be NULL). */
MVKGraphicsPipeline(MVKDevice* device,
MVKPipelineCache* pipelineCache,
@ -326,7 +343,6 @@ protected:
bool isRasterizationDisabled(const VkGraphicsPipelineCreateInfo* pCreateInfo);
bool verifyImplicitBuffer(bool needsBuffer, MVKShaderImplicitRezBinding& index, MVKShaderStage stage, const char* name, uint32_t reservedBuffers);
uint32_t getTranslatedVertexBinding(uint32_t binding, uint32_t translationOffset, uint32_t maxBinding);
void addMTLArgumentEncoders(MVKMTLFunction& mvkMTLFunc, MVKShaderStage stage);
const VkPipelineShaderStageCreateInfo* _pVertexSS = nullptr;
const VkPipelineShaderStageCreateInfo* _pTessCtlSS = nullptr;
@ -342,6 +358,7 @@ protected:
MVKSmallVector<MVKTranslatedVertexBinding> _translatedVertexBindings;
MVKSmallVector<MVKZeroDivisorVertexBinding> _zeroDivisorVertexBindings;
MVKSmallVector<MVKStagedMTLArgumentEncoders> _mtlArgumentEncoders;
MVKSmallVector<MVKStagedDescriptorBindingUse> _descriptorBindingUse;
MTLComputePipelineDescriptor* _mtlTessVertexStageDesc = nil;
id<MTLFunction> _mtlTessVertexFunctions[3] = {nil, nil, nil};
@ -404,6 +421,9 @@ public:
/** Returns the MTLArgumentEncoder for the descriptor set. */
MVKMTLArgumentEncoder& getMTLArgumentEncoder(uint32_t descSetIndex, MVKShaderStage stage) override { return _mtlArgumentEncoders[descSetIndex]; }
/** Returns the array of descriptor binding use for the descriptor set. */
MVKBitArray& getDescriptorBindingUse(uint32_t descSetIndex, MVKShaderStage stage) override { return _descriptorBindingUse[descSetIndex]; }
/** Constructs an instance for the device and parent (which may be NULL). */
MVKComputePipeline(MVKDevice* device,
MVKPipelineCache* pipelineCache,
@ -414,10 +434,10 @@ public:
protected:
MVKMTLFunction getMTLFunction(const VkComputePipelineCreateInfo* pCreateInfo);
void addMTLArgumentEncoders(MVKMTLFunction& mvkMTLFunc);
id<MTLComputePipelineState> _mtlPipelineState;
MVKSmallVector<MVKMTLArgumentEncoder> _mtlArgumentEncoders;
MVKSmallVector<MVKBitArray> _descriptorBindingUse;
MTLSize _mtlThreadgroupSize;
bool _needsSwizzleBuffer = false;
bool _needsBufferSizeBuffer = false;

View File

@ -182,6 +182,26 @@ void MVKPipeline::bindPushConstants(MVKCommandEncoder* cmdEncoder) {
}
}
// For each descriptor set, populate the descriptor bindings used by the shader for this stage,
// and if Metal argument encoders must be dedicated to a pipeline stage, create the encoder here.
// No-op unless Metal argument buffers are in use for this pipeline.
template<typename CreateInfo>
void MVKPipeline::addMTLArgumentEncoders(MVKMTLFunction& mvkMTLFunc,
										 const CreateInfo* pCreateInfo,
										 SPIRVToMSLConversionConfiguration& context,
										 MVKShaderStage stage) {
	if ( !isUsingMetalArgumentBuffers() ) { return; }

	// Encoders are only needed when each pipeline stage keeps its own argument buffers.
	bool needMTLArgEnc = isUsingPipelineStageMetalArgumentBuffers();
	auto mtlFunc = mvkMTLFunc.getMTLFunction();
	for (uint32_t dsIdx = 0; dsIdx < _descriptorSetCount; dsIdx++) {
		auto* dsLayout = ((MVKPipelineLayout*)pCreateInfo->layout)->getDescriptorSetLayout(dsIdx);
		// Record which bindings in this set the stage uses; descSetIsUsed is false
		// when the shader does not touch the descriptor set at all.
		bool descSetIsUsed = dsLayout->populateBindingUse(getDescriptorBindingUse(dsIdx, stage), context, stage, dsIdx);
		if (descSetIsUsed && needMTLArgEnc) {
			// Create a dedicated argument encoder for this (descriptor set, stage) pair.
			getMTLArgumentEncoder(dsIdx, stage).init([mtlFunc newArgumentEncoderWithBufferIndex: dsIdx]);
		}
	}
}
MVKPipeline::MVKPipeline(MVKDevice* device, MVKPipelineCache* pipelineCache, MVKPipelineLayout* layout, MVKPipeline* parent) :
MVKVulkanAPIDeviceObject(device),
_pipelineCache(pipelineCache),
@ -488,6 +508,7 @@ void MVKGraphicsPipeline::initMTLRenderPipelineState(const VkGraphicsPipelineCre
_mtlTessVertexStageDesc = nil;
for (uint32_t i = 0; i < 3; i++) { _mtlTessVertexFunctions[i] = nil; }
if (isUsingMetalArgumentBuffers()) { _descriptorBindingUse.resize(_descriptorSetCount); }
if (isUsingPipelineStageMetalArgumentBuffers()) { _mtlArgumentEncoders.resize(_descriptorSetCount); }
if (!isTessellationPipeline()) {
@ -918,7 +939,7 @@ bool MVKGraphicsPipeline::addVertexShaderToPipeline(MTLRenderPipelineDescriptor*
_needsVertexViewRangeBuffer = funcRslts.needsViewRangeBuffer;
_needsVertexOutputBuffer = funcRslts.needsOutputBuffer;
addMTLArgumentEncoders(func, kMVKShaderStageVertex);
addMTLArgumentEncoders(func, pCreateInfo, shaderContext, kMVKShaderStageVertex);
if (funcRslts.isRasterizationDisabled) {
_pFragmentSS = nullptr;
@ -990,7 +1011,7 @@ bool MVKGraphicsPipeline::addVertexShaderToPipeline(MTLComputePipelineDescriptor
_needsVertexOutputBuffer = funcRslts.needsOutputBuffer;
}
addMTLArgumentEncoders(func, kMVKShaderStageVertex);
addMTLArgumentEncoders(func, pCreateInfo, shaderContext, kMVKShaderStageVertex);
// If we need the swizzle buffer and there's no place to put it, we're in serious trouble.
if (!verifyImplicitBuffer(_needsVertexSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageVertex, "swizzle", vbCnt)) {
@ -1049,7 +1070,7 @@ bool MVKGraphicsPipeline::addTessCtlShaderToPipeline(MTLComputePipelineDescripto
_needsTessCtlPatchOutputBuffer = funcRslts.needsPatchOutputBuffer;
_needsTessCtlInputBuffer = funcRslts.needsInputThreadgroupMem;
addMTLArgumentEncoders(func, kMVKShaderStageTessCtl);
addMTLArgumentEncoders(func, pCreateInfo, shaderContext, kMVKShaderStageTessCtl);
if (!verifyImplicitBuffer(_needsTessCtlSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageTessCtl, "swizzle", kMVKTessCtlNumReservedBuffers)) {
return false;
@ -1105,7 +1126,7 @@ bool MVKGraphicsPipeline::addTessEvalShaderToPipeline(MTLRenderPipelineDescripto
_needsTessEvalBufferSizeBuffer = funcRslts.needsBufferSizeBuffer;
_needsTessEvalDynamicOffsetBuffer = funcRslts.needsDynamicOffsetBuffer;
addMTLArgumentEncoders(func, kMVKShaderStageTessEval);
addMTLArgumentEncoders(func, pCreateInfo, shaderContext, kMVKShaderStageTessEval);
if (funcRslts.isRasterizationDisabled) {
_pFragmentSS = nullptr;
@ -1161,7 +1182,7 @@ bool MVKGraphicsPipeline::addFragmentShaderToPipeline(MTLRenderPipelineDescripto
_needsFragmentDynamicOffsetBuffer = funcRslts.needsDynamicOffsetBuffer;
_needsFragmentViewRangeBuffer = funcRslts.needsViewRangeBuffer;
addMTLArgumentEncoders(func, kMVKShaderStageFragment);
addMTLArgumentEncoders(func, pCreateInfo, shaderContext, kMVKShaderStageFragment);
if (!verifyImplicitBuffer(_needsFragmentSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageFragment, "swizzle", 0)) {
return false;
@ -1664,17 +1685,6 @@ bool MVKGraphicsPipeline::isRasterizationDisabled(const VkGraphicsPipelineCreate
(mvkMTLPrimitiveTopologyClassFromVkPrimitiveTopology(pCreateInfo->pInputAssemblyState->topology) == MTLPrimitiveTopologyClassTriangle))));
}
void MVKGraphicsPipeline::addMTLArgumentEncoders(MVKMTLFunction& mvkMTLFunc, MVKShaderStage stage) {
if ( !isUsingPipelineStageMetalArgumentBuffers() ) { return; }
auto mtlFunc = mvkMTLFunc.getMTLFunction();
for (uint32_t dsIdx = 0; dsIdx < _descriptorSetCount; dsIdx++) {
if (mvkMTLFunc.shaderConversionResults.isDescriptorSetUsed(dsIdx)) {
_mtlArgumentEncoders[dsIdx].stages[stage].init([mtlFunc newArgumentEncoderWithBufferIndex: dsIdx]);
}
}
}
MVKGraphicsPipeline::~MVKGraphicsPipeline() {
@synchronized (getMTLDevice()) {
[_mtlTessVertexStageDesc release];
@ -1715,13 +1725,11 @@ MVKComputePipeline::MVKComputePipeline(MVKDevice* device,
_mtlThreadgroupSize = func.threadGroupSize;
_mtlPipelineState = nil;
if (isUsingMetalArgumentBuffers()) { _descriptorBindingUse.resize(_descriptorSetCount); }
if (isUsingPipelineStageMetalArgumentBuffers()) { _mtlArgumentEncoders.resize(_descriptorSetCount); }
id<MTLFunction> mtlFunc = func.getMTLFunction();
if (mtlFunc) {
addMTLArgumentEncoders(func);
MTLComputePipelineDescriptor* plDesc = [MTLComputePipelineDescriptor new]; // temp retain
plDesc.computeFunction = mtlFunc;
plDesc.maxTotalThreadsPerThreadgroup = _mtlThreadgroupSize.width * _mtlThreadgroupSize.height * _mtlThreadgroupSize.depth;
@ -1805,20 +1813,11 @@ MVKMTLFunction MVKComputePipeline::getMTLFunction(const VkComputePipelineCreateI
_needsDynamicOffsetBuffer = funcRslts.needsDynamicOffsetBuffer;
_needsDispatchBaseBuffer = funcRslts.needsDispatchBaseBuffer;
addMTLArgumentEncoders(func, pCreateInfo, shaderContext, kMVKShaderStageCompute);
return func;
}
void MVKComputePipeline::addMTLArgumentEncoders(MVKMTLFunction& mvkMTLFunc) {
if ( !isUsingPipelineStageMetalArgumentBuffers() ) { return; }
auto mtlFunc = mvkMTLFunc.getMTLFunction();
for (uint32_t dsIdx = 0; dsIdx < _descriptorSetCount; dsIdx++) {
if (mvkMTLFunc.shaderConversionResults.isDescriptorSetUsed(dsIdx)) {
_mtlArgumentEncoders[dsIdx].init([mtlFunc newArgumentEncoderWithBufferIndex: dsIdx]);
}
}
}
MVKComputePipeline::~MVKComputePipeline() {
@synchronized (getMTLDevice()) {
[_mtlPipelineState release];
@ -2225,7 +2224,6 @@ namespace mvk {
template<class Archive>
void serialize(Archive & archive, SPIRVToMSLConversionResults& scr) {
archive(scr.entryPoint,
scr.activeDescriptorSets,
scr.isRasterizationDisabled,
scr.isPositionInvariant,
scr.needsSwizzleBuffer,

View File

@ -34,8 +34,12 @@ class MVKBitArray {
public:
/** Returns the value of the bit, and optionally clears that bit if it was set. */
/**
* Returns the value of the bit, and optionally clears that bit if it was set.
Returns false if the bitIndex is beyond the size of this array.
*/
inline bool getBit(size_t bitIndex, bool shouldClear = false) {
	if (bitIndex >= _bitCount) { return false; }	// Queries beyond the array size report the bit as unset.
	bool val = mvkIsAnyFlagEnabled(_pSections[getIndexOfSection(bitIndex)], getSectionSetMask(bitIndex));
	if (shouldClear && val) { clearBit(bitIndex); }	// Optionally clear the bit, but only if it was set.
	return val;
}
@ -174,8 +178,11 @@ public:
}
/** Constructs an instance for the specified number of bits, and sets the initial value of all the bits. */
MVKBitArray(size_t size = 0, bool val = false) {
resize(size, val);
MVKBitArray(size_t size = 0, bool val = false) { resize(size, val); }
// Copy constructor: sizes this array to match, then copies the bit content wholesale.
// NOTE(review): no matching copy-assignment operator is visible here (rule of three) — confirm
// one exists or is not needed. Also assumes _pSections/other._pSections are valid for a
// zero-bit array (memcpy with a null pointer is UB even for size 0) — verify resize() guarantees this.
MVKBitArray(const MVKBitArray& other) {
	resize(other._bitCount);
	memcpy(_pSections, other._pSections, getSectionCount() * SectionByteCount);
}
~MVKBitArray() { free(_pSections); }

View File

@ -265,20 +265,5 @@ namespace mvk {
#endif
}
/**
 * Given the compiler, updates a bit array with the indexes of the descriptor sets in use by the shader.
 * Each active interface variable decorated with a DescriptorSet index contributes one set bit.
 * NOTE(review): a descriptor set index >= the bit width of Bm makes the shift undefined — confirm
 * callers guarantee set indexes fit in the bitmask type.
 */
template<typename C, typename Bm>
void getActiveDescriptorSets(C* pCompiler, Bm& activeDescSets) {
	Bm setBit = 1;
	activeDescSets = 0;		// Start with no sets marked active.
	if (pCompiler) {
		for (auto varID : pCompiler->get_active_interface_variables()) {
			if (pCompiler->has_decoration(varID, spv::DecorationDescriptorSet)) {
				uint32_t descSet = pCompiler->get_decoration(varID, spv::DecorationDescriptorSet);
				activeDescSets |= (setBit << descSet);	// Mark this descriptor set as in use.
			}
		}
	}
}
}
#endif

View File

@ -160,6 +160,9 @@ namespace mvk {
/** Returns whether the vertex buffer at the specified Vulkan binding is used by the shader. */
bool isVertexBufferUsed(uint32_t binding) const { return countShaderInputsAt(binding) > 0; }
/** Returns whether the resource at the specified descriptor set binding is used by the shader. */
bool isResourceUsed(spv::ExecutionModel stage, uint32_t descSet, uint32_t binding) const;
/** Marks all input variables and resources as being used by the shader. */
void markAllInputsAndResourcesUsed();
@ -221,7 +224,6 @@ namespace mvk {
*/
typedef struct SPIRVToMSLConversionResults {
SPIRVEntryPoint entryPoint;
uint32_t activeDescriptorSets = 0;
bool isRasterizationDisabled = false;
bool isPositionInvariant = false;
bool needsSwizzleBuffer = false;
@ -233,9 +235,6 @@ namespace mvk {
bool needsDispatchBaseBuffer = false;
bool needsViewRangeBuffer = false;
bool isDescriptorSetUsed(uint32_t descSet) {
return (activeDescriptorSets & ((uint32_t)1U << descSet)) != 0;
}
void reset() { *this = SPIRVToMSLConversionResults(); }
} SPIRVToMSLConversionResults;

View File

@ -181,6 +181,16 @@ MVK_PUBLIC_SYMBOL uint32_t SPIRVToMSLConversionConfiguration::countShaderInputsA
return siCnt;
}
// Returns whether the resource at the specified descriptor set binding is marked
// as used by the shader for the given execution model. A binding that was never
// declared in this configuration is reported as unused.
MVK_PUBLIC_SYMBOL bool SPIRVToMSLConversionConfiguration::isResourceUsed(ExecutionModel stage, uint32_t descSet, uint32_t binding) const {
	for (const auto& rezBind : resourceBindings) {
		const auto& spvBind = rezBind.resourceBinding;
		bool isMatch = (spvBind.stage == stage &&
						spvBind.desc_set == descSet &&
						spvBind.binding == binding);
		if (isMatch) { return rezBind.outIsUsedByShader; }
	}
	return false;
}
MVK_PUBLIC_SYMBOL void SPIRVToMSLConversionConfiguration::markAllInputsAndResourcesUsed() {
for (auto& si : shaderInputs) { si.outIsUsedByShader = true; }
for (auto& rb : resourceBindings) { rb.outIsUsedByShader = true; }
@ -337,7 +347,6 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverter::convert(SPIRVToMSLConversionConfigur
// Populate the shader conversion results with info from the compilation run,
// and mark which vertex attributes and resource bindings are used by the shader
populateEntryPoint(pMSLCompiler, context.options);
getActiveDescriptorSets(pMSLCompiler, _shaderConversionResults.activeDescriptorSets);
_shaderConversionResults.isRasterizationDisabled = pMSLCompiler && pMSLCompiler->get_is_rasterization_disabled();
_shaderConversionResults.isPositionInvariant = pMSLCompiler && pMSLCompiler->is_position_invariant();
_shaderConversionResults.needsSwizzleBuffer = pMSLCompiler && pMSLCompiler->needs_swizzle_buffer();