MVKPipeline: Fix fragment inputs to have as many components as vertex outputs.

Metal is picky about interface matching. If the types of a vertex output
and its corresponding fragment input don't match, down to the number of
vector components, it fails pipeline compilation. To support cases where
the number of components in the fragment input is less than the
corresponding vertex output, we need to fix up the fragment shader to
accept the extra components.
This commit is contained in:
Chip Davis 2020-06-17 21:36:27 -05:00
parent 2dd34de8ee
commit d14d4e918d
6 changed files with 104 additions and 97 deletions

View File

@ -1 +1 @@
d385bf096f5dabbc4cdaeb6872b0f64be1a63ad0
a64484f62b55d2ded4639fb248e21e835606d2ee

View File

@ -267,7 +267,7 @@ protected:
bool addVertexShaderToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConversionConfiguration& shaderContext);
bool addTessCtlShaderToPipeline(MTLComputePipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConversionConfiguration& shaderContext, SPIRVShaderOutputs& prevOutput);
bool addTessEvalShaderToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConversionConfiguration& shaderContext, SPIRVShaderOutputs& prevOutput);
bool addFragmentShaderToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConversionConfiguration& shaderContext);
bool addFragmentShaderToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConversionConfiguration& shaderContext, SPIRVShaderOutputs& prevOutput);
bool addVertexInputToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkPipelineVertexInputStateCreateInfo* pVI, const SPIRVToMSLConversionConfiguration& shaderContext);
void addTessellationToPipeline(MTLRenderPipelineDescriptor* plDesc, const SPIRVTessReflectionData& reflectData, const VkPipelineTessellationStateCreateInfo* pTS);
void addFragmentOutputToPipeline(MTLRenderPipelineDescriptor* plDesc, const SPIRVTessReflectionData& reflectData, const VkGraphicsPipelineCreateInfo* pCreateInfo, bool isTessellationVertexPipeline = false);

View File

@ -466,15 +466,23 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::newMTLRenderPipelineDescriptor
MTLRenderPipelineDescriptor* plDesc = [MTLRenderPipelineDescriptor new]; // retained
SPIRVShaderOutputs vtxOutputs;
std::string errorLog;
if (!getShaderOutputs(((MVKShaderModule*)_pVertexSS->module)->getSPIRV(), spv::ExecutionModelVertex, _pVertexSS->pName, vtxOutputs, errorLog) ) {
setConfigurationResult(reportError(VK_ERROR_INITIALIZATION_FAILED, "Failed to get vertex outputs: %s", errorLog.c_str()));
return nil;
}
// Add shader stages. Compile vertex shader before others just in case conversion changes anything...like rasterization disable.
if (!addVertexShaderToPipeline(plDesc, pCreateInfo, shaderContext)) { return nil; }
// Fragment shader - only add if rasterization is enabled
if (!addFragmentShaderToPipeline(plDesc, pCreateInfo, shaderContext)) { return nil; }
// Vertex input
// This needs to happen before compiling the fragment shader, or we'll lose information on vertex attributes.
if (!addVertexInputToPipeline(plDesc, pCreateInfo->pVertexInputState, shaderContext)) { return nil; }
// Fragment shader - only add if rasterization is enabled
if (!addFragmentShaderToPipeline(plDesc, pCreateInfo, shaderContext, vtxOutputs)) { return nil; }
// Output
addFragmentOutputToPipeline(plDesc, reflectData, pCreateInfo);
@ -625,7 +633,7 @@ MTLComputePipelineDescriptor* MVKGraphicsPipeline::newMTLTessControlStageDescrip
for (const SPIRVShaderOutput& output : vtxOutputs) {
if (output.builtin == spv::BuiltInPointSize && !reflectData.pointMode) { continue; }
offset = (uint32_t)mvkAlignByteCount(offset, sizeOfOutput(output));
if (shaderContext.isVertexAttributeLocationUsed(output.location)) {
if (shaderContext.isShaderInputLocationUsed(output.location)) {
plDesc.stageInputDescriptor.attributes[output.location].bufferIndex = kMVKTessCtlInputBufferIndex;
plDesc.stageInputDescriptor.attributes[output.location].format = (MTLAttributeFormat)getPixelFormats()->getMTLVertexFormat(mvkFormatFromOutput(output));
plDesc.stageInputDescriptor.attributes[output.location].offset = offset;
@ -653,12 +661,16 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::newMTLTessRasterStageDescripto
SPIRVToMSLConversionConfiguration& shaderContext) {
MTLRenderPipelineDescriptor* plDesc = [MTLRenderPipelineDescriptor new]; // retained
SPIRVShaderOutputs tcOutputs;
SPIRVShaderOutputs tcOutputs, teOutputs;
std::string errorLog;
if (!getShaderOutputs(((MVKShaderModule*)_pTessCtlSS->module)->getSPIRV(), spv::ExecutionModelTessellationControl, _pTessCtlSS->pName, tcOutputs, errorLog) ) {
setConfigurationResult(reportError(VK_ERROR_INITIALIZATION_FAILED, "Failed to get tessellation control outputs: %s", errorLog.c_str()));
return nil;
}
if (!getShaderOutputs(((MVKShaderModule*)_pTessEvalSS->module)->getSPIRV(), spv::ExecutionModelTessellationEvaluation, _pTessEvalSS->pName, teOutputs, errorLog) ) {
setConfigurationResult(reportError(VK_ERROR_INITIALIZATION_FAILED, "Failed to get tessellation evaluation outputs: %s", errorLog.c_str()));
return nil;
}
// Add shader stages. Compile tessellation evaluation shader before others just in case conversion changes anything...like rasterization disable.
if (!addTessEvalShaderToPipeline(plDesc, pCreateInfo, shaderContext, tcOutputs)) {
@ -666,20 +678,15 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::newMTLTessRasterStageDescripto
return nil;
}
// Fragment shader - only add if rasterization is enabled
if (!addFragmentShaderToPipeline(plDesc, pCreateInfo, shaderContext)) {
[plDesc release];
return nil;
}
// Stage input
// Tessellation evaluation stage input
// This needs to happen before compiling the fragment shader, or we'll lose information on shader inputs.
plDesc.vertexDescriptor = [MTLVertexDescriptor vertexDescriptor];
uint32_t offset = 0, patchOffset = 0, outerLoc = -1, innerLoc = -1;
bool usedPerVertex = false, usedPerPatch = false;
const SPIRVShaderOutput* firstVertex = nullptr, * firstPatch = nullptr;
for (const SPIRVShaderOutput& output : tcOutputs) {
if (output.builtin == spv::BuiltInPointSize && !reflectData.pointMode) { continue; }
if (!shaderContext.isVertexAttributeLocationUsed(output.location)) {
if (!shaderContext.isShaderInputLocationUsed(output.location)) {
if (output.perPatch && !(output.builtin == spv::BuiltInTessLevelOuter || output.builtin == spv::BuiltInTessLevelInner) ) {
if (!firstPatch) { firstPatch = &output; }
patchOffset += sizeOfOutput(output);
@ -749,6 +756,12 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::newMTLTessRasterStageDescripto
sizeof(MTLQuadTessellationFactorsHalf);
}
// Fragment shader - only add if rasterization is enabled
if (!addFragmentShaderToPipeline(plDesc, pCreateInfo, shaderContext, teOutputs)) {
[plDesc release];
return nil;
}
// Tessellation state
addTessellationToPipeline(plDesc, reflectData, pCreateInfo->pTessellationState);
@ -909,13 +922,15 @@ bool MVKGraphicsPipeline::addTessEvalShaderToPipeline(MTLRenderPipelineDescripto
bool MVKGraphicsPipeline::addFragmentShaderToPipeline(MTLRenderPipelineDescriptor* plDesc,
const VkGraphicsPipelineCreateInfo* pCreateInfo,
SPIRVToMSLConversionConfiguration& shaderContext) {
SPIRVToMSLConversionConfiguration& shaderContext,
SPIRVShaderOutputs& shaderOutputs) {
if (_pFragmentSS) {
shaderContext.options.entryPointStage = spv::ExecutionModelFragment;
shaderContext.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageFragment];
shaderContext.options.mslOptions.buffer_size_buffer_index = _bufferSizeBufferIndex.stages[kMVKShaderStageFragment];
shaderContext.options.entryPointName = _pFragmentSS->pName;
shaderContext.options.mslOptions.capture_output_to_buffer = false;
addPrevStageOutputToShaderConverterContext(shaderContext, shaderOutputs);
MVKMTLFunction func = ((MVKShaderModule*)_pFragmentSS->module)->getMTLFunction(&shaderContext, _pFragmentSS->pSpecializationInfo, _pipelineCache);
id<MTLFunction> mtlFunc = func.getMTLFunction();
@ -1005,7 +1020,7 @@ bool MVKGraphicsPipeline::addVertexInputToPipeline(MTLRenderPipelineDescriptor*
uint32_t vaCnt = pVI->vertexAttributeDescriptionCount;
for (uint32_t i = 0; i < vaCnt; i++) {
const VkVertexInputAttributeDescription* pVKVA = &pVI->pVertexAttributeDescriptions[i];
if (shaderContext.isVertexAttributeLocationUsed(pVKVA->location)) {
if (shaderContext.isShaderInputLocationUsed(pVKVA->location)) {
uint32_t vaBinding = pVKVA->binding;
uint32_t vaOffset = pVKVA->offset;
@ -1052,7 +1067,7 @@ bool MVKGraphicsPipeline::addVertexInputToPipeline(MTLRenderPipelineDescriptor*
// but at an offset that is one or more strides away from the original.
for (uint32_t i = 0; i < vbCnt; i++) {
const VkVertexInputBindingDescription* pVKVB = &pVI->pVertexBindingDescriptions[i];
uint32_t vbVACnt = shaderContext.countVertexAttributesAt(pVKVB->binding);
uint32_t vbVACnt = shaderContext.countShaderInputsAt(pVKVB->binding);
if (vbVACnt > 0) {
uint32_t vbIdx = getMetalBufferIndexForVertexAttributeBinding(pVKVB->binding);
MTLVertexBufferLayoutDescriptor* vbDesc = plDesc.vertexDescriptor.layouts[vbIdx];
@ -1249,15 +1264,15 @@ void MVKGraphicsPipeline::initMVKShaderConverterContext(SPIRVToMSLConversionConf
void MVKGraphicsPipeline::addVertexInputToShaderConverterContext(SPIRVToMSLConversionConfiguration& shaderContext,
const VkGraphicsPipelineCreateInfo* pCreateInfo) {
// Set the shader context vertex attribute information
shaderContext.vertexAttributes.clear();
shaderContext.shaderInputs.clear();
uint32_t vaCnt = pCreateInfo->pVertexInputState->vertexAttributeDescriptionCount;
for (uint32_t vaIdx = 0; vaIdx < vaCnt; vaIdx++) {
const VkVertexInputAttributeDescription* pVKVA = &pCreateInfo->pVertexInputState->pVertexAttributeDescriptions[vaIdx];
// Set binding and offset from Vulkan vertex attribute
MSLVertexAttribute va;
va.vertexAttribute.location = pVKVA->location;
va.binding = pVKVA->binding;
mvk::MSLShaderInput si;
si.shaderInput.location = pVKVA->location;
si.binding = pVKVA->binding;
// Metal can't do signedness conversions on vertex buffers (rdar://45922847). If the shader
// and the vertex attribute have mismatched signedness, we have to fix the shader
@ -1266,11 +1281,11 @@ void MVKGraphicsPipeline::addVertexInputToShaderConverterContext(SPIRVToMSLConve
// declared type. Programs that try to invoke undefined behavior are on their own.
switch (getPixelFormats()->getFormatType(pVKVA->format) ) {
case kMVKFormatColorUInt8:
va.vertexAttribute.format = MSL_VERTEX_FORMAT_UINT8;
si.shaderInput.format = MSL_VERTEX_FORMAT_UINT8;
break;
case kMVKFormatColorUInt16:
va.vertexAttribute.format = MSL_VERTEX_FORMAT_UINT16;
si.shaderInput.format = MSL_VERTEX_FORMAT_UINT16;
break;
case kMVKFormatDepthStencil:
@ -1280,7 +1295,7 @@ void MVKGraphicsPipeline::addVertexInputToShaderConverterContext(SPIRVToMSLConve
case VK_FORMAT_D16_UNORM_S8_UINT:
case VK_FORMAT_D24_UNORM_S8_UINT:
case VK_FORMAT_D32_SFLOAT_S8_UINT:
va.vertexAttribute.format = MSL_VERTEX_FORMAT_UINT8;
si.shaderInput.format = MSL_VERTEX_FORMAT_UINT8;
break;
default:
@ -1293,35 +1308,36 @@ void MVKGraphicsPipeline::addVertexInputToShaderConverterContext(SPIRVToMSLConve
}
shaderContext.vertexAttributes.push_back(va);
shaderContext.shaderInputs.push_back(si);
}
}
// Initializes the vertex attributes in a shader converter context from the previous stage output.
// Initializes the shader inputs in a shader converter context from the previous stage output.
void MVKGraphicsPipeline::addPrevStageOutputToShaderConverterContext(SPIRVToMSLConversionConfiguration& shaderContext,
SPIRVShaderOutputs& shaderOutputs) {
// Set the shader context vertex attribute information
shaderContext.vertexAttributes.clear();
uint32_t vaCnt = (uint32_t)shaderOutputs.size();
for (uint32_t vaIdx = 0; vaIdx < vaCnt; vaIdx++) {
MSLVertexAttribute va;
va.vertexAttribute.location = shaderOutputs[vaIdx].location;
va.vertexAttribute.builtin = shaderOutputs[vaIdx].builtin;
// Set the shader context input variable information
shaderContext.shaderInputs.clear();
uint32_t siCnt = (uint32_t)shaderOutputs.size();
for (uint32_t siIdx = 0; siIdx < siCnt; siIdx++) {
mvk::MSLShaderInput si;
si.shaderInput.location = shaderOutputs[siIdx].location;
si.shaderInput.builtin = shaderOutputs[siIdx].builtin;
si.shaderInput.vecsize = shaderOutputs[siIdx].vecWidth;
switch (getPixelFormats()->getFormatType(mvkFormatFromOutput(shaderOutputs[vaIdx]) ) ) {
switch (getPixelFormats()->getFormatType(mvkFormatFromOutput(shaderOutputs[siIdx]) ) ) {
case kMVKFormatColorUInt8:
va.vertexAttribute.format = MSL_VERTEX_FORMAT_UINT8;
si.shaderInput.format = MSL_VERTEX_FORMAT_UINT8;
break;
case kMVKFormatColorUInt16:
va.vertexAttribute.format = MSL_VERTEX_FORMAT_UINT16;
si.shaderInput.format = MSL_VERTEX_FORMAT_UINT16;
break;
default:
break;
}
shaderContext.vertexAttributes.push_back(va);
shaderContext.shaderInputs.push_back(si);
}
}
@ -1709,10 +1725,11 @@ namespace SPIRV_CROSS_NAMESPACE {
}
template<class Archive>
void serialize(Archive & archive, MSLVertexAttr& va) {
archive(va.location,
va.format,
va.builtin);
void serialize(Archive & archive, MSLShaderInput& si) {
archive(si.location,
si.format,
si.builtin,
si.vecsize);
}
template<class Archive>
@ -1784,10 +1801,10 @@ namespace mvk {
}
template<class Archive>
void serialize(Archive & archive, MSLVertexAttribute& va) {
archive(va.vertexAttribute,
va.binding,
va.isUsedByShader);
void serialize(Archive & archive, MSLShaderInput& si) {
archive(si.shaderInput,
si.binding,
si.isUsedByShader);
}
template<class Archive>
@ -1801,7 +1818,7 @@ namespace mvk {
template<class Archive>
void serialize(Archive & archive, SPIRVToMSLConversionConfiguration& ctx) {
archive(ctx.options,
ctx.vertexAttributes,
ctx.shaderInputs,
ctx.resourceBindings);
}

View File

@ -278,7 +278,7 @@ MVKMTLFunction MVKShaderModule::getMTLFunction(SPIRVToMSLConversionConfiguration
_device->addActivityPerformance(_device->_performanceStatistics.shaderCompilation.shaderLibraryFromCache, startTime);
} else {
mvkLib->setEntryPointName(pContext->options.entryPointName);
pContext->markAllAttributesAndResourcesUsed();
pContext->markAllInputsAndResourcesUsed();
}
return mvkLib ? mvkLib->getMTLFunction(pSpecializationInfo, this) : MVKMTLFunctionNull;

View File

@ -104,10 +104,11 @@ MVK_PUBLIC_SYMBOL SPIRVToMSLConversionOptions::SPIRVToMSLConversionOptions() {
#endif
}
MVK_PUBLIC_SYMBOL bool MSLVertexAttribute::matches(const MSLVertexAttribute& other) const {
if (vertexAttribute.location != other.vertexAttribute.location) { return false; }
if (vertexAttribute.format != other.vertexAttribute.format) { return false; }
if (vertexAttribute.builtin != other.vertexAttribute.builtin) { return false; }
// Returns whether the other shader input matches this one: all corresponding
// elements (location, format, builtin, vecsize, and binding) must be equal;
// isUsedByShader is deliberately excluded from the comparison.
MVK_PUBLIC_SYMBOL bool mvk::MSLShaderInput::matches(const mvk::MSLShaderInput& other) const {
if (shaderInput.location != other.shaderInput.location) { return false; }
if (shaderInput.format != other.shaderInput.format) { return false; }
if (shaderInput.builtin != other.shaderInput.builtin) { return false; }
// vecsize participates so fragment inputs padded to the vertex output's
// component count are distinguished in the pipeline cache.
if (shaderInput.vecsize != other.shaderInput.vecsize) { return false; }
if (binding != other.binding) { return false; }
return true;
}
@ -164,26 +165,23 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConversionConfiguration::stageSupportsVertexAtt
}
// Check them all in case inactive VA's duplicate locations used by active VA's.
MVK_PUBLIC_SYMBOL bool SPIRVToMSLConversionConfiguration::isVertexAttributeLocationUsed(uint32_t location) const {
for (auto& va : vertexAttributes) {
if ((va.vertexAttribute.location == location) && va.isUsedByShader) { return true; }
// Returns whether any shader input at the specified location is marked as
// used by the shader. Scans all inputs, since an inactive input may share a
// location with an active one.
MVK_PUBLIC_SYMBOL bool SPIRVToMSLConversionConfiguration::isShaderInputLocationUsed(uint32_t location) const {
for (auto& si : shaderInputs) {
if ((si.shaderInput.location == location) && si.isUsedByShader) { return true; }
}
return false;
}
MVK_PUBLIC_SYMBOL uint32_t SPIRVToMSLConversionConfiguration::countVertexAttributesAt(uint32_t binding) const {
uint32_t vaCnt = 0;
for (auto& va : vertexAttributes) {
if ((va.binding == binding) && va.isUsedByShader) { vaCnt++; }
MVK_PUBLIC_SYMBOL uint32_t SPIRVToMSLConversionConfiguration::countShaderInputsAt(uint32_t binding) const {
uint32_t siCnt = 0;
for (auto& si : shaderInputs) {
if ((si.binding == binding) && si.isUsedByShader) { siCnt++; }
}
return vaCnt;
return siCnt;
}
MVK_PUBLIC_SYMBOL void SPIRVToMSLConversionConfiguration::markAllAttributesAndResourcesUsed() {
if (stageSupportsVertexAttributes()) {
for (auto& va : vertexAttributes) { va.isUsedByShader = true; }
}
// Marks every shader input and every resource binding in this configuration
// as used by the shader.
MVK_PUBLIC_SYMBOL void SPIRVToMSLConversionConfiguration::markAllInputsAndResourcesUsed() {
for (auto& si : shaderInputs) { si.isUsedByShader = true; }
for (auto& rb : resourceBindings) { rb.isUsedByShader = true; }
}
@ -191,10 +189,8 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConversionConfiguration::matches(const SPIRVToM
if ( !options.matches(other.options) ) { return false; }
if (stageSupportsVertexAttributes()) {
for (const auto& va : vertexAttributes) {
if (va.isUsedByShader && !containsMatching(other.vertexAttributes, va)) { return false; }
}
for (const auto& si : shaderInputs) {
if (si.isUsedByShader && !containsMatching(other.shaderInputs, si)) { return false; }
}
for (const auto& rb : resourceBindings) {
@ -207,12 +203,10 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConversionConfiguration::matches(const SPIRVToM
MVK_PUBLIC_SYMBOL void SPIRVToMSLConversionConfiguration::alignWith(const SPIRVToMSLConversionConfiguration& srcContext) {
if (stageSupportsVertexAttributes()) {
for (auto& va : vertexAttributes) {
va.isUsedByShader = false;
for (auto& srcVA : srcContext.vertexAttributes) {
if (va.matches(srcVA)) { va.isUsedByShader = srcVA.isUsedByShader; }
}
for (auto& si : shaderInputs) {
si.isUsedByShader = false;
for (auto& srcSI : srcContext.shaderInputs) {
if (si.matches(srcSI)) { si.isUsedByShader = srcSI.isUsedByShader; }
}
}
@ -283,11 +277,9 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverter::convert(SPIRVToMSLConversionConfigur
scOpts.vertex.flip_vert_y = context.options.shouldFlipVertexY;
pMSLCompiler->set_common_options(scOpts);
// Add vertex attributes
if (context.stageSupportsVertexAttributes()) {
for (auto& va : context.vertexAttributes) {
pMSLCompiler->add_msl_vertex_attribute(va.vertexAttribute);
}
// Add shader inputs
for (auto& si : context.shaderInputs) {
pMSLCompiler->add_msl_shader_input(si.shaderInput);
}
// Add resource bindings and hardcoded constexpr samplers
@ -327,10 +319,8 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverter::convert(SPIRVToMSLConversionConfigur
_shaderConversionResults.needsInputThreadgroupMem = pMSLCompiler && pMSLCompiler->needs_input_threadgroup_mem();
_shaderConversionResults.needsDispatchBaseBuffer = pMSLCompiler && pMSLCompiler->needs_dispatch_base_buffer();
if (context.stageSupportsVertexAttributes()) {
for (auto& ctxVA : context.vertexAttributes) {
ctxVA.isUsedByShader = pMSLCompiler->is_msl_vertex_attribute_used(ctxVA.vertexAttribute.location);
}
for (auto& ctxSI : context.shaderInputs) {
ctxSI.isUsedByShader = pMSLCompiler->is_msl_shader_input_used(ctxSI.shaderInput.location);
}
for (auto& ctxRB : context.resourceBindings) {
ctxRB.isUsedByShader = pMSLCompiler->is_msl_resource_binding_used(ctxRB.resourceBinding.stage,

View File

@ -68,11 +68,11 @@ namespace mvk {
* shader conversion configurations to be compared only against the attributes that are
* actually used by the shader.
*
* THIS STRUCT IS STREAMED OUT AS PART OF THE PIEPLINE CACHE.
* THIS STRUCT IS STREAMED OUT AS PART OF THE PIPELINE CACHE.
* CHANGES TO THIS STRUCT SHOULD BE CAPTURED IN THE STREAMING LOGIC OF THE PIPELINE CACHE.
*/
typedef struct MSLVertexAttribute {
SPIRV_CROSS_NAMESPACE::MSLVertexAttr vertexAttribute;
typedef struct MSLShaderInput {
SPIRV_CROSS_NAMESPACE::MSLShaderInput shaderInput;
uint32_t binding = 0;
bool isUsedByShader = false;
@ -81,9 +81,9 @@ namespace mvk {
* Returns whether the specified shader input matches this one.
* It does if all corresponding elements except isUsedByShader are equal.
*/
bool matches(const MSLVertexAttribute& other) const;
bool matches(const MSLShaderInput& other) const;
} MSLVertexAttribute;
} MSLShaderInput;
/**
* Matches the binding index of a MSL resource for a binding within a descriptor set.
@ -127,23 +127,23 @@ namespace mvk {
*/
typedef struct SPIRVToMSLConversionConfiguration {
SPIRVToMSLConversionOptions options;
std::vector<MSLVertexAttribute> vertexAttributes;
std::vector<MSLShaderInput> shaderInputs;
std::vector<MSLResourceBinding> resourceBindings;
/** Returns whether the pipeline stage being converted supports vertex attributes. */
bool stageSupportsVertexAttributes() const;
/** Returns whether the vertex attribute at the specified location is used by the shader. */
bool isVertexAttributeLocationUsed(uint32_t location) const;
/** Returns whether the shader input variable at the specified location is used by the shader. */
bool isShaderInputLocationUsed(uint32_t location) const;
/** Returns the number of vertex attributes bound to the specified Vulkan buffer binding, and used by the shader. */
uint32_t countVertexAttributesAt(uint32_t binding) const;
/** Returns the number of shader input variables bound to the specified Vulkan buffer binding, and used by the shader. */
uint32_t countShaderInputsAt(uint32_t binding) const;
/** Returns whether the vertex buffer at the specified Vulkan binding is used by the shader. */
bool isVertexBufferUsed(uint32_t binding) const { return countVertexAttributesAt(binding) > 0; }
bool isVertexBufferUsed(uint32_t binding) const { return countShaderInputsAt(binding) > 0; }
/** Marks all vertex attributes and resources as being used by the shader. */
void markAllAttributesAndResourcesUsed();
/** Marks all input variables and resources as being used by the shader. */
void markAllInputsAndResourcesUsed();
/**
* Returns whether this configuration matches the other context. It does if the