Fix vertex buffer binding counts when establishing implicit buffer binding indexes.
This patch fixes a regression caused by 1b6b8bc9, when the implicit buffers were moved to the top of the Metal buffer index range. It fixes vertex buffer binding counts when binding indexes are not consecutive, or when additional synthetic buffer bindings are required to accommodate vertex attributes that fall outside the vertex buffer stride values. It also takes into consideration that the app may bind more vertex attribute buffers than a pipeline consumes, and does not allow those extra bindings to overwrite any implicit buffers sent to the shader. MVKResourcesCommandEncoderState::encodeBinding() now clears the binding dirty flag before calling the lambda function, allowing the operation to override this and leave the binding marked dirty. Tessellation vertex buffer indexes are now derived from the platform's maximum vertex stage buffer count, instead of being hardcoded, to preemptively avoid conflicts with implicit buffers should the platform counts ever change.
This commit is contained in:
parent
27aaaf50c1
commit
866c0dc8eb
@ -30,6 +30,7 @@ Released TBD
|
||||
used by a subsequent pipeline that does not use push constants.
|
||||
- Fix error on some Apple GPU's where a `vkCmdTimestampQuery()` after a renderpass was
|
||||
writing timestamp before renderpass activity was complete.
|
||||
- Fix regression error in vertex buffer binding counts when establishing implicit buffers binding indexes.
|
||||
- Work around zombie memory bug in Intel Iris Plus Graphics driver when repeatedly retrieving GPU counter sets.
|
||||
- Update to latest SPIRV-Cross:
|
||||
- MSL: Emit interface block members of array length 1 as arrays instead of scalars.
|
||||
|
@ -193,7 +193,7 @@ void MVKCmdDraw::encode(MVKCommandEncoder* cmdEncoder) {
|
||||
if (pipeline->needsVertexOutputBuffer()) {
|
||||
[mtlTessCtlEncoder setBuffer: vtxOutBuff->_mtlBuffer
|
||||
offset: vtxOutBuff->_offset
|
||||
atIndex: kMVKTessCtlInputBufferIndex];
|
||||
atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessCtlInputBufferBinding)];
|
||||
}
|
||||
|
||||
NSUInteger sgSize = pipeline->getTessControlStageState().threadExecutionWidth;
|
||||
@ -221,16 +221,16 @@ void MVKCmdDraw::encode(MVKCommandEncoder* cmdEncoder) {
|
||||
if (pipeline->needsTessCtlOutputBuffer()) {
|
||||
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcOutBuff->_mtlBuffer
|
||||
offset: tcOutBuff->_offset
|
||||
atIndex: kMVKTessEvalInputBufferIndex];
|
||||
atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalInputBufferBinding)];
|
||||
}
|
||||
if (pipeline->needsTessCtlPatchOutputBuffer()) {
|
||||
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcPatchOutBuff->_mtlBuffer
|
||||
offset: tcPatchOutBuff->_offset
|
||||
atIndex: kMVKTessEvalPatchInputBufferIndex];
|
||||
atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalPatchInputBufferBinding)];
|
||||
}
|
||||
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcLevelBuff->_mtlBuffer
|
||||
offset: tcLevelBuff->_offset
|
||||
atIndex: kMVKTessEvalLevelBufferIndex];
|
||||
atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalLevelBufferBinding)];
|
||||
[cmdEncoder->_mtlRenderEncoder setTessellationFactorBuffer: tcLevelBuff->_mtlBuffer
|
||||
offset: tcLevelBuff->_offset
|
||||
instanceStride: 0];
|
||||
@ -395,7 +395,7 @@ void MVKCmdDrawIndexed::encode(MVKCommandEncoder* cmdEncoder) {
|
||||
if (pipeline->needsVertexOutputBuffer()) {
|
||||
[mtlTessCtlEncoder setBuffer: vtxOutBuff->_mtlBuffer
|
||||
offset: vtxOutBuff->_offset
|
||||
atIndex: kMVKTessCtlInputBufferIndex];
|
||||
atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessCtlInputBufferBinding)];
|
||||
}
|
||||
// The vertex shader produced output in the correct order, so there's no need to use
|
||||
// an index buffer here.
|
||||
@ -424,16 +424,16 @@ void MVKCmdDrawIndexed::encode(MVKCommandEncoder* cmdEncoder) {
|
||||
if (pipeline->needsTessCtlOutputBuffer()) {
|
||||
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcOutBuff->_mtlBuffer
|
||||
offset: tcOutBuff->_offset
|
||||
atIndex: kMVKTessEvalInputBufferIndex];
|
||||
atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalInputBufferBinding)];
|
||||
}
|
||||
if (pipeline->needsTessCtlPatchOutputBuffer()) {
|
||||
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcPatchOutBuff->_mtlBuffer
|
||||
offset: tcPatchOutBuff->_offset
|
||||
atIndex: kMVKTessEvalPatchInputBufferIndex];
|
||||
atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalPatchInputBufferBinding)];
|
||||
}
|
||||
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcLevelBuff->_mtlBuffer
|
||||
offset: tcLevelBuff->_offset
|
||||
atIndex: kMVKTessEvalLevelBufferIndex];
|
||||
atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalLevelBufferBinding)];
|
||||
[cmdEncoder->_mtlRenderEncoder setTessellationFactorBuffer: tcLevelBuff->_mtlBuffer
|
||||
offset: tcLevelBuff->_offset
|
||||
instanceStride: 0];
|
||||
@ -741,7 +741,7 @@ void MVKCmdDrawIndirect::encode(MVKCommandEncoder* cmdEncoder) {
|
||||
if (pipeline->needsVertexOutputBuffer()) {
|
||||
[mtlTessCtlEncoder setBuffer: vtxOutBuff->_mtlBuffer
|
||||
offset: vtxOutBuff->_offset
|
||||
atIndex: kMVKTessCtlInputBufferIndex];
|
||||
atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessCtlInputBufferBinding)];
|
||||
}
|
||||
[mtlTessCtlEncoder dispatchThreadgroupsWithIndirectBuffer: mtlIndBuff
|
||||
indirectBufferOffset: mtlIndBuffOfst
|
||||
@ -757,16 +757,16 @@ void MVKCmdDrawIndirect::encode(MVKCommandEncoder* cmdEncoder) {
|
||||
if (pipeline->needsTessCtlOutputBuffer()) {
|
||||
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcOutBuff->_mtlBuffer
|
||||
offset: tcOutBuff->_offset
|
||||
atIndex: kMVKTessEvalInputBufferIndex];
|
||||
atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalInputBufferBinding)];
|
||||
}
|
||||
if (pipeline->needsTessCtlPatchOutputBuffer()) {
|
||||
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcPatchOutBuff->_mtlBuffer
|
||||
offset: tcPatchOutBuff->_offset
|
||||
atIndex: kMVKTessEvalPatchInputBufferIndex];
|
||||
atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalPatchInputBufferBinding)];
|
||||
}
|
||||
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcLevelBuff->_mtlBuffer
|
||||
offset: tcLevelBuff->_offset
|
||||
atIndex: kMVKTessEvalLevelBufferIndex];
|
||||
atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalLevelBufferBinding)];
|
||||
[cmdEncoder->_mtlRenderEncoder setTessellationFactorBuffer: tcLevelBuff->_mtlBuffer
|
||||
offset: tcLevelBuff->_offset
|
||||
instanceStride: 0];
|
||||
@ -1076,7 +1076,7 @@ void MVKCmdDrawIndexedIndirect::encode(MVKCommandEncoder* cmdEncoder) {
|
||||
if (pipeline->needsVertexOutputBuffer()) {
|
||||
[mtlTessCtlEncoder setBuffer: vtxOutBuff->_mtlBuffer
|
||||
offset: vtxOutBuff->_offset
|
||||
atIndex: kMVKTessCtlInputBufferIndex];
|
||||
atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessCtlInputBufferBinding)];
|
||||
}
|
||||
[mtlTessCtlEncoder dispatchThreadgroupsWithIndirectBuffer: mtlIndBuff
|
||||
indirectBufferOffset: mtlTempIndBuffOfst
|
||||
@ -1092,16 +1092,16 @@ void MVKCmdDrawIndexedIndirect::encode(MVKCommandEncoder* cmdEncoder) {
|
||||
if (pipeline->needsTessCtlOutputBuffer()) {
|
||||
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcOutBuff->_mtlBuffer
|
||||
offset: tcOutBuff->_offset
|
||||
atIndex: kMVKTessEvalInputBufferIndex];
|
||||
atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalInputBufferBinding)];
|
||||
}
|
||||
if (pipeline->needsTessCtlPatchOutputBuffer()) {
|
||||
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcPatchOutBuff->_mtlBuffer
|
||||
offset: tcPatchOutBuff->_offset
|
||||
atIndex: kMVKTessEvalPatchInputBufferIndex];
|
||||
atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalPatchInputBufferBinding)];
|
||||
}
|
||||
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcLevelBuff->_mtlBuffer
|
||||
offset: tcLevelBuff->_offset
|
||||
atIndex: kMVKTessEvalLevelBufferIndex];
|
||||
atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalLevelBufferBinding)];
|
||||
[cmdEncoder->_mtlRenderEncoder setTessellationFactorBuffer: tcLevelBuff->_mtlBuffer
|
||||
offset: tcLevelBuff->_offset
|
||||
instanceStride: 0];
|
||||
|
@ -407,6 +407,7 @@ protected:
|
||||
|
||||
// Template function that executes a lambda expression on each dirty element of
|
||||
// a vector of bindings, and marks the bindings and the vector as no longer dirty.
|
||||
// Clear isDirty flag before operation to allow operation to possibly override.
|
||||
template<class T, class V>
|
||||
void encodeBinding(V& bindings,
|
||||
bool& bindingsDirtyFlag,
|
||||
@ -415,8 +416,9 @@ protected:
|
||||
bindingsDirtyFlag = false;
|
||||
for (auto& b : bindings) {
|
||||
if (b.isDirty) {
|
||||
mtlOperation(_cmdEncoder, b);
|
||||
b.isDirty = false;
|
||||
mtlOperation(_cmdEncoder, b);
|
||||
if (b.isDirty) { bindingsDirtyFlag = true; }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -775,26 +775,33 @@ void MVKGraphicsResourcesCommandEncoderState::encodeImpl(uint32_t stage) {
|
||||
} else if (!forTessellation && stage == kMVKGraphicsStageRasterization) {
|
||||
encodeBindings(kMVKShaderStageVertex, "vertex", fullImageViewSwizzle,
|
||||
[pipeline](MVKCommandEncoder* cmdEncoder, MVKMTLBufferBinding& b)->void {
|
||||
if (b.isInline) {
|
||||
cmdEncoder->setVertexBytes(cmdEncoder->_mtlRenderEncoder,
|
||||
b.mtlBytes,
|
||||
b.size,
|
||||
b.index);
|
||||
} else {
|
||||
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: b.mtlBuffer
|
||||
offset: b.offset
|
||||
atIndex: b.index];
|
||||
// The app may have bound more vertex attribute buffers than used by the pipeline.
|
||||
// We must not bind those extra buffers to the shader because they might overwrite
|
||||
// any implicit buffers used by the pipeline.
|
||||
if (pipeline->isValidVertexBufferIndex(kMVKShaderStageVertex, b.index)) {
|
||||
if (b.isInline) {
|
||||
cmdEncoder->setVertexBytes(cmdEncoder->_mtlRenderEncoder,
|
||||
b.mtlBytes,
|
||||
b.size,
|
||||
b.index);
|
||||
} else {
|
||||
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: b.mtlBuffer
|
||||
offset: b.offset
|
||||
atIndex: b.index];
|
||||
|
||||
// Add any translated vertex bindings for this binding
|
||||
auto xltdVtxBindings = pipeline->getTranslatedVertexBindings();
|
||||
for (auto& xltdBind : xltdVtxBindings) {
|
||||
if (b.index == pipeline->getMetalBufferIndexForVertexAttributeBinding(xltdBind.binding)) {
|
||||
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: b.mtlBuffer
|
||||
offset: b.offset + xltdBind.translationOffset
|
||||
atIndex: pipeline->getMetalBufferIndexForVertexAttributeBinding(xltdBind.translationBinding)];
|
||||
}
|
||||
}
|
||||
}
|
||||
// Add any translated vertex bindings for this binding
|
||||
auto xltdVtxBindings = pipeline->getTranslatedVertexBindings();
|
||||
for (auto& xltdBind : xltdVtxBindings) {
|
||||
if (b.index == pipeline->getMetalBufferIndexForVertexAttributeBinding(xltdBind.binding)) {
|
||||
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: b.mtlBuffer
|
||||
offset: b.offset + xltdBind.translationOffset
|
||||
atIndex: pipeline->getMetalBufferIndexForVertexAttributeBinding(xltdBind.translationBinding)];
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
b.isDirty = true; // We haven't written it out, so leave dirty until next time.
|
||||
}
|
||||
},
|
||||
[](MVKCommandEncoder* cmdEncoder, MVKMTLBufferBinding& b, const MVKArrayRef<uint32_t> s)->void {
|
||||
cmdEncoder->setVertexBytes(cmdEncoder->_mtlRenderEncoder,
|
||||
|
@ -112,13 +112,13 @@ protected:
|
||||
#pragma mark -
|
||||
#pragma mark MVKPipeline
|
||||
|
||||
static const uint32_t kMVKTessCtlInputBufferIndex = 30;
|
||||
static const uint32_t kMVKTessCtlNumReservedBuffers = 1;
|
||||
static const uint32_t kMVKTessCtlInputBufferBinding = 0;
|
||||
|
||||
static const uint32_t kMVKTessEvalInputBufferIndex = 30;
|
||||
static const uint32_t kMVKTessEvalPatchInputBufferIndex = 29;
|
||||
static const uint32_t kMVKTessEvalLevelBufferIndex = 28;
|
||||
static const uint32_t kMVKTessEvalNumReservedBuffers = 3;
|
||||
static const uint32_t kMVKTessEvalInputBufferBinding = 0;
|
||||
static const uint32_t kMVKTessEvalPatchInputBufferBinding = 1;
|
||||
static const uint32_t kMVKTessEvalLevelBufferBinding = 2;
|
||||
|
||||
/** Represents an abstract Vulkan pipeline. */
|
||||
class MVKPipeline : public MVKVulkanAPIDeviceObject {
|
||||
@ -259,6 +259,13 @@ public:
|
||||
/** Returns whether this pipeline has custom sample positions enabled. */
|
||||
bool isUsingCustomSamplePositions() { return _isUsingCustomSamplePositions; }
|
||||
|
||||
/**
|
||||
* Returns whether the MTLBuffer vertex shader buffer index is valid for a stage of this pipeline.
|
||||
* It is if it is a descriptor binding within the descriptor binding range,
|
||||
* or a vertex attribute binding above any implicit buffer bindings.
|
||||
*/
|
||||
bool isValidVertexBufferIndex(MVKShaderStage stage, uint32_t mtlBufferIndex);
|
||||
|
||||
/** Returns the custom samples used by this pipeline. */
|
||||
MVKArrayRef<MTLSamplePosition> getCustomSamplePositions() { return _customSamplePositions.contents(); }
|
||||
|
||||
@ -293,6 +300,7 @@ protected:
|
||||
void initCustomSamplePositions(const VkGraphicsPipelineCreateInfo* pCreateInfo);
|
||||
void initMTLRenderPipelineState(const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData);
|
||||
void initShaderConversionConfig(SPIRVToMSLConversionConfiguration& shaderConfig, const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData);
|
||||
void initReservedVertexAttributeBufferCount(const VkGraphicsPipelineCreateInfo* pCreateInfo);
|
||||
void addVertexInputToShaderConversionConfig(SPIRVToMSLConversionConfiguration& shaderConfig, const VkGraphicsPipelineCreateInfo* pCreateInfo);
|
||||
void addPrevStageOutputToShaderConversionConfig(SPIRVToMSLConversionConfiguration& shaderConfig, SPIRVShaderOutputs& outputs);
|
||||
MTLRenderPipelineDescriptor* newMTLRenderPipelineDescriptor(const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData);
|
||||
@ -313,8 +321,7 @@ protected:
|
||||
bool isRasterizationDisabled(const VkGraphicsPipelineCreateInfo* pCreateInfo);
|
||||
bool verifyImplicitBuffer(bool needsBuffer, MVKShaderImplicitRezBinding& index, MVKShaderStage stage, const char* name);
|
||||
uint32_t getTranslatedVertexBinding(uint32_t binding, uint32_t translationOffset, uint32_t maxBinding);
|
||||
uint32_t getImplicitBufferIndex(const VkGraphicsPipelineCreateInfo* pCreateInfo, MVKShaderStage stage, uint32_t bufferIndexOffset);
|
||||
uint32_t getReservedBufferCount(const VkGraphicsPipelineCreateInfo* pCreateInfo, MVKShaderStage stage);
|
||||
uint32_t getImplicitBufferIndex(MVKShaderStage stage, uint32_t bufferIndexOffset);
|
||||
|
||||
const VkPipelineShaderStageCreateInfo* _pVertexSS = nullptr;
|
||||
const VkPipelineShaderStageCreateInfo* _pTessCtlSS = nullptr;
|
||||
@ -351,6 +358,7 @@ protected:
|
||||
|
||||
float _blendConstants[4] = { 0.0, 0.0, 0.0, 1.0 };
|
||||
uint32_t _outputControlPointCount;
|
||||
MVKShaderImplicitRezBinding _reservedVertexAttributeBufferCount;
|
||||
MVKShaderImplicitRezBinding _viewRangeBufferIndex;
|
||||
MVKShaderImplicitRezBinding _outputBufferIndex;
|
||||
uint32_t _tessCtlPatchOutputBufferIndex = 0;
|
||||
|
@ -869,7 +869,7 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::newMTLTessRasterStageDescripto
|
||||
}
|
||||
innerLoc = location;
|
||||
}
|
||||
plDesc.vertexDescriptor.attributes[location].bufferIndex = kMVKTessEvalLevelBufferIndex;
|
||||
plDesc.vertexDescriptor.attributes[location].bufferIndex = getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalLevelBufferBinding);
|
||||
if (reflectData.patchKind == spv::ExecutionModeTriangles || output.builtin == spv::BuiltInTessLevelOuter) {
|
||||
plDesc.vertexDescriptor.attributes[location].offset = 0;
|
||||
plDesc.vertexDescriptor.attributes[location].format = MTLVertexFormatHalf4; // FIXME Should use Float4
|
||||
@ -879,7 +879,7 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::newMTLTessRasterStageDescripto
|
||||
}
|
||||
} else if (output.perPatch) {
|
||||
patchOffset = (uint32_t)mvkAlignByteCount(patchOffset, getShaderOutputAlignment(output));
|
||||
plDesc.vertexDescriptor.attributes[output.location].bufferIndex = kMVKTessEvalPatchInputBufferIndex;
|
||||
plDesc.vertexDescriptor.attributes[output.location].bufferIndex = getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalPatchInputBufferBinding);
|
||||
plDesc.vertexDescriptor.attributes[output.location].format = getPixelFormats()->getMTLVertexFormat(mvkFormatFromOutput(output));
|
||||
plDesc.vertexDescriptor.attributes[output.location].offset = patchOffset;
|
||||
patchOffset += getShaderOutputSize(output);
|
||||
@ -887,7 +887,7 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::newMTLTessRasterStageDescripto
|
||||
usedPerPatch = true;
|
||||
} else {
|
||||
offset = (uint32_t)mvkAlignByteCount(offset, getShaderOutputAlignment(output));
|
||||
plDesc.vertexDescriptor.attributes[output.location].bufferIndex = kMVKTessEvalInputBufferIndex;
|
||||
plDesc.vertexDescriptor.attributes[output.location].bufferIndex = getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalInputBufferBinding);
|
||||
plDesc.vertexDescriptor.attributes[output.location].format = getPixelFormats()->getMTLVertexFormat(mvkFormatFromOutput(output));
|
||||
plDesc.vertexDescriptor.attributes[output.location].offset = offset;
|
||||
offset += getShaderOutputSize(output);
|
||||
@ -896,16 +896,19 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::newMTLTessRasterStageDescripto
|
||||
}
|
||||
}
|
||||
if (usedPerVertex) {
|
||||
plDesc.vertexDescriptor.layouts[kMVKTessEvalInputBufferIndex].stepFunction = MTLVertexStepFunctionPerPatchControlPoint;
|
||||
plDesc.vertexDescriptor.layouts[kMVKTessEvalInputBufferIndex].stride = mvkAlignByteCount(offset, getShaderOutputAlignment(*firstVertex));
|
||||
uint32_t mtlVBIdx = getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalInputBufferBinding);
|
||||
plDesc.vertexDescriptor.layouts[mtlVBIdx].stepFunction = MTLVertexStepFunctionPerPatchControlPoint;
|
||||
plDesc.vertexDescriptor.layouts[mtlVBIdx].stride = mvkAlignByteCount(offset, getShaderOutputAlignment(*firstVertex));
|
||||
}
|
||||
if (usedPerPatch) {
|
||||
plDesc.vertexDescriptor.layouts[kMVKTessEvalPatchInputBufferIndex].stepFunction = MTLVertexStepFunctionPerPatch;
|
||||
plDesc.vertexDescriptor.layouts[kMVKTessEvalPatchInputBufferIndex].stride = mvkAlignByteCount(patchOffset, getShaderOutputAlignment(*firstPatch));
|
||||
uint32_t mtlVBIdx = getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalPatchInputBufferBinding);
|
||||
plDesc.vertexDescriptor.layouts[mtlVBIdx].stepFunction = MTLVertexStepFunctionPerPatch;
|
||||
plDesc.vertexDescriptor.layouts[mtlVBIdx].stride = mvkAlignByteCount(patchOffset, getShaderOutputAlignment(*firstPatch));
|
||||
}
|
||||
if (outerLoc != (uint32_t)(-1) || innerLoc != (uint32_t)(-1)) {
|
||||
plDesc.vertexDescriptor.layouts[kMVKTessEvalLevelBufferIndex].stepFunction = MTLVertexStepFunctionPerPatch;
|
||||
plDesc.vertexDescriptor.layouts[kMVKTessEvalLevelBufferIndex].stride =
|
||||
uint32_t mtlVBIdx = getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalLevelBufferBinding);
|
||||
plDesc.vertexDescriptor.layouts[mtlVBIdx].stepFunction = MTLVertexStepFunctionPerPatch;
|
||||
plDesc.vertexDescriptor.layouts[mtlVBIdx].stride =
|
||||
reflectData.patchKind == spv::ExecutionModeTriangles ? sizeof(MTLTriangleTessellationFactorsHalf) :
|
||||
sizeof(MTLQuadTessellationFactorsHalf);
|
||||
}
|
||||
@ -1074,7 +1077,7 @@ bool MVKGraphicsPipeline::addTessCtlShaderToPipeline(MTLComputePipelineDescripto
|
||||
shaderConfig.options.entryPointName = _pTessCtlSS->pName;
|
||||
shaderConfig.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageTessCtl];
|
||||
shaderConfig.options.mslOptions.indirect_params_buffer_index = _indirectParamsIndex.stages[kMVKShaderStageTessCtl];
|
||||
shaderConfig.options.mslOptions.shader_input_buffer_index = kMVKTessCtlInputBufferIndex;
|
||||
shaderConfig.options.mslOptions.shader_input_buffer_index = getMetalBufferIndexForVertexAttributeBinding(kMVKTessCtlInputBufferBinding);
|
||||
shaderConfig.options.mslOptions.shader_output_buffer_index = _outputBufferIndex.stages[kMVKShaderStageTessCtl];
|
||||
shaderConfig.options.mslOptions.shader_patch_output_buffer_index = _tessCtlPatchOutputBufferIndex;
|
||||
shaderConfig.options.mslOptions.shader_tess_factor_buffer_index = _tessCtlLevelBufferIndex;
|
||||
@ -1571,16 +1574,17 @@ void MVKGraphicsPipeline::initShaderConversionConfig(SPIRVToMSLConversionConfigu
|
||||
// FIXME: Many of these are optional. We shouldn't set the ones that aren't
|
||||
// present--or at least, we should move the ones that are down to avoid running over
|
||||
// the limit of available buffers. But we can't know that until we compile the shaders.
|
||||
initReservedVertexAttributeBufferCount(pCreateInfo);
|
||||
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
|
||||
MVKShaderStage stage = (MVKShaderStage)i;
|
||||
_dynamicOffsetBufferIndex.stages[stage] = getImplicitBufferIndex(pCreateInfo, stage, 0);
|
||||
_bufferSizeBufferIndex.stages[stage] = getImplicitBufferIndex(pCreateInfo, stage, 1);
|
||||
_swizzleBufferIndex.stages[stage] = getImplicitBufferIndex(pCreateInfo, stage, 2);
|
||||
_indirectParamsIndex.stages[stage] = getImplicitBufferIndex(pCreateInfo, stage, 3);
|
||||
_outputBufferIndex.stages[stage] = getImplicitBufferIndex(pCreateInfo, stage, 4);
|
||||
_dynamicOffsetBufferIndex.stages[stage] = getImplicitBufferIndex(stage, 0);
|
||||
_bufferSizeBufferIndex.stages[stage] = getImplicitBufferIndex(stage, 1);
|
||||
_swizzleBufferIndex.stages[stage] = getImplicitBufferIndex(stage, 2);
|
||||
_indirectParamsIndex.stages[stage] = getImplicitBufferIndex(stage, 3);
|
||||
_outputBufferIndex.stages[stage] = getImplicitBufferIndex(stage, 4);
|
||||
if (stage == kMVKShaderStageTessCtl) {
|
||||
_tessCtlPatchOutputBufferIndex = getImplicitBufferIndex(pCreateInfo, stage, 5);
|
||||
_tessCtlLevelBufferIndex = getImplicitBufferIndex(pCreateInfo, stage, 6);
|
||||
_tessCtlPatchOutputBufferIndex = getImplicitBufferIndex(stage, 5);
|
||||
_tessCtlLevelBufferIndex = getImplicitBufferIndex(stage, 6);
|
||||
}
|
||||
}
|
||||
// Since we currently can't use multiview with tessellation or geometry shaders,
|
||||
@ -1630,17 +1634,48 @@ void MVKGraphicsPipeline::initShaderConversionConfig(SPIRVToMSLConversionConfigu
|
||||
shaderConfig.options.numTessControlPoints = reflectData.numControlPoints;
|
||||
}
|
||||
|
||||
uint32_t MVKGraphicsPipeline::getImplicitBufferIndex(const VkGraphicsPipelineCreateInfo* pCreateInfo, MVKShaderStage stage, uint32_t bufferIndexOffset) {
|
||||
return _device->_pMetalFeatures->maxPerStageBufferCount - (getReservedBufferCount(pCreateInfo, stage) + bufferIndexOffset + 1);
|
||||
uint32_t MVKGraphicsPipeline::getImplicitBufferIndex(MVKShaderStage stage, uint32_t bufferIndexOffset) {
|
||||
return getMetalBufferIndexForVertexAttributeBinding(_reservedVertexAttributeBufferCount.stages[stage] + bufferIndexOffset);
|
||||
}
|
||||
|
||||
uint32_t MVKGraphicsPipeline::getReservedBufferCount(const VkGraphicsPipelineCreateInfo* pCreateInfo, MVKShaderStage stage) {
|
||||
switch (stage) {
|
||||
case kMVKShaderStageVertex: return pCreateInfo->pVertexInputState->vertexBindingDescriptionCount;
|
||||
case kMVKShaderStageTessCtl: return kMVKTessCtlNumReservedBuffers;
|
||||
case kMVKShaderStageTessEval: return kMVKTessEvalNumReservedBuffers;
|
||||
default: return 0;
|
||||
// Set the number of vertex attribute buffers consumed by this pipeline at each stage.
|
||||
// Any implicit buffers needed by this pipeline will be assigned indexes below the range
|
||||
// defined by this count below the max number of Metal buffer bindings per stage.
|
||||
// Must be called before any calls to getImplicitBufferIndex().
|
||||
void MVKGraphicsPipeline::initReservedVertexAttributeBufferCount(const VkGraphicsPipelineCreateInfo* pCreateInfo) {
|
||||
int32_t maxBinding = -1;
|
||||
uint32_t xltdBuffCnt = 0;
|
||||
|
||||
const VkPipelineVertexInputStateCreateInfo* pVI = pCreateInfo->pVertexInputState;
|
||||
uint32_t vaCnt = pVI->vertexAttributeDescriptionCount;
|
||||
uint32_t vbCnt = pVI->vertexBindingDescriptionCount;
|
||||
|
||||
// Determine the highest binding number used by the vertex buffers
|
||||
for (uint32_t vbIdx = 0; vbIdx < vbCnt; vbIdx++) {
|
||||
const VkVertexInputBindingDescription* pVKVB = &pVI->pVertexBindingDescriptions[vbIdx];
|
||||
maxBinding = max<int32_t>(pVKVB->binding, maxBinding);
|
||||
|
||||
// Iterate through the vertex attributes and determine if any need a synthetic binding buffer to
|
||||
// accommodate offsets that are outside the stride, which Vulkan supports, but Metal does not.
|
||||
// This value will be worst case, as some synthetic buffers may end up being shared.
|
||||
for (uint32_t vaIdx = 0; vaIdx < vaCnt; vaIdx++) {
|
||||
const VkVertexInputAttributeDescription* pVKVA = &pVI->pVertexAttributeDescriptions[vaIdx];
|
||||
if ((pVKVA->binding == pVKVB->binding) && (pVKVA->offset + getPixelFormats()->getBytesPerBlock(pVKVA->format) > pVKVB->stride)) {
|
||||
xltdBuffCnt++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// The number of reserved bindings we need for the vertex stage is determined from the largest vertex
|
||||
// attribute binding number, plus any synthetic buffer bindings created to support translated offsets.
|
||||
mvkClear<uint32_t>(_reservedVertexAttributeBufferCount.stages, kMVKShaderStageCount);
|
||||
_reservedVertexAttributeBufferCount.stages[kMVKShaderStageVertex] = (maxBinding + 1) + xltdBuffCnt;
|
||||
_reservedVertexAttributeBufferCount.stages[kMVKShaderStageTessCtl] = kMVKTessCtlNumReservedBuffers;
|
||||
_reservedVertexAttributeBufferCount.stages[kMVKShaderStageTessEval] = kMVKTessEvalNumReservedBuffers;
|
||||
}
|
||||
|
||||
bool MVKGraphicsPipeline::isValidVertexBufferIndex(MVKShaderStage stage, uint32_t mtlBufferIndex) {
|
||||
return mtlBufferIndex < _descriptorBufferCounts.stages[stage] || mtlBufferIndex > getImplicitBufferIndex(stage, 0);
|
||||
}
|
||||
|
||||
// Initializes the vertex attributes in a shader conversion configuration.
|
||||
|
Loading…
x
Reference in New Issue
Block a user