Move implicit buffers to top of Metal buffer index range.
To better support pipeline layout compatibility between pipelines with differing quantities of descriptor sets, move the buffer indexes used by implicit buffers to the top end of the Metal buffer index range, below the vertex and tessellation buffers. MVKPipeline now calculates implicit buffer indexes based on the vertex and tessellation buffers required by the pipeline, instead of on the descriptors in MVKPipelineLayout. MVKPipeline also tracks the buffer index counts consumed by MVKPipelineLayout, so it can validate that room remains for the implicit buffers.
This commit is contained in:
parent 2a17f757e1
commit 1b6b8bc992
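Before the diff, here is a minimal standalone sketch (not MoltenVK code) of the indexing scheme this change introduces: implicit buffer indexes are assigned from the top of the Metal per-stage buffer range, below any reserved vertex or tessellation buffers, and a pipeline is rejected only if an implicit index collides with the buffer indexes already consumed by the layout's descriptor sets. The per-stage limit of 31 bindings, the 2 reserved vertex buffers, and the descriptor buffer count of 5 are illustrative assumptions, not values taken from this commit.

// Standalone sketch of the new top-down indexing scheme (not MoltenVK source).
#include <cstdint>
#include <cstdio>

static const uint32_t kMaxPerStageBufferCount = 31;  // illustrative Metal per-stage buffer limit
static const uint32_t kReservedVertexBuffers  = 2;   // illustrative vertex attribute buffer bindings

// Implicit buffers count down from the top of the index range,
// just below the reserved vertex/tessellation buffers.
static uint32_t implicitBufferIndex(uint32_t reservedBufferCount, uint32_t bufferIndexOffset) {
	return kMaxPerStageBufferCount - (reservedBufferCount + bufferIndexOffset + 1);
}

int main() {
	uint32_t descriptorBufferCount = 5;  // buffers consumed by descriptor sets (illustrative)
	const char* names[] = { "dynamic offset", "buffer size", "swizzle", "indirect params", "output" };
	for (uint32_t offset = 0; offset < 5; offset++) {
		uint32_t idx = implicitBufferIndex(kReservedVertexBuffers, offset);
		// Validation only fails if an implicit index dips into the range
		// already occupied by the descriptor set buffers.
		bool collides = idx < descriptorBufferCount;
		printf("%-15s -> buffer(%u)%s\n", names[offset], idx, collides ? "  (no free slot)" : "");
	}
	return 0;
}

The corresponding calculation in this commit appears in MVKGraphicsPipeline::getImplicitBufferIndex() and MVKComputePipeline::getImplicitBufferIndex() in the diff below.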
@@ -75,30 +75,6 @@ public:
 	/** Populates the specified shader conversion config. */
 	void populateShaderConversionConfig(SPIRVToMSLConversionConfiguration& shaderConfig);
 
-	/** Returns the current swizzle buffer bindings. */
-	const MVKShaderImplicitRezBinding& getSwizzleBufferIndex() { return _swizzleBufferIndex; }
-
-	/** Returns the current buffer size buffer bindings. */
-	const MVKShaderImplicitRezBinding& getBufferSizeBufferIndex() { return _bufferSizeBufferIndex; }
-
-	/** Returns the current dynamic buffer offset buffer bindings. */
-	const MVKShaderImplicitRezBinding& getDynamicOffsetBufferIndex() { return _dynamicOffsetBufferIndex; }
-
-	/** Returns the current view range buffer binding for multiview draws. */
-	const MVKShaderImplicitRezBinding& getViewRangeBufferIndex() { return _viewRangeBufferIndex; }
-
-	/** Returns the current indirect parameter buffer bindings. */
-	const MVKShaderImplicitRezBinding& getIndirectParamsIndex() { return _indirectParamsIndex; }
-
-	/** Returns the current captured output buffer bindings. */
-	const MVKShaderImplicitRezBinding& getOutputBufferIndex() { return _outputBufferIndex; }
-
-	/** Returns the current captured per-patch output buffer binding for the tess. control shader. */
-	uint32_t getTessCtlPatchOutputBufferIndex() { return _tessCtlPatchOutputBufferIndex; }
-
-	/** Returns the current tessellation level buffer binding for the tess. control shader. */
-	uint32_t getTessCtlLevelBufferIndex() { return _tessCtlLevelBufferIndex; }
-
 	/** Returns the number of textures in this layout. This is used to calculate the size of the swizzle buffer. */
 	uint32_t getTextureCount() { return _mtlResourceCounts.getMaxTextureIndex(); }
 
@@ -130,14 +106,6 @@ protected:
 	MVKSmallVector<VkPushConstantRange> _pushConstants;
 	MVKShaderResourceBinding _mtlResourceCounts;
 	MVKShaderResourceBinding _pushConstantsMTLResourceIndexes;
-	MVKShaderImplicitRezBinding _swizzleBufferIndex;
-	MVKShaderImplicitRezBinding _bufferSizeBufferIndex;
-	MVKShaderImplicitRezBinding _dynamicOffsetBufferIndex;
-	MVKShaderImplicitRezBinding _viewRangeBufferIndex;
-	MVKShaderImplicitRezBinding _indirectParamsIndex;
-	MVKShaderImplicitRezBinding _outputBufferIndex;
-	uint32_t _tessCtlPatchOutputBufferIndex = 0;
-	uint32_t _tessCtlLevelBufferIndex = 0;
 };
 
 
@@ -198,6 +166,7 @@ protected:
 						MVKShaderStage stage);
 
 	MVKPipelineCache* _pipelineCache;
+	MVKShaderImplicitRezBinding _descriptorBufferCounts;
 	MVKShaderImplicitRezBinding _swizzleBufferIndex;
 	MVKShaderImplicitRezBinding _bufferSizeBufferIndex;
 	MVKShaderImplicitRezBinding _dynamicOffsetBufferIndex;
@@ -338,8 +307,10 @@ protected:
 	void addFragmentOutputToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo);
 	bool isRenderingPoints(const VkGraphicsPipelineCreateInfo* pCreateInfo);
 	bool isRasterizationDisabled(const VkGraphicsPipelineCreateInfo* pCreateInfo);
-	bool verifyImplicitBuffer(bool needsBuffer, MVKShaderImplicitRezBinding& index, MVKShaderStage stage, const char* name, uint32_t reservedBuffers);
+	bool verifyImplicitBuffer(bool needsBuffer, MVKShaderImplicitRezBinding& index, MVKShaderStage stage, const char* name);
 	uint32_t getTranslatedVertexBinding(uint32_t binding, uint32_t translationOffset, uint32_t maxBinding);
+	uint32_t getImplicitBufferIndex(const VkGraphicsPipelineCreateInfo* pCreateInfo, MVKShaderStage stage, uint32_t bufferIndexOffset);
+	uint32_t getReservedBufferCount(const VkGraphicsPipelineCreateInfo* pCreateInfo, MVKShaderStage stage);
 
 	const VkPipelineShaderStageCreateInfo* _pVertexSS = nullptr;
 	const VkPipelineShaderStageCreateInfo* _pTessCtlSS = nullptr;
@@ -434,6 +405,7 @@ public:
 
 protected:
 	MVKMTLFunction getMTLFunction(const VkComputePipelineCreateInfo* pCreateInfo);
+	uint32_t getImplicitBufferIndex(uint32_t bufferIndexOffset);
 
 	id<MTLComputePipelineState> _mtlPipelineState;
 	MVKSmallVector<MVKMTLArgumentEncoder> _mtlArgumentEncoders;
@@ -163,26 +163,6 @@ MVKPipelineLayout::MVKPipelineLayout(MVKDevice* device,
 		_dslMTLResourceIndexOffsets.push_back(adjstdDSLRezOfsts);
 		_mtlResourceCounts += adjstdDSLRezCnts;
 	}
-
-	// Set implicit buffer indices
-	// FIXME: Many of these are optional. We shouldn't set the ones that aren't
-	// present--or at least, we should move the ones that are down to avoid running over
-	// the limit of available buffers. But we can't know that until we compile the shaders.
-	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
-		_dynamicOffsetBufferIndex.stages[i] = _mtlResourceCounts.stages[i].bufferIndex + 1;
-		_bufferSizeBufferIndex.stages[i] = _dynamicOffsetBufferIndex.stages[i] + 1;
-		_swizzleBufferIndex.stages[i] = _bufferSizeBufferIndex.stages[i] + 1;
-		_indirectParamsIndex.stages[i] = _swizzleBufferIndex.stages[i] + 1;
-		_outputBufferIndex.stages[i] = _indirectParamsIndex.stages[i] + 1;
-		if (i == kMVKShaderStageTessCtl) {
-			_tessCtlPatchOutputBufferIndex = _outputBufferIndex.stages[i] + 1;
-			_tessCtlLevelBufferIndex = _tessCtlPatchOutputBufferIndex + 1;
-		}
-	}
-	// Since we currently can't use multiview with tessellation or geometry shaders,
-	// to conserve the number of buffer bindings, use the same bindings for the
-	// view range buffer as for the indirect paramters buffer.
-	_viewRangeBufferIndex = _indirectParamsIndex;
 }
 
 MVKPipelineLayout::~MVKPipelineLayout() {
@@ -227,10 +207,11 @@ MVKPipeline::MVKPipeline(MVKDevice* device, MVKPipelineCache* pipelineCache, MVK
 	_fullImageViewSwizzle(mvkConfig().fullImageViewSwizzle),
 	_descriptorSetCount(layout->getDescriptorSetCount()) {
 
-	// Establish push constant use.
+	// Establish descriptor counts and push constants use.
 	for (uint32_t stage = kMVKShaderStageVertex; stage < kMVKShaderStageCount; stage++) {
-		_stageUsesPushConstants[stage] = layout->stageUsesPushConstants((MVKShaderStage)stage);
+		_descriptorBufferCounts.stages[stage] = layout->_mtlResourceCounts.stages[stage].bufferIndex;
 		_pushConstantsBufferIndex.stages[stage] = layout->_pushConstantsMTLResourceIndexes.stages[stage].bufferIndex;
+		_stageUsesPushConstants[stage] = layout->stageUsesPushConstants((MVKShaderStage)stage);
 	}
 }
 
@@ -904,14 +885,14 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::newMTLTessRasterStageDescripto
 	return plDesc;
 }
 
-bool MVKGraphicsPipeline::verifyImplicitBuffer(bool needsBuffer, MVKShaderImplicitRezBinding& index, MVKShaderStage stage, const char* name, uint32_t reservedBuffers) {
+bool MVKGraphicsPipeline::verifyImplicitBuffer(bool needsBuffer, MVKShaderImplicitRezBinding& index, MVKShaderStage stage, const char* name) {
 	const char* stageNames[] = {
 		"Vertex",
 		"Tessellation control",
 		"Tessellation evaluation",
 		"Fragment"
 	};
-	if (needsBuffer && index.stages[stage] >= _device->_pMetalFeatures->maxPerStageBufferCount - reservedBuffers) {
+	if (needsBuffer && index.stages[stage] < _descriptorBufferCounts.stages[stage]) {
 		setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "%s shader requires %s buffer, but there is no free slot to pass it.", stageNames[stage], name));
 		return false;
 	}
@@ -922,7 +903,6 @@ bool MVKGraphicsPipeline::verifyImplicitBuffer(bool needsBuffer, MVKShaderImplic
 bool MVKGraphicsPipeline::addVertexShaderToPipeline(MTLRenderPipelineDescriptor* plDesc,
 													const VkGraphicsPipelineCreateInfo* pCreateInfo,
 													SPIRVToMSLConversionConfiguration& shaderConfig) {
-	uint32_t vbCnt = pCreateInfo->pVertexInputState->vertexBindingDescriptionCount;
 	shaderConfig.options.entryPointStage = spv::ExecutionModelVertex;
 	shaderConfig.options.entryPointName = _pVertexSS->pName;
 	shaderConfig.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageVertex];
@@ -958,25 +938,25 @@ bool MVKGraphicsPipeline::addVertexShaderToPipeline(MTLRenderPipelineDescriptor*
 	}
 
 	// If we need the swizzle buffer and there's no place to put it, we're in serious trouble.
-	if (!verifyImplicitBuffer(_needsVertexSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageVertex, "swizzle", vbCnt)) {
+	if (!verifyImplicitBuffer(_needsVertexSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageVertex, "swizzle")) {
 		return false;
 	}
 	// Ditto buffer size buffer.
-	if (!verifyImplicitBuffer(_needsVertexBufferSizeBuffer, _bufferSizeBufferIndex, kMVKShaderStageVertex, "buffer size", vbCnt)) {
+	if (!verifyImplicitBuffer(_needsVertexBufferSizeBuffer, _bufferSizeBufferIndex, kMVKShaderStageVertex, "buffer size")) {
 		return false;
 	}
 	// Ditto dynamic offset buffer.
-	if (!verifyImplicitBuffer(_needsVertexDynamicOffsetBuffer, _dynamicOffsetBufferIndex, kMVKShaderStageVertex, "dynamic offset", vbCnt)) {
+	if (!verifyImplicitBuffer(_needsVertexDynamicOffsetBuffer, _dynamicOffsetBufferIndex, kMVKShaderStageVertex, "dynamic offset")) {
 		return false;
 	}
 	// Ditto captured output buffer.
-	if (!verifyImplicitBuffer(_needsVertexOutputBuffer, _outputBufferIndex, kMVKShaderStageVertex, "output", vbCnt)) {
+	if (!verifyImplicitBuffer(_needsVertexOutputBuffer, _outputBufferIndex, kMVKShaderStageVertex, "output")) {
 		return false;
 	}
-	if (!verifyImplicitBuffer(_needsVertexOutputBuffer, _indirectParamsIndex, kMVKShaderStageVertex, "indirect parameters", vbCnt)) {
+	if (!verifyImplicitBuffer(_needsVertexOutputBuffer, _indirectParamsIndex, kMVKShaderStageVertex, "indirect parameters")) {
 		return false;
 	}
-	if (!verifyImplicitBuffer(_needsVertexViewRangeBuffer, _viewRangeBufferIndex, kMVKShaderStageVertex, "view range", vbCnt)) {
+	if (!verifyImplicitBuffer(_needsVertexViewRangeBuffer, _viewRangeBufferIndex, kMVKShaderStageVertex, "view range")) {
 		return false;
 	}
 	return true;
@@ -986,7 +966,6 @@ bool MVKGraphicsPipeline::addVertexShaderToPipeline(MTLRenderPipelineDescriptor*
 bool MVKGraphicsPipeline::addVertexShaderToPipeline(MTLComputePipelineDescriptor* plDesc,
 													const VkGraphicsPipelineCreateInfo* pCreateInfo,
 													SPIRVToMSLConversionConfiguration& shaderConfig) {
-	uint32_t vbCnt = pCreateInfo->pVertexInputState->vertexBindingDescriptionCount;
 	shaderConfig.options.entryPointStage = spv::ExecutionModelVertex;
 	shaderConfig.options.entryPointName = _pVertexSS->pName;
 	shaderConfig.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageVertex];
@@ -1026,22 +1005,22 @@ bool MVKGraphicsPipeline::addVertexShaderToPipeline(MTLComputePipelineDescriptor
 	addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageVertex);
 
 	// If we need the swizzle buffer and there's no place to put it, we're in serious trouble.
-	if (!verifyImplicitBuffer(_needsVertexSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageVertex, "swizzle", vbCnt)) {
+	if (!verifyImplicitBuffer(_needsVertexSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageVertex, "swizzle")) {
 		return false;
 	}
 	// Ditto buffer size buffer.
-	if (!verifyImplicitBuffer(_needsVertexBufferSizeBuffer, _bufferSizeBufferIndex, kMVKShaderStageVertex, "buffer size", vbCnt)) {
+	if (!verifyImplicitBuffer(_needsVertexBufferSizeBuffer, _bufferSizeBufferIndex, kMVKShaderStageVertex, "buffer size")) {
 		return false;
 	}
 	// Ditto dynamic offset buffer.
-	if (!verifyImplicitBuffer(_needsVertexDynamicOffsetBuffer, _dynamicOffsetBufferIndex, kMVKShaderStageVertex, "dynamic offset", vbCnt)) {
+	if (!verifyImplicitBuffer(_needsVertexDynamicOffsetBuffer, _dynamicOffsetBufferIndex, kMVKShaderStageVertex, "dynamic offset")) {
 		return false;
 	}
 	// Ditto captured output buffer.
-	if (!verifyImplicitBuffer(_needsVertexOutputBuffer, _outputBufferIndex, kMVKShaderStageVertex, "output", vbCnt)) {
+	if (!verifyImplicitBuffer(_needsVertexOutputBuffer, _outputBufferIndex, kMVKShaderStageVertex, "output")) {
 		return false;
 	}
-	if (!verifyImplicitBuffer(!shaderConfig.shaderInputs.empty(), _indirectParamsIndex, kMVKShaderStageVertex, "index", vbCnt)) {
+	if (!verifyImplicitBuffer(!shaderConfig.shaderInputs.empty(), _indirectParamsIndex, kMVKShaderStageVertex, "index")) {
 		return false;
 	}
 	return true;
@@ -1084,26 +1063,26 @@ bool MVKGraphicsPipeline::addTessCtlShaderToPipeline(MTLComputePipelineDescripto
 
 	addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageTessCtl);
 
-	if (!verifyImplicitBuffer(_needsTessCtlSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageTessCtl, "swizzle", kMVKTessCtlNumReservedBuffers)) {
+	if (!verifyImplicitBuffer(_needsTessCtlSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageTessCtl, "swizzle")) {
 		return false;
 	}
-	if (!verifyImplicitBuffer(_needsTessCtlBufferSizeBuffer, _bufferSizeBufferIndex, kMVKShaderStageTessCtl, "buffer size", kMVKTessCtlNumReservedBuffers)) {
+	if (!verifyImplicitBuffer(_needsTessCtlBufferSizeBuffer, _bufferSizeBufferIndex, kMVKShaderStageTessCtl, "buffer size")) {
		return false;
 	}
-	if (!verifyImplicitBuffer(_needsTessCtlDynamicOffsetBuffer, _dynamicOffsetBufferIndex, kMVKShaderStageTessCtl, "dynamic offset", kMVKTessCtlNumReservedBuffers)) {
+	if (!verifyImplicitBuffer(_needsTessCtlDynamicOffsetBuffer, _dynamicOffsetBufferIndex, kMVKShaderStageTessCtl, "dynamic offset")) {
 		return false;
 	}
-	if (!verifyImplicitBuffer(true, _indirectParamsIndex, kMVKShaderStageTessCtl, "indirect parameters", kMVKTessCtlNumReservedBuffers)) {
+	if (!verifyImplicitBuffer(true, _indirectParamsIndex, kMVKShaderStageTessCtl, "indirect parameters")) {
 		return false;
 	}
-	if (!verifyImplicitBuffer(_needsTessCtlOutputBuffer, _outputBufferIndex, kMVKShaderStageTessCtl, "per-vertex output", kMVKTessCtlNumReservedBuffers)) {
+	if (!verifyImplicitBuffer(_needsTessCtlOutputBuffer, _outputBufferIndex, kMVKShaderStageTessCtl, "per-vertex output")) {
 		return false;
 	}
-	if (_needsTessCtlPatchOutputBuffer && _tessCtlPatchOutputBufferIndex >= _device->_pMetalFeatures->maxPerStageBufferCount - kMVKTessCtlNumReservedBuffers) {
+	if (_needsTessCtlPatchOutputBuffer && _tessCtlPatchOutputBufferIndex < _descriptorBufferCounts.stages[kMVKShaderStageTessCtl]) {
 		setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "Tessellation control shader requires per-patch output buffer, but there is no free slot to pass it."));
 		return false;
 	}
-	if (_tessCtlLevelBufferIndex >= _device->_pMetalFeatures->maxPerStageBufferCount - kMVKTessCtlNumReservedBuffers) {
+	if (_tessCtlLevelBufferIndex < _descriptorBufferCounts.stages[kMVKShaderStageTessCtl]) {
 		setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "Tessellation control shader requires tessellation level output buffer, but there is no free slot to pass it."));
 		return false;
 	}
@@ -1144,13 +1123,13 @@ bool MVKGraphicsPipeline::addTessEvalShaderToPipeline(MTLRenderPipelineDescripto
 		_pFragmentSS = nullptr;
 	}
 
-	if (!verifyImplicitBuffer(_needsTessEvalSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageTessEval, "swizzle", kMVKTessEvalNumReservedBuffers)) {
+	if (!verifyImplicitBuffer(_needsTessEvalSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageTessEval, "swizzle")) {
 		return false;
 	}
-	if (!verifyImplicitBuffer(_needsTessEvalBufferSizeBuffer, _bufferSizeBufferIndex, kMVKShaderStageTessEval, "buffer size", kMVKTessEvalNumReservedBuffers)) {
+	if (!verifyImplicitBuffer(_needsTessEvalBufferSizeBuffer, _bufferSizeBufferIndex, kMVKShaderStageTessEval, "buffer size")) {
 		return false;
 	}
-	if (!verifyImplicitBuffer(_needsTessEvalDynamicOffsetBuffer, _dynamicOffsetBufferIndex, kMVKShaderStageTessEval, "dynamic offset", kMVKTessEvalNumReservedBuffers)) {
+	if (!verifyImplicitBuffer(_needsTessEvalDynamicOffsetBuffer, _dynamicOffsetBufferIndex, kMVKShaderStageTessEval, "dynamic offset")) {
 		return false;
 	}
 	return true;
@@ -1196,16 +1175,16 @@ bool MVKGraphicsPipeline::addFragmentShaderToPipeline(MTLRenderPipelineDescripto
 
 		addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageFragment);
 
-		if (!verifyImplicitBuffer(_needsFragmentSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageFragment, "swizzle", 0)) {
+		if (!verifyImplicitBuffer(_needsFragmentSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageFragment, "swizzle")) {
 			return false;
 		}
-		if (!verifyImplicitBuffer(_needsFragmentBufferSizeBuffer, _bufferSizeBufferIndex, kMVKShaderStageFragment, "buffer size", 0)) {
+		if (!verifyImplicitBuffer(_needsFragmentBufferSizeBuffer, _bufferSizeBufferIndex, kMVKShaderStageFragment, "buffer size")) {
 			return false;
 		}
-		if (!verifyImplicitBuffer(_needsFragmentDynamicOffsetBuffer, _dynamicOffsetBufferIndex, kMVKShaderStageFragment, "dynamic offset", 0)) {
+		if (!verifyImplicitBuffer(_needsFragmentDynamicOffsetBuffer, _dynamicOffsetBufferIndex, kMVKShaderStageFragment, "dynamic offset")) {
 			return false;
 		}
-		if (!verifyImplicitBuffer(_needsFragmentViewRangeBuffer, _viewRangeBufferIndex, kMVKShaderStageFragment, "view range", 0)) {
+		if (!verifyImplicitBuffer(_needsFragmentViewRangeBuffer, _viewRangeBufferIndex, kMVKShaderStageFragment, "view range")) {
 			return false;
 		}
 	}
@@ -1545,14 +1524,27 @@ void MVKGraphicsPipeline::initShaderConversionConfig(SPIRVToMSLConversionConfigu
 
 	MVKPipelineLayout* layout = (MVKPipelineLayout*)pCreateInfo->layout;
 	layout->populateShaderConversionConfig(shaderConfig);
-	_swizzleBufferIndex = layout->getSwizzleBufferIndex();
-	_bufferSizeBufferIndex = layout->getBufferSizeBufferIndex();
-	_dynamicOffsetBufferIndex = layout->getDynamicOffsetBufferIndex();
-	_indirectParamsIndex = layout->getIndirectParamsIndex();
-	_outputBufferIndex = layout->getOutputBufferIndex();
-	_tessCtlPatchOutputBufferIndex = layout->getTessCtlPatchOutputBufferIndex();
-	_tessCtlLevelBufferIndex = layout->getTessCtlLevelBufferIndex();
-	_viewRangeBufferIndex = layout->getViewRangeBufferIndex();
+
+	// Set implicit buffer indices
+	// FIXME: Many of these are optional. We shouldn't set the ones that aren't
+	// present--or at least, we should move the ones that are down to avoid running over
+	// the limit of available buffers. But we can't know that until we compile the shaders.
+	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
+		MVKShaderStage stage = (MVKShaderStage)i;
+		_dynamicOffsetBufferIndex.stages[stage] = getImplicitBufferIndex(pCreateInfo, stage, 0);
+		_bufferSizeBufferIndex.stages[stage] = getImplicitBufferIndex(pCreateInfo, stage, 1);
+		_swizzleBufferIndex.stages[stage] = getImplicitBufferIndex(pCreateInfo, stage, 2);
+		_indirectParamsIndex.stages[stage] = getImplicitBufferIndex(pCreateInfo, stage, 3);
+		_outputBufferIndex.stages[stage] = getImplicitBufferIndex(pCreateInfo, stage, 4);
+		if (stage == kMVKShaderStageTessCtl) {
+			_tessCtlPatchOutputBufferIndex = getImplicitBufferIndex(pCreateInfo, stage, 5);
+			_tessCtlLevelBufferIndex = getImplicitBufferIndex(pCreateInfo, stage, 6);
+		}
+	}
+	// Since we currently can't use multiview with tessellation or geometry shaders,
+	// to conserve the number of buffer bindings, use the same bindings for the
+	// view range buffer as for the indirect paramters buffer.
+	_viewRangeBufferIndex = _indirectParamsIndex;
 
 	MVKRenderPass* mvkRendPass = (MVKRenderPass*)pCreateInfo->renderPass;
 	MVKRenderSubpass* mvkRenderSubpass = mvkRendPass->getSubpass(pCreateInfo->subpass);
@@ -1597,6 +1589,19 @@ void MVKGraphicsPipeline::initShaderConversionConfig(SPIRVToMSLConversionConfigu
 	shaderConfig.options.numTessControlPoints = reflectData.numControlPoints;
 }
 
+uint32_t MVKGraphicsPipeline::getImplicitBufferIndex(const VkGraphicsPipelineCreateInfo* pCreateInfo, MVKShaderStage stage, uint32_t bufferIndexOffset) {
+	return _device->_pMetalFeatures->maxPerStageBufferCount - (getReservedBufferCount(pCreateInfo, stage) + bufferIndexOffset + 1);
+}
+
+uint32_t MVKGraphicsPipeline::getReservedBufferCount(const VkGraphicsPipelineCreateInfo* pCreateInfo, MVKShaderStage stage) {
+	switch (stage) {
+		case kMVKShaderStageVertex: return pCreateInfo->pVertexInputState->vertexBindingDescriptionCount;
+		case kMVKShaderStageTessCtl: return kMVKTessCtlNumReservedBuffers;
+		case kMVKShaderStageTessEval: return kMVKTessEvalNumReservedBuffers;
+		default: return 0;
+	}
+}
+
 // Initializes the vertex attributes in a shader conversion configuration.
 void MVKGraphicsPipeline::addVertexInputToShaderConversionConfig(SPIRVToMSLConversionConfiguration& shaderConfig,
 																 const VkGraphicsPipelineCreateInfo* pCreateInfo) {
@@ -1818,9 +1823,19 @@ MVKMTLFunction MVKComputePipeline::getMTLFunction(const VkComputePipelineCreateI
 
 	MVKPipelineLayout* layout = (MVKPipelineLayout*)pCreateInfo->layout;
 	layout->populateShaderConversionConfig(shaderConfig);
-	_swizzleBufferIndex = layout->getSwizzleBufferIndex();
-	_bufferSizeBufferIndex = layout->getBufferSizeBufferIndex();
-	_dynamicOffsetBufferIndex = layout->getDynamicOffsetBufferIndex();
+
+	// Set implicit buffer indices
+	// FIXME: Many of these are optional. We shouldn't set the ones that aren't
+	// present--or at least, we should move the ones that are down to avoid running over
+	// the limit of available buffers. But we can't know that until we compile the shaders.
+	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
+		MVKShaderStage stage = (MVKShaderStage)i;
+		_dynamicOffsetBufferIndex.stages[stage] = getImplicitBufferIndex(0);
+		_bufferSizeBufferIndex.stages[stage] = getImplicitBufferIndex(1);
+		_swizzleBufferIndex.stages[stage] = getImplicitBufferIndex(2);
+		_indirectParamsIndex.stages[stage] = getImplicitBufferIndex(3);
+	}
 
 	shaderConfig.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageCompute];
 	shaderConfig.options.mslOptions.buffer_size_buffer_index = _bufferSizeBufferIndex.stages[kMVKShaderStageCompute];
 	shaderConfig.options.mslOptions.dynamic_offsets_buffer_index = _dynamicOffsetBufferIndex.stages[kMVKShaderStageCompute];
@@ -1839,6 +1854,10 @@ MVKMTLFunction MVKComputePipeline::getMTLFunction(const VkComputePipelineCreateI
 	return func;
 }
 
+uint32_t MVKComputePipeline::getImplicitBufferIndex(uint32_t bufferIndexOffset) {
+	return _device->_pMetalFeatures->maxPerStageBufferCount - (bufferIndexOffset + 1);
+}
+
 MVKComputePipeline::~MVKComputePipeline() {
 	@synchronized (getMTLDevice()) {
 		[_mtlPipelineState release];