Merge pull request #622 from cdavis5e/new-spirv-cross
Update SPIRV-Cross.
This commit is contained in:
commit 2e36a38524
@@ -1 +1 @@
-c125bbd2482815f2f1b6a92ec4f06e1c01b47c5b
+cb686a5dba9a7086a778fe21900383beed9ea5d3

@@ -403,7 +403,7 @@ protected:
}
}

-void updateSwizzle(MVKVector<uint32_t> &constants, uint32_t index, uint32_t swizzle);
+void updateImplicitBuffer(MVKVector<uint32_t> &contents, uint32_t index, uint32_t value);
void assertMissingSwizzles(bool needsSwizzle, const char* stageName, MVKVector<MVKMTLTextureBinding>& texBindings);

};

@@ -434,18 +434,25 @@ public:
_mtlIndexBufferBinding = binding; // No need to track dirty state
}

-/** Sets the current auxiliary buffer state. */
-void bindAuxBuffer(const MVKShaderImplicitRezBinding& binding,
-bool needVertexAuxBuffer,
-bool needTessCtlAuxBuffer,
-bool needTessEvalAuxBuffer,
-bool needFragmentAuxBuffer);
+/** Sets the current swizzle buffer state. */
+void bindSwizzleBuffer(const MVKShaderImplicitRezBinding& binding,
+bool needVertexSwizzleBuffer,
+bool needTessCtlSwizzleBuffer,
+bool needTessEvalSwizzleBuffer,
+bool needFragmentSwizzleBuffer);
+
+/** Sets the current buffer size buffer state. */
+void bindBufferSizeBuffer(const MVKShaderImplicitRezBinding& binding,
+bool needVertexSizeBuffer,
+bool needTessCtlSizeBuffer,
+bool needTessEvalSizeBuffer,
+bool needFragmentSizeBuffer);

void encodeBindings(MVKShaderStage stage,
const char* pStageName,
bool fullImageViewSwizzle,
std::function<void(MVKCommandEncoder*, MVKMTLBufferBinding&)> bindBuffer,
-std::function<void(MVKCommandEncoder*, MVKMTLBufferBinding&, MVKVector<uint32_t>&)> bindAuxBuffer,
+std::function<void(MVKCommandEncoder*, MVKMTLBufferBinding&, MVKVector<uint32_t>&)> bindImplicitBuffer,
std::function<void(MVKCommandEncoder*, MVKMTLTextureBinding&)> bindTexture,
std::function<void(MVKCommandEncoder*, MVKMTLSamplerStateBinding&)> bindSampler);

@@ -464,7 +471,9 @@ protected:
MVKVectorInline<MVKMTLTextureBinding, 8> textureBindings;
MVKVectorInline<MVKMTLSamplerStateBinding, 8> samplerStateBindings;
MVKVectorInline<uint32_t, 8> swizzleConstants;
-MVKMTLBufferBinding auxBufferBinding;
+MVKVectorInline<uint32_t, 8> bufferSizes;
+MVKMTLBufferBinding swizzleBufferBinding;
+MVKMTLBufferBinding bufferSizeBufferBinding;

bool areBufferBindingsDirty = false;
bool areTextureBindingsDirty = false;

@@ -494,8 +503,11 @@ public:
/** Binds the specified sampler state. */
void bindSamplerState(const MVKMTLSamplerStateBinding& binding);

-/** Sets the current auxiliary buffer state. */
-void bindAuxBuffer(const MVKShaderImplicitRezBinding& binding, bool needAuxBuffer);
+/** Sets the current swizzle buffer state. */
+void bindSwizzleBuffer(const MVKShaderImplicitRezBinding& binding, bool needSwizzleBuffer);
+
+/** Sets the current buffer size buffer state. */
+void bindBufferSizeBuffer(const MVKShaderImplicitRezBinding& binding, bool needSizeBuffer);

#pragma mark Construction

@@ -511,13 +523,15 @@ protected:
MVKVectorDefault<MVKMTLTextureBinding> _textureBindings;
MVKVectorDefault<MVKMTLSamplerStateBinding> _samplerStateBindings;
MVKVectorDefault<uint32_t> _swizzleConstants;
-MVKMTLBufferBinding _auxBufferBinding;
+MVKVectorDefault<uint32_t> _bufferSizes;
+MVKMTLBufferBinding _swizzleBufferBinding;
+MVKMTLBufferBinding _bufferSizeBufferBinding;

bool _areBufferBindingsDirty = false;
bool _areTextureBindingsDirty = false;
bool _areSamplerStateBindingsDirty = false;

bool _needsSwizzle = false;
};

@@ -447,10 +447,10 @@ void MVKBlendColorCommandEncoderState::resetImpl() {
#pragma mark -
#pragma mark MVKResourcesCommandEncoderState

-// Updates the swizzle for an image in the given vector.
-void MVKResourcesCommandEncoderState::updateSwizzle(MVKVector<uint32_t> &constants, uint32_t index, uint32_t swizzle) {
-if (index >= constants.size()) { constants.resize(index + 1); }
-constants[index] = swizzle;
+// Updates a value at the given index in the given vector.
+void MVKResourcesCommandEncoderState::updateImplicitBuffer(MVKVector<uint32_t> &contents, uint32_t index, uint32_t value) {
+if (index >= contents.size()) { contents.resize(index + 1); }
+contents[index] = value;
}

// If a swizzle is needed for this stage, iterates all the bindings and logs errors for those that need texture swizzling.
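A minimal, self-contained sketch (not MoltenVK code) of the updateImplicitBuffer() pattern introduced in the hunk above: the old swizzle-only helper becomes a generic grow-on-demand write that is reused for both swizzle constants and buffer sizes. std::vector stands in for MVKVector, and the values written are made up for illustration.

#include <cstdint>
#include <vector>

// Grow the implicit buffer's CPU-side contents on demand, then write the value.
static void updateImplicitBuffer(std::vector<uint32_t>& contents, uint32_t index, uint32_t value) {
    if (index >= contents.size()) { contents.resize(index + 1); }
    contents[index] = value;
}

int main() {
    std::vector<uint32_t> swizzleConstants;   // one packed swizzle word per texture binding
    std::vector<uint32_t> bufferSizes;        // one byte count per buffer binding
    updateImplicitBuffer(swizzleConstants, 2, 0x00000e14);  // hypothetical packed swizzle value
    updateImplicitBuffer(bufferSizes, 0, 256);              // hypothetical buffer size in bytes
    return 0;
}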
@@ -486,42 +486,64 @@ void MVKGraphicsResourcesCommandEncoderState::bindSamplerState(MVKShaderStage st
bind(binding, _shaderStages[stage].samplerStateBindings, _shaderStages[stage].areSamplerStateBindingsDirty);
}

-void MVKGraphicsResourcesCommandEncoderState::bindAuxBuffer(const MVKShaderImplicitRezBinding& binding,
-bool needVertexAuxBuffer,
-bool needTessCtlAuxBuffer,
-bool needTessEvalAuxBuffer,
-bool needFragmentAuxBuffer) {
+void MVKGraphicsResourcesCommandEncoderState::bindSwizzleBuffer(const MVKShaderImplicitRezBinding& binding,
+bool needVertexSwizzleBuffer,
+bool needTessCtlSwizzleBuffer,
+bool needTessEvalSwizzleBuffer,
+bool needFragmentSwizzleBuffer) {
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCompute; i++) {
-_shaderStages[i].auxBufferBinding.index = binding.stages[i];
+_shaderStages[i].swizzleBufferBinding.index = binding.stages[i];
}
-_shaderStages[kMVKShaderStageVertex].auxBufferBinding.isDirty = needVertexAuxBuffer;
-_shaderStages[kMVKShaderStageTessCtl].auxBufferBinding.isDirty = needTessCtlAuxBuffer;
-_shaderStages[kMVKShaderStageTessEval].auxBufferBinding.isDirty = needTessEvalAuxBuffer;
-_shaderStages[kMVKShaderStageFragment].auxBufferBinding.isDirty = needFragmentAuxBuffer;
+_shaderStages[kMVKShaderStageVertex].swizzleBufferBinding.isDirty = needVertexSwizzleBuffer;
+_shaderStages[kMVKShaderStageTessCtl].swizzleBufferBinding.isDirty = needTessCtlSwizzleBuffer;
+_shaderStages[kMVKShaderStageTessEval].swizzleBufferBinding.isDirty = needTessEvalSwizzleBuffer;
+_shaderStages[kMVKShaderStageFragment].swizzleBufferBinding.isDirty = needFragmentSwizzleBuffer;
}

+void MVKGraphicsResourcesCommandEncoderState::bindBufferSizeBuffer(const MVKShaderImplicitRezBinding& binding,
+bool needVertexSizeBuffer,
+bool needTessCtlSizeBuffer,
+bool needTessEvalSizeBuffer,
+bool needFragmentSizeBuffer) {
+for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCompute; i++) {
+_shaderStages[i].bufferSizeBufferBinding.index = binding.stages[i];
+}
+_shaderStages[kMVKShaderStageVertex].bufferSizeBufferBinding.isDirty = needVertexSizeBuffer;
+_shaderStages[kMVKShaderStageTessCtl].bufferSizeBufferBinding.isDirty = needTessCtlSizeBuffer;
+_shaderStages[kMVKShaderStageTessEval].bufferSizeBufferBinding.isDirty = needTessEvalSizeBuffer;
+_shaderStages[kMVKShaderStageFragment].bufferSizeBufferBinding.isDirty = needFragmentSizeBuffer;
+}
+
void MVKGraphicsResourcesCommandEncoderState::encodeBindings(MVKShaderStage stage,
const char* pStageName,
bool fullImageViewSwizzle,
std::function<void(MVKCommandEncoder*, MVKMTLBufferBinding&)> bindBuffer,
-std::function<void(MVKCommandEncoder*, MVKMTLBufferBinding&, MVKVector<uint32_t>&)> bindAuxBuffer,
+std::function<void(MVKCommandEncoder*, MVKMTLBufferBinding&, MVKVector<uint32_t>&)> bindImplicitBuffer,
std::function<void(MVKCommandEncoder*, MVKMTLTextureBinding&)> bindTexture,
std::function<void(MVKCommandEncoder*, MVKMTLSamplerStateBinding&)> bindSampler) {
auto& shaderStage = _shaderStages[stage];
encodeBinding<MVKMTLBufferBinding>(shaderStage.bufferBindings, shaderStage.areBufferBindingsDirty, bindBuffer);

-if (shaderStage.auxBufferBinding.isDirty) {
+if (shaderStage.swizzleBufferBinding.isDirty) {

for (auto& b : shaderStage.textureBindings) {
-if (b.isDirty) { updateSwizzle(shaderStage.swizzleConstants, b.index, b.swizzle); }
+if (b.isDirty) { updateImplicitBuffer(shaderStage.swizzleConstants, b.index, b.swizzle); }
}

-bindAuxBuffer(_cmdEncoder, shaderStage.auxBufferBinding, shaderStage.swizzleConstants);
+bindImplicitBuffer(_cmdEncoder, shaderStage.swizzleBufferBinding, shaderStage.swizzleConstants);

} else {
assertMissingSwizzles(shaderStage.needsSwizzle && !fullImageViewSwizzle, pStageName, shaderStage.textureBindings);
}

+if (shaderStage.bufferSizeBufferBinding.isDirty) {
+for (auto& b : shaderStage.bufferBindings) {
+if (b.isDirty) { updateImplicitBuffer(shaderStage.bufferSizes, b.index, b.size); }
+}
+
+bindImplicitBuffer(_cmdEncoder, shaderStage.bufferSizeBufferBinding, shaderStage.bufferSizes);
+}
+
encodeBinding<MVKMTLTextureBinding>(shaderStage.textureBindings, shaderStage.areTextureBindingsDirty, bindTexture);
encodeBinding<MVKMTLSamplerStateBinding>(shaderStage.samplerStateBindings, shaderStage.areSamplerStateBindingsDirty, bindSampler);
}

@@ -622,7 +644,7 @@ void MVKGraphicsResourcesCommandEncoderState::encodeImpl(uint32_t stage) {
atIndex: b.index];
},
[](MVKCommandEncoder* cmdEncoder, MVKMTLBufferBinding& b, MVKVector<uint32_t>& s)->void {
cmdEncoder->setFragmentBytes(cmdEncoder->_mtlRenderEncoder,
s.data(),
s.size() * sizeof(uint32_t),
b.index);

@@ -644,11 +666,13 @@ void MVKGraphicsResourcesCommandEncoderState::resetImpl() {
_shaderStages[i].textureBindings.clear();
_shaderStages[i].samplerStateBindings.clear();
_shaderStages[i].swizzleConstants.clear();
+_shaderStages[i].bufferSizes.clear();

_shaderStages[i].areBufferBindingsDirty = false;
_shaderStages[i].areTextureBindingsDirty = false;
_shaderStages[i].areSamplerStateBindingsDirty = false;
-_shaderStages[i].auxBufferBinding.isDirty = false;
+_shaderStages[i].swizzleBufferBinding.isDirty = false;
+_shaderStages[i].bufferSizeBufferBinding.isDirty = false;

_shaderStages[i].needsSwizzle = false;
}

@@ -663,17 +687,23 @@ void MVKComputeResourcesCommandEncoderState::bindBuffer(const MVKMTLBufferBindin
}

void MVKComputeResourcesCommandEncoderState::bindTexture(const MVKMTLTextureBinding& binding) {
bind(binding, _textureBindings, _areTextureBindingsDirty, _needsSwizzle);
}

void MVKComputeResourcesCommandEncoderState::bindSamplerState(const MVKMTLSamplerStateBinding& binding) {
bind(binding, _samplerStateBindings, _areSamplerStateBindingsDirty);
}

-void MVKComputeResourcesCommandEncoderState::bindAuxBuffer(const MVKShaderImplicitRezBinding& binding,
-bool needAuxBuffer) {
-_auxBufferBinding.index = binding.stages[kMVKShaderStageCompute];
-_auxBufferBinding.isDirty = needAuxBuffer;
+void MVKComputeResourcesCommandEncoderState::bindSwizzleBuffer(const MVKShaderImplicitRezBinding& binding,
+bool needSwizzleBuffer) {
+_swizzleBufferBinding.index = binding.stages[kMVKShaderStageCompute];
+_swizzleBufferBinding.isDirty = needSwizzleBuffer;
}

+void MVKComputeResourcesCommandEncoderState::bindBufferSizeBuffer(const MVKShaderImplicitRezBinding& binding,
+bool needBufferSizeBuffer) {
+_bufferSizeBufferBinding.index = binding.stages[kMVKShaderStageCompute];
+_bufferSizeBufferBinding.isDirty = needBufferSizeBuffer;
+}
+
// Mark everything as dirty

@@ -698,21 +728,33 @@ void MVKComputeResourcesCommandEncoderState::encodeImpl(uint32_t) {
atIndex: b.index];
});

-if (_auxBufferBinding.isDirty) {
+if (_swizzleBufferBinding.isDirty) {

for (auto& b : _textureBindings) {
-if (b.isDirty) { updateSwizzle(_swizzleConstants, b.index, b.swizzle); }
+if (b.isDirty) { updateImplicitBuffer(_swizzleConstants, b.index, b.swizzle); }
}

_cmdEncoder->setComputeBytes(_cmdEncoder->getMTLComputeEncoder(kMVKCommandUseDispatch),
_swizzleConstants.data(),
_swizzleConstants.size() * sizeof(uint32_t),
-_auxBufferBinding.index);
+_swizzleBufferBinding.index);

} else {
assertMissingSwizzles(_needsSwizzle && !fullImageViewSwizzle, "compute", _textureBindings);
}

+if (_bufferSizeBufferBinding.isDirty) {
+for (auto& b : _bufferBindings) {
+if (b.isDirty) { updateImplicitBuffer(_bufferSizes, b.index, b.size); }
+}
+
+_cmdEncoder->setComputeBytes(_cmdEncoder->getMTLComputeEncoder(kMVKCommandUseDispatch),
+_bufferSizes.data(),
+_bufferSizes.size() * sizeof(uint32_t),
+_bufferSizeBufferBinding.index);
+
+}
+
encodeBinding<MVKMTLTextureBinding>(_textureBindings, _areTextureBindingsDirty,
[](MVKCommandEncoder* cmdEncoder, MVKMTLTextureBinding& b)->void {
[cmdEncoder->getMTLComputeEncoder(kMVKCommandUseDispatch) setTexture: b.mtlTexture

@@ -731,11 +773,13 @@ void MVKComputeResourcesCommandEncoderState::resetImpl() {
_textureBindings.clear();
_samplerStateBindings.clear();
_swizzleConstants.clear();
+_bufferSizes.clear();

_areBufferBindingsDirty = false;
_areTextureBindingsDirty = false;
_areSamplerStateBindingsDirty = false;
-_auxBufferBinding.isDirty = false;
+_swizzleBufferBinding.isDirty = false;
+_bufferSizeBufferBinding.isDirty = false;

_needsSwizzle = false;
}

@@ -40,6 +40,7 @@ typedef struct {
union { id<MTLBuffer> mtlBuffer = nil; id<MTLBuffer> mtlResource; }; // aliases
NSUInteger offset = 0;
uint32_t index = 0;
+uint32_t size = 0;
bool isDirty = true;
} MVKMTLBufferBinding;

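The new size field in the hunk above is what feeds the buffer size buffer: when bindings are encoded, each dirty buffer binding's byte count is written into the implicit buffer's contents at that binding's index (see the encodeBindings() hunks earlier). A self-contained sketch of that gather step, using simplified stand-in types rather than the real MVKMTLBufferBinding, with made-up sizes:

#include <cstdint>
#include <vector>

struct BufferBinding {
    uint32_t index;   // Metal buffer slot
    uint32_t size;    // byte count of the bound buffer, newly tracked by this change
    bool isDirty;
};

int main() {
    std::vector<BufferBinding> bufferBindings = { {0, 256, true}, {2, 64, true} };
    std::vector<uint32_t> bufferSizes;          // CPU-side contents of the buffer size buffer
    for (auto& b : bufferBindings) {
        if (!b.isDirty) { continue; }
        if (b.index >= bufferSizes.size()) { bufferSizes.resize(b.index + 1); }
        bufferSizes[b.index] = b.size;          // slot i holds the size of the buffer bound at index i
    }
    return 0;
}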
@@ -105,6 +105,7 @@ void MVKDescriptorSetLayoutBinding::bind(MVKCommandEncoder* cmdEncoder,
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: {
bb.mtlBuffer = descBinding._mtlBuffers[rezIdx];
bb.offset = descBinding._mtlBufferOffsets[rezIdx] + bufferDynamicOffset;
+bb.size = (uint32_t)((MVKBuffer*)descBinding._bufferBindings[rezIdx].buffer)->getByteCount();
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
if (_applyToStage[i]) {
bb.index = mtlIdxs.stages[i].bufferIndex + rezIdx;

@@ -236,6 +237,7 @@ void MVKDescriptorSetLayoutBinding::push(MVKCommandEncoder* cmdEncoder,
MVKBuffer* buffer = (MVKBuffer*)bufferInfo.buffer;
bb.mtlBuffer = buffer->getMTLBuffer();
bb.offset = bufferInfo.offset;
+bb.size = (uint32_t)buffer->getByteCount();
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
if (_applyToStage[i]) {
bb.index = mtlIdxs.stages[i].bufferIndex + rezIdx;

@@ -69,8 +69,11 @@ public:
uint32_t set,
const void* pData);

-/** Returns the current auxiliary buffer bindings. */
-const MVKShaderImplicitRezBinding& getAuxBufferIndex() { return _auxBufferIndex; }
+/** Returns the current swizzle buffer bindings. */
+const MVKShaderImplicitRezBinding& getSwizzleBufferIndex() { return _swizzleBufferIndex; }
+
+/** Returns the current buffer size buffer bindings. */
+const MVKShaderImplicitRezBinding& getBufferSizeBufferIndex() { return _bufferSizeBufferIndex; }

/** Returns the current indirect parameter buffer bindings. */
const MVKShaderImplicitRezBinding& getIndirectParamsIndex() { return _indirectParamsIndex; }

@@ -84,9 +87,12 @@ public:
/** Returns the current tessellation level buffer binding for the tess. control shader. */
uint32_t getTessCtlLevelBufferIndex() { return _tessCtlLevelBufferIndex; }

-/** Returns the number of textures in this layout. This is used to calculate the size of the auxiliary buffer. */
+/** Returns the number of textures in this layout. This is used to calculate the size of the swizzle buffer. */
uint32_t getTextureCount() { return _pushConstantsMTLResourceIndexes.getMaxTextureIndex(); }

+/** Returns the number of buffers in this layout. This is used to calculate the size of the buffer size buffer. */
+uint32_t getBufferCount() { return _pushConstantsMTLResourceIndexes.getMaxBufferIndex(); }
+
/** Constructs an instance for the specified device. */
MVKPipelineLayout(MVKDevice* device, const VkPipelineLayoutCreateInfo* pCreateInfo);

@@ -97,7 +103,8 @@ protected:
MVKVectorInline<MVKShaderResourceBinding, 8> _dslMTLResourceIndexOffsets;
MVKVectorInline<VkPushConstantRange, 8> _pushConstants;
MVKShaderResourceBinding _pushConstantsMTLResourceIndexes;
-MVKShaderImplicitRezBinding _auxBufferIndex;
+MVKShaderImplicitRezBinding _swizzleBufferIndex;
+MVKShaderImplicitRezBinding _bufferSizeBufferIndex;
MVKShaderImplicitRezBinding _indirectParamsIndex;
MVKShaderImplicitRezBinding _outputBufferIndex;
uint32_t _tessCtlPatchOutputBufferIndex = 0;

@@ -131,8 +138,11 @@ public:
/** Binds this pipeline to the specified command encoder. */
virtual void encode(MVKCommandEncoder* cmdEncoder, uint32_t stage = 0) = 0;

-/** Returns the current auxiliary buffer bindings. */
-const MVKShaderImplicitRezBinding& getAuxBufferIndex() { return _auxBufferIndex; }
+/** Returns the current swizzle buffer bindings. */
+const MVKShaderImplicitRezBinding& getSwizzleBufferIndex() { return _swizzleBufferIndex; }
+
+/** Returns the current buffer size buffer bindings. */
+const MVKShaderImplicitRezBinding& getBufferSizeBufferIndex() { return _bufferSizeBufferIndex; }

/** Returns whether or not full image view swizzling is enabled for this pipeline. */
bool fullImageViewSwizzle() const { return _fullImageViewSwizzle; }

@@ -146,7 +156,8 @@ protected:
void propogateDebugName() override {}

MVKPipelineCache* _pipelineCache;
-MVKShaderImplicitRezBinding _auxBufferIndex;
+MVKShaderImplicitRezBinding _swizzleBufferIndex;
+MVKShaderImplicitRezBinding _bufferSizeBufferIndex;
bool _fullImageViewSwizzle;

};

@@ -230,6 +241,7 @@ protected:
void addTessellationToPipeline(MTLRenderPipelineDescriptor* plDesc, const SPIRVTessReflectionData& reflectData, const VkPipelineTessellationStateCreateInfo* pTS);
void addFragmentOutputToPipeline(MTLRenderPipelineDescriptor* plDesc, const SPIRVTessReflectionData& reflectData, const VkGraphicsPipelineCreateInfo* pCreateInfo, bool isTessellationVertexPipeline = false);
bool isRenderingPoints(const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData);
+bool verifyImplicitBuffer(bool needsBuffer, MVKShaderImplicitRezBinding& index, MVKShaderStage stage, const char* name, uint32_t reservedBuffers);

const VkPipelineShaderStageCreateInfo* _pVertexSS = nullptr;
const VkPipelineShaderStageCreateInfo* _pTessCtlSS = nullptr;

@@ -265,14 +277,18 @@ protected:

bool _dynamicStateEnabled[VK_DYNAMIC_STATE_RANGE_SIZE];
bool _hasDepthStencilInfo;
-bool _needsVertexAuxBuffer = false;
+bool _needsVertexSwizzleBuffer = false;
+bool _needsVertexBufferSizeBuffer = false;
bool _needsVertexOutputBuffer = false;
-bool _needsTessCtlAuxBuffer = false;
+bool _needsTessCtlSwizzleBuffer = false;
+bool _needsTessCtlBufferSizeBuffer = false;
bool _needsTessCtlOutputBuffer = false;
bool _needsTessCtlPatchOutputBuffer = false;
bool _needsTessCtlInput = false;
-bool _needsTessEvalAuxBuffer = false;
-bool _needsFragmentAuxBuffer = false;
+bool _needsTessEvalSwizzleBuffer = false;
+bool _needsTessEvalBufferSizeBuffer = false;
+bool _needsFragmentSwizzleBuffer = false;
+bool _needsFragmentBufferSizeBuffer = false;
};

@@ -303,7 +319,8 @@ protected:

id<MTLComputePipelineState> _mtlPipelineState;
MTLSize _mtlThreadgroupSize;
-bool _needsAuxBuffer = false;
+bool _needsSwizzleBuffer = false;
+bool _needsBufferSizeBuffer = false;
};

@@ -135,9 +135,14 @@ MVKPipelineLayout::MVKPipelineLayout(MVKDevice* device,
}

// Set implicit buffer indices
+// FIXME: Many of these are optional. We shouldn't set the ones that aren't
+// present--or at least, we should move the ones that are down to avoid
+// running over the limit of available buffers. But we can't know that
+// until we compile the shaders.
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
-_auxBufferIndex.stages[i] = _pushConstantsMTLResourceIndexes.stages[i].bufferIndex + 1;
-_indirectParamsIndex.stages[i] = _auxBufferIndex.stages[i] + 1;
+_swizzleBufferIndex.stages[i] = _pushConstantsMTLResourceIndexes.stages[i].bufferIndex + 1;
+_bufferSizeBufferIndex.stages[i] = _swizzleBufferIndex.stages[i] + 1;
+_indirectParamsIndex.stages[i] = _bufferSizeBufferIndex.stages[i] + 1;
_outputBufferIndex.stages[i] = _indirectParamsIndex.stages[i] + 1;
if (i == kMVKShaderStageTessCtl) {
_tessCtlPatchOutputBufferIndex = _outputBufferIndex.stages[i] + 1;
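To make the index stacking in the hunk above concrete, here is the resulting per-stage buffer layout as a small standalone calculation. The starting push-constant buffer index (5) is an assumed example value, not a MoltenVK constant; the +1 stacking follows the constructor code above.

#include <cstdint>
#include <cstdio>

int main() {
    uint32_t pushConstantsBufferIndex = 5;  // assumed example value for one shader stage

    // Implicit buffers are stacked one slot after another, per the hunk above.
    uint32_t swizzleBufferIndex            = pushConstantsBufferIndex + 1;  // 6
    uint32_t bufferSizeBufferIndex         = swizzleBufferIndex + 1;        // 7
    uint32_t indirectParamsIndex           = bufferSizeBufferIndex + 1;     // 8
    uint32_t outputBufferIndex             = indirectParamsIndex + 1;       // 9
    uint32_t tessCtlPatchOutputBufferIndex = outputBufferIndex + 1;         // 10 (tess. control stage only)

    std::printf("swizzle=%u sizes=%u indirect=%u output=%u patchOutput=%u\n",
                swizzleBufferIndex, bufferSizeBufferIndex, indirectParamsIndex,
                outputBufferIndex, tessCtlPatchOutputBufferIndex);
    return 0;
}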
@@ -232,7 +237,8 @@ void MVKGraphicsPipeline::encode(MVKCommandEncoder* cmdEncoder, uint32_t stage)

break;
}
-cmdEncoder->_graphicsResourcesState.bindAuxBuffer(_auxBufferIndex, _needsVertexAuxBuffer, _needsTessCtlAuxBuffer, _needsTessEvalAuxBuffer, _needsFragmentAuxBuffer);
+cmdEncoder->_graphicsResourcesState.bindSwizzleBuffer(_swizzleBufferIndex, _needsVertexSwizzleBuffer, _needsTessCtlSwizzleBuffer, _needsTessEvalSwizzleBuffer, _needsFragmentSwizzleBuffer);
+cmdEncoder->_graphicsResourcesState.bindBufferSizeBuffer(_bufferSizeBufferIndex, _needsVertexBufferSizeBuffer, _needsTessCtlBufferSizeBuffer, _needsTessEvalBufferSizeBuffer, _needsFragmentBufferSizeBuffer);
}

bool MVKGraphicsPipeline::supportsDynamicState(VkDynamicState state) {

@@ -700,14 +706,29 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::getMTLTessRasterStageDescripto
return plDesc;
}

+bool MVKGraphicsPipeline::verifyImplicitBuffer(bool needsBuffer, MVKShaderImplicitRezBinding& index, MVKShaderStage stage, const char* name, uint32_t reservedBuffers) {
+const char* stageNames[] = {
+"Vertex",
+"Tessellation control",
+"Tessellation evaluation",
+"Fragment"
+};
+if (needsBuffer && index.stages[stage] >= _device->_pMetalFeatures->maxPerStageBufferCount - reservedBuffers) {
+setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "%s shader requires %s buffer, but there is no free slot to pass it.", stageNames[stage], name));
+return false;
+}
+return true;
+}
+
// Adds a vertex shader to the pipeline description.
bool MVKGraphicsPipeline::addVertexShaderToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConverterContext& shaderContext) {
uint32_t vbCnt = pCreateInfo->pVertexInputState->vertexBindingDescriptionCount;
shaderContext.options.entryPointStage = spv::ExecutionModelVertex;
shaderContext.options.entryPointName = _pVertexSS->pName;
-shaderContext.options.auxBufferIndex = _auxBufferIndex.stages[kMVKShaderStageVertex];
+shaderContext.options.swizzleBufferIndex = _swizzleBufferIndex.stages[kMVKShaderStageVertex];
shaderContext.options.indirectParamsBufferIndex = _indirectParamsIndex.stages[kMVKShaderStageVertex];
shaderContext.options.outputBufferIndex = _outputBufferIndex.stages[kMVKShaderStageVertex];
+shaderContext.options.bufferSizeBufferIndex = _bufferSizeBufferIndex.stages[kMVKShaderStageVertex];
shaderContext.options.shouldCaptureOutput = isTessellationPipeline();
shaderContext.options.isRasterizationDisabled = isTessellationPipeline() || (pCreateInfo->pRasterizationState && (pCreateInfo->pRasterizationState->rasterizerDiscardEnable));
addVertexInputToShaderConverterContext(shaderContext, pCreateInfo);
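The verifyImplicitBuffer() helper added in the hunk above replaces the repeated "no free slot" checks that follow: an implicit buffer only fits if its index stays below the per-stage buffer limit minus the slots already reserved (vertex buffers, tessellation scratch buffers, and so on). A standalone sketch of that check; the limit of 31 and the 4 reserved slots are assumed example values, not MoltenVK constants.

#include <cstdint>
#include <cstdio>

static bool verifyImplicitBuffer(bool needsBuffer, uint32_t bufferIndex,
                                 uint32_t maxPerStageBufferCount, uint32_t reservedBuffers,
                                 const char* stageName, const char* bufferName) {
    // The implicit buffer must land in a slot that is not reserved for other uses.
    if (needsBuffer && bufferIndex >= maxPerStageBufferCount - reservedBuffers) {
        std::fprintf(stderr, "%s shader requires %s buffer, but there is no free slot to pass it.\n",
                     stageName, bufferName);
        return false;
    }
    return true;
}

int main() {
    // Example: a vertex stage needing a swizzle buffer at slot 29, with 4 of 31 slots reserved.
    bool ok = verifyImplicitBuffer(true, 29, 31, 4, "Vertex", "swizzle");
    std::printf("implicit buffer fits: %s\n", ok ? "yes" : "no");
    return 0;
}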
@@ -718,20 +739,22 @@ bool MVKGraphicsPipeline::addVertexShaderToPipeline(MTLRenderPipelineDescriptor*
}
plDesc.vertexFunction = mtlFunction;
plDesc.rasterizationEnabled = !shaderContext.options.isRasterizationDisabled;
-_needsVertexAuxBuffer = shaderContext.options.needsAuxBuffer;
+_needsVertexSwizzleBuffer = shaderContext.options.needsSwizzleBuffer;
+_needsVertexBufferSizeBuffer = shaderContext.options.needsBufferSizeBuffer;
_needsVertexOutputBuffer = shaderContext.options.needsOutputBuffer;
-// If we need the auxiliary buffer and there's no place to put it, we're in serious trouble.
-if (_needsVertexAuxBuffer && _auxBufferIndex.stages[kMVKShaderStageVertex] >= _device->_pMetalFeatures->maxPerStageBufferCount - vbCnt) {
-setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "Vertex shader requires auxiliary buffer, but there is no free slot to pass it."));
+// If we need the swizzle buffer and there's no place to put it, we're in serious trouble.
+if (!verifyImplicitBuffer(_needsVertexSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageVertex, "swizzle", vbCnt)) {
return false;
}
+// Ditto buffer size buffer.
+if (!verifyImplicitBuffer(_needsVertexBufferSizeBuffer, _bufferSizeBufferIndex, kMVKShaderStageVertex, "buffer size", vbCnt)) {
+return false;
+}
// Ditto captured output buffer.
-if (_needsVertexOutputBuffer && _outputBufferIndex.stages[kMVKShaderStageVertex] >= _device->_pMetalFeatures->maxPerStageBufferCount - vbCnt) {
-setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "Vertex shader requires output buffer, but there is no free slot to pass it."));
+if (!verifyImplicitBuffer(_needsVertexOutputBuffer, _outputBufferIndex, kMVKShaderStageVertex, "output", vbCnt)) {
return false;
}
-if (_needsVertexOutputBuffer && _indirectParamsIndex.stages[kMVKShaderStageVertex] >= _device->_pMetalFeatures->maxPerStageBufferCount - vbCnt) {
-setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "Vertex shader requires indirect parameters buffer, but there is no free slot to pass it."));
+if (!verifyImplicitBuffer(_needsVertexOutputBuffer, _indirectParamsIndex, kMVKShaderStageVertex, "indirect parameters", vbCnt)) {
return false;
}
return true;

@@ -740,11 +763,12 @@ bool MVKGraphicsPipeline::addVertexShaderToPipeline(MTLRenderPipelineDescriptor*
bool MVKGraphicsPipeline::addTessCtlShaderToPipeline(MTLComputePipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConverterContext& shaderContext, std::vector<SPIRVShaderOutput>& vtxOutputs) {
shaderContext.options.entryPointStage = spv::ExecutionModelTessellationControl;
shaderContext.options.entryPointName = _pTessCtlSS->pName;
-shaderContext.options.auxBufferIndex = _auxBufferIndex.stages[kMVKShaderStageTessCtl];
+shaderContext.options.swizzleBufferIndex = _swizzleBufferIndex.stages[kMVKShaderStageTessCtl];
shaderContext.options.indirectParamsBufferIndex = _indirectParamsIndex.stages[kMVKShaderStageTessCtl];
shaderContext.options.outputBufferIndex = _outputBufferIndex.stages[kMVKShaderStageTessCtl];
shaderContext.options.patchOutputBufferIndex = _tessCtlPatchOutputBufferIndex;
shaderContext.options.tessLevelBufferIndex = _tessCtlLevelBufferIndex;
+shaderContext.options.bufferSizeBufferIndex = _bufferSizeBufferIndex.stages[kMVKShaderStageTessCtl];
shaderContext.options.shouldCaptureOutput = true;
addPrevStageOutputToShaderConverterContext(shaderContext, vtxOutputs);
id<MTLFunction> mtlFunction = ((MVKShaderModule*)_pTessCtlSS->module)->getMTLFunction(&shaderContext, _pTessCtlSS->pSpecializationInfo, _pipelineCache).mtlFunction;

@@ -753,20 +777,21 @@ bool MVKGraphicsPipeline::addTessCtlShaderToPipeline(MTLComputePipelineDescripto
return false;
}
plDesc.computeFunction = mtlFunction;
-_needsTessCtlAuxBuffer = shaderContext.options.needsAuxBuffer;
+_needsTessCtlSwizzleBuffer = shaderContext.options.needsSwizzleBuffer;
+_needsTessCtlBufferSizeBuffer = shaderContext.options.needsBufferSizeBuffer;
_needsTessCtlOutputBuffer = shaderContext.options.needsOutputBuffer;
_needsTessCtlPatchOutputBuffer = shaderContext.options.needsPatchOutputBuffer;
_needsTessCtlInput = shaderContext.options.needsInputThreadgroupMem;
-if (_needsTessCtlAuxBuffer && _auxBufferIndex.stages[kMVKShaderStageTessCtl] >= _device->_pMetalFeatures->maxPerStageBufferCount - kMVKTessCtlNumReservedBuffers) {
-setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "Tessellation control shader requires auxiliary buffer, but there is no free slot to pass it."));
+if (!verifyImplicitBuffer(_needsTessCtlSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageTessCtl, "swizzle", kMVKTessCtlNumReservedBuffers)) {
return false;
}
-if (_indirectParamsIndex.stages[kMVKShaderStageTessCtl] >= _device->_pMetalFeatures->maxPerStageBufferCount - kMVKTessCtlNumReservedBuffers) {
-setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "Tessellation control shader requires indirect parameters buffer, but there is no free slot to pass it."));
+if (!verifyImplicitBuffer(_needsTessCtlBufferSizeBuffer, _bufferSizeBufferIndex, kMVKShaderStageTessCtl, "buffer size", kMVKTessCtlNumReservedBuffers)) {
return false;
}
-if (_needsTessCtlOutputBuffer && _outputBufferIndex.stages[kMVKShaderStageTessCtl] >= _device->_pMetalFeatures->maxPerStageBufferCount - kMVKTessCtlNumReservedBuffers) {
-setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "Tessellation control shader requires per-vertex output buffer, but there is no free slot to pass it."));
+if (!verifyImplicitBuffer(true, _indirectParamsIndex, kMVKShaderStageTessCtl, "indirect parameters", kMVKTessCtlNumReservedBuffers)) {
+return false;
+}
+if (!verifyImplicitBuffer(_needsTessCtlOutputBuffer, _outputBufferIndex, kMVKShaderStageTessCtl, "per-vertex output", kMVKTessCtlNumReservedBuffers)) {
return false;
}
if (_needsTessCtlPatchOutputBuffer && _tessCtlPatchOutputBufferIndex >= _device->_pMetalFeatures->maxPerStageBufferCount - kMVKTessCtlNumReservedBuffers) {

@@ -783,7 +808,8 @@ bool MVKGraphicsPipeline::addTessCtlShaderToPipeline(MTLComputePipelineDescripto
bool MVKGraphicsPipeline::addTessEvalShaderToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConverterContext& shaderContext, std::vector<SPIRVShaderOutput>& tcOutputs) {
shaderContext.options.entryPointStage = spv::ExecutionModelTessellationEvaluation;
shaderContext.options.entryPointName = _pTessEvalSS->pName;
-shaderContext.options.auxBufferIndex = _auxBufferIndex.stages[kMVKShaderStageTessEval];
+shaderContext.options.swizzleBufferIndex = _swizzleBufferIndex.stages[kMVKShaderStageTessEval];
+shaderContext.options.bufferSizeBufferIndex = _bufferSizeBufferIndex.stages[kMVKShaderStageTessEval];
shaderContext.options.shouldCaptureOutput = false;
shaderContext.options.isRasterizationDisabled = (pCreateInfo->pRasterizationState && (pCreateInfo->pRasterizationState->rasterizerDiscardEnable));
addPrevStageOutputToShaderConverterContext(shaderContext, tcOutputs);

@@ -795,10 +821,12 @@ bool MVKGraphicsPipeline::addTessEvalShaderToPipeline(MTLRenderPipelineDescripto
// Yeah, you read that right. Tess. eval functions are a kind of vertex function in Metal.
plDesc.vertexFunction = mtlFunction;
plDesc.rasterizationEnabled = !shaderContext.options.isRasterizationDisabled;
-_needsTessEvalAuxBuffer = shaderContext.options.needsAuxBuffer;
-// If we need the auxiliary buffer and there's no place to put it, we're in serious trouble.
-if (_needsTessEvalAuxBuffer && _auxBufferIndex.stages[kMVKShaderStageTessEval] >= _device->_pMetalFeatures->maxPerStageBufferCount - kMVKTessEvalNumReservedBuffers) {
-setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "Tessellation evaluation shader requires auxiliary buffer, but there is no free slot to pass it."));
+_needsTessEvalSwizzleBuffer = shaderContext.options.needsSwizzleBuffer;
+_needsTessEvalBufferSizeBuffer = shaderContext.options.needsBufferSizeBuffer;
+if (!verifyImplicitBuffer(_needsTessEvalSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageTessEval, "swizzle", kMVKTessEvalNumReservedBuffers)) {
return false;
}
+if (!verifyImplicitBuffer(_needsTessEvalBufferSizeBuffer, _bufferSizeBufferIndex, kMVKShaderStageTessEval, "buffer size", kMVKTessEvalNumReservedBuffers)) {
+return false;
+}
return true;

@@ -807,7 +835,8 @@ bool MVKGraphicsPipeline::addTessEvalShaderToPipeline(MTLRenderPipelineDescripto
bool MVKGraphicsPipeline::addFragmentShaderToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConverterContext& shaderContext) {
if (_pFragmentSS) {
shaderContext.options.entryPointStage = spv::ExecutionModelFragment;
-shaderContext.options.auxBufferIndex = _auxBufferIndex.stages[kMVKShaderStageFragment];
+shaderContext.options.swizzleBufferIndex = _swizzleBufferIndex.stages[kMVKShaderStageFragment];
+shaderContext.options.bufferSizeBufferIndex = _bufferSizeBufferIndex.stages[kMVKShaderStageFragment];
shaderContext.options.entryPointName = _pFragmentSS->pName;
shaderContext.options.shouldCaptureOutput = false;
id<MTLFunction> mtlFunction = ((MVKShaderModule*)_pFragmentSS->module)->getMTLFunction(&shaderContext, _pFragmentSS->pSpecializationInfo, _pipelineCache).mtlFunction;

@@ -816,9 +845,12 @@ bool MVKGraphicsPipeline::addFragmentShaderToPipeline(MTLRenderPipelineDescripto
return false;
}
plDesc.fragmentFunction = mtlFunction;
-_needsFragmentAuxBuffer = shaderContext.options.needsAuxBuffer;
-if (_needsFragmentAuxBuffer && _auxBufferIndex.stages[kMVKShaderStageFragment] >= _device->_pMetalFeatures->maxPerStageBufferCount) {
-setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "Fragment shader requires auxiliary buffer, but there is no free slot to pass it."));
+_needsFragmentSwizzleBuffer = shaderContext.options.needsSwizzleBuffer;
+_needsFragmentBufferSizeBuffer = shaderContext.options.needsBufferSizeBuffer;
+if (!verifyImplicitBuffer(_needsFragmentSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageFragment, "swizzle", 0)) {
return false;
}
+if (!verifyImplicitBuffer(_needsFragmentBufferSizeBuffer, _bufferSizeBufferIndex, kMVKShaderStageFragment, "buffer size", 0)) {
+return false;
+}
}

@@ -1029,7 +1061,8 @@ void MVKGraphicsPipeline::initMVKShaderConverterContext(SPIRVToMSLConverterConte

MVKPipelineLayout* layout = (MVKPipelineLayout*)pCreateInfo->layout;
layout->populateShaderConverterContext(shaderContext);
-_auxBufferIndex = layout->getAuxBufferIndex();
+_swizzleBufferIndex = layout->getSwizzleBufferIndex();
+_bufferSizeBufferIndex = layout->getBufferSizeBufferIndex();
_indirectParamsIndex = layout->getIndirectParamsIndex();
_outputBufferIndex = layout->getOutputBufferIndex();
_tessCtlPatchOutputBufferIndex = layout->getTessCtlPatchOutputBufferIndex();

@@ -1164,7 +1197,8 @@ void MVKComputePipeline::getStages(MVKVector<uint32_t>& stages) {
void MVKComputePipeline::encode(MVKCommandEncoder* cmdEncoder, uint32_t) {
[cmdEncoder->getMTLComputeEncoder(kMVKCommandUseDispatch) setComputePipelineState: _mtlPipelineState];
cmdEncoder->_mtlThreadgroupSize = _mtlThreadgroupSize;
-cmdEncoder->_computeResourcesState.bindAuxBuffer(_auxBufferIndex, _needsAuxBuffer);
+cmdEncoder->_computeResourcesState.bindSwizzleBuffer(_swizzleBufferIndex, _needsSwizzleBuffer);
+cmdEncoder->_computeResourcesState.bindBufferSizeBuffer(_bufferSizeBufferIndex, _needsBufferSizeBuffer);
}

MVKComputePipeline::MVKComputePipeline(MVKDevice* device,

@@ -1191,8 +1225,11 @@ MVKComputePipeline::MVKComputePipeline(MVKDevice* device,
setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "Compute shader function could not be compiled into pipeline. See previous logged error."));
}

-if (_needsAuxBuffer && _auxBufferIndex.stages[kMVKShaderStageCompute] > _device->_pMetalFeatures->maxPerStageBufferCount) {
-setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "Compute shader requires auxiliary buffer, but there is no free slot to pass it."));
+if (_needsSwizzleBuffer && _swizzleBufferIndex.stages[kMVKShaderStageCompute] > _device->_pMetalFeatures->maxPerStageBufferCount) {
+setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "Compute shader requires swizzle buffer, but there is no free slot to pass it."));
}
+if (_needsBufferSizeBuffer && _bufferSizeBufferIndex.stages[kMVKShaderStageCompute] > _device->_pMetalFeatures->maxPerStageBufferCount) {
+setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "Compute shader requires buffer size buffer, but there is no free slot to pass it."));
+}
}

@@ -1211,12 +1248,15 @@ MVKMTLFunction MVKComputePipeline::getMTLFunction(const VkComputePipelineCreateI

MVKPipelineLayout* layout = (MVKPipelineLayout*)pCreateInfo->layout;
layout->populateShaderConverterContext(shaderContext);
-_auxBufferIndex = layout->getAuxBufferIndex();
-shaderContext.options.auxBufferIndex = _auxBufferIndex.stages[kMVKShaderStageCompute];
+_swizzleBufferIndex = layout->getSwizzleBufferIndex();
+_bufferSizeBufferIndex = layout->getBufferSizeBufferIndex();
+shaderContext.options.swizzleBufferIndex = _swizzleBufferIndex.stages[kMVKShaderStageCompute];
+shaderContext.options.bufferSizeBufferIndex = _bufferSizeBufferIndex.stages[kMVKShaderStageCompute];

MVKShaderModule* mvkShdrMod = (MVKShaderModule*)pSS->module;
MVKMTLFunction func = mvkShdrMod->getMTLFunction(&shaderContext, pSS->pSpecializationInfo, _pipelineCache);
-_needsAuxBuffer = shaderContext.options.needsAuxBuffer;
+_needsSwizzleBuffer = shaderContext.options.needsSwizzleBuffer;
+_needsBufferSizeBuffer = shaderContext.options.needsBufferSizeBuffer;
return func;
}

@@ -1290,14 +1330,28 @@ namespace mvk {
void serialize(Archive & archive, SPIRVToMSLConverterOptions& opt) {
archive(opt.entryPointName,
opt.entryPointStage,
opt.tessPatchKind,
opt.mslVersion,
opt.texelBufferTextureWidth,
-opt.auxBufferIndex,
+opt.swizzleBufferIndex,
opt.indirectParamsBufferIndex,
opt.outputBufferIndex,
opt.patchOutputBufferIndex,
opt.tessLevelBufferIndex,
+opt.bufferSizeBufferIndex,
opt.inputThreadgroupMemIndex,
opt.numTessControlPoints,
opt.shouldFlipVertexY,
opt.isRenderingPoints,
opt.shouldSwizzleTextureSamples,
opt.shouldCaptureOutput,
opt.tessDomainOriginInLowerLeft,
opt.isRasterizationDisabled,
-opt.needsAuxBuffer);
+opt.needsSwizzleBuffer,
+opt.needsOutputBuffer,
+opt.needsPatchOutputBuffer,
+opt.needsBufferSizeBuffer,
+opt.needsInputThreadgroupMem);
}

template<class Archive>

@@ -27,12 +27,6 @@
using namespace mvk;
using namespace std;

-// Verify that the spvAux structure used to pass auxilliary info between MoltenVK and SPIRV-Cross has not changed.
-#define MVK_SUPPORTED_MSL_AUX_BUFFER_STRUCT_VERSION 1
-#if MVK_SUPPORTED_MSL_AUX_BUFFER_STRUCT_VERSION != SPIRV_CROSS_MSL_AUX_BUFFER_STRUCT_VERSION
-# error "The version number of the MSL spvAux struct used to pass auxilliary info to shaders does not match between MoltenVK and SPIRV-Cross. If the spvAux struct definition is not the same between MoltenVK and shaders created by SPRIV-Cross, memory errors will occur."
-#endif
-
#pragma mark -
#pragma mark SPIRVToMSLConverterContext

@@ -48,11 +42,12 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverterOptions::matches(const SPIRVToMSLConve
if (entryPointStage != other.entryPointStage) { return false; }
if (mslVersion != other.mslVersion) { return false; }
if (texelBufferTextureWidth != other.texelBufferTextureWidth) { return false; }
-if (auxBufferIndex != other.auxBufferIndex) { return false; }
+if (swizzleBufferIndex != other.swizzleBufferIndex) { return false; }
if (indirectParamsBufferIndex != other.indirectParamsBufferIndex) { return false; }
if (outputBufferIndex != other.outputBufferIndex) { return false; }
if (patchOutputBufferIndex != other.patchOutputBufferIndex) { return false; }
if (tessLevelBufferIndex != other.tessLevelBufferIndex) { return false; }
+if (bufferSizeBufferIndex != other.bufferSizeBufferIndex) { return false; }
if (inputThreadgroupMemIndex != other.inputThreadgroupMemIndex) { return false; }
if (!!shouldFlipVertexY != !!other.shouldFlipVertexY) { return false; }
if (!!isRenderingPoints != !!other.isRenderingPoints) { return false; }

@@ -165,9 +160,10 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverterContext::matches(const SPIRVToMSLConve
MVK_PUBLIC_SYMBOL void SPIRVToMSLConverterContext::alignWith(const SPIRVToMSLConverterContext& srcContext) {

options.isRasterizationDisabled = srcContext.options.isRasterizationDisabled;
-options.needsAuxBuffer = srcContext.options.needsAuxBuffer;
+options.needsSwizzleBuffer = srcContext.options.needsSwizzleBuffer;
options.needsOutputBuffer = srcContext.options.needsOutputBuffer;
options.needsPatchOutputBuffer = srcContext.options.needsPatchOutputBuffer;
+options.needsBufferSizeBuffer = srcContext.options.needsBufferSizeBuffer;
options.needsInputThreadgroupMem = srcContext.options.needsInputThreadgroupMem;

if (stageSupportsVertexAttributes()) {

@@ -249,11 +245,12 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverter::convert(SPIRVToMSLConverterContext&
mslOpts.platform = getCompilerMSLPlatform(context.options.platform);
mslOpts.msl_version = context.options.mslVersion;
mslOpts.texel_buffer_texture_width = context.options.texelBufferTextureWidth;
-mslOpts.aux_buffer_index = context.options.auxBufferIndex;
+mslOpts.swizzle_buffer_index = context.options.swizzleBufferIndex;
mslOpts.indirect_params_buffer_index = context.options.indirectParamsBufferIndex;
mslOpts.shader_output_buffer_index = context.options.outputBufferIndex;
mslOpts.shader_patch_output_buffer_index = context.options.patchOutputBufferIndex;
mslOpts.shader_tess_factor_buffer_index = context.options.tessLevelBufferIndex;
+mslOpts.buffer_size_buffer_index = context.options.bufferSizeBufferIndex;
mslOpts.shader_input_wg_index = context.options.inputThreadgroupMemIndex;
mslOpts.enable_point_size_builtin = context.options.isRenderingPoints;
mslOpts.disable_rasterization = context.options.isRasterizationDisabled;

@@ -324,9 +321,10 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverter::convert(SPIRVToMSLConverterContext&
// which vertex attributes and resource bindings are used by the shader
populateEntryPoint(_entryPoint, pMSLCompiler, context.options);
context.options.isRasterizationDisabled = pMSLCompiler && pMSLCompiler->get_is_rasterization_disabled();
-context.options.needsAuxBuffer = pMSLCompiler && pMSLCompiler->needs_aux_buffer();
+context.options.needsSwizzleBuffer = pMSLCompiler && pMSLCompiler->needs_swizzle_buffer();
context.options.needsOutputBuffer = pMSLCompiler && pMSLCompiler->needs_output_buffer();
context.options.needsPatchOutputBuffer = pMSLCompiler && pMSLCompiler->needs_patch_output_buffer();
+context.options.needsBufferSizeBuffer = pMSLCompiler && pMSLCompiler->needs_buffer_size_buffer();
context.options.needsInputThreadgroupMem = pMSLCompiler && pMSLCompiler->needs_input_threadgroup_mem();

if (context.stageSupportsVertexAttributes()) {

@@ -50,11 +50,12 @@ namespace mvk {
uint32_t mslVersion = makeMSLVersion(2, 1);
Platform platform = getNativePlatform();
uint32_t texelBufferTextureWidth = 4096;
-uint32_t auxBufferIndex = 0;
+uint32_t swizzleBufferIndex = 0;
uint32_t indirectParamsBufferIndex = 0;
uint32_t outputBufferIndex = 0;
uint32_t patchOutputBufferIndex = 0;
uint32_t tessLevelBufferIndex = 0;
+uint32_t bufferSizeBufferIndex = 0;
uint32_t inputThreadgroupMemIndex = 0;
uint32_t numTessControlPoints = 0;
bool shouldFlipVertexY = true;

@@ -64,9 +65,10 @@ namespace mvk {
bool tessDomainOriginInLowerLeft = false;

bool isRasterizationDisabled = false;
-bool needsAuxBuffer = false;
+bool needsSwizzleBuffer = false;
bool needsOutputBuffer = false;
bool needsPatchOutputBuffer = false;
+bool needsBufferSizeBuffer = false;
bool needsInputThreadgroupMem = false;

/**