Merge pull request #508 from cdavis5e/tessellation

Add support for tessellation.
This commit is contained in:
Bill Hollings 2019-03-15 16:39:45 -04:00 committed by GitHub
commit 0a536b2747
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
27 changed files with 2761 additions and 695 deletions

View File

@ -50,6 +50,10 @@
/* End PBXAggregateTarget section */
/* Begin PBXBuildFile section */
450A4F65221C5A95007203D7 /* spirv_reflect.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 450A4F63221C5A95007203D7 /* spirv_reflect.hpp */; };
450A4F66221C5A95007203D7 /* spirv_reflect.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 450A4F63221C5A95007203D7 /* spirv_reflect.hpp */; };
450A4F67221C5A95007203D7 /* spirv_reflect.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 450A4F64221C5A95007203D7 /* spirv_reflect.cpp */; };
450A4F68221C5A95007203D7 /* spirv_reflect.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 450A4F64221C5A95007203D7 /* spirv_reflect.cpp */; };
A92C86AA2226CC3600102DD6 /* spirv_target_env.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A92C85312226CC3600102DD6 /* spirv_target_env.cpp */; };
A92C86AB2226CC3600102DD6 /* spirv_target_env.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A92C85312226CC3600102DD6 /* spirv_target_env.cpp */; };
A92C86AC2226CC3600102DD6 /* assembly_grammar.h in Headers */ = {isa = PBXBuildFile; fileRef = A92C85332226CC3600102DD6 /* assembly_grammar.h */; };
@ -1042,6 +1046,8 @@
/* End PBXContainerItemProxy section */
/* Begin PBXFileReference section */
450A4F63221C5A95007203D7 /* spirv_reflect.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = spirv_reflect.hpp; sourceTree = "<group>"; };
450A4F64221C5A95007203D7 /* spirv_reflect.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = spirv_reflect.cpp; sourceTree = "<group>"; };
A90FD89F21CC4EAB00B92BB2 /* libSPIRVCross.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libSPIRVCross.a; sourceTree = BUILT_PRODUCTS_DIR; };
A90FD9E421CC4EB900B92BB2 /* libSPIRVCross.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libSPIRVCross.a; sourceTree = BUILT_PRODUCTS_DIR; };
A92C85312226CC3600102DD6 /* spirv_target_env.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = spirv_target_env.cpp; sourceTree = "<group>"; };
@ -2205,6 +2211,8 @@
A976290221CC60BC00B52A68 /* spirv_msl.hpp */,
A976290421CC60BC00B52A68 /* spirv_parser.cpp */,
A976290C21CC60BC00B52A68 /* spirv_parser.hpp */,
450A4F64221C5A95007203D7 /* spirv_reflect.cpp */,
450A4F63221C5A95007203D7 /* spirv_reflect.hpp */,
);
path = "SPIRV-Cross";
sourceTree = "<group>";
@ -2714,6 +2722,7 @@
A976292421CC60BC00B52A68 /* spirv_parser.hpp in Headers */,
A976291221CC60BC00B52A68 /* spirv_cross.hpp in Headers */,
A976291021CC60BC00B52A68 /* spirv_msl.hpp in Headers */,
450A4F66221C5A95007203D7 /* spirv_reflect.hpp in Headers */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@ -2728,6 +2737,7 @@
A976292321CC60BC00B52A68 /* spirv_parser.hpp in Headers */,
A976291121CC60BC00B52A68 /* spirv_cross.hpp in Headers */,
A976290F21CC60BC00B52A68 /* spirv_msl.hpp in Headers */,
450A4F65221C5A95007203D7 /* spirv_reflect.hpp in Headers */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@ -3404,6 +3414,7 @@
A976291421CC60BC00B52A68 /* spirv_parser.cpp in Sources */,
A976292221CC60BC00B52A68 /* spirv_cfg.cpp in Sources */,
A976291621CC60BC00B52A68 /* spirv_cross.cpp in Sources */,
450A4F68221C5A95007203D7 /* spirv_reflect.cpp in Sources */,
A976291821CC60BC00B52A68 /* spirv_glsl.cpp in Sources */,
A976292821CC60BC00B52A68 /* spirv_cross_parsed_ir.cpp in Sources */,
);
@ -3417,6 +3428,7 @@
A976291321CC60BC00B52A68 /* spirv_parser.cpp in Sources */,
A976292121CC60BC00B52A68 /* spirv_cfg.cpp in Sources */,
A976291521CC60BC00B52A68 /* spirv_cross.cpp in Sources */,
450A4F67221C5A95007203D7 /* spirv_reflect.cpp in Sources */,
A976291721CC60BC00B52A68 /* spirv_glsl.cpp in Sources */,
A976292721CC60BC00B52A68 /* spirv_cross_parsed_ir.cpp in Sources */,
);

View File

@ -1 +1 @@
d9ed3dcc7a7e62e5f95fd8f24e3d35e7e402ae92
6f50806698c2de18ba32ef4bbd953ad761a00cf2

View File

@ -323,6 +323,16 @@ MTLSamplerMipFilter mvkMTLSamplerMipFilterFromVkSamplerMipmapMode(VkSamplerMipma
#pragma mark -
#pragma mark Render pipeline
/** Identifies a particular shading stage in a pipeline. */
typedef enum {
kMVKShaderStageVertex = 0,
kMVKShaderStageTessCtl,
kMVKShaderStageTessEval,
kMVKShaderStageFragment,
kMVKShaderStageCompute,
kMVKShaderStageMax
} MVKShaderStage;
/** Returns the Metal MTLColorWriteMask corresponding to the specified Vulkan VkColorComponentFlags. */
MTLColorWriteMask mvkMTLColorWriteMaskFromVkChannelFlags(VkColorComponentFlags vkWriteFlags);
@ -380,6 +390,17 @@ MTLIndexType mvkMTLIndexTypeFromVkIndexType(VkIndexType vkIdxType);
/** Returns the size, in bytes, of a vertex index of the specified type. */
size_t mvkMTLIndexTypeSizeInBytes(MTLIndexType mtlIdxType);
/** Returns the MVKShaderStage corresponding to the specified Vulkan VkShaderStageFlagBits. */
MVKShaderStage mvkShaderStageFromVkShaderStageFlagBits(VkShaderStageFlagBits vkStage);
/** Returns the VkShaderStageFlagBits corresponding to the specified MoltenVK MVKShaderStage. */
VkShaderStageFlagBits mvkVkShaderStageFlagBitsFromMVKShaderStage(MVKShaderStage mvkStage);
/** Returns the MTLWinding corresponding to the specified spv::ExecutionMode. */
MTLWinding mvkMTLWindingFromSpvExecutionMode(uint32_t spvMode);
/** Returns the MTLTessellationPartitionMode corresponding to the specified spv::ExecutionMode. */
MTLTessellationPartitionMode mvkMTLTessellationPartitionModeFromSpvExecutionMode(uint32_t spvMode);
#pragma mark -
#pragma mark Geometry conversions

View File

@ -20,6 +20,7 @@
#include "MVKCommandBuffer.h"
#include "MVKCommandPool.h"
#include "MVKBuffer.h"
#include "MVKPipeline.h"
#include "MVKFoundation.h"
#include "mvk_datatypes.h"
@ -45,7 +46,7 @@ void MVKCmdBindVertexBuffers::setContent(uint32_t startBinding,
}
void MVKCmdBindVertexBuffers::encode(MVKCommandEncoder* cmdEncoder) {
for (auto& b : _bindings) { cmdEncoder->_graphicsResourcesState.bindVertexBuffer(b); }
for (auto& b : _bindings) { cmdEncoder->_graphicsResourcesState.bindBuffer(kMVKShaderStageVertex, b); }
}
MVKCmdBindVertexBuffers::MVKCmdBindVertexBuffers(MVKCommandTypePool<MVKCmdBindVertexBuffers>* pool)
@ -93,20 +94,155 @@ void MVKCmdDraw::setContent(uint32_t vertexCount,
void MVKCmdDraw::encode(MVKCommandEncoder* cmdEncoder) {
cmdEncoder->finalizeDrawState(); // Ensure all updated state has been submitted to Metal
auto* pipeline = (MVKGraphicsPipeline*)cmdEncoder->_graphicsPipelineState.getPipeline();
if (cmdEncoder->_pDeviceMetalFeatures->baseVertexInstanceDrawing) {
[cmdEncoder->_mtlRenderEncoder drawPrimitives: cmdEncoder->_mtlPrimitiveType
vertexStart: _firstVertex
vertexCount: _vertexCount
instanceCount: _instanceCount
baseInstance: _firstInstance];
} else {
[cmdEncoder->_mtlRenderEncoder drawPrimitives: cmdEncoder->_mtlPrimitiveType
vertexStart: _firstVertex
vertexCount: _vertexCount
instanceCount: _instanceCount];
}
MVKVectorInline<uint32_t, 4> stages;
pipeline->getStages(stages);
const MVKMTLBufferAllocation* vtxOutBuff = nullptr;
const MVKMTLBufferAllocation* tcOutBuff = nullptr;
const MVKMTLBufferAllocation* tcPatchOutBuff = nullptr;
const MVKMTLBufferAllocation* tcLevelBuff = nullptr;
uint32_t patchCount = 0;
uint32_t inControlPointCount = 0, outControlPointCount = 0;
if (pipeline->isTessellationPipeline()) {
inControlPointCount = pipeline->getInputControlPointCount();
outControlPointCount = pipeline->getOutputControlPointCount();
patchCount = (uint32_t)mvkCeilingDivide(_vertexCount * _instanceCount, inControlPointCount);
}
for (uint32_t s : stages) {
auto stage = MVKGraphicsStage(s);
cmdEncoder->finalizeDrawState(stage); // Ensure all updated state has been submitted to Metal
id<MTLComputeCommandEncoder> mtlTessCtlEncoder = nil;
switch (stage) {
case kMVKGraphicsStageVertex:
if (pipeline->needsVertexOutputBuffer()) {
vtxOutBuff = cmdEncoder->getTempMTLBuffer(_vertexCount * _instanceCount * 4 * cmdEncoder->_pDeviceProperties->limits.maxVertexOutputComponents);
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: vtxOutBuff->_mtlBuffer
offset: vtxOutBuff->_offset
atIndex: pipeline->getOutputBufferIndex().stages[kMVKShaderStageVertex]];
// The shader only needs the number of vertices, so that's all we'll give it.
cmdEncoder->setVertexBytes(cmdEncoder->_mtlRenderEncoder,
&_vertexCount,
sizeof(_vertexCount),
pipeline->getIndirectParamsIndex().stages[kMVKShaderStageVertex]);
}
if (cmdEncoder->_pDeviceMetalFeatures->baseVertexInstanceDrawing) {
[cmdEncoder->_mtlRenderEncoder drawPrimitives: MTLPrimitiveTypePoint
vertexStart: _firstVertex
vertexCount: _vertexCount
instanceCount: _instanceCount
baseInstance: _firstInstance];
} else {
[cmdEncoder->_mtlRenderEncoder drawPrimitives: MTLPrimitiveTypePoint
vertexStart: _firstVertex
vertexCount: _vertexCount
instanceCount: _instanceCount];
}
// Mark pipeline, resources, and tess control push constants as dirty
// so I apply them during the next stage.
cmdEncoder->_graphicsPipelineState.beginMetalRenderPass();
cmdEncoder->_graphicsResourcesState.beginMetalRenderPass();
cmdEncoder->getPushConstants(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)->beginMetalRenderPass();
break;
case kMVKGraphicsStageTessControl:
mtlTessCtlEncoder = cmdEncoder->getMTLComputeEncoder(kMVKCommandUseTessellationControl);
if (pipeline->needsTessCtlOutputBuffer()) {
tcOutBuff = cmdEncoder->getTempMTLBuffer(outControlPointCount * patchCount * _instanceCount * 4 * cmdEncoder->_pDeviceProperties->limits.maxTessellationControlPerVertexOutputComponents);
[mtlTessCtlEncoder setBuffer: tcOutBuff->_mtlBuffer
offset: tcOutBuff->_offset
atIndex: pipeline->getOutputBufferIndex().stages[kMVKShaderStageTessCtl]];
}
if (pipeline->needsTessCtlPatchOutputBuffer()) {
tcPatchOutBuff = cmdEncoder->getTempMTLBuffer(_instanceCount * patchCount * 4 * cmdEncoder->_pDeviceProperties->limits.maxTessellationControlPerPatchOutputComponents);
[mtlTessCtlEncoder setBuffer: tcPatchOutBuff->_mtlBuffer
offset: tcPatchOutBuff->_offset
atIndex: pipeline->getTessCtlPatchOutputBufferIndex()];
}
tcLevelBuff = cmdEncoder->getTempMTLBuffer(_instanceCount * patchCount * sizeof(MTLQuadTessellationFactorsHalf));
[mtlTessCtlEncoder setBuffer: tcLevelBuff->_mtlBuffer
offset: tcLevelBuff->_offset
atIndex: pipeline->getTessCtlLevelBufferIndex()];
cmdEncoder->setComputeBytes(mtlTessCtlEncoder,
&inControlPointCount,
sizeof(inControlPointCount),
pipeline->getIndirectParamsIndex().stages[kMVKShaderStageTessCtl]);
if (pipeline->needsVertexOutputBuffer()) {
[mtlTessCtlEncoder setBuffer: vtxOutBuff->_mtlBuffer
offset: vtxOutBuff->_offset
atIndex: kMVKTessCtlInputBufferIndex];
[mtlTessCtlEncoder setStageInRegion: MTLRegionMake1D(0, _instanceCount * std::max(_vertexCount, outControlPointCount * patchCount))];
}
if (outControlPointCount > inControlPointCount) {
// In this case, we use an index buffer to avoid stepping over some of the input points.
const MVKMTLBufferAllocation* tcIndexBuff = cmdEncoder->getTempMTLBuffer(_instanceCount * patchCount * outControlPointCount * 4);
auto* indices = (uint32_t*)tcIndexBuff->getContents();
uint32_t index = 0;
for (uint32_t i = 0; i < outControlPointCount * patchCount; i++) {
if ((i % outControlPointCount) < inControlPointCount) {
indices[i] = index++;
} else {
indices[i] = 0;
}
}
[mtlTessCtlEncoder setBuffer: tcIndexBuff->_mtlBuffer
offset: tcIndexBuff->_offset
atIndex: kMVKTessCtlIndexBufferIndex];
}
[mtlTessCtlEncoder dispatchThreadgroups: MTLSizeMake(_instanceCount * patchCount, 1, 1)
threadsPerThreadgroup: MTLSizeMake(std::max(inControlPointCount, outControlPointCount), 1, 1)];
// Running this stage prematurely ended the render pass, so we have to start it up again.
// TODO: On iOS, maybe we could use a tile shader to avoid this.
cmdEncoder->beginMetalRenderPass();
break;
case kMVKGraphicsStageRasterization:
if (pipeline->isTessellationPipeline()) {
if (pipeline->needsTessCtlOutputBuffer()) {
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcOutBuff->_mtlBuffer
offset: tcOutBuff->_offset
atIndex: kMVKTessEvalInputBufferIndex];
}
if (pipeline->needsTessCtlPatchOutputBuffer()) {
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcPatchOutBuff->_mtlBuffer
offset: tcPatchOutBuff->_offset
atIndex: kMVKTessEvalPatchInputBufferIndex];
}
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcLevelBuff->_mtlBuffer
offset: tcLevelBuff->_offset
atIndex: kMVKTessEvalLevelBufferIndex];
[cmdEncoder->_mtlRenderEncoder setTessellationFactorBuffer: tcLevelBuff->_mtlBuffer
offset: tcLevelBuff->_offset
instanceStride: 0];
[cmdEncoder->_mtlRenderEncoder drawPatches: outControlPointCount
patchStart: 0
patchCount: _instanceCount * patchCount
patchIndexBuffer: nil
patchIndexBufferOffset: 0
instanceCount: 1
baseInstance: 0];
// Mark pipeline, resources, and tess control push constants as dirty
// so I apply them during the next stage.
cmdEncoder->_graphicsPipelineState.beginMetalRenderPass();
cmdEncoder->_graphicsResourcesState.beginMetalRenderPass();
cmdEncoder->getPushConstants(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)->beginMetalRenderPass();
} else {
if (cmdEncoder->_pDeviceMetalFeatures->baseVertexInstanceDrawing) {
[cmdEncoder->_mtlRenderEncoder drawPrimitives: cmdEncoder->_mtlPrimitiveType
vertexStart: _firstVertex
vertexCount: _vertexCount
instanceCount: _instanceCount
baseInstance: _firstInstance];
} else {
[cmdEncoder->_mtlRenderEncoder drawPrimitives: cmdEncoder->_mtlPrimitiveType
vertexStart: _firstVertex
vertexCount: _vertexCount
instanceCount: _instanceCount];
}
}
break;
}
}
}
MVKCmdDraw::MVKCmdDraw(MVKCommandTypePool<MVKCmdDraw>* pool)
@ -140,29 +276,199 @@ void MVKCmdDrawIndexed::setContent(uint32_t indexCount,
void MVKCmdDrawIndexed::encode(MVKCommandEncoder* cmdEncoder) {
cmdEncoder->finalizeDrawState(); // Ensure all updated state has been submitted to Metal
auto* pipeline = (MVKGraphicsPipeline*)cmdEncoder->_graphicsPipelineState.getPipeline();
MVKVectorInline<uint32_t, 4> stages;
pipeline->getStages(stages);
MVKIndexMTLBufferBinding& ibb = cmdEncoder->_graphicsResourcesState._mtlIndexBufferBinding;
size_t idxSize = mvkMTLIndexTypeSizeInBytes(ibb.mtlIndexType);
VkDeviceSize idxBuffOffset = ibb.offset + (_firstIndex * idxSize);
size_t idxSize = mvkMTLIndexTypeSizeInBytes(ibb.mtlIndexType);
VkDeviceSize idxBuffOffset = ibb.offset + (_firstIndex * idxSize);
if (cmdEncoder->_pDeviceMetalFeatures->baseVertexInstanceDrawing) {
[cmdEncoder->_mtlRenderEncoder drawIndexedPrimitives: cmdEncoder->_mtlPrimitiveType
indexCount: _indexCount
indexType: ibb.mtlIndexType
indexBuffer: ibb.mtlBuffer
indexBufferOffset: idxBuffOffset
instanceCount: _instanceCount
baseVertex: _vertexOffset
baseInstance: _firstInstance];
} else {
[cmdEncoder->_mtlRenderEncoder drawIndexedPrimitives: cmdEncoder->_mtlPrimitiveType
indexCount: _indexCount
indexType: ibb.mtlIndexType
indexBuffer: ibb.mtlBuffer
indexBufferOffset: idxBuffOffset
instanceCount: _instanceCount];
}
const MVKMTLBufferAllocation* vtxOutBuff = nullptr;
const MVKMTLBufferAllocation* tcOutBuff = nullptr;
const MVKMTLBufferAllocation* tcPatchOutBuff = nullptr;
const MVKMTLBufferAllocation* tcLevelBuff = nullptr;
const MVKMTLBufferAllocation* tcIndexBuff = nullptr;
uint32_t patchCount = 0;
uint32_t inControlPointCount = 0, outControlPointCount = 0;
if (pipeline->isTessellationPipeline()) {
inControlPointCount = pipeline->getInputControlPointCount();
outControlPointCount = pipeline->getOutputControlPointCount();
patchCount = (uint32_t)mvkCeilingDivide(_indexCount * _instanceCount, inControlPointCount);
}
for (uint32_t s : stages) {
auto stage = MVKGraphicsStage(s);
id<MTLComputeCommandEncoder> mtlTessCtlEncoder = nil;
if (stage == kMVKGraphicsStageTessControl && (outControlPointCount > inControlPointCount || _instanceCount > 1)) {
// We need to make a copy of the old index buffer so we can insert gaps where
// there are more output points than input points, and also to add more indices
// to handle instancing. Do it now, before finalizing draw state, or the
// pipeline will get overridden.
// Yeah, this sucks. But there aren't many good ways for dealing with this issue.
mtlTessCtlEncoder = cmdEncoder->getMTLComputeEncoder(kMVKCommandUseTessellationControl);
tcIndexBuff = cmdEncoder->getTempMTLBuffer(_instanceCount * patchCount * outControlPointCount * idxSize);
id<MTLComputePipelineState> mtlCopyIndexState = getCommandEncodingPool()->getCmdDrawIndexedCopyIndexBufferMTLComputePipelineState(ibb.mtlIndexType);
[mtlTessCtlEncoder setComputePipelineState: mtlCopyIndexState];
[mtlTessCtlEncoder setBuffer: ibb.mtlBuffer
offset: ibb.offset
atIndex: 0];
[mtlTessCtlEncoder setBuffer: tcIndexBuff->_mtlBuffer
offset: tcIndexBuff->_offset
atIndex: 1];
cmdEncoder->setComputeBytes(mtlTessCtlEncoder,
&inControlPointCount,
sizeof(inControlPointCount),
2);
cmdEncoder->setComputeBytes(mtlTessCtlEncoder,
&outControlPointCount,
sizeof(outControlPointCount),
3);
MTLDrawIndexedPrimitivesIndirectArguments params;
params.indexCount = _indexCount;
params.instanceCount = _instanceCount;
params.indexStart = _firstIndex;
cmdEncoder->setComputeBytes(mtlTessCtlEncoder,
&params,
sizeof(params),
4);
[mtlTessCtlEncoder dispatchThreadgroups: MTLSizeMake(1, 1, 1) threadsPerThreadgroup: MTLSizeMake(1, 1, 1)];
}
cmdEncoder->finalizeDrawState(stage); // Ensure all updated state has been submitted to Metal
switch (stage) {
case kMVKGraphicsStageVertex:
if (pipeline->needsVertexOutputBuffer()) {
vtxOutBuff = cmdEncoder->getTempMTLBuffer(_indexCount * _instanceCount * 4 * cmdEncoder->_pDeviceProperties->limits.maxVertexOutputComponents);
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: vtxOutBuff->_mtlBuffer
offset: vtxOutBuff->_offset
atIndex: pipeline->getOutputBufferIndex().stages[kMVKShaderStageVertex]];
// The shader only needs the number of vertices, so that's all we'll give it.
cmdEncoder->setVertexBytes(cmdEncoder->_mtlRenderEncoder,
&_indexCount,
sizeof(_indexCount),
pipeline->getIndirectParamsIndex().stages[kMVKShaderStageVertex]);
}
if (cmdEncoder->_pDeviceMetalFeatures->baseVertexInstanceDrawing) {
[cmdEncoder->_mtlRenderEncoder drawIndexedPrimitives: MTLPrimitiveTypePoint
indexCount: _indexCount
indexType: ibb.mtlIndexType
indexBuffer: ibb.mtlBuffer
indexBufferOffset: idxBuffOffset
instanceCount: _instanceCount
baseVertex: _vertexOffset
baseInstance: _firstInstance];
} else {
[cmdEncoder->_mtlRenderEncoder drawIndexedPrimitives: MTLPrimitiveTypePoint
indexCount: _indexCount
indexType: ibb.mtlIndexType
indexBuffer: ibb.mtlBuffer
indexBufferOffset: idxBuffOffset
instanceCount: _instanceCount];
}
// Mark pipeline, resources, and tess control push constants as dirty
// so I apply them during the next stage.
cmdEncoder->_graphicsPipelineState.beginMetalRenderPass();
cmdEncoder->_graphicsResourcesState.beginMetalRenderPass();
cmdEncoder->getPushConstants(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)->beginMetalRenderPass();
break;
case kMVKGraphicsStageTessControl:
mtlTessCtlEncoder = cmdEncoder->getMTLComputeEncoder(kMVKCommandUseTessellationControl);
if (pipeline->needsTessCtlOutputBuffer()) {
tcOutBuff = cmdEncoder->getTempMTLBuffer(outControlPointCount * patchCount * _instanceCount * 4 * cmdEncoder->_pDeviceProperties->limits.maxTessellationControlPerVertexOutputComponents);
[mtlTessCtlEncoder setBuffer: tcOutBuff->_mtlBuffer
offset: tcOutBuff->_offset
atIndex: pipeline->getOutputBufferIndex().stages[kMVKShaderStageTessCtl]];
}
if (pipeline->needsTessCtlPatchOutputBuffer()) {
tcPatchOutBuff = cmdEncoder->getTempMTLBuffer(_instanceCount * patchCount * 4 * cmdEncoder->_pDeviceProperties->limits.maxTessellationControlPerPatchOutputComponents);
[mtlTessCtlEncoder setBuffer: tcPatchOutBuff->_mtlBuffer
offset: tcPatchOutBuff->_offset
atIndex: pipeline->getTessCtlPatchOutputBufferIndex()];
}
tcLevelBuff = cmdEncoder->getTempMTLBuffer(_instanceCount * patchCount * sizeof(MTLQuadTessellationFactorsHalf));
[mtlTessCtlEncoder setBuffer: tcLevelBuff->_mtlBuffer
offset: tcLevelBuff->_offset
atIndex: pipeline->getTessCtlLevelBufferIndex()];
cmdEncoder->setComputeBytes(mtlTessCtlEncoder,
&inControlPointCount,
sizeof(inControlPointCount),
pipeline->getIndirectParamsIndex().stages[kMVKShaderStageTessCtl]);
if (pipeline->needsVertexOutputBuffer()) {
[mtlTessCtlEncoder setBuffer: vtxOutBuff->_mtlBuffer
offset: vtxOutBuff->_offset
atIndex: kMVKTessCtlInputBufferIndex];
[mtlTessCtlEncoder setStageInRegion: MTLRegionMake1D(0, _instanceCount * std::max(_indexCount, outControlPointCount * patchCount))];
}
if (outControlPointCount > inControlPointCount || _instanceCount > 1) {
[mtlTessCtlEncoder setBuffer: tcIndexBuff->_mtlBuffer
offset: tcIndexBuff->_offset
atIndex: kMVKTessCtlIndexBufferIndex];
} else {
[mtlTessCtlEncoder setBuffer: ibb.mtlBuffer
offset: idxBuffOffset
atIndex: kMVKTessCtlIndexBufferIndex];
}
[mtlTessCtlEncoder dispatchThreadgroups: MTLSizeMake(_instanceCount * patchCount, 1, 1)
threadsPerThreadgroup: MTLSizeMake(std::max(inControlPointCount, outControlPointCount), 1, 1)];
// Running this stage prematurely ended the render pass, so we have to start it up again.
// TODO: On iOS, maybe we could use a tile shader to avoid this.
cmdEncoder->beginMetalRenderPass();
break;
case kMVKGraphicsStageRasterization:
if (pipeline->isTessellationPipeline()) {
if (pipeline->needsTessCtlOutputBuffer()) {
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcOutBuff->_mtlBuffer
offset: tcOutBuff->_offset
atIndex: kMVKTessEvalInputBufferIndex];
}
if (pipeline->needsTessCtlPatchOutputBuffer()) {
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcPatchOutBuff->_mtlBuffer
offset: tcPatchOutBuff->_offset
atIndex: kMVKTessEvalPatchInputBufferIndex];
}
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcLevelBuff->_mtlBuffer
offset: tcLevelBuff->_offset
atIndex: kMVKTessEvalLevelBufferIndex];
[cmdEncoder->_mtlRenderEncoder setTessellationFactorBuffer: tcLevelBuff->_mtlBuffer
offset: tcLevelBuff->_offset
instanceStride: 0];
// The tessellation control shader produced output in the correct order, so there's no need to use
// an index buffer here.
[cmdEncoder->_mtlRenderEncoder drawPatches: outControlPointCount
patchStart: 0
patchCount: _instanceCount * patchCount
patchIndexBuffer: nil
patchIndexBufferOffset: 0
instanceCount: 1
baseInstance: 0];
// Mark pipeline, resources, and tess control push constants as dirty
// so I apply them during the next stage.
cmdEncoder->_graphicsPipelineState.beginMetalRenderPass();
cmdEncoder->_graphicsResourcesState.beginMetalRenderPass();
cmdEncoder->getPushConstants(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)->beginMetalRenderPass();
} else {
if (cmdEncoder->_pDeviceMetalFeatures->baseVertexInstanceDrawing) {
[cmdEncoder->_mtlRenderEncoder drawIndexedPrimitives: cmdEncoder->_mtlPrimitiveType
indexCount: _indexCount
indexType: ibb.mtlIndexType
indexBuffer: ibb.mtlBuffer
indexBufferOffset: idxBuffOffset
instanceCount: _instanceCount
baseVertex: _vertexOffset
baseInstance: _firstInstance];
} else {
[cmdEncoder->_mtlRenderEncoder drawIndexedPrimitives: cmdEncoder->_mtlPrimitiveType
indexCount: _indexCount
indexType: ibb.mtlIndexType
indexBuffer: ibb.mtlBuffer
indexBufferOffset: idxBuffOffset
instanceCount: _instanceCount];
}
}
break;
}
}
}
MVKCmdDrawIndexed::MVKCmdDrawIndexed(MVKCommandTypePool<MVKCmdDrawIndexed>* pool)
@ -189,17 +495,215 @@ void MVKCmdDrawIndirect::setContent(VkBuffer buffer,
}
}
// This is totally arbitrary, but we're forced to do this because we don't know how many vertices
// there are at encoding time. And this will probably be inadequate for large instanced draws.
// TODO: Consider breaking up such draws using different base instance values. But this will
// require yet more munging of the indirect buffers...
static const uint32_t kMVKDrawIndirectVertexCountUpperBound = 131072;
void MVKCmdDrawIndirect::encode(MVKCommandEncoder* cmdEncoder) {
cmdEncoder->finalizeDrawState(); // Ensure all updated state has been submitted to Metal
VkDeviceSize mtlIndBuffOfst = _mtlIndirectBufferOffset;
for (uint32_t drawIdx = 0; drawIdx < _drawCount; drawIdx++) {
[cmdEncoder->_mtlRenderEncoder drawPrimitives: cmdEncoder->_mtlPrimitiveType
indirectBuffer: _mtlIndirectBuffer
indirectBufferOffset: mtlIndBuffOfst];
mtlIndBuffOfst += _mtlIndirectBufferStride;
}
auto* pipeline = (MVKGraphicsPipeline*)cmdEncoder->_graphicsPipelineState.getPipeline();
// The indirect calls for dispatchThreadgroups:... and drawPatches:... have different formats.
// We have to convert from the drawPrimitives:... format to them.
// While we're at it, we can create the temporary output buffers once and reuse them
// for each draw.
const MVKMTLBufferAllocation* tcIndirectBuff = nullptr;
const MVKMTLBufferAllocation* vtxOutBuff = nullptr;
const MVKMTLBufferAllocation* tcOutBuff = nullptr;
const MVKMTLBufferAllocation* tcPatchOutBuff = nullptr;
const MVKMTLBufferAllocation* tcLevelBuff = nullptr;
const MVKMTLBufferAllocation* tcIndexBuff = nullptr;
uint32_t patchCount = 0, vertexCount = 0;
uint32_t inControlPointCount = 0, outControlPointCount = 0;
if (pipeline->isTessellationPipeline()) {
// We can't read the indirect buffer CPU-side, since it may change between
// encoding and execution. So we don't know how big to make the buffers.
// We must assume an arbitrarily large number of vertices may be submitted.
// But not too many, or we'll exhaust available VRAM.
inControlPointCount = pipeline->getInputControlPointCount();
outControlPointCount = pipeline->getOutputControlPointCount();
vertexCount = kMVKDrawIndirectVertexCountUpperBound;
patchCount = (uint32_t)mvkCeilingDivide(vertexCount, inControlPointCount);
VkDeviceSize indirectSize = (sizeof(MTLDispatchThreadgroupsIndirectArguments) + sizeof(MTLDrawPatchIndirectArguments)) * _drawCount;
if (cmdEncoder->_pDeviceMetalFeatures->mslVersion >= 20100) {
indirectSize += sizeof(MTLStageInRegionIndirectArguments) * _drawCount;
}
tcIndirectBuff = cmdEncoder->getTempMTLBuffer(indirectSize);
if (pipeline->needsVertexOutputBuffer()) {
vtxOutBuff = cmdEncoder->getTempMTLBuffer(vertexCount * 4 * cmdEncoder->_pDeviceProperties->limits.maxVertexOutputComponents);
}
if (pipeline->needsTessCtlOutputBuffer()) {
tcOutBuff = cmdEncoder->getTempMTLBuffer(outControlPointCount * patchCount * 4 * cmdEncoder->_pDeviceProperties->limits.maxTessellationControlPerVertexOutputComponents);
}
if (pipeline->needsTessCtlPatchOutputBuffer()) {
tcPatchOutBuff = cmdEncoder->getTempMTLBuffer(patchCount * 4 * cmdEncoder->_pDeviceProperties->limits.maxTessellationControlPerPatchOutputComponents);
}
tcLevelBuff = cmdEncoder->getTempMTLBuffer(patchCount * sizeof(MTLQuadTessellationFactorsHalf));
if (outControlPointCount > inControlPointCount) {
// In this case, we use an index buffer to avoid stepping over some of the input points.
tcIndexBuff = cmdEncoder->getTempMTLBuffer(patchCount * outControlPointCount * 4);
auto* indices = (uint32_t*)tcIndexBuff->getContents();
uint32_t index = 0;
for (uint32_t i = 0; i < tcIndexBuff->_length / 4; i++) {
if ((i % outControlPointCount) < inControlPointCount) {
indices[i] = index++;
} else {
indices[i] = 0;
}
}
}
}
MVKVectorInline<uint32_t, 4> stages;
pipeline->getStages(stages);
VkDeviceSize mtlIndBuffOfst = _mtlIndirectBufferOffset;
VkDeviceSize mtlTCIndBuffOfst = tcIndirectBuff ? tcIndirectBuff->_offset : 0;
for (uint32_t drawIdx = 0; drawIdx < _drawCount; drawIdx++) {
for (uint32_t s : stages) {
auto stage = MVKGraphicsStage(s);
id<MTLComputeCommandEncoder> mtlTessCtlEncoder = nil;
if (drawIdx == 0 && stage == kMVKGraphicsStageTessControl) {
// We need the indirect buffers now. This must be done before finalizing
// draw state, or the pipeline will get overridden. This is a good time
// to do it, since it will require switching to compute anyway. Do it all
// at once to get it over with.
mtlTessCtlEncoder = cmdEncoder->getMTLComputeEncoder(kMVKCommandUseTessellationControl);
id<MTLComputePipelineState> mtlConvertState = getCommandEncodingPool()->getCmdDrawIndirectConvertBuffersMTLComputePipelineState(false);
[mtlTessCtlEncoder setComputePipelineState: mtlConvertState];
[mtlTessCtlEncoder setBuffer: _mtlIndirectBuffer
offset: _mtlIndirectBufferOffset
atIndex: 0];
[mtlTessCtlEncoder setBuffer: tcIndirectBuff->_mtlBuffer
offset: tcIndirectBuff->_offset
atIndex: 1];
cmdEncoder->setComputeBytes(mtlTessCtlEncoder,
&_mtlIndirectBufferStride,
sizeof(_mtlIndirectBufferStride),
2);
cmdEncoder->setComputeBytes(mtlTessCtlEncoder,
&inControlPointCount,
sizeof(inControlPointCount),
3);
cmdEncoder->setComputeBytes(mtlTessCtlEncoder,
&outControlPointCount,
sizeof(inControlPointCount),
4);
cmdEncoder->setComputeBytes(mtlTessCtlEncoder,
&_drawCount,
sizeof(_drawCount),
5);
[mtlTessCtlEncoder dispatchThreadgroups: MTLSizeMake(mvkCeilingDivide(_drawCount, mtlConvertState.threadExecutionWidth), 1, 1)
threadsPerThreadgroup: MTLSizeMake(mtlConvertState.threadExecutionWidth, 0, 0)];
}
cmdEncoder->finalizeDrawState(stage); // Ensure all updated state has been submitted to Metal
switch (stage) {
case kMVKGraphicsStageVertex:
if (pipeline->needsVertexOutputBuffer()) {
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: vtxOutBuff->_mtlBuffer
offset: vtxOutBuff->_offset
atIndex: pipeline->getOutputBufferIndex().stages[kMVKShaderStageVertex]];
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: _mtlIndirectBuffer
offset: mtlIndBuffOfst
atIndex: pipeline->getIndirectParamsIndex().stages[kMVKShaderStageVertex]];
}
[cmdEncoder->_mtlRenderEncoder drawPrimitives: MTLPrimitiveTypePoint
indirectBuffer: _mtlIndirectBuffer
indirectBufferOffset: mtlIndBuffOfst];
mtlIndBuffOfst += _mtlIndirectBufferStride;
// Mark pipeline, resources, and tess control push constants as dirty
// so I apply them during the next stage.
cmdEncoder->_graphicsPipelineState.beginMetalRenderPass();
cmdEncoder->_graphicsResourcesState.beginMetalRenderPass();
cmdEncoder->getPushConstants(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)->beginMetalRenderPass();
break;
case kMVKGraphicsStageTessControl:
mtlTessCtlEncoder = cmdEncoder->getMTLComputeEncoder(kMVKCommandUseTessellationControl);
if (pipeline->needsTessCtlOutputBuffer()) {
[mtlTessCtlEncoder setBuffer: tcOutBuff->_mtlBuffer
offset: tcOutBuff->_offset
atIndex: pipeline->getOutputBufferIndex().stages[kMVKShaderStageTessCtl]];
}
if (pipeline->needsTessCtlPatchOutputBuffer()) {
[mtlTessCtlEncoder setBuffer: tcPatchOutBuff->_mtlBuffer
offset: tcPatchOutBuff->_offset
atIndex: pipeline->getTessCtlPatchOutputBufferIndex()];
}
[mtlTessCtlEncoder setBuffer: tcLevelBuff->_mtlBuffer
offset: tcLevelBuff->_offset
atIndex: pipeline->getTessCtlLevelBufferIndex()];
cmdEncoder->setComputeBytes(mtlTessCtlEncoder,
&inControlPointCount,
sizeof(inControlPointCount),
pipeline->getIndirectParamsIndex().stages[kMVKShaderStageTessCtl]);
if (pipeline->needsVertexOutputBuffer()) {
[mtlTessCtlEncoder setBuffer: vtxOutBuff->_mtlBuffer
offset: vtxOutBuff->_offset
atIndex: kMVKTessCtlInputBufferIndex];
if ([mtlTessCtlEncoder respondsToSelector: @selector(setStageInRegionWithIndirectBuffer:indirectBufferOffset:)]) {
[mtlTessCtlEncoder setStageInRegionWithIndirectBuffer: tcIndirectBuff->_mtlBuffer
indirectBufferOffset: mtlTCIndBuffOfst];
mtlTCIndBuffOfst += sizeof(MTLStageInRegionIndirectArguments);
} else {
// We must assume we can read up to the maximum number of vertices.
[mtlTessCtlEncoder setStageInRegion: MTLRegionMake1D(0, std::max(inControlPointCount, outControlPointCount) * patchCount)];
}
}
if (outControlPointCount > inControlPointCount) {
[mtlTessCtlEncoder setBuffer: tcIndexBuff->_mtlBuffer
offset: tcIndexBuff->_offset
atIndex: kMVKTessCtlIndexBufferIndex];
}
[mtlTessCtlEncoder dispatchThreadgroupsWithIndirectBuffer: tcIndirectBuff->_mtlBuffer
indirectBufferOffset: mtlTCIndBuffOfst
threadsPerThreadgroup: MTLSizeMake(std::max(inControlPointCount, outControlPointCount), 1, 1)];
mtlTCIndBuffOfst += sizeof(MTLDispatchThreadgroupsIndirectArguments);
// Running this stage prematurely ended the render pass, so we have to start it up again.
// TODO: On iOS, maybe we could use a tile shader to avoid this.
cmdEncoder->beginMetalRenderPass();
break;
case kMVKGraphicsStageRasterization:
if (pipeline->isTessellationPipeline()) {
if (pipeline->needsTessCtlOutputBuffer()) {
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcOutBuff->_mtlBuffer
offset: tcOutBuff->_offset
atIndex: kMVKTessEvalInputBufferIndex];
}
if (pipeline->needsTessCtlPatchOutputBuffer()) {
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcPatchOutBuff->_mtlBuffer
offset: tcPatchOutBuff->_offset
atIndex: kMVKTessEvalPatchInputBufferIndex];
}
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcLevelBuff->_mtlBuffer
offset: tcLevelBuff->_offset
atIndex: kMVKTessEvalLevelBufferIndex];
[cmdEncoder->_mtlRenderEncoder setTessellationFactorBuffer: tcLevelBuff->_mtlBuffer
offset: tcLevelBuff->_offset
instanceStride: 0];
[cmdEncoder->_mtlRenderEncoder drawPatches: outControlPointCount
patchIndexBuffer: nil
patchIndexBufferOffset: 0
indirectBuffer: tcIndirectBuff->_mtlBuffer
indirectBufferOffset: mtlTCIndBuffOfst];
mtlTCIndBuffOfst += sizeof(MTLDrawPatchIndirectArguments);
// Mark pipeline, resources, and tess control push constants as dirty
// so we apply them during the next stage.
cmdEncoder->_graphicsPipelineState.beginMetalRenderPass();
cmdEncoder->_graphicsResourcesState.beginMetalRenderPass();
cmdEncoder->getPushConstants(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)->beginMetalRenderPass();
} else {
[cmdEncoder->_mtlRenderEncoder drawPrimitives: cmdEncoder->_mtlPrimitiveType
indirectBuffer: _mtlIndirectBuffer
indirectBufferOffset: mtlIndBuffOfst];
mtlIndBuffOfst += _mtlIndirectBufferStride;
}
break;
}
}
}
}
MVKCmdDrawIndirect::MVKCmdDrawIndirect(MVKCommandTypePool<MVKCmdDrawIndirect>* pool)
@ -228,20 +732,227 @@ void MVKCmdDrawIndexedIndirect::setContent(VkBuffer buffer,
// Encodes a vkCmdDrawIndexedIndirect command.
//
// For a non-tessellated pipeline, each VkDrawIndexedIndirectCommand becomes a single
// Metal indirect indexed draw. For a tessellated pipeline, each draw must be pushed
// through the whole staged pipeline (vertex-as-compute, tess control compute, then
// rasterization), and the Vulkan-style indirect arguments must first be converted
// on the GPU into the layouts Metal expects for dispatchThreadgroups:... and
// drawPatches:..., since the indirect buffer contents cannot be read CPU-side.
void MVKCmdDrawIndexedIndirect::encode(MVKCommandEncoder* cmdEncoder) {

    MVKIndexMTLBufferBinding& ibb = cmdEncoder->_graphicsResourcesState._mtlIndexBufferBinding;
    size_t idxSize = mvkMTLIndexTypeSizeInBytes(ibb.mtlIndexType);
    auto* pipeline = (MVKGraphicsPipeline*)cmdEncoder->_graphicsPipelineState.getPipeline();

    // The indirect calls for dispatchThreadgroups:... and drawPatches:... have different formats.
    // We have to convert from the drawIndexedPrimitives:... format to them.
    // While we're at it, we can create the temporary output buffers once and reuse them
    // for each draw.
    const MVKMTLBufferAllocation* tcIndirectBuff = nullptr;
    const MVKMTLBufferAllocation* vtxOutBuff = nullptr;
    const MVKMTLBufferAllocation* tcOutBuff = nullptr;
    const MVKMTLBufferAllocation* tcPatchOutBuff = nullptr;
    const MVKMTLBufferAllocation* tcLevelBuff = nullptr;
    const MVKMTLBufferAllocation* tcIndexBuff = nullptr;
    uint32_t patchCount = 0, vertexCount = 0;
    uint32_t inControlPointCount = 0, outControlPointCount = 0;
    if (pipeline->isTessellationPipeline()) {
        // We can't read the indirect buffer CPU-side, since it may change between
        // encoding and execution. So we don't know how big to make the buffers.
        // We must assume an arbitrarily large number of vertices may be submitted.
        // But not too many, or we'll exhaust available VRAM.
        inControlPointCount = pipeline->getInputControlPointCount();
        outControlPointCount = pipeline->getOutputControlPointCount();
        vertexCount = kMVKDrawIndirectVertexCountUpperBound;
        patchCount = (uint32_t)mvkCeilingDivide(vertexCount, inControlPointCount);
        VkDeviceSize indirectSize = (sizeof(MTLDispatchThreadgroupsIndirectArguments) + sizeof(MTLDrawPatchIndirectArguments)) * _drawCount;
        if (cmdEncoder->_pDeviceMetalFeatures->mslVersion >= 20100) {
            // Newer Metal also takes the stage-in region from the indirect buffer
            // (setStageInRegionWithIndirectBuffer:...), so reserve room for those
            // arguments as well.
            indirectSize += sizeof(MTLStageInRegionIndirectArguments) * _drawCount;
        }
        tcIndirectBuff = cmdEncoder->getTempMTLBuffer(indirectSize);
        if (pipeline->needsVertexOutputBuffer()) {
            vtxOutBuff = cmdEncoder->getTempMTLBuffer(vertexCount * 4 * cmdEncoder->_pDeviceProperties->limits.maxVertexOutputComponents);
        }
        if (pipeline->needsTessCtlOutputBuffer()) {
            tcOutBuff = cmdEncoder->getTempMTLBuffer(outControlPointCount * patchCount * 4 * cmdEncoder->_pDeviceProperties->limits.maxTessellationControlPerVertexOutputComponents);
        }
        if (pipeline->needsTessCtlPatchOutputBuffer()) {
            tcPatchOutBuff = cmdEncoder->getTempMTLBuffer(patchCount * 4 * cmdEncoder->_pDeviceProperties->limits.maxTessellationControlPerPatchOutputComponents);
        }
        tcLevelBuff = cmdEncoder->getTempMTLBuffer(patchCount * sizeof(MTLQuadTessellationFactorsHalf));
        tcIndexBuff = cmdEncoder->getTempMTLBuffer(patchCount * outControlPointCount * idxSize);
    }

    MVKVectorInline<uint32_t, 4> stages;
    pipeline->getStages(stages);

    VkDeviceSize mtlIndBuffOfst = _mtlIndirectBufferOffset;
    VkDeviceSize mtlTCIndBuffOfst = tcIndirectBuff ? tcIndirectBuff->_offset : 0;
    for (uint32_t drawIdx = 0; drawIdx < _drawCount; drawIdx++) {
        for (uint32_t s : stages) {
            auto stage = MVKGraphicsStage(s);
            id<MTLComputeCommandEncoder> mtlTessCtlEncoder = nil;
            if (stage == kMVKGraphicsStageTessControl) {
                mtlTessCtlEncoder = cmdEncoder->getMTLComputeEncoder(kMVKCommandUseTessellationControl);
                // We need the indirect buffers now. This must be done before finalizing
                // draw state, or the pipeline will get overridden. This is a good time
                // to do it, since it will require switching to compute anyway. Do it all
                // at once to get it over with.
                if (drawIdx == 0) {
                    id<MTLComputePipelineState> mtlConvertState = getCommandEncodingPool()->getCmdDrawIndirectConvertBuffersMTLComputePipelineState(true);
                    [mtlTessCtlEncoder setComputePipelineState: mtlConvertState];
                    [mtlTessCtlEncoder setBuffer: _mtlIndirectBuffer
                                          offset: _mtlIndirectBufferOffset
                                         atIndex: 0];
                    [mtlTessCtlEncoder setBuffer: tcIndirectBuff->_mtlBuffer
                                          offset: tcIndirectBuff->_offset
                                         atIndex: 1];
                    cmdEncoder->setComputeBytes(mtlTessCtlEncoder,
                                                &_mtlIndirectBufferStride,
                                                sizeof(_mtlIndirectBufferStride),
                                                2);
                    cmdEncoder->setComputeBytes(mtlTessCtlEncoder,
                                                &inControlPointCount,
                                                sizeof(inControlPointCount),
                                                3);
                    cmdEncoder->setComputeBytes(mtlTessCtlEncoder,
                                                &outControlPointCount,
                                                sizeof(outControlPointCount),
                                                4);
                    cmdEncoder->setComputeBytes(mtlTessCtlEncoder,
                                                &_drawCount,
                                                sizeof(_drawCount),
                                                5);
                    [mtlTessCtlEncoder dispatchThreadgroups: MTLSizeMake(mvkCeilingDivide(_drawCount, mtlConvertState.threadExecutionWidth), 1, 1)
                                      threadsPerThreadgroup: MTLSizeMake(mtlConvertState.threadExecutionWidth, 1, 1)];
                }
                // We actually need to make a copy of the index buffer, regardless of whether
                // or not there are gaps in it, because there's no way to tell Metal to
                // offset an index buffer from a value in an indirect buffer. This also
                // means that, to make a copy, we have to use a compute shader.
                id<MTLComputePipelineState> mtlCopyIndexState = getCommandEncodingPool()->getCmdDrawIndexedCopyIndexBufferMTLComputePipelineState(ibb.mtlIndexType);
                [mtlTessCtlEncoder setComputePipelineState: mtlCopyIndexState];
                [mtlTessCtlEncoder setBuffer: ibb.mtlBuffer
                                      offset: ibb.offset
                                     atIndex: 0];
                [mtlTessCtlEncoder setBuffer: tcIndexBuff->_mtlBuffer
                                      offset: tcIndexBuff->_offset
                                     atIndex: 1];
                cmdEncoder->setComputeBytes(mtlTessCtlEncoder,
                                            &inControlPointCount,
                                            sizeof(inControlPointCount),
                                            2);
                cmdEncoder->setComputeBytes(mtlTessCtlEncoder,
                                            &outControlPointCount,
                                            sizeof(outControlPointCount),
                                            3);
                [mtlTessCtlEncoder setBuffer: tcIndirectBuff->_mtlBuffer
                                      offset: mtlTCIndBuffOfst
                                     atIndex: 4];
                [mtlTessCtlEncoder dispatchThreadgroups: MTLSizeMake(1, 1, 1) threadsPerThreadgroup: MTLSizeMake(1, 1, 1)];
            }

            cmdEncoder->finalizeDrawState(stage);	// Ensure all updated state has been submitted to Metal

            switch (stage) {
                case kMVKGraphicsStageVertex:
                    // Tessellated draws run the vertex stage as a point-primitive draw
                    // that captures vertex-shader output into a temporary buffer.
                    if (pipeline->needsVertexOutputBuffer()) {
                        [cmdEncoder->_mtlRenderEncoder setVertexBuffer: vtxOutBuff->_mtlBuffer
                                                                offset: vtxOutBuff->_offset
                                                               atIndex: pipeline->getOutputBufferIndex().stages[kMVKShaderStageVertex]];
                        [cmdEncoder->_mtlRenderEncoder setVertexBuffer: _mtlIndirectBuffer
                                                                offset: mtlIndBuffOfst
                                                               atIndex: pipeline->getIndirectParamsIndex().stages[kMVKShaderStageVertex]];
                    }
                    [cmdEncoder->_mtlRenderEncoder drawIndexedPrimitives: MTLPrimitiveTypePoint
                                                               indexType: ibb.mtlIndexType
                                                             indexBuffer: ibb.mtlBuffer
                                                       indexBufferOffset: ibb.offset
                                                          indirectBuffer: _mtlIndirectBuffer
                                                    indirectBufferOffset: mtlIndBuffOfst];
                    mtlIndBuffOfst += _mtlIndirectBufferStride;
                    // Mark pipeline, resources, and tess control push constants as dirty
                    // so we apply them during the next stage.
                    cmdEncoder->_graphicsPipelineState.beginMetalRenderPass();
                    cmdEncoder->_graphicsResourcesState.beginMetalRenderPass();
                    cmdEncoder->getPushConstants(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)->beginMetalRenderPass();
                    break;
                case kMVKGraphicsStageTessControl:
                    mtlTessCtlEncoder = cmdEncoder->getMTLComputeEncoder(kMVKCommandUseTessellationControl);
                    if (pipeline->needsTessCtlOutputBuffer()) {
                        [mtlTessCtlEncoder setBuffer: tcOutBuff->_mtlBuffer
                                              offset: tcOutBuff->_offset
                                             atIndex: pipeline->getOutputBufferIndex().stages[kMVKShaderStageTessCtl]];
                    }
                    if (pipeline->needsTessCtlPatchOutputBuffer()) {
                        [mtlTessCtlEncoder setBuffer: tcPatchOutBuff->_mtlBuffer
                                              offset: tcPatchOutBuff->_offset
                                             atIndex: pipeline->getTessCtlPatchOutputBufferIndex()];
                    }
                    [mtlTessCtlEncoder setBuffer: tcLevelBuff->_mtlBuffer
                                          offset: tcLevelBuff->_offset
                                         atIndex: pipeline->getTessCtlLevelBufferIndex()];
                    cmdEncoder->setComputeBytes(mtlTessCtlEncoder,
                                                &inControlPointCount,
                                                sizeof(inControlPointCount),
                                                pipeline->getIndirectParamsIndex().stages[kMVKShaderStageTessCtl]);
                    if (pipeline->needsVertexOutputBuffer()) {
                        [mtlTessCtlEncoder setBuffer: vtxOutBuff->_mtlBuffer
                                              offset: vtxOutBuff->_offset
                                             atIndex: kMVKTessCtlInputBufferIndex];
                        if ([mtlTessCtlEncoder respondsToSelector: @selector(setStageInRegionWithIndirectBuffer:indirectBufferOffset:)]) {
                            [mtlTessCtlEncoder setStageInRegionWithIndirectBuffer: tcIndirectBuff->_mtlBuffer
                                                             indirectBufferOffset: mtlTCIndBuffOfst];
                            mtlTCIndBuffOfst += sizeof(MTLStageInRegionIndirectArguments);
                        } else {
                            // We must assume we can read up to the maximum number of vertices.
                            [mtlTessCtlEncoder setStageInRegion: MTLRegionMake1D(0, std::max(inControlPointCount, outControlPointCount) * patchCount)];
                        }
                    }
                    [mtlTessCtlEncoder setBuffer: tcIndexBuff->_mtlBuffer
                                          offset: tcIndexBuff->_offset
                                         atIndex: kMVKTessCtlIndexBufferIndex];
                    [mtlTessCtlEncoder dispatchThreadgroupsWithIndirectBuffer: tcIndirectBuff->_mtlBuffer
                                                         indirectBufferOffset: mtlTCIndBuffOfst
                                                        threadsPerThreadgroup: MTLSizeMake(std::max(inControlPointCount, outControlPointCount), 1, 1)];
                    mtlTCIndBuffOfst += sizeof(MTLDispatchThreadgroupsIndirectArguments);
                    // Running this stage prematurely ended the render pass, so we have to start it up again.
                    // TODO: On iOS, maybe we could use a tile shader to avoid this.
                    cmdEncoder->beginMetalRenderPass();
                    break;
                case kMVKGraphicsStageRasterization:
                    if (pipeline->isTessellationPipeline()) {
                        // Feed the tess control stage's outputs and tessellation factors
                        // into the final patch draw.
                        if (pipeline->needsTessCtlOutputBuffer()) {
                            [cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcOutBuff->_mtlBuffer
                                                                    offset: tcOutBuff->_offset
                                                                   atIndex: kMVKTessEvalInputBufferIndex];
                        }
                        if (pipeline->needsTessCtlPatchOutputBuffer()) {
                            [cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcPatchOutBuff->_mtlBuffer
                                                                    offset: tcPatchOutBuff->_offset
                                                                   atIndex: kMVKTessEvalPatchInputBufferIndex];
                        }
                        [cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcLevelBuff->_mtlBuffer
                                                                offset: tcLevelBuff->_offset
                                                               atIndex: kMVKTessEvalLevelBufferIndex];
                        [cmdEncoder->_mtlRenderEncoder setTessellationFactorBuffer: tcLevelBuff->_mtlBuffer
                                                                            offset: tcLevelBuff->_offset
                                                                    instanceStride: 0];
                        [cmdEncoder->_mtlRenderEncoder drawPatches: outControlPointCount
                                                  patchIndexBuffer: nil
                                            patchIndexBufferOffset: 0
                                                    indirectBuffer: tcIndirectBuff->_mtlBuffer
                                              indirectBufferOffset: mtlTCIndBuffOfst];
                        mtlTCIndBuffOfst += sizeof(MTLDrawPatchIndirectArguments);
                        // Mark pipeline, resources, and tess control push constants as dirty
                        // so we apply them during the next stage.
                        cmdEncoder->_graphicsPipelineState.beginMetalRenderPass();
                        cmdEncoder->_graphicsResourcesState.beginMetalRenderPass();
                        cmdEncoder->getPushConstants(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)->beginMetalRenderPass();
                    } else {
                        [cmdEncoder->_mtlRenderEncoder drawIndexedPrimitives: cmdEncoder->_mtlPrimitiveType
                                                                   indexType: ibb.mtlIndexType
                                                                 indexBuffer: ibb.mtlBuffer
                                                           indexBufferOffset: ibb.offset
                                                              indirectBuffer: _mtlIndirectBuffer
                                                        indirectBufferOffset: mtlIndBuffOfst];
                        mtlIndBuffOfst += _mtlIndirectBufferStride;
                    }
                    break;
            }
        }
    }
}
MVKCmdDrawIndexedIndirect::MVKCmdDrawIndexedIndirect(MVKCommandTypePool<MVKCmdDrawIndexedIndirect>* pool)

View File

@ -155,7 +155,7 @@ protected:
// + vkCmdBindDescriptorSets() : _graphicsResourcesState & _computeResourcesState
// + vkCmdBindVertexBuffers() : _graphicsResourcesState
// + vkCmdBindIndexBuffer() : _graphicsResourcesState
// + vkCmdPushConstants() : _vertexPushConstants & _fragmentPushConstants & _computePushConstants
// + vkCmdPushConstants() : _vertexPushConstants & _tessCtlPushConstants & _tessEvalPushConstants & _fragmentPushConstants & _computePushConstants
// + vkCmdSetViewport() : _viewportState
// + vkCmdSetDepthBias() : _depthBiasState
// + vkCmdSetScissor() : _scissorState
@ -186,9 +186,9 @@ protected:
// + setTriangleFillMode : _graphicsPipelineState
// + setViewport : _viewportState
// + setVisibilityResultMode : _occlusionQueryState
// + setVertexBuffer : _graphicsResourcesState & _vertexPushConstants
// + setVertexBuffer : _graphicsResourcesState & _vertexPushConstants & _tessEvalPushConstants
// + setVertexBuffers (unused) : _graphicsResourcesState
// + setVertexBytes : _vertexPushConstants
// + setVertexBytes : _vertexPushConstants & _tessEvalPushConstants
// + setVertexBufferOffset (unused) : _graphicsResourcesState
// + setVertexTexture : _graphicsResourcesState
// + setVertexTextures (unused) : _graphicsResourcesState
@ -204,15 +204,15 @@ protected:
// + setFragmentSamplerStates : (unused) : _graphicsResourcesState
// The above list of Vulkan commands covers the following corresponding MTLComputeCommandEncoder state:
// + setComputePipelineState : _computePipelineState
// + setBuffer : _computeResourcesState & _computePushConstants
// + setBuffers (unused) : _computeResourcesState
// + setBytes : _computePushConstants
// + setBufferOffset (unused) : _computeResourcesState
// + setTexture : _computeResourcesState
// + setTextures (unused) : _computeResourcesState
// + setSamplerState : _computeResourcesState
// + setSamplerStates : (unused) : _computeResourcesState
// + setComputePipelineState : _computePipelineState & _graphicsPipelineState
// + setBuffer : _computeResourcesState & _computePushConstants & _graphicsResourcesState & _tessCtlPushConstants
// + setBuffers (unused) : _computeResourcesState & _graphicsResourcesState
// + setBytes : _computePushConstants & _tessCtlPushConstants
// + setBufferOffset (unused) : _computeResourcesState & _graphicsResourcesState
// + setTexture : _computeResourcesState & _graphicsResourcesState
// + setTextures (unused) : _computeResourcesState & _graphicsResourcesState
// + setSamplerState : _computeResourcesState & _graphicsResourcesState
// + setSamplerStates : (unused) : _computeResourcesState & _graphicsResourcesState
/*** Holds a collection of active queries for each query pool. */
@ -245,6 +245,9 @@ public:
/** Begins the next render subpass. */
void beginNextSubpass(VkSubpassContents renderpassContents);
/** Begins a Metal render pass for the current render subpass. */
void beginMetalRenderPass();
/** Returns the render subpass that is currently active. */
MVKRenderSubpass* getSubpass();
@ -261,7 +264,7 @@ public:
MTLScissorRect clipToRenderArea(MTLScissorRect mtlScissor);
/** Called by each graphics draw command to establish any outstanding state just prior to performing the draw. */
void finalizeDrawState();
void finalizeDrawState(MVKGraphicsStage stage);
/** Called by each compute dispatch command to establish any outstanding state just prior to performing the dispatch. */
void finalizeDispatchState();
@ -307,6 +310,9 @@ public:
/** Copy bytes into the Metal encoder at a Metal compute buffer index. */
void setComputeBytes(id<MTLComputeCommandEncoder> mtlEncoder, const void* bytes, NSUInteger length, uint32_t mtlBuffIndex);
/** Get a temporary MTLBuffer that will be returned to a pool after the command buffer is finished. */
const MVKMTLBufferAllocation* getTempMTLBuffer(NSUInteger length);
/** Returns the command encoding pool. */
MVKCommandEncodingPool* getCommandEncodingPool();
@ -393,7 +399,6 @@ protected:
void addActivatedQuery(MVKQueryPool* pQueryPool, uint32_t query);
void finishQueries();
void setSubpass(VkSubpassContents subpassContents, uint32_t subpassIndex);
void beginMetalRenderPass();
void clearRenderArea();
const MVKMTLBufferAllocation* copyToTempMTLBufferAllocation(const void* bytes, NSUInteger length);
NSString* getMTLRenderCommandEncoderName();
@ -409,6 +414,8 @@ protected:
id<MTLBlitCommandEncoder> _mtlBlitEncoder;
MVKCommandUse _mtlBlitEncoderUse;
MVKPushConstantsCommandEncoderState _vertexPushConstants;
MVKPushConstantsCommandEncoderState _tessCtlPushConstants;
MVKPushConstantsCommandEncoderState _tessEvalPushConstants;
MVKPushConstantsCommandEncoderState _fragmentPushConstants;
MVKPushConstantsCommandEncoderState _computePushConstants;
MVKOcclusionQueryCommandEncoderState _occlusionQueryState;

View File

@ -265,6 +265,8 @@ void MVKCommandEncoder::beginMetalRenderPass() {
_depthBiasState.beginMetalRenderPass();
_blendColorState.beginMetalRenderPass();
_vertexPushConstants.beginMetalRenderPass();
_tessCtlPushConstants.beginMetalRenderPass();
_tessEvalPushConstants.beginMetalRenderPass();
_fragmentPushConstants.beginMetalRenderPass();
_depthStencilState.beginMetalRenderPass();
_stencilReferenceValueState.beginMetalRenderPass();
@ -314,18 +316,20 @@ MTLScissorRect MVKCommandEncoder::clipToRenderArea(MTLScissorRect mtlScissor) {
return mtlScissor;
}
void MVKCommandEncoder::finalizeDrawState() {
_graphicsPipelineState.encode(); // Must do first..it sets others
_graphicsResourcesState.encode();
_viewportState.encode();
_scissorState.encode();
_depthBiasState.encode();
_blendColorState.encode();
_vertexPushConstants.encode();
_fragmentPushConstants.encode();
_depthStencilState.encode();
_stencilReferenceValueState.encode();
_occlusionQueryState.encode();
void MVKCommandEncoder::finalizeDrawState(MVKGraphicsStage stage) {
_graphicsPipelineState.encode(stage); // Must do first..it sets others
_graphicsResourcesState.encode(stage);
_viewportState.encode(stage);
_scissorState.encode(stage);
_depthBiasState.encode(stage);
_blendColorState.encode(stage);
_vertexPushConstants.encode(stage);
_tessCtlPushConstants.encode(stage);
_tessEvalPushConstants.encode(stage);
_fragmentPushConstants.encode(stage);
_depthStencilState.encode(stage);
_stencilReferenceValueState.encode(stage);
_occlusionQueryState.encode(stage);
}
// Clears the render area of the framebuffer attachments.
@ -399,9 +403,11 @@ id<MTLBlitCommandEncoder> MVKCommandEncoder::getMTLBlitEncoder(MVKCommandUse cmd
}
MVKPushConstantsCommandEncoderState* MVKCommandEncoder::getPushConstants(VkShaderStageFlagBits shaderStage) {
switch (shaderStage) {
case VK_SHADER_STAGE_VERTEX_BIT: return &_vertexPushConstants;
case VK_SHADER_STAGE_FRAGMENT_BIT: return &_fragmentPushConstants;
case VK_SHADER_STAGE_COMPUTE_BIT: return &_computePushConstants;
case VK_SHADER_STAGE_VERTEX_BIT: return &_vertexPushConstants;
case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: return &_tessCtlPushConstants;
case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return &_tessEvalPushConstants;
case VK_SHADER_STAGE_FRAGMENT_BIT: return &_fragmentPushConstants;
case VK_SHADER_STAGE_COMPUTE_BIT: return &_computePushConstants;
default:
MVKAssert(false, "Invalid shader stage: %u", shaderStage);
return nullptr;
@ -444,19 +450,26 @@ void MVKCommandEncoder::setComputeBytes(id<MTLComputeCommandEncoder> mtlEncoder,
}
}
const MVKMTLBufferAllocation* MVKCommandEncoder::getTempMTLBuffer(NSUInteger length) {
const MVKMTLBufferAllocation* mtlBuffAlloc = getCommandEncodingPool()->acquireMTLBufferAllocation(length);
MVKMTLBufferAllocationPool* pool = mtlBuffAlloc->getPool();
// Return the MTLBuffer allocation to the pool once the command buffer is done with it
[_mtlCmdBuffer addCompletedHandler: ^(id<MTLCommandBuffer> mcb) {
pool->returnObjectSafely((MVKMTLBufferAllocation*)mtlBuffAlloc);
}];
return mtlBuffAlloc;
}
MVKCommandEncodingPool* MVKCommandEncoder::getCommandEncodingPool() { return _cmdBuffer->_commandPool->getCommandEncodingPool(); }
// Copies the specified bytes into a temporary allocation within a pooled MTLBuffer, and returns the MTLBuffer allocation.
const MVKMTLBufferAllocation* MVKCommandEncoder::copyToTempMTLBufferAllocation(const void* bytes, NSUInteger length) {
const MVKMTLBufferAllocation* mtlBuffAlloc = getCommandEncodingPool()->acquireMTLBufferAllocation(length);
const MVKMTLBufferAllocation* mtlBuffAlloc = getTempMTLBuffer(length);
void* pBuffData = mtlBuffAlloc->getContents();
memcpy(pBuffData, bytes, length);
// Return the MTLBuffer allocation to the pool once the command buffer is done with it
[_mtlCmdBuffer addCompletedHandler: ^(id<MTLCommandBuffer> mcb) {
((MVKMTLBufferAllocation*)mtlBuffAlloc)->returnToPool();
}];
return mtlBuffAlloc;
}
@ -509,6 +522,8 @@ MVKCommandEncoder::MVKCommandEncoder(MVKCommandBuffer* cmdBuffer) : MVKBaseDevic
_depthBiasState(this),
_blendColorState(this),
_vertexPushConstants(this, VK_SHADER_STAGE_VERTEX_BIT),
_tessCtlPushConstants(this, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT),
_tessEvalPushConstants(this, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT),
_fragmentPushConstants(this, VK_SHADER_STAGE_FRAGMENT_BIT),
_computePushConstants(this, VK_SHADER_STAGE_COMPUTE_BIT),
_depthStencilState(this),
@ -573,11 +588,14 @@ NSString* mvkMTLBlitCommandEncoderLabel(MVKCommandUse cmdUse) {
}
NSString* mvkMTLComputeCommandEncoderLabel(MVKCommandUse cmdUse) {
switch (cmdUse) {
case kMVKCommandUseDispatch: return @"vkCmdDispatch ComputeEncoder";
case kMVKCommandUseCopyBuffer: return @"vkCmdCopyBuffer ComputeEncoder";
case kMVKCommandUseFillBuffer: return @"vkCmdFillBuffer ComputeEncoder";
default: return @"Unknown Use ComputeEncoder";
}
switch (cmdUse) {
case kMVKCommandUseDispatch: return @"vkCmdDispatch ComputeEncoder";
case kMVKCommandUseCopyBuffer: return @"vkCmdCopyBuffer ComputeEncoder";
case kMVKCommandUseCopyBufferToImage: return @"vkCmdCopyBufferToImage ComputeEncoder";
case kMVKCommandUseCopyImageToBuffer: return @"vkCmdCopyImageToBuffer ComputeEncoder";
case kMVKCommandUseFillBuffer: return @"vkCmdFillBuffer ComputeEncoder";
case kMVKCommandUseTessellationControl: return @"vkCmdDraw (tess control stage) ComputeEncoder";
default: return @"Unknown Use ComputeEncoder";
}
}

View File

@ -25,7 +25,7 @@
class MVKCommandEncoder;
class MVKOcclusionQueryPool;
struct MVKShaderAuxBufferBinding;
struct MVKShaderImplicitRezBinding;
#pragma mark -
@ -65,11 +65,11 @@ public:
* and calls the encodeImpl() function to encode the content onto the Metal encoder.
* Subclasses must override the encodeImpl() function to do the actual work.
*/
void encode() {
void encode(uint32_t stage = 0) {
if ( !_isDirty ) { return; }
_isDirty = false;
encodeImpl();
encodeImpl(stage);
}
/**
@ -87,7 +87,7 @@ public:
MVKCommandEncoderState(MVKCommandEncoder* cmdEncoder) : _cmdEncoder(cmdEncoder) {}
protected:
virtual void encodeImpl() = 0;
virtual void encodeImpl(uint32_t stage) = 0;
virtual void resetImpl() = 0;
MVKCommandEncoder* _cmdEncoder;
@ -115,7 +115,7 @@ public:
: MVKCommandEncoderState(cmdEncoder) {}
protected:
void encodeImpl() override;
void encodeImpl(uint32_t stage) override;
void resetImpl() override;
MVKPipeline* _pipeline = nullptr;
@ -144,7 +144,7 @@ public:
: MVKCommandEncoderState(cmdEncoder) {}
protected:
void encodeImpl() override;
void encodeImpl(uint32_t stage) override;
void resetImpl() override;
MVKVectorInline<MTLViewport, kMVKCachedViewportCount> _mtlViewports;
@ -173,7 +173,7 @@ public:
: MVKCommandEncoderState(cmdEncoder) {}
protected:
void encodeImpl() override;
void encodeImpl(uint32_t stage) override;
void resetImpl() override;
MVKVectorInline<MTLScissorRect, kMVKCachedScissorCount> _mtlScissors;
@ -200,7 +200,7 @@ public:
: MVKCommandEncoderState(cmdEncoder), _shaderStage(shaderStage) {}
protected:
void encodeImpl() override;
void encodeImpl(uint32_t stage) override;
void resetImpl() override;
MVKVectorInline<char, 128> _pushConstants;
@ -237,7 +237,7 @@ public:
: MVKCommandEncoderState(cmdEncoder) {}
protected:
void encodeImpl() override;
void encodeImpl(uint32_t stage) override;
void resetImpl() override;
void setStencilState(MVKMTLStencilDescriptorData& stencilInfo,
const VkStencilOpState& vkStencil,
@ -266,7 +266,7 @@ public:
: MVKCommandEncoderState(cmdEncoder) {}
protected:
void encodeImpl() override;
void encodeImpl(uint32_t stage) override;
void resetImpl() override;
uint32_t _frontFaceValue = 0;
@ -295,7 +295,7 @@ public:
: MVKCommandEncoderState(cmdEncoder) {}
protected:
void encodeImpl() override;
void encodeImpl(uint32_t stage) override;
void resetImpl() override;
float _depthBiasConstantFactor = 0;
@ -323,7 +323,7 @@ public:
: MVKCommandEncoderState(cmdEncoder) {}
protected:
void encodeImpl() override;
void encodeImpl(uint32_t stage) override;
void resetImpl() override;
float _red = 0;
@ -409,23 +409,14 @@ class MVKGraphicsResourcesCommandEncoderState : public MVKResourcesCommandEncode
public:
/** Binds the specified vertex buffer. */
void bindVertexBuffer(const MVKMTLBufferBinding& binding);
/** Binds the specified buffer for the specified shader stage. */
void bindBuffer(MVKShaderStage stage, const MVKMTLBufferBinding& binding);
/** Binds the specified fragment buffer. */
void bindFragmentBuffer(const MVKMTLBufferBinding& binding);
/** Binds the specified texture for the specified shader stage. */
void bindTexture(MVKShaderStage stage, const MVKMTLTextureBinding& binding);
/** Binds the specified vertex texture. */
void bindVertexTexture(const MVKMTLTextureBinding& binding);
/** Binds the specified fragment texture. */
void bindFragmentTexture(const MVKMTLTextureBinding& binding);
/** Binds the specified vertex sampler state. */
void bindVertexSamplerState(const MVKMTLSamplerStateBinding& binding);
/** Binds the specified fragment sampler state. */
void bindFragmentSamplerState(const MVKMTLSamplerStateBinding& binding);
/** Binds the specified sampler state for the specified shader stage. */
void bindSamplerState(MVKShaderStage stage, const MVKMTLSamplerStateBinding& binding);
/** The type of index that will be used to render primitives. Exposed directly. */
MVKIndexMTLBufferBinding _mtlIndexBufferBinding;
@ -436,10 +427,19 @@ public:
}
/** Sets the current auxiliary buffer state. */
void bindAuxBuffer(const MVKShaderAuxBufferBinding& binding,
bool needVertexAuxBuffer,
bool needFragmentAuxBuffer);
void bindAuxBuffer(const MVKShaderImplicitRezBinding& binding,
bool needVertexAuxBuffer,
bool needTessCtlAuxBuffer,
bool needTessEvalAuxBuffer,
bool needFragmentAuxBuffer);
void encodeBindings(MVKShaderStage stage,
const char* pStageName,
bool fullImageViewSwizzle,
std::function<void(MVKCommandEncoder*, MVKMTLBufferBinding&)> bindBuffer,
std::function<void(MVKCommandEncoder*, MVKMTLBufferBinding&, MVKVector<uint32_t>&)> bindAuxBuffer,
std::function<void(MVKCommandEncoder*, MVKMTLTextureBinding&)> bindTexture,
std::function<void(MVKCommandEncoder*, MVKMTLSamplerStateBinding&)> bindSampler);
#pragma mark Construction
@ -447,30 +447,25 @@ public:
MVKGraphicsResourcesCommandEncoderState(MVKCommandEncoder* cmdEncoder) : MVKResourcesCommandEncoderState(cmdEncoder) {}
protected:
void encodeImpl() override;
void encodeImpl(uint32_t stage) override;
void resetImpl() override;
void markDirty() override;
MVKVectorInline<MVKMTLBufferBinding, 8> _vertexBufferBindings;
MVKVectorInline<MVKMTLBufferBinding, 8> _fragmentBufferBindings;
MVKVectorInline<MVKMTLTextureBinding, 8> _vertexTextureBindings;
MVKVectorInline<MVKMTLTextureBinding, 8> _fragmentTextureBindings;
MVKVectorInline<MVKMTLSamplerStateBinding, 8> _vertexSamplerStateBindings;
MVKVectorInline<MVKMTLSamplerStateBinding, 8> _fragmentSamplerStateBindings;
MVKVectorInline<uint32_t, 8> _vertexSwizzleConstants;
MVKVectorInline<uint32_t, 8> _fragmentSwizzleConstants;
MVKMTLBufferBinding _vertexAuxBufferBinding;
MVKMTLBufferBinding _fragmentAuxBufferBinding;
struct ShaderStage {
MVKVectorInline<MVKMTLBufferBinding, 8> bufferBindings;
MVKVectorInline<MVKMTLTextureBinding, 8> textureBindings;
MVKVectorInline<MVKMTLSamplerStateBinding, 8> samplerStateBindings;
MVKVectorInline<uint32_t, 8> swizzleConstants;
MVKMTLBufferBinding auxBufferBinding;
bool _areVertexBufferBindingsDirty = false;
bool _areFragmentBufferBindingsDirty = false;
bool _areVertexTextureBindingsDirty = false;
bool _areFragmentTextureBindingsDirty = false;
bool _areVertexSamplerStateBindingsDirty = false;
bool _areFragmentSamplerStateBindingsDirty = false;
bool _needsVertexSwizzle = false;
bool _needsFragmentSwizzle = false;
bool areBufferBindingsDirty = false;
bool areTextureBindingsDirty = false;
bool areSamplerStateBindingsDirty = false;
bool needsSwizzle = false;
};
ShaderStage _shaderStages[4];
};
@ -492,7 +487,7 @@ public:
void bindSamplerState(const MVKMTLSamplerStateBinding& binding);
/** Sets the current auxiliary buffer state. */
void bindAuxBuffer(const MVKShaderAuxBufferBinding& binding, bool needAuxBuffer);
void bindAuxBuffer(const MVKShaderImplicitRezBinding& binding, bool needAuxBuffer);
#pragma mark Construction
@ -500,7 +495,7 @@ public:
MVKComputeResourcesCommandEncoderState(MVKCommandEncoder* cmdEncoder) : MVKResourcesCommandEncoderState(cmdEncoder) {}
protected:
void encodeImpl() override;
void encodeImpl(uint32_t) override;
void resetImpl() override;
void markDirty() override;
@ -539,7 +534,7 @@ public:
MVKOcclusionQueryCommandEncoderState(MVKCommandEncoder* cmdEncoder);
protected:
void encodeImpl() override;
void encodeImpl(uint32_t) override;
void resetImpl() override;
id<MTLBuffer> _visibilityResultMTLBuffer = nil;

View File

@ -38,8 +38,8 @@ void MVKPipelineCommandEncoderState::setPipeline(MVKPipeline* pipeline) {
MVKPipeline* MVKPipelineCommandEncoderState::getPipeline() { return _pipeline; }
void MVKPipelineCommandEncoderState::encodeImpl() {
if (_pipeline) { _pipeline->encode(_cmdEncoder); }
void MVKPipelineCommandEncoderState::encodeImpl(uint32_t stage) {
if (_pipeline) { _pipeline->encode(_cmdEncoder, stage); }
}
void MVKPipelineCommandEncoderState::resetImpl() {
@ -57,7 +57,7 @@ void MVKViewportCommandEncoderState::setViewports(const MVKVector<MTLViewport> &
bool mustSetDynamically = _cmdEncoder->supportsDynamicState(VK_DYNAMIC_STATE_VIEWPORT);
uint32_t maxViewports = _cmdEncoder->getDevice()->_pProperties->limits.maxViewports;
if ((mustSetDynamically == isSettingDynamically) &&
if ((mustSetDynamically == isSettingDynamically) && mtlViewports.size() > 0 &&
(firstViewport + mtlViewports.size() <= maxViewports) &&
(firstViewport < maxViewports)) {
@ -70,7 +70,8 @@ void MVKViewportCommandEncoderState::setViewports(const MVKVector<MTLViewport> &
}
}
void MVKViewportCommandEncoderState::encodeImpl() {
void MVKViewportCommandEncoderState::encodeImpl(uint32_t stage) {
if (stage != kMVKGraphicsStageRasterization) { return; }
MVKAssert(!_mtlViewports.empty(), "Must specify at least one viewport");
if (_cmdEncoder->getDevice()->_pFeatures->multiViewport) {
[_cmdEncoder->_mtlRenderEncoder setViewports: &_mtlViewports[0] count: _mtlViewports.size()];
@ -94,7 +95,7 @@ void MVKScissorCommandEncoderState::setScissors(const MVKVector<MTLScissorRect>
bool mustSetDynamically = _cmdEncoder->supportsDynamicState(VK_DYNAMIC_STATE_SCISSOR);
uint32_t maxScissors = _cmdEncoder->getDevice()->_pProperties->limits.maxViewports;
if ((mustSetDynamically == isSettingDynamically) &&
if ((mustSetDynamically == isSettingDynamically) && mtlScissors.size() > 0 &&
(firstScissor + mtlScissors.size() <= maxScissors) &&
(firstScissor < maxScissors)) {
@ -107,7 +108,8 @@ void MVKScissorCommandEncoderState::setScissors(const MVKVector<MTLScissorRect>
}
}
void MVKScissorCommandEncoderState::encodeImpl() {
void MVKScissorCommandEncoderState::encodeImpl(uint32_t stage) {
if (stage != kMVKGraphicsStageRasterization) { return; }
MVKAssert(!_mtlScissors.empty(), "Must specify at least one scissor rect");
auto clippedScissors(_mtlScissors);
std::for_each(clippedScissors.begin(), clippedScissors.end(), [this](MTLScissorRect& scissor) {
@ -142,21 +144,42 @@ void MVKPushConstantsCommandEncoderState::setMTLBufferIndex(uint32_t mtlBufferIn
}
}
void MVKPushConstantsCommandEncoderState::encodeImpl() {
void MVKPushConstantsCommandEncoderState::encodeImpl(uint32_t stage) {
if (_pushConstants.empty() ) { return; }
bool forTessellation = ((MVKGraphicsPipeline*)_cmdEncoder->_graphicsPipelineState.getPipeline())->isTessellationPipeline();
switch (_shaderStage) {
case VK_SHADER_STAGE_VERTEX_BIT:
_cmdEncoder->setVertexBytes(_cmdEncoder->_mtlRenderEncoder,
_pushConstants.data(),
_pushConstants.size(),
_mtlBufferIndex);
if (stage == (forTessellation ? kMVKGraphicsStageVertex : kMVKGraphicsStageRasterization)) {
_cmdEncoder->setVertexBytes(_cmdEncoder->_mtlRenderEncoder,
_pushConstants.data(),
_pushConstants.size(),
_mtlBufferIndex);
}
break;
case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
if (stage == kMVKGraphicsStageTessControl) {
_cmdEncoder->setComputeBytes(_cmdEncoder->getMTLComputeEncoder(kMVKCommandUseTessellationControl),
_pushConstants.data(),
_pushConstants.size(),
_mtlBufferIndex);
}
break;
case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
if (forTessellation && stage == kMVKGraphicsStageRasterization) {
_cmdEncoder->setVertexBytes(_cmdEncoder->_mtlRenderEncoder,
_pushConstants.data(),
_pushConstants.size(),
_mtlBufferIndex);
}
break;
case VK_SHADER_STAGE_FRAGMENT_BIT:
_cmdEncoder->setFragmentBytes(_cmdEncoder->_mtlRenderEncoder,
_pushConstants.data(),
_pushConstants.size(),
_mtlBufferIndex);
if (stage == kMVKGraphicsStageRasterization) {
_cmdEncoder->setFragmentBytes(_cmdEncoder->_mtlRenderEncoder,
_pushConstants.data(),
_pushConstants.size(),
_mtlBufferIndex);
}
break;
case VK_SHADER_STAGE_COMPUTE_BIT:
_cmdEncoder->setComputeBytes(_cmdEncoder->getMTLComputeEncoder(kMVKCommandUseDispatch),
@ -249,7 +272,8 @@ void MVKDepthStencilCommandEncoderState::setStencilWriteMask(VkStencilFaceFlags
markDirty();
}
void MVKDepthStencilCommandEncoderState::encodeImpl() {
void MVKDepthStencilCommandEncoderState::encodeImpl(uint32_t stage) {
if (stage != kMVKGraphicsStageRasterization) { return; }
MVKRenderSubpass *subpass = _cmdEncoder->getSubpass();
id<MTLDepthStencilState> mtlDSS = nil;
if (subpass->getDepthStencilFormat() != VK_FORMAT_UNDEFINED) {
@ -301,7 +325,8 @@ void MVKStencilReferenceValueCommandEncoderState::setReferenceValues(VkStencilFa
markDirty();
}
void MVKStencilReferenceValueCommandEncoderState::encodeImpl() {
void MVKStencilReferenceValueCommandEncoderState::encodeImpl(uint32_t stage) {
if (stage != kMVKGraphicsStageRasterization) { return; }
[_cmdEncoder->_mtlRenderEncoder setStencilFrontReferenceValue: _frontFaceValue
backReferenceValue: _backFaceValue];
}
@ -342,7 +367,8 @@ void MVKDepthBiasCommandEncoderState::setDepthBias(float depthBiasConstantFactor
markDirty();
}
void MVKDepthBiasCommandEncoderState::encodeImpl() {
void MVKDepthBiasCommandEncoderState::encodeImpl(uint32_t stage) {
if (stage != kMVKGraphicsStageRasterization) { return; }
if (_isEnabled) {
[_cmdEncoder->_mtlRenderEncoder setDepthBias: _depthBiasConstantFactor
slopeScale: _depthBiasSlopeFactor
@ -378,7 +404,8 @@ void MVKBlendColorCommandEncoderState::setBlendColor(float red, float green,
markDirty();
}
void MVKBlendColorCommandEncoderState::encodeImpl() {
void MVKBlendColorCommandEncoderState::encodeImpl(uint32_t stage) {
if (stage != kMVKGraphicsStageRasterization) { return; }
[_cmdEncoder->_mtlRenderEncoder setBlendColorRed: _red green: _green blue: _blue alpha: _alpha];
}
@ -420,147 +447,184 @@ static void assertMissingSwizzles(bool needsSwizzle, const char* stageName, MVKV
#pragma mark -
#pragma mark MVKGraphicsResourcesCommandEncoderState
void MVKGraphicsResourcesCommandEncoderState::bindVertexBuffer(const MVKMTLBufferBinding& binding) {
bind(binding, _vertexBufferBindings, _areVertexBufferBindingsDirty);
void MVKGraphicsResourcesCommandEncoderState::bindBuffer(MVKShaderStage stage, const MVKMTLBufferBinding& binding) {
bind(binding, _shaderStages[stage].bufferBindings, _shaderStages[stage].areBufferBindingsDirty);
}
void MVKGraphicsResourcesCommandEncoderState::bindFragmentBuffer(const MVKMTLBufferBinding& binding) {
bind(binding, _fragmentBufferBindings, _areFragmentBufferBindingsDirty);
void MVKGraphicsResourcesCommandEncoderState::bindTexture(MVKShaderStage stage, const MVKMTLTextureBinding& binding) {
bind(binding, _shaderStages[stage].textureBindings, _shaderStages[stage].areTextureBindingsDirty, _shaderStages[stage].needsSwizzle);
}
void MVKGraphicsResourcesCommandEncoderState::bindVertexTexture(const MVKMTLTextureBinding& binding) {
bind(binding, _vertexTextureBindings, _areVertexTextureBindingsDirty, _needsVertexSwizzle);
void MVKGraphicsResourcesCommandEncoderState::bindSamplerState(MVKShaderStage stage, const MVKMTLSamplerStateBinding& binding) {
bind(binding, _shaderStages[stage].samplerStateBindings, _shaderStages[stage].areSamplerStateBindingsDirty);
}
void MVKGraphicsResourcesCommandEncoderState::bindFragmentTexture(const MVKMTLTextureBinding& binding) {
bind(binding, _fragmentTextureBindings, _areFragmentTextureBindingsDirty, _needsFragmentSwizzle);
}
void MVKGraphicsResourcesCommandEncoderState::bindVertexSamplerState(const MVKMTLSamplerStateBinding& binding) {
bind(binding, _vertexSamplerStateBindings, _areVertexSamplerStateBindingsDirty);
}
void MVKGraphicsResourcesCommandEncoderState::bindFragmentSamplerState(const MVKMTLSamplerStateBinding& binding) {
bind(binding, _fragmentSamplerStateBindings, _areFragmentSamplerStateBindingsDirty);
}
void MVKGraphicsResourcesCommandEncoderState::bindAuxBuffer(const MVKShaderAuxBufferBinding& binding,
void MVKGraphicsResourcesCommandEncoderState::bindAuxBuffer(const MVKShaderImplicitRezBinding& binding,
bool needVertexAuxBuffer,
bool needTessCtlAuxBuffer,
bool needTessEvalAuxBuffer,
bool needFragmentAuxBuffer) {
_vertexAuxBufferBinding.index = binding.vertex;
_vertexAuxBufferBinding.isDirty = needVertexAuxBuffer;
_fragmentAuxBufferBinding.index = binding.fragment;
_fragmentAuxBufferBinding.isDirty = needFragmentAuxBuffer;
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCompute; i++) {
_shaderStages[i].auxBufferBinding.index = binding.stages[i];
}
_shaderStages[kMVKShaderStageVertex].auxBufferBinding.isDirty = needVertexAuxBuffer;
_shaderStages[kMVKShaderStageTessCtl].auxBufferBinding.isDirty = needTessCtlAuxBuffer;
_shaderStages[kMVKShaderStageTessEval].auxBufferBinding.isDirty = needTessEvalAuxBuffer;
_shaderStages[kMVKShaderStageFragment].auxBufferBinding.isDirty = needFragmentAuxBuffer;
}
// Encodes the dirty resource bindings (buffers, textures, sampler states) for one
// shader stage, invoking the caller-supplied lambdas to issue the actual Metal
// binding calls. This lets the render-encoder stages (vertex / tess-eval / fragment)
// and the tess-control compute stage share a single traversal of the binding state.
// pStageName is used only for diagnostic reporting of missing swizzles.
void MVKGraphicsResourcesCommandEncoderState::encodeBindings(MVKShaderStage stage,
                                                             const char* pStageName,
                                                             bool fullImageViewSwizzle,
                                                             std::function<void(MVKCommandEncoder*, MVKMTLBufferBinding&)> bindBuffer,
                                                             std::function<void(MVKCommandEncoder*, MVKMTLBufferBinding&, MVKVector<uint32_t>&)> bindAuxBuffer,
                                                             std::function<void(MVKCommandEncoder*, MVKMTLTextureBinding&)> bindTexture,
                                                             std::function<void(MVKCommandEncoder*, MVKMTLSamplerStateBinding&)> bindSampler) {
    auto& shaderStage = _shaderStages[stage];
    encodeBinding<MVKMTLBufferBinding>(shaderStage.bufferBindings, shaderStage.areBufferBindingsDirty, bindBuffer);
    // If this stage's auxiliary (swizzle-constant) buffer must be re-bound, first
    // refresh the swizzle constant of every dirty texture binding, then bind the
    // aux buffer itself. Must happen before the texture bindings are encoded below,
    // since encodeBinding() clears each binding's dirty flag.
    if (shaderStage.auxBufferBinding.isDirty) {
        for (auto& b : shaderStage.textureBindings) {
            if (b.isDirty) { updateSwizzle(shaderStage.swizzleConstants, b.index, b.swizzle); }
        }
        bindAuxBuffer(_cmdEncoder, shaderStage.auxBufferBinding, shaderStage.swizzleConstants);
    } else {
        // No aux buffer to carry swizzle constants: report any texture that needed
        // swizzling but won't get it (unless full image view swizzle covers it).
        assertMissingSwizzles(shaderStage.needsSwizzle && !fullImageViewSwizzle, pStageName, shaderStage.textureBindings);
    }
    encodeBinding<MVKMTLTextureBinding>(shaderStage.textureBindings, shaderStage.areTextureBindingsDirty, bindTexture);
    encodeBinding<MVKMTLSamplerStateBinding>(shaderStage.samplerStateBindings, shaderStage.areSamplerStateBindingsDirty, bindSampler);
}
// Mark everything as dirty
void MVKGraphicsResourcesCommandEncoderState::markDirty() {
MVKCommandEncoderState::markDirty();
MVKResourcesCommandEncoderState::markDirty(_vertexBufferBindings, _areVertexBufferBindingsDirty);
MVKResourcesCommandEncoderState::markDirty(_fragmentBufferBindings, _areFragmentBufferBindingsDirty);
MVKResourcesCommandEncoderState::markDirty(_vertexTextureBindings, _areVertexTextureBindingsDirty);
MVKResourcesCommandEncoderState::markDirty(_fragmentTextureBindings, _areFragmentTextureBindingsDirty);
MVKResourcesCommandEncoderState::markDirty(_vertexSamplerStateBindings, _areVertexSamplerStateBindingsDirty);
MVKResourcesCommandEncoderState::markDirty(_fragmentSamplerStateBindings, _areFragmentSamplerStateBindingsDirty);
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCompute; i++) {
MVKResourcesCommandEncoderState::markDirty(_shaderStages[i].bufferBindings, _shaderStages[i].areBufferBindingsDirty);
MVKResourcesCommandEncoderState::markDirty(_shaderStages[i].textureBindings, _shaderStages[i].areTextureBindingsDirty);
MVKResourcesCommandEncoderState::markDirty(_shaderStages[i].samplerStateBindings, _shaderStages[i].areSamplerStateBindingsDirty);
}
}
void MVKGraphicsResourcesCommandEncoderState::encodeImpl() {
void MVKGraphicsResourcesCommandEncoderState::encodeImpl(uint32_t stage) {
bool fullImageViewSwizzle = false;
MVKPipeline* pipeline = _cmdEncoder->_graphicsPipelineState.getPipeline();
if (pipeline)
fullImageViewSwizzle = pipeline->fullImageViewSwizzle();
bool fullImageViewSwizzle = pipeline->fullImageViewSwizzle();
bool forTessellation = ((MVKGraphicsPipeline*)pipeline)->isTessellationPipeline();
encodeBinding<MVKMTLBufferBinding>(_vertexBufferBindings, _areVertexBufferBindingsDirty,
[](MVKCommandEncoder* cmdEncoder, MVKMTLBufferBinding& b)->void {
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: b.mtlBuffer
offset: b.offset
atIndex: b.index];
});
if (stage == (forTessellation ? kMVKGraphicsStageVertex : kMVKGraphicsStageRasterization)) {
encodeBindings(kMVKShaderStageVertex, "vertex", fullImageViewSwizzle,
[](MVKCommandEncoder* cmdEncoder, MVKMTLBufferBinding& b)->void {
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: b.mtlBuffer
offset: b.offset
atIndex: b.index];
},
[](MVKCommandEncoder* cmdEncoder, MVKMTLBufferBinding& b, MVKVector<uint32_t>& s)->void {
cmdEncoder->setVertexBytes(cmdEncoder->_mtlRenderEncoder,
s.data(),
s.size() * sizeof(uint32_t),
b.index);
},
[](MVKCommandEncoder* cmdEncoder, MVKMTLTextureBinding& b)->void {
[cmdEncoder->_mtlRenderEncoder setVertexTexture: b.mtlTexture
atIndex: b.index];
},
[](MVKCommandEncoder* cmdEncoder, MVKMTLSamplerStateBinding& b)->void {
[cmdEncoder->_mtlRenderEncoder setVertexSamplerState: b.mtlSamplerState
atIndex: b.index];
});
encodeBinding<MVKMTLBufferBinding>(_fragmentBufferBindings, _areFragmentBufferBindingsDirty,
[](MVKCommandEncoder* cmdEncoder, MVKMTLBufferBinding& b)->void {
[cmdEncoder->_mtlRenderEncoder setFragmentBuffer: b.mtlBuffer
offset: b.offset
atIndex: b.index];
});
if (_vertexAuxBufferBinding.isDirty) {
for (auto& b : _vertexTextureBindings) {
if (b.isDirty) { updateSwizzle(_vertexSwizzleConstants, b.index, b.swizzle); }
}
_cmdEncoder->setVertexBytes(_cmdEncoder->_mtlRenderEncoder,
_vertexSwizzleConstants.data(),
_vertexSwizzleConstants.size() * sizeof(uint32_t),
_vertexAuxBufferBinding.index);
} else {
assertMissingSwizzles(_needsVertexSwizzle && !fullImageViewSwizzle, "vertex", _vertexTextureBindings);
}
if (_fragmentAuxBufferBinding.isDirty) {
for (auto& b : _fragmentTextureBindings) {
if (b.isDirty) { updateSwizzle(_fragmentSwizzleConstants, b.index, b.swizzle); }
}
_cmdEncoder->setFragmentBytes(_cmdEncoder->_mtlRenderEncoder,
_fragmentSwizzleConstants.data(),
_fragmentSwizzleConstants.size() * sizeof(uint32_t),
_fragmentAuxBufferBinding.index);
} else {
assertMissingSwizzles(_needsFragmentSwizzle && !fullImageViewSwizzle, "fragment", _fragmentTextureBindings);
}
encodeBinding<MVKMTLTextureBinding>(_vertexTextureBindings, _areVertexTextureBindingsDirty,
[](MVKCommandEncoder* cmdEncoder, MVKMTLTextureBinding& b)->void {
[cmdEncoder->_mtlRenderEncoder setVertexTexture: b.mtlTexture
atIndex: b.index];
});
if (stage == kMVKGraphicsStageTessControl) {
encodeBindings(kMVKShaderStageTessCtl, "tessellation control", fullImageViewSwizzle,
[](MVKCommandEncoder* cmdEncoder, MVKMTLBufferBinding& b)->void {
[cmdEncoder->getMTLComputeEncoder(kMVKCommandUseTessellationControl) setBuffer: b.mtlBuffer
offset: b.offset
atIndex: b.index];
},
[](MVKCommandEncoder* cmdEncoder, MVKMTLBufferBinding& b, MVKVector<uint32_t>& s)->void {
cmdEncoder->setComputeBytes(cmdEncoder->getMTLComputeEncoder(kMVKCommandUseTessellationControl),
s.data(),
s.size() * sizeof(uint32_t),
b.index);
},
[](MVKCommandEncoder* cmdEncoder, MVKMTLTextureBinding& b)->void {
[cmdEncoder->getMTLComputeEncoder(kMVKCommandUseTessellationControl) setTexture: b.mtlTexture
atIndex: b.index];
},
[](MVKCommandEncoder* cmdEncoder, MVKMTLSamplerStateBinding& b)->void {
[cmdEncoder->getMTLComputeEncoder(kMVKCommandUseTessellationControl) setSamplerState: b.mtlSamplerState
atIndex: b.index];
});
encodeBinding<MVKMTLTextureBinding>(_fragmentTextureBindings, _areFragmentTextureBindingsDirty,
[](MVKCommandEncoder* cmdEncoder, MVKMTLTextureBinding& b)->void {
[cmdEncoder->_mtlRenderEncoder setFragmentTexture: b.mtlTexture
atIndex: b.index];
});
}
encodeBinding<MVKMTLSamplerStateBinding>(_vertexSamplerStateBindings, _areVertexSamplerStateBindingsDirty,
[](MVKCommandEncoder* cmdEncoder, MVKMTLSamplerStateBinding& b)->void {
[cmdEncoder->_mtlRenderEncoder setVertexSamplerState: b.mtlSamplerState
atIndex: b.index];
});
if (forTessellation && stage == kMVKGraphicsStageRasterization) {
encodeBindings(kMVKShaderStageTessEval, "tessellation evaluation", fullImageViewSwizzle,
[](MVKCommandEncoder* cmdEncoder, MVKMTLBufferBinding& b)->void {
[cmdEncoder->_mtlRenderEncoder setVertexBuffer: b.mtlBuffer
offset: b.offset
atIndex: b.index];
},
[](MVKCommandEncoder* cmdEncoder, MVKMTLBufferBinding& b, MVKVector<uint32_t>& s)->void {
cmdEncoder->setVertexBytes(cmdEncoder->_mtlRenderEncoder,
s.data(),
s.size() * sizeof(uint32_t),
b.index);
},
[](MVKCommandEncoder* cmdEncoder, MVKMTLTextureBinding& b)->void {
[cmdEncoder->_mtlRenderEncoder setVertexTexture: b.mtlTexture
atIndex: b.index];
},
[](MVKCommandEncoder* cmdEncoder, MVKMTLSamplerStateBinding& b)->void {
[cmdEncoder->_mtlRenderEncoder setVertexSamplerState: b.mtlSamplerState
atIndex: b.index];
});
encodeBinding<MVKMTLSamplerStateBinding>(_fragmentSamplerStateBindings, _areFragmentSamplerStateBindingsDirty,
[](MVKCommandEncoder* cmdEncoder, MVKMTLSamplerStateBinding& b)->void {
[cmdEncoder->_mtlRenderEncoder setFragmentSamplerState: b.mtlSamplerState
atIndex: b.index];
});
}
if (stage == kMVKGraphicsStageRasterization) {
encodeBindings(kMVKShaderStageFragment, "fragment", fullImageViewSwizzle,
[](MVKCommandEncoder* cmdEncoder, MVKMTLBufferBinding& b)->void {
[cmdEncoder->_mtlRenderEncoder setFragmentBuffer: b.mtlBuffer
offset: b.offset
atIndex: b.index];
},
[](MVKCommandEncoder* cmdEncoder, MVKMTLBufferBinding& b, MVKVector<uint32_t>& s)->void {
cmdEncoder->setFragmentBytes(cmdEncoder->_mtlRenderEncoder,
s.data(),
s.size() * sizeof(uint32_t),
b.index);
},
[](MVKCommandEncoder* cmdEncoder, MVKMTLTextureBinding& b)->void {
[cmdEncoder->_mtlRenderEncoder setFragmentTexture: b.mtlTexture
atIndex: b.index];
},
[](MVKCommandEncoder* cmdEncoder, MVKMTLSamplerStateBinding& b)->void {
[cmdEncoder->_mtlRenderEncoder setFragmentSamplerState: b.mtlSamplerState
atIndex: b.index];
});
}
}
void MVKGraphicsResourcesCommandEncoderState::resetImpl() {
_vertexBufferBindings.clear();
_fragmentBufferBindings.clear();
_vertexTextureBindings.clear();
_fragmentTextureBindings.clear();
_vertexSamplerStateBindings.clear();
_fragmentSamplerStateBindings.clear();
_vertexSwizzleConstants.clear();
_fragmentSwizzleConstants.clear();
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCompute; i++) {
_shaderStages[i].bufferBindings.clear();
_shaderStages[i].textureBindings.clear();
_shaderStages[i].samplerStateBindings.clear();
_shaderStages[i].swizzleConstants.clear();
_areVertexBufferBindingsDirty = false;
_areFragmentBufferBindingsDirty = false;
_areVertexTextureBindingsDirty = false;
_areFragmentTextureBindingsDirty = false;
_areVertexSamplerStateBindingsDirty = false;
_areFragmentSamplerStateBindingsDirty = false;
_vertexAuxBufferBinding.isDirty = false;
_fragmentAuxBufferBinding.isDirty = false;
_shaderStages[i].areBufferBindingsDirty = false;
_shaderStages[i].areTextureBindingsDirty = false;
_shaderStages[i].areSamplerStateBindingsDirty = false;
_shaderStages[i].auxBufferBinding.isDirty = false;
_needsVertexSwizzle = false;
_needsFragmentSwizzle = false;
_shaderStages[i].needsSwizzle = false;
}
}
@ -579,9 +643,9 @@ void MVKComputeResourcesCommandEncoderState::bindSamplerState(const MVKMTLSample
bind(binding, _samplerStateBindings, _areSamplerStateBindingsDirty);
}
void MVKComputeResourcesCommandEncoderState::bindAuxBuffer(const MVKShaderAuxBufferBinding& binding,
void MVKComputeResourcesCommandEncoderState::bindAuxBuffer(const MVKShaderImplicitRezBinding& binding,
bool needAuxBuffer) {
_auxBufferBinding.index = binding.compute;
_auxBufferBinding.index = binding.stages[kMVKShaderStageCompute];
_auxBufferBinding.isDirty = needAuxBuffer;
}
@ -593,7 +657,7 @@ void MVKComputeResourcesCommandEncoderState::markDirty() {
MVKResourcesCommandEncoderState::markDirty(_samplerStateBindings, _areSamplerStateBindingsDirty);
}
void MVKComputeResourcesCommandEncoderState::encodeImpl() {
void MVKComputeResourcesCommandEncoderState::encodeImpl(uint32_t) {
bool fullImageViewSwizzle = false;
MVKPipeline* pipeline = _cmdEncoder->_computePipelineState.getPipeline();
@ -677,7 +741,8 @@ void MVKOcclusionQueryCommandEncoderState::endOcclusionQuery(MVKOcclusionQueryPo
// If the MTLBuffer has not yet been set, see if the command buffer is configured with it
id<MTLBuffer> MVKOcclusionQueryCommandEncoderState::getVisibilityResultMTLBuffer() { return _visibilityResultMTLBuffer; }
void MVKOcclusionQueryCommandEncoderState::encodeImpl() {
void MVKOcclusionQueryCommandEncoderState::encodeImpl(uint32_t stage) {
if (stage != kMVKGraphicsStageRasterization) { return; }
[_cmdEncoder->_mtlRenderEncoder setVisibilityResultMode: _mtlVisibilityResultMode
offset: _mtlVisibilityResultOffset];
}

View File

@ -113,6 +113,12 @@ public:
/** Returns a MTLComputePipelineState for decompressing a buffer into a 3D image. */
id<MTLComputePipelineState> getCmdCopyBufferToImage3DDecompressMTLComputePipelineState(bool needsTempBuff);
/** Returns a MTLComputePipelineState for converting an indirect buffer for use in a tessellated draw. */
id<MTLComputePipelineState> getCmdDrawIndirectConvertBuffersMTLComputePipelineState(bool indexed);
/** Returns a MTLComputePipelineState for copying an index buffer for use in an indirect tessellated draw. */
id<MTLComputePipelineState> getCmdDrawIndexedCopyIndexBufferMTLComputePipelineState(MTLIndexType type);
/** Deletes all the internal resources. */
void clear();
@ -142,5 +148,7 @@ private:
id<MTLComputePipelineState> _mtlCopyBufferBytesComputePipelineState = nil;
id<MTLComputePipelineState> _mtlFillBufferComputePipelineState = nil;
id<MTLComputePipelineState> _mtlCopyBufferToImage3DDecompressComputePipelineState[2] = {nil, nil};
id<MTLComputePipelineState> _mtlDrawIndirectConvertBuffersComputePipelineState[2] = {nil, nil};
id<MTLComputePipelineState> _mtlDrawIndexedCopyIndexBufferComputePipelineState[2] = {nil, nil};
};

View File

@ -113,6 +113,14 @@ id<MTLComputePipelineState> MVKCommandEncodingPool::getCmdCopyBufferToImage3DDec
MVK_ENC_REZ_ACCESS(_mtlCopyBufferToImage3DDecompressComputePipelineState[needsTempBuff ? 1 : 0], newCmdCopyBufferToImage3DDecompressMTLComputePipelineState(needsTempBuff));
}
// Returns the cached compute pipeline that converts an indirect draw buffer into
// tessellation-stage indirect arguments, creating it on first use.
// One cache slot per variant: [1] for indexed draws, [0] for non-indexed.
// NOTE(review): MVK_ENC_REZ_ACCESS appears to lazily create and cache the resource
// under _lock — confirm against the macro's definition.
id<MTLComputePipelineState> MVKCommandEncodingPool::getCmdDrawIndirectConvertBuffersMTLComputePipelineState(bool indexed) {
    MVK_ENC_REZ_ACCESS(_mtlDrawIndirectConvertBuffersComputePipelineState[indexed ? 1 : 0], newCmdDrawIndirectConvertBuffersMTLComputePipelineState(indexed));
}
// Returns the cached compute pipeline that copies/expands an index buffer for an
// indirect tessellated draw, creating it on first use.
// One cache slot per index width: [1] for 16-bit indices, [0] for 32-bit.
id<MTLComputePipelineState> MVKCommandEncodingPool::getCmdDrawIndexedCopyIndexBufferMTLComputePipelineState(MTLIndexType type) {
    MVK_ENC_REZ_ACCESS(_mtlDrawIndexedCopyIndexBufferComputePipelineState[type == MTLIndexTypeUInt16 ? 1 : 0], newCmdDrawIndexedCopyIndexBufferMTLComputePipelineState(type));
}
void MVKCommandEncodingPool::clear() {
lock_guard<mutex> lock(_lock);
destroyMetalResources();
@ -177,5 +185,15 @@ void MVKCommandEncodingPool::destroyMetalResources() {
[_mtlCopyBufferToImage3DDecompressComputePipelineState[1] release];
_mtlCopyBufferToImage3DDecompressComputePipelineState[0] = nil;
_mtlCopyBufferToImage3DDecompressComputePipelineState[1] = nil;
[_mtlDrawIndirectConvertBuffersComputePipelineState[0] release];
[_mtlDrawIndirectConvertBuffersComputePipelineState[1] release];
_mtlDrawIndirectConvertBuffersComputePipelineState[0] = nil;
_mtlDrawIndirectConvertBuffersComputePipelineState[1] = nil;
[_mtlDrawIndexedCopyIndexBufferComputePipelineState[0] release];
[_mtlDrawIndexedCopyIndexBufferComputePipelineState[1] release];
_mtlDrawIndexedCopyIndexBufferComputePipelineState[0] = nil;
_mtlDrawIndexedCopyIndexBufferComputePipelineState[1] = nil;
}

View File

@ -168,5 +168,110 @@ kernel void cmdCopyBufferToImage3DDecompressTempBufferDXTn(constant uint8_t* src
} \n\
} \n\
\n\
#if __METAL_VERSION__ == 210 \n\
// This structure is missing from the MSL headers. :/ \n\
struct MTLStageInRegionIndirectArguments { \n\
uint32_t stageInOrigin[3]; \n\
uint32_t stageInSize[3]; \n\
}; \n\
#endif \n\
\n\
#if __METAL_VERSION__ >= 120 \n\
kernel void cmdDrawIndirectConvertBuffers(const device char* srcBuff [[buffer(0)]], \n\
device char* destBuff [[buffer(1)]], \n\
constant uint32_t& srcStride [[buffer(2)]], \n\
constant uint32_t& inControlPointCount [[buffer(3)]], \n\
constant uint32_t& outControlPointCount [[buffer(4)]], \n\
constant uint32_t& drawCount [[buffer(5)]], \n\
uint idx [[thread_position_in_grid]]) { \n\
if (idx >= drawCount) { return; } \n\
const device auto& src = *reinterpret_cast<const device MTLDrawPrimitivesIndirectArguments*>(srcBuff + idx * srcStride);\n\
device char* dest = destBuff + idx * (sizeof(MTLDispatchThreadgroupsIndirectArguments) + sizeof(MTLDrawPatchIndirectArguments));\n\
#if __METAL_VERSION__ >= 210 \n\
device auto& destSI = *(device MTLStageInRegionIndirectArguments*)dest; \n\
dest += sizeof(MTLStageInRegionIndirectArguments); \n\
#endif \n\
device auto& destTC = *(device MTLDispatchThreadgroupsIndirectArguments*)dest; \n\
device auto& destTE = *(device MTLDrawPatchIndirectArguments*)(dest + sizeof(MTLDispatchThreadgroupsIndirectArguments));\n\
destTC.threadgroupsPerGrid[0] = (src.vertexCount * src.instanceCount + inControlPointCount - 1) / inControlPointCount;\n\
destTC.threadgroupsPerGrid[1] = destTC.threadgroupsPerGrid[2] = 1; \n\
destTE.patchCount = destTC.threadgroupsPerGrid[0]; \n\
destTE.instanceCount = 1; \n\
destTE.patchStart = destTE.baseInstance = 0; \n\
#if __METAL_VERSION__ >= 210 \n\
destSI.stageInOrigin[0] = destSI.stageInOrigin[1] = destSI.stageInOrigin[2] = 0; \n\
destSI.stageInSize[0] = src.instanceCount * max(src.vertexCount, outControlPointCount * destTE.patchCount); \n\
destSI.stageInSize[1] = destSI.stageInSize[2] = 1; \n\
#endif \n\
} \n\
\n\
kernel void cmdDrawIndexedIndirectConvertBuffers(const device char* srcBuff [[buffer(0)]], \n\
device char* destBuff [[buffer(1)]], \n\
constant uint32_t& srcStride [[buffer(2)]], \n\
constant uint32_t& inControlPointCount [[buffer(3)]], \n\
constant uint32_t& outControlPointCount [[buffer(4)]], \n\
constant uint32_t& drawCount [[buffer(5)]], \n\
uint idx [[thread_position_in_grid]]) { \n\
if (idx >= drawCount) { return; } \n\
const device auto& src = *reinterpret_cast<const device MTLDrawIndexedPrimitivesIndirectArguments*>(srcBuff + idx * srcStride);\n\
device char* dest = destBuff + idx * (sizeof(MTLDispatchThreadgroupsIndirectArguments) + sizeof(MTLDrawPatchIndirectArguments));\n\
#if __METAL_VERSION__ >= 210 \n\
device auto& destSI = *(device MTLStageInRegionIndirectArguments*)dest; \n\
dest += sizeof(MTLStageInRegionIndirectArguments); \n\
#endif \n\
device auto& destTC = *(device MTLDispatchThreadgroupsIndirectArguments*)dest; \n\
device auto& destTE = *(device MTLDrawPatchIndirectArguments*)(dest + sizeof(MTLDispatchThreadgroupsIndirectArguments));\n\
destTC.threadgroupsPerGrid[0] = (src.indexCount * src.instanceCount + inControlPointCount - 1) / inControlPointCount;\n\
destTC.threadgroupsPerGrid[1] = destTC.threadgroupsPerGrid[2] = 1; \n\
destTE.patchCount = destTC.threadgroupsPerGrid[0]; \n\
destTE.instanceCount = 1; \n\
destTE.patchStart = destTE.baseInstance = 0; \n\
#if __METAL_VERSION__ >= 210 \n\
destSI.stageInOrigin[0] = destSI.stageInOrigin[1] = destSI.stageInOrigin[2] = 0; \n\
destSI.stageInSize[0] = src.instanceCount * max(src.indexCount, outControlPointCount * destTE.patchCount); \n\
destSI.stageInSize[1] = destSI.stageInSize[2] = 1; \n\
#endif \n\
} \n\
\n\
kernel void cmdDrawIndexedCopyIndex16Buffer(const device uint16_t* srcBuff [[buffer(0)]], \n\
device uint16_t* destBuff [[buffer(1)]], \n\
constant uint32_t& inControlPointCount [[buffer(2)]], \n\
constant uint32_t& outControlPointCount [[buffer(3)]], \n\
const device MTLDrawIndexedPrimitivesIndirectArguments& params [[buffer(4)]]) {\n\
uint patchCount = (params.indexCount + inControlPointCount - 1) / inControlPointCount; \n\
for (uint i = 0; i < params.instanceCount; i++) { \n\
for (uint j = 0; j < patchCount; j++) { \n\
for (uint k = 0; k < max(inControlPointCount, outControlPointCount); k++) { \n\
if (k < inControlPointCount) { \n\
destBuff[i * params.indexCount + j * outControlPointCount + k] = srcBuff[params.indexStart + j * inControlPointCount + k] + i * params.indexCount;\n\
} else { \n\
destBuff[i * params.indexCount + j * outControlPointCount + k] = 0; \n\
} \n\
} \n\
} \n\
} \n\
} \n\
\n\
kernel void cmdDrawIndexedCopyIndex32Buffer(const device uint32_t* srcBuff [[buffer(0)]], \n\
device uint32_t* destBuff [[buffer(1)]], \n\
constant uint32_t& inControlPointCount [[buffer(2)]], \n\
constant uint32_t& outControlPointCount [[buffer(3)]], \n\
const device MTLDrawIndexedPrimitivesIndirectArguments& params [[buffer(4)]]) {\n\
uint patchCount = (params.indexCount + inControlPointCount - 1) / inControlPointCount; \n\
for (uint i = 0; i < params.instanceCount; i++) { \n\
for (uint j = 0; j < patchCount; j++) { \n\
for (uint k = 0; k < max(inControlPointCount, outControlPointCount); k++) { \n\
if (k < inControlPointCount) { \n\
destBuff[i * params.indexCount + j * outControlPointCount + k] = srcBuff[params.indexStart + j * inControlPointCount + k] + i * params.indexCount;\n\
} else { \n\
destBuff[i * params.indexCount + j * outControlPointCount + k] = 0; \n\
} \n\
} \n\
} \n\
} \n\
} \n\
\n\
#endif \n\
\n\
";

View File

@ -361,6 +361,12 @@ public:
/** Returns a new MTLComputePipelineState for copying between a buffer holding compressed data and a 3D image. */
id<MTLComputePipelineState> newCmdCopyBufferToImage3DDecompressMTLComputePipelineState(bool needTempBuf);
/** Returns a new MTLComputePipelineState for converting an indirect buffer for use in a tessellated draw. */
id<MTLComputePipelineState> newCmdDrawIndirectConvertBuffersMTLComputePipelineState(bool indexed);
/** Returns a new MTLComputePipelineState for copying an index buffer for use in a tessellated draw. */
id<MTLComputePipelineState> newCmdDrawIndexedCopyIndexBufferMTLComputePipelineState(MTLIndexType type);
#pragma mark Construction

View File

@ -385,6 +385,16 @@ id<MTLComputePipelineState> MVKCommandResourceFactory::newCmdCopyBufferToImage3D
"cmdCopyBufferToImage3DDecompressDXTn"));
}
// Returns a new compute pipeline state that converts an indirect draw buffer into
// the per-stage indirect arguments needed for a tessellated draw (threadgroup
// dispatch + patch-draw arguments).
// The 'indexed' flag selects the kernel that reads MTLDrawIndexedPrimitivesIndirectArguments;
// the previous ternary had the two kernel names swapped, binding the non-indexed
// conversion kernel for indexed draws and vice versa.
id<MTLComputePipelineState> MVKCommandResourceFactory::newCmdDrawIndirectConvertBuffersMTLComputePipelineState(bool indexed) {
	return newMTLComputePipelineState(getFunctionNamed(indexed ? "cmdDrawIndexedIndirectConvertBuffers" :
	                                                             "cmdDrawIndirectConvertBuffers"));
}
// Returns a new compute pipeline state that copies an index buffer for use in an
// indirect tessellated draw, selecting the kernel that matches the index element
// width (16-bit vs. 32-bit).
id<MTLComputePipelineState> MVKCommandResourceFactory::newCmdDrawIndexedCopyIndexBufferMTLComputePipelineState(MTLIndexType type) {
	const char* kernelName = (type == MTLIndexTypeUInt16) ? "cmdDrawIndexedCopyIndex16Buffer"
	                                                      : "cmdDrawIndexedCopyIndex32Buffer";
	return newMTLComputePipelineState(getFunctionNamed(kernelName));
}
#pragma mark Support methods

View File

@ -44,6 +44,9 @@ public:
*/
inline void* getContents() const { return (void*)((uintptr_t)_mtlBuffer.contents + _offset); }
/** Returns the pool whence this object was created. */
MVKMTLBufferAllocationPool* getPool() const { return _pool; }
/** Returns this object back to the pool that created it. This will reset the value of _next member. */
void returnToPool();

View File

@ -21,6 +21,7 @@
#include "MVKDevice.h"
#include "MVKImage.h"
#include "MVKVector.h"
#include "mvk_datatypes.h"
#include <MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.h>
#include <unordered_set>
#include <unordered_map>
@ -55,9 +56,7 @@ typedef struct MVKShaderStageResourceBinding {
/** Indicates the Metal resource indexes used by each shader stage in a descriptor binding. */
typedef struct MVKShaderResourceBinding {
MVKShaderStageResourceBinding vertexStage;
MVKShaderStageResourceBinding fragmentStage;
MVKShaderStageResourceBinding computeStage;
MVKShaderStageResourceBinding stages[kMVKShaderStageMax];
uint32_t getMaxBufferIndex();
uint32_t getMaxTextureIndex();
@ -120,9 +119,7 @@ protected:
VkDescriptorSetLayoutBinding _info;
std::vector<MVKSampler*> _immutableSamplers;
MVKShaderResourceBinding _mtlResourceIndexOffsets;
bool _applyToVertexStage;
bool _applyToFragmentStage;
bool _applyToComputeStage;
bool _applyToStage[kMVKShaderStageMax];
};

View File

@ -47,29 +47,29 @@ MVK_PUBLIC_SYMBOL MVKShaderStageResourceBinding& MVKShaderStageResourceBinding::
#pragma mark MVKShaderResourceBinding
MVK_PUBLIC_SYMBOL uint32_t MVKShaderResourceBinding::getMaxBufferIndex() {
return max({vertexStage.bufferIndex, fragmentStage.bufferIndex, computeStage.bufferIndex});
return max({stages[kMVKShaderStageVertex].bufferIndex, stages[kMVKShaderStageTessCtl].bufferIndex, stages[kMVKShaderStageTessEval].bufferIndex, stages[kMVKShaderStageFragment].bufferIndex, stages[kMVKShaderStageCompute].bufferIndex});
}
MVK_PUBLIC_SYMBOL uint32_t MVKShaderResourceBinding::getMaxTextureIndex() {
return max({vertexStage.textureIndex, fragmentStage.textureIndex, computeStage.textureIndex});
return max({stages[kMVKShaderStageVertex].textureIndex, stages[kMVKShaderStageTessCtl].textureIndex, stages[kMVKShaderStageTessEval].textureIndex, stages[kMVKShaderStageFragment].textureIndex, stages[kMVKShaderStageCompute].textureIndex});
}
MVK_PUBLIC_SYMBOL uint32_t MVKShaderResourceBinding::getMaxSamplerIndex() {
return max({vertexStage.samplerIndex, fragmentStage.samplerIndex, computeStage.samplerIndex});
return max({stages[kMVKShaderStageVertex].samplerIndex, stages[kMVKShaderStageTessCtl].samplerIndex, stages[kMVKShaderStageTessEval].samplerIndex, stages[kMVKShaderStageFragment].samplerIndex, stages[kMVKShaderStageCompute].samplerIndex});
}
MVK_PUBLIC_SYMBOL MVKShaderResourceBinding MVKShaderResourceBinding::operator+ (const MVKShaderResourceBinding& rhs) {
MVKShaderResourceBinding rslt;
rslt.vertexStage = this->vertexStage + rhs.vertexStage;
rslt.fragmentStage = this->fragmentStage + rhs.fragmentStage;
rslt.computeStage = this->computeStage + rhs.computeStage;
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
rslt.stages[i] = this->stages[i] + rhs.stages[i];
}
return rslt;
}
MVK_PUBLIC_SYMBOL MVKShaderResourceBinding& MVKShaderResourceBinding::operator+= (const MVKShaderResourceBinding& rhs) {
this->vertexStage += rhs.vertexStage;
this->fragmentStage += rhs.fragmentStage;
this->computeStage += rhs.computeStage;
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
this->stages[i] += rhs.stages[i];
}
return *this;
}
@ -102,17 +102,15 @@ void MVKDescriptorSetLayoutBinding::bind(MVKCommandEncoder* cmdEncoder,
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: {
bb.mtlBuffer = descBinding._mtlBuffers[rezIdx];
bb.offset = descBinding._mtlBufferOffsets[rezIdx] + bufferDynamicOffset;
if (_applyToVertexStage) {
bb.index = mtlIdxs.vertexStage.bufferIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindVertexBuffer(bb);
}
if (_applyToFragmentStage) {
bb.index = mtlIdxs.fragmentStage.bufferIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindFragmentBuffer(bb);
}
if (_applyToComputeStage) {
bb.index = mtlIdxs.computeStage.bufferIndex + rezIdx;
cmdEncoder->_computeResourcesState.bindBuffer(bb);
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
if (_applyToStage[i]) {
bb.index = mtlIdxs.stages[i].bufferIndex + rezIdx;
if (i == kMVKShaderStageCompute) {
cmdEncoder->_computeResourcesState.bindBuffer(bb);
} else {
cmdEncoder->_graphicsResourcesState.bindBuffer(MVKShaderStage(i), bb);
}
}
}
break;
}
@ -128,34 +126,30 @@ void MVKDescriptorSetLayoutBinding::bind(MVKCommandEncoder* cmdEncoder,
} else {
tb.swizzle = 0;
}
if (_applyToVertexStage) {
tb.index = mtlIdxs.vertexStage.textureIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindVertexTexture(tb);
}
if (_applyToFragmentStage) {
tb.index = mtlIdxs.fragmentStage.textureIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindFragmentTexture(tb);
}
if (_applyToComputeStage) {
tb.index = mtlIdxs.computeStage.textureIndex + rezIdx;
cmdEncoder->_computeResourcesState.bindTexture(tb);
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
if (_applyToStage[i]) {
tb.index = mtlIdxs.stages[i].textureIndex + rezIdx;
if (i == kMVKShaderStageCompute) {
cmdEncoder->_computeResourcesState.bindTexture(tb);
} else {
cmdEncoder->_graphicsResourcesState.bindTexture(MVKShaderStage(i), tb);
}
}
}
break;
}
case VK_DESCRIPTOR_TYPE_SAMPLER: {
sb.mtlSamplerState = descBinding._mtlSamplers[rezIdx];
if (_applyToVertexStage) {
sb.index = mtlIdxs.vertexStage.samplerIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindVertexSamplerState(sb);
}
if (_applyToFragmentStage) {
sb.index = mtlIdxs.fragmentStage.samplerIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindFragmentSamplerState(sb);
}
if (_applyToComputeStage) {
sb.index = mtlIdxs.computeStage.samplerIndex + rezIdx;
cmdEncoder->_computeResourcesState.bindSamplerState(sb);
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
if (_applyToStage[i]) {
sb.index = mtlIdxs.stages[i].samplerIndex + rezIdx;
if (i == kMVKShaderStageCompute) {
cmdEncoder->_computeResourcesState.bindSamplerState(sb);
} else {
cmdEncoder->_graphicsResourcesState.bindSamplerState(MVKShaderStage(i), sb);
}
}
}
break;
}
@ -168,23 +162,18 @@ void MVKDescriptorSetLayoutBinding::bind(MVKCommandEncoder* cmdEncoder,
tb.swizzle = 0;
}
sb.mtlSamplerState = descBinding._mtlSamplers[rezIdx];
if (_applyToVertexStage) {
tb.index = mtlIdxs.vertexStage.textureIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindVertexTexture(tb);
sb.index = mtlIdxs.vertexStage.samplerIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindVertexSamplerState(sb);
}
if (_applyToFragmentStage) {
tb.index = mtlIdxs.fragmentStage.textureIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindFragmentTexture(tb);
sb.index = mtlIdxs.fragmentStage.samplerIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindFragmentSamplerState(sb);
}
if (_applyToComputeStage) {
tb.index = mtlIdxs.computeStage.textureIndex + rezIdx;
cmdEncoder->_computeResourcesState.bindTexture(tb);
sb.index = mtlIdxs.computeStage.samplerIndex + rezIdx;
cmdEncoder->_computeResourcesState.bindSamplerState(sb);
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
if (_applyToStage[i]) {
tb.index = mtlIdxs.stages[i].textureIndex + rezIdx;
sb.index = mtlIdxs.stages[i].samplerIndex + rezIdx;
if (i == kMVKShaderStageCompute) {
cmdEncoder->_computeResourcesState.bindTexture(tb);
cmdEncoder->_computeResourcesState.bindSamplerState(sb);
} else {
cmdEncoder->_graphicsResourcesState.bindTexture(MVKShaderStage(i), tb);
cmdEncoder->_graphicsResourcesState.bindSamplerState(MVKShaderStage(i), sb);
}
}
}
break;
}
@ -244,17 +233,15 @@ void MVKDescriptorSetLayoutBinding::push(MVKCommandEncoder* cmdEncoder,
MVKBuffer* buffer = (MVKBuffer*)bufferInfo.buffer;
bb.mtlBuffer = buffer->getMTLBuffer();
bb.offset = bufferInfo.offset;
if (_applyToVertexStage) {
bb.index = mtlIdxs.vertexStage.bufferIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindVertexBuffer(bb);
}
if (_applyToFragmentStage) {
bb.index = mtlIdxs.fragmentStage.bufferIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindFragmentBuffer(bb);
}
if (_applyToComputeStage) {
bb.index = mtlIdxs.computeStage.bufferIndex + rezIdx;
cmdEncoder->_computeResourcesState.bindBuffer(bb);
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
if (_applyToStage[i]) {
bb.index = mtlIdxs.stages[i].bufferIndex + rezIdx;
if (i == kMVKShaderStageCompute) {
cmdEncoder->_computeResourcesState.bindBuffer(bb);
} else {
cmdEncoder->_graphicsResourcesState.bindBuffer(MVKShaderStage(i), bb);
}
}
}
break;
}
@ -270,17 +257,15 @@ void MVKDescriptorSetLayoutBinding::push(MVKCommandEncoder* cmdEncoder,
} else {
tb.swizzle = 0;
}
if (_applyToVertexStage) {
tb.index = mtlIdxs.vertexStage.textureIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindVertexTexture(tb);
}
if (_applyToFragmentStage) {
tb.index = mtlIdxs.fragmentStage.textureIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindFragmentTexture(tb);
}
if (_applyToComputeStage) {
tb.index = mtlIdxs.computeStage.textureIndex + rezIdx;
cmdEncoder->_computeResourcesState.bindTexture(tb);
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
if (_applyToStage[i]) {
tb.index = mtlIdxs.stages[i].textureIndex + rezIdx;
if (i == kMVKShaderStageCompute) {
cmdEncoder->_computeResourcesState.bindTexture(tb);
} else {
cmdEncoder->_graphicsResourcesState.bindTexture(MVKShaderStage(i), tb);
}
}
}
break;
}
@ -290,17 +275,15 @@ void MVKDescriptorSetLayoutBinding::push(MVKCommandEncoder* cmdEncoder,
auto* bufferView = get<MVKBufferView*>(pData, stride, rezIdx - dstArrayElement);
tb.mtlTexture = bufferView->getMTLTexture();
tb.swizzle = 0;
if (_applyToVertexStage) {
tb.index = mtlIdxs.vertexStage.textureIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindVertexTexture(tb);
}
if (_applyToFragmentStage) {
tb.index = mtlIdxs.fragmentStage.textureIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindFragmentTexture(tb);
}
if (_applyToComputeStage) {
tb.index = mtlIdxs.computeStage.textureIndex + rezIdx;
cmdEncoder->_computeResourcesState.bindTexture(tb);
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
if (_applyToStage[i]) {
tb.index = mtlIdxs.stages[i].textureIndex + rezIdx;
if (i == kMVKShaderStageCompute) {
cmdEncoder->_computeResourcesState.bindTexture(tb);
} else {
cmdEncoder->_graphicsResourcesState.bindTexture(MVKShaderStage(i), tb);
}
}
}
break;
}
@ -312,17 +295,15 @@ void MVKDescriptorSetLayoutBinding::push(MVKCommandEncoder* cmdEncoder,
else
sampler = _immutableSamplers[rezIdx];
sb.mtlSamplerState = sampler->getMTLSamplerState();
if (_applyToVertexStage) {
sb.index = mtlIdxs.vertexStage.samplerIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindVertexSamplerState(sb);
}
if (_applyToFragmentStage) {
sb.index = mtlIdxs.fragmentStage.samplerIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindFragmentSamplerState(sb);
}
if (_applyToComputeStage) {
sb.index = mtlIdxs.computeStage.samplerIndex + rezIdx;
cmdEncoder->_computeResourcesState.bindSamplerState(sb);
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
if (_applyToStage[i]) {
sb.index = mtlIdxs.stages[i].samplerIndex + rezIdx;
if (i == kMVKShaderStageCompute) {
cmdEncoder->_computeResourcesState.bindSamplerState(sb);
} else {
cmdEncoder->_graphicsResourcesState.bindSamplerState(MVKShaderStage(i), sb);
}
}
}
break;
}
@ -338,23 +319,18 @@ void MVKDescriptorSetLayoutBinding::push(MVKCommandEncoder* cmdEncoder,
tb.swizzle = 0;
}
sb.mtlSamplerState = sampler->getMTLSamplerState();
if (_applyToVertexStage) {
tb.index = mtlIdxs.vertexStage.textureIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindVertexTexture(tb);
sb.index = mtlIdxs.vertexStage.samplerIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindVertexSamplerState(sb);
}
if (_applyToFragmentStage) {
tb.index = mtlIdxs.fragmentStage.textureIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindFragmentTexture(tb);
sb.index = mtlIdxs.fragmentStage.samplerIndex + rezIdx;
cmdEncoder->_graphicsResourcesState.bindFragmentSamplerState(sb);
}
if (_applyToComputeStage) {
tb.index = mtlIdxs.computeStage.textureIndex + rezIdx;
cmdEncoder->_computeResourcesState.bindTexture(tb);
sb.index = mtlIdxs.computeStage.samplerIndex + rezIdx;
cmdEncoder->_computeResourcesState.bindSamplerState(sb);
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
if (_applyToStage[i]) {
tb.index = mtlIdxs.stages[i].textureIndex + rezIdx;
sb.index = mtlIdxs.stages[i].samplerIndex + rezIdx;
if (i == kMVKShaderStageCompute) {
cmdEncoder->_computeResourcesState.bindTexture(tb);
cmdEncoder->_computeResourcesState.bindSamplerState(sb);
} else {
cmdEncoder->_graphicsResourcesState.bindTexture(MVKShaderStage(i), tb);
cmdEncoder->_graphicsResourcesState.bindSamplerState(MVKShaderStage(i), sb);
}
}
}
break;
}
@ -380,56 +356,36 @@ void MVKDescriptorSetLayoutBinding::populateShaderConverterContext(SPIRVToMSLCon
// Establish the resource indices to use, by combining the offsets of the DSL and this DSL binding.
MVKShaderResourceBinding mtlIdxs = _mtlResourceIndexOffsets + dslMTLRezIdxOffsets;
if (_applyToVertexStage) {
mvkPopulateShaderConverterContext(context,
mtlIdxs.vertexStage,
spv::ExecutionModelVertex,
dslIndex,
_info.binding);
}
if (_applyToFragmentStage) {
mvkPopulateShaderConverterContext(context,
mtlIdxs.fragmentStage,
spv::ExecutionModelFragment,
dslIndex,
_info.binding);
}
if (_applyToComputeStage) {
mvkPopulateShaderConverterContext(context,
mtlIdxs.computeStage,
spv::ExecutionModelGLCompute,
dslIndex,
_info.binding);
static const spv::ExecutionModel models[] = {
spv::ExecutionModelVertex,
spv::ExecutionModelTessellationControl,
spv::ExecutionModelTessellationEvaluation,
spv::ExecutionModelFragment,
spv::ExecutionModelGLCompute
};
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
if (_applyToStage[i]) {
mvkPopulateShaderConverterContext(context,
mtlIdxs.stages[i],
models[i],
dslIndex,
_info.binding);
}
}
}
MVKDescriptorSetLayoutBinding::MVKDescriptorSetLayoutBinding(MVKDevice* device,
MVKDescriptorSetLayout* layout,
const VkDescriptorSetLayoutBinding* pBinding) : MVKBaseDeviceObject(device) {
// Determine the shader stages used by this binding
_applyToVertexStage = mvkAreFlagsEnabled(pBinding->stageFlags, VK_SHADER_STAGE_VERTEX_BIT);
_applyToFragmentStage = mvkAreFlagsEnabled(pBinding->stageFlags, VK_SHADER_STAGE_FRAGMENT_BIT);
_applyToComputeStage = mvkAreFlagsEnabled(pBinding->stageFlags, VK_SHADER_STAGE_COMPUTE_BIT);
// If this binding is used by the vertex shader, set the Metal resource index
if (_applyToVertexStage) {
initMetalResourceIndexOffsets(&_mtlResourceIndexOffsets.vertexStage,
&layout->_mtlResourceCounts.vertexStage, pBinding);
}
// If this binding is used by the fragment shader, set the Metal resource index
if (_applyToFragmentStage) {
initMetalResourceIndexOffsets(&_mtlResourceIndexOffsets.fragmentStage,
&layout->_mtlResourceCounts.fragmentStage, pBinding);
}
// If this binding is used by a compute shader, set the Metal resource index
if (_applyToComputeStage) {
initMetalResourceIndexOffsets(&_mtlResourceIndexOffsets.computeStage,
&layout->_mtlResourceCounts.computeStage, pBinding);
}
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
// Determine if this binding is used by this shader stage
_applyToStage[i] = mvkAreFlagsEnabled(pBinding->stageFlags, mvkVkShaderStageFlagBitsFromMVKShaderStage(MVKShaderStage(i)));
// If this binding is used by the shader, set the Metal resource index
if (_applyToStage[i]) {
initMetalResourceIndexOffsets(&_mtlResourceIndexOffsets.stages[i],
&layout->_mtlResourceCounts.stages[i], pBinding);
}
}
// If immutable samplers are defined, copy them in
if ( pBinding->pImmutableSamplers &&
@ -448,9 +404,10 @@ MVKDescriptorSetLayoutBinding::MVKDescriptorSetLayoutBinding(MVKDevice* device,
MVKDescriptorSetLayoutBinding::MVKDescriptorSetLayoutBinding(const MVKDescriptorSetLayoutBinding& binding) :
MVKBaseDeviceObject(binding._device), _info(binding._info), _immutableSamplers(binding._immutableSamplers),
_mtlResourceIndexOffsets(binding._mtlResourceIndexOffsets),
_applyToVertexStage(binding._applyToVertexStage), _applyToFragmentStage(binding._applyToFragmentStage),
_applyToComputeStage(binding._applyToComputeStage) {
_mtlResourceIndexOffsets(binding._mtlResourceIndexOffsets) {
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
_applyToStage[i] = binding._applyToStage[i];
}
for (MVKSampler* sampler : _immutableSamplers) {
sampler->retain();
}

View File

@ -842,6 +842,11 @@ void MVKPhysicalDevice::initFeatures() {
if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily2_v4] ) {
_features.depthClamp = true;
}
if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily3_v2] ) {
_features.tessellationShader = true;
_features.shaderTessellationAndGeometryPointSize = true;
}
if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily4_v1] ) {
_features.imageCubeArray = true;
@ -861,7 +866,9 @@ void MVKPhysicalDevice::initFeatures() {
_features.fragmentStoresAndAtomics = true;
if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_macOS_GPUFamily1_v2] ) {
_features.tessellationShader = true;
_features.dualSrcBlend = true;
_features.shaderTessellationAndGeometryPointSize = true;
}
if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_macOS_GPUFamily1_v3] ) {
@ -880,7 +887,7 @@ void MVKPhysicalDevice::initFeatures() {
// VkBool32 imageCubeArray; // done
// VkBool32 independentBlend; // done
// VkBool32 geometryShader;
// VkBool32 tessellationShader;
// VkBool32 tessellationShader; // done
// VkBool32 sampleRateShading; // done
// VkBool32 dualSrcBlend; // done
// VkBool32 logicOp;
@ -902,7 +909,7 @@ void MVKPhysicalDevice::initFeatures() {
// VkBool32 pipelineStatisticsQuery;
// VkBool32 vertexPipelineStoresAndAtomics; // done
// VkBool32 fragmentStoresAndAtomics; // done
// VkBool32 shaderTessellationAndGeometryPointSize;
// VkBool32 shaderTessellationAndGeometryPointSize; // done
// VkBool32 shaderImageGatherExtended; // done
// VkBool32 shaderStorageImageExtendedFormats; // done
// VkBool32 shaderStorageImageMultisample;
@ -955,16 +962,7 @@ void MVKPhysicalDevice::initProperties() {
#endif
_properties.limits.maxFragmentOutputAttachments = _properties.limits.maxColorAttachments;
#if MVK_IOS
if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily1_v4] ) {
_properties.limits.maxFragmentDualSrcAttachments = 1;
} else {
_properties.limits.maxFragmentDualSrcAttachments = 0;
}
#endif
#if MVK_MACOS
_properties.limits.maxFragmentDualSrcAttachments = 1;
#endif
_properties.limits.maxFragmentDualSrcAttachments = _features.dualSrcBlend ? 1 : 0;
_properties.limits.framebufferColorSampleCounts = _metalFeatures.supportedSampleCounts;
_properties.limits.framebufferDepthSampleCounts = _metalFeatures.supportedSampleCounts;
@ -976,39 +974,24 @@ void MVKPhysicalDevice::initProperties() {
_properties.limits.sampledImageStencilSampleCounts = _metalFeatures.supportedSampleCounts;
_properties.limits.storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT;
_properties.limits.maxSampleMaskWords = 1;
_properties.limits.maxImageDimension1D = _metalFeatures.maxTextureDimension;
_properties.limits.maxImageDimension2D = _metalFeatures.maxTextureDimension;
_properties.limits.maxImageDimensionCube = _metalFeatures.maxTextureDimension;
_properties.limits.maxFramebufferWidth = _metalFeatures.maxTextureDimension;
_properties.limits.maxFramebufferHeight = _metalFeatures.maxTextureDimension;
if ( _metalFeatures.layeredRendering ) {
_properties.limits.maxFramebufferLayers = 256;
} else {
_properties.limits.maxFramebufferLayers = 1;
}
_properties.limits.maxFramebufferLayers = _metalFeatures.layeredRendering ? 256 : 1;
_properties.limits.maxViewportDimensions[0] = _metalFeatures.maxTextureDimension;
_properties.limits.maxViewportDimensions[1] = _metalFeatures.maxTextureDimension;
float maxVPDim = max(_properties.limits.maxViewportDimensions[0], _properties.limits.maxViewportDimensions[1]);
_properties.limits.viewportBoundsRange[0] = (-2.0 * maxVPDim);
_properties.limits.viewportBoundsRange[1] = (2.0 * maxVPDim) - 1;
_properties.limits.maxViewports = _features.multiViewport ? 16 : 1;
_properties.limits.maxImageDimension3D = (2 * KIBI);
_properties.limits.maxImageArrayLayers = (2 * KIBI);
#if MVK_MACOS
if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_macOS_GPUFamily1_v3] ) {
_properties.limits.maxViewports = 16;
} else {
_properties.limits.maxViewports = 1;
}
#endif
#if MVK_IOS
if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily5_v1] ) {
_properties.limits.maxViewports = 16;
} else {
_properties.limits.maxViewports = 1;
}
#endif
_properties.limits.maxSamplerAnisotropy = 16;
_properties.limits.maxVertexInputAttributes = 31;
@ -1027,14 +1010,14 @@ void MVKPhysicalDevice::initProperties() {
_properties.limits.maxPerStageResources = (_metalFeatures.maxPerStageBufferCount + _metalFeatures.maxPerStageTextureCount);
_properties.limits.maxFragmentCombinedOutputResources = _properties.limits.maxPerStageResources;
_properties.limits.maxDescriptorSetSamplers = (_properties.limits.maxPerStageDescriptorSamplers * 2);
_properties.limits.maxDescriptorSetUniformBuffers = (_properties.limits.maxPerStageDescriptorUniformBuffers * 2);
_properties.limits.maxDescriptorSetUniformBuffersDynamic = (_properties.limits.maxPerStageDescriptorUniformBuffers * 2);
_properties.limits.maxDescriptorSetStorageBuffers = (_properties.limits.maxPerStageDescriptorStorageBuffers * 2);
_properties.limits.maxDescriptorSetStorageBuffersDynamic = (_properties.limits.maxPerStageDescriptorStorageBuffers * 2);
_properties.limits.maxDescriptorSetSampledImages = (_properties.limits.maxPerStageDescriptorSampledImages * 2);
_properties.limits.maxDescriptorSetStorageImages = (_properties.limits.maxPerStageDescriptorStorageImages * 2);
_properties.limits.maxDescriptorSetInputAttachments = (_properties.limits.maxPerStageDescriptorInputAttachments * 2);
_properties.limits.maxDescriptorSetSamplers = (_properties.limits.maxPerStageDescriptorSamplers * 4);
_properties.limits.maxDescriptorSetUniformBuffers = (_properties.limits.maxPerStageDescriptorUniformBuffers * 4);
_properties.limits.maxDescriptorSetUniformBuffersDynamic = (_properties.limits.maxPerStageDescriptorUniformBuffers * 4);
_properties.limits.maxDescriptorSetStorageBuffers = (_properties.limits.maxPerStageDescriptorStorageBuffers * 4);
_properties.limits.maxDescriptorSetStorageBuffersDynamic = (_properties.limits.maxPerStageDescriptorStorageBuffers * 4);
_properties.limits.maxDescriptorSetSampledImages = (_properties.limits.maxPerStageDescriptorSampledImages * 4);
_properties.limits.maxDescriptorSetStorageImages = (_properties.limits.maxPerStageDescriptorStorageImages * 4);
_properties.limits.maxDescriptorSetInputAttachments = (_properties.limits.maxPerStageDescriptorInputAttachments * 4);
_properties.limits.maxTexelBufferElements = _properties.limits.maxImageDimension2D * _properties.limits.maxImageDimension2D;
_properties.limits.maxUniformBufferRange = (uint32_t)_metalFeatures.maxMTLBufferSize;
@ -1042,9 +1025,8 @@ void MVKPhysicalDevice::initProperties() {
_properties.limits.maxPushConstantsSize = (4 * KIBI);
_properties.limits.minMemoryMapAlignment = _metalFeatures.mtlBufferAlignment;
_properties.limits.minTexelBufferOffsetAlignment = _metalFeatures.mtlBufferAlignment;
_properties.limits.minUniformBufferOffsetAlignment = _metalFeatures.mtlBufferAlignment;
_properties.limits.minStorageBufferOffsetAlignment = _metalFeatures.mtlBufferAlignment;
_properties.limits.minStorageBufferOffsetAlignment = 4;
_properties.limits.bufferImageGranularity = _metalFeatures.mtlBufferAlignment;
_properties.limits.nonCoherentAtomSize = _metalFeatures.mtlBufferAlignment;
@ -1052,18 +1034,57 @@ void MVKPhysicalDevice::initProperties() {
_properties.limits.maxFragmentInputComponents = 60;
if ([_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily3_v1]) {
_properties.limits.minTexelBufferOffsetAlignment = 16;
_properties.limits.optimalBufferCopyOffsetAlignment = 16;
} else {
_properties.limits.minTexelBufferOffsetAlignment = 64;
_properties.limits.optimalBufferCopyOffsetAlignment = 64;
}
if ([_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily5_v1]) {
_properties.limits.maxTessellationGenerationLevel = 64;
_properties.limits.maxTessellationPatchSize = 32;
} else if ([_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily3_v2]) {
_properties.limits.maxTessellationGenerationLevel = 16;
_properties.limits.maxTessellationPatchSize = 32;
} else {
_properties.limits.maxTessellationGenerationLevel = 0;
_properties.limits.maxTessellationPatchSize = 0;
}
#endif
#if MVK_MACOS
_properties.limits.maxFragmentInputComponents = 128;
_properties.limits.minTexelBufferOffsetAlignment = 256;
_properties.limits.optimalBufferCopyOffsetAlignment = 256;
if ([_mtlDevice supportsFeatureSet: MTLFeatureSet_macOS_GPUFamily1_v2]) {
_properties.limits.maxTessellationGenerationLevel = 64;
_properties.limits.maxTessellationPatchSize = 32;
} else {
_properties.limits.maxTessellationGenerationLevel = 0;
_properties.limits.maxTessellationPatchSize = 0;
}
#endif
_properties.limits.maxVertexOutputComponents = _properties.limits.maxFragmentInputComponents;
if (_features.tessellationShader) {
_properties.limits.maxTessellationControlPerVertexInputComponents = _properties.limits.maxVertexOutputComponents;
_properties.limits.maxTessellationControlPerVertexOutputComponents = _properties.limits.maxTessellationControlPerVertexInputComponents;
// Reserve a few for the tessellation levels.
_properties.limits.maxTessellationControlPerPatchOutputComponents = _properties.limits.maxFragmentInputComponents - 8;
_properties.limits.maxTessellationControlTotalOutputComponents = _properties.limits.maxTessellationPatchSize * _properties.limits.maxTessellationControlPerVertexOutputComponents + _properties.limits.maxTessellationControlPerPatchOutputComponents;
_properties.limits.maxTessellationEvaluationInputComponents = _properties.limits.maxTessellationControlPerVertexInputComponents;
_properties.limits.maxTessellationEvaluationOutputComponents = _properties.limits.maxTessellationEvaluationInputComponents;
} else {
_properties.limits.maxTessellationControlPerVertexInputComponents = 0;
_properties.limits.maxTessellationControlPerVertexOutputComponents = 0;
_properties.limits.maxTessellationControlPerPatchOutputComponents = 0;
_properties.limits.maxTessellationControlTotalOutputComponents = 0;
_properties.limits.maxTessellationEvaluationInputComponents = 0;
_properties.limits.maxTessellationEvaluationOutputComponents = 0;
}
_properties.limits.optimalBufferCopyRowPitchAlignment = 1;
_properties.limits.timestampComputeAndGraphics = VK_TRUE;
@ -1074,7 +1095,7 @@ void MVKPhysicalDevice::initProperties() {
_properties.limits.pointSizeGranularity = 1;
_properties.limits.lineWidthRange[0] = 1;
_properties.limits.lineWidthRange[1] = 1;
_properties.limits.pointSizeGranularity = 1;
_properties.limits.lineWidthGranularity = 1;
_properties.limits.standardSampleLocations = VK_TRUE;
_properties.limits.strictLines = VK_FALSE;
@ -1102,6 +1123,11 @@ void MVKPhysicalDevice::initProperties() {
#endif
}
_properties.limits.minTexelOffset = -8;
_properties.limits.maxTexelOffset = 7;
_properties.limits.minTexelGatherOffset = _properties.limits.minTexelOffset;
_properties.limits.maxTexelGatherOffset = _properties.limits.maxTexelOffset;
// Features with no specific limits - default to unlimited int values
_properties.limits.maxMemoryAllocationCount = kMVKUndefinedLargeUInt32;
@ -1115,11 +1141,6 @@ void MVKPhysicalDevice::initProperties() {
_properties.limits.maxDrawIndexedIndexValue = numeric_limits<uint32_t>::max();
_properties.limits.maxDrawIndirectCount = kMVKUndefinedLargeUInt32;
_properties.limits.minTexelOffset = -8;
_properties.limits.maxTexelOffset = 7;
_properties.limits.minTexelGatherOffset = _properties.limits.minTexelOffset;
_properties.limits.maxTexelGatherOffset = _properties.limits.maxTexelOffset;
_properties.limits.maxClipDistances = kMVKUndefinedLargeUInt32;
_properties.limits.maxCullDistances = 0; // unsupported
_properties.limits.maxCombinedClipAndCullDistances = _properties.limits.maxClipDistances +
@ -1135,33 +1156,22 @@ void MVKPhysicalDevice::initProperties() {
_properties.limits.maxSamplerLodBias = 2;
_properties.limits.maxSampleMaskWords = 1;
_properties.limits.discreteQueuePriorities = 2;
_properties.limits.minInterpolationOffset = -0.5;
_properties.limits.maxInterpolationOffset = 0.5;
_properties.limits.subPixelInterpolationOffsetBits = 4;
// Unsupported features - set to zeros generally
_properties.limits.sparseAddressSpaceSize = 0;
_properties.limits.maxTessellationGenerationLevel = 0;
_properties.limits.maxTessellationPatchSize = 0;
_properties.limits.maxTessellationControlPerVertexInputComponents = 0;
_properties.limits.maxTessellationControlPerVertexOutputComponents = 0;
_properties.limits.maxTessellationControlPerPatchOutputComponents = 0;
_properties.limits.maxTessellationControlTotalOutputComponents = 0;
_properties.limits.maxTessellationEvaluationInputComponents = 0;
_properties.limits.maxTessellationEvaluationOutputComponents = 0;
_properties.limits.maxGeometryShaderInvocations = 0;
_properties.limits.maxGeometryInputComponents = 0;
_properties.limits.maxGeometryOutputComponents = 0;
_properties.limits.maxGeometryOutputVertices = 0;
_properties.limits.maxGeometryTotalOutputComponents = 0;
_properties.limits.minInterpolationOffset = 0.0;
_properties.limits.maxInterpolationOffset = 0.0;
_properties.limits.subPixelInterpolationOffsetBits = 0;
}

View File

@ -23,6 +23,7 @@
#include "MVKShaderModule.h"
#include "MVKSync.h"
#include "MVKVector.h"
#include <MoltenVKSPIRVToMSLConverter/SPIRVReflection.h>
#include <MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.h>
#include <unordered_set>
#include <ostream>
@ -36,10 +37,8 @@ class MVKPipelineCache;
#pragma mark -
#pragma mark MVKPipelineLayout
struct MVKShaderAuxBufferBinding {
uint32_t vertex = 0;
uint32_t fragment = 0;
uint32_t compute = 0;
struct MVKShaderImplicitRezBinding {
uint32_t stages[kMVKShaderStageMax];
};
/** Represents a Vulkan pipeline layout. */
@ -68,7 +67,19 @@ public:
const void* pData);
/** Returns the current auxiliary buffer bindings. */
const MVKShaderAuxBufferBinding& getAuxBufferIndex() { return _auxBufferIndex; }
const MVKShaderImplicitRezBinding& getAuxBufferIndex() { return _auxBufferIndex; }
/** Returns the current indirect parameter buffer bindings. */
const MVKShaderImplicitRezBinding& getIndirectParamsIndex() { return _indirectParamsIndex; }
/** Returns the current captured output buffer bindings. */
const MVKShaderImplicitRezBinding& getOutputBufferIndex() { return _outputBufferIndex; }
/** Returns the current captured per-patch output buffer binding for the tess. control shader. */
uint32_t getTessCtlPatchOutputBufferIndex() { return _tessCtlPatchOutputBufferIndex; }
/** Returns the current tessellation level buffer binding for the tess. control shader. */
uint32_t getTessCtlLevelBufferIndex() { return _tessCtlLevelBufferIndex; }
/** Returns the number of textures in this layout. This is used to calculate the size of the auxiliary buffer. */
uint32_t getTextureCount() { return _pushConstantsMTLResourceIndexes.getMaxTextureIndex(); }
@ -81,23 +92,39 @@ protected:
MVKVectorInline<MVKShaderResourceBinding, 8> _dslMTLResourceIndexOffsets;
MVKVectorInline<VkPushConstantRange, 8> _pushConstants;
MVKShaderResourceBinding _pushConstantsMTLResourceIndexes;
MVKShaderAuxBufferBinding _auxBufferIndex;
MVKShaderImplicitRezBinding _auxBufferIndex;
MVKShaderImplicitRezBinding _indirectParamsIndex;
MVKShaderImplicitRezBinding _outputBufferIndex;
uint32_t _tessCtlPatchOutputBufferIndex = 0;
uint32_t _tessCtlLevelBufferIndex = 0;
};
#pragma mark -
#pragma mark MVKPipeline
static const uint32_t kMVKTessCtlInputBufferIndex = 30;
static const uint32_t kMVKTessCtlIndexBufferIndex = 29;
static const uint32_t kMVKTessCtlNumReservedBuffers = 2;
static const uint32_t kMVKTessEvalInputBufferIndex = 30;
static const uint32_t kMVKTessEvalPatchInputBufferIndex = 29;
static const uint32_t kMVKTessEvalLevelBufferIndex = 28;
static const uint32_t kMVKTessEvalNumReservedBuffers = 3;
/** Represents an abstract Vulkan pipeline. */
class MVKPipeline : public MVKBaseDeviceObject {
public:
/** Returns the order of stages in this pipeline. Draws and dispatches must encode this pipeline once per stage. */
virtual void getStages(MVKVector<uint32_t>& stages) = 0;
/** Binds this pipeline to the specified command encoder. */
virtual void encode(MVKCommandEncoder* cmdEncoder) = 0;
virtual void encode(MVKCommandEncoder* cmdEncoder, uint32_t stage = 0) = 0;
/** Returns the current auxiliary buffer bindings. */
const MVKShaderAuxBufferBinding& getAuxBufferIndex() { return _auxBufferIndex; }
const MVKShaderImplicitRezBinding& getAuxBufferIndex() { return _auxBufferIndex; }
/** Returns whether or not full image view swizzling is enabled for this pipeline. */
bool fullImageViewSwizzle() const { return _fullImageViewSwizzle; }
@ -109,7 +136,7 @@ public:
protected:
MVKPipelineCache* _pipelineCache;
MVKShaderAuxBufferBinding _auxBufferIndex;
MVKShaderImplicitRezBinding _auxBufferIndex;
bool _fullImageViewSwizzle;
};
@ -123,12 +150,45 @@ class MVKGraphicsPipeline : public MVKPipeline {
public:
/** Returns the number and order of stages in this pipeline. Draws and dispatches must encode this pipeline once per stage. */
void getStages(MVKVector<uint32_t>& stages) override;
/** Binds this pipeline to the specified command encoder. */
void encode(MVKCommandEncoder* cmdEncoder) override;
void encode(MVKCommandEncoder* cmdEncoder, uint32_t stage = 0) override;
/** Returns whether this pipeline permits dynamic setting of the specifie state. */
bool supportsDynamicState(VkDynamicState state);
/** Returns whether this pipeline has tessellation shaders. */
bool isTessellationPipeline() { return _tessInfo.patchControlPoints > 0; }
/** Returns the number of input tessellation patch control points. */
uint32_t getInputControlPointCount() { return _tessInfo.patchControlPoints; }
/** Returns the number of output tessellation patch control points. */
uint32_t getOutputControlPointCount() { return _outputControlPointCount; }
/** Returns the current indirect parameter buffer bindings. */
const MVKShaderImplicitRezBinding& getIndirectParamsIndex() { return _indirectParamsIndex; }
/** Returns the current captured output buffer bindings. */
const MVKShaderImplicitRezBinding& getOutputBufferIndex() { return _outputBufferIndex; }
/** Returns the current captured per-patch output buffer binding for the tess. control shader. */
uint32_t getTessCtlPatchOutputBufferIndex() { return _tessCtlPatchOutputBufferIndex; }
/** Returns the current tessellation level buffer binding for the tess. control shader. */
uint32_t getTessCtlLevelBufferIndex() { return _tessCtlLevelBufferIndex; }
/** Returns true if the vertex shader needs a buffer to store its output. */
bool needsVertexOutputBuffer() { return _needsVertexOutputBuffer; }
/** Returns true if the tessellation control shader needs a buffer to store its per-vertex output. */
bool needsTessCtlOutputBuffer() { return _needsTessCtlOutputBuffer; }
/** Returns true if the tessellation control shader needs a buffer to store its per-patch output. */
bool needsTessCtlPatchOutputBuffer() { return _needsTessCtlPatchOutputBuffer; }
/** Constructs an instance for the device and parent (which may be NULL). */
MVKGraphicsPipeline(MVKDevice* device,
MVKPipelineCache* pipelineCache,
@ -138,30 +198,70 @@ public:
~MVKGraphicsPipeline() override;
protected:
void initMTLRenderPipelineState(const VkGraphicsPipelineCreateInfo* pCreateInfo);
id<MTLRenderPipelineState> getOrCompilePipeline(MTLRenderPipelineDescriptor* plDesc, id<MTLRenderPipelineState>& plState);
id<MTLComputePipelineState> getOrCompilePipeline(MTLComputePipelineDescriptor* plDesc, id<MTLComputePipelineState>& plState);
void initMTLRenderPipelineState(const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData);
void initMVKShaderConverterContext(SPIRVToMSLConverterContext& _shaderContext,
const VkGraphicsPipelineCreateInfo* pCreateInfo);
MTLRenderPipelineDescriptor* getMTLRenderPipelineDescriptor(const VkGraphicsPipelineCreateInfo* pCreateInfo);
bool isRenderingPoints(const VkGraphicsPipelineCreateInfo* pCreateInfo);
const VkGraphicsPipelineCreateInfo* pCreateInfo,
const SPIRVTessReflectionData& reflectData);
void addVertexInputToShaderConverterContext(SPIRVToMSLConverterContext& shaderContext,
const VkGraphicsPipelineCreateInfo* pCreateInfo);
void addPrevStageOutputToShaderConverterContext(SPIRVToMSLConverterContext& shaderContext,
std::vector<SPIRVShaderOutput>& outputs);
MTLRenderPipelineDescriptor* getMTLRenderPipelineDescriptor(const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData);
MTLRenderPipelineDescriptor* getMTLTessVertexStageDescriptor(const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData, SPIRVToMSLConverterContext& shaderContext);
MTLComputePipelineDescriptor* getMTLTessControlStageDescriptor(const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData, SPIRVToMSLConverterContext& shaderContext);
MTLRenderPipelineDescriptor* getMTLTessRasterStageDescriptor(const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData, SPIRVToMSLConverterContext& shaderContext);
bool addVertexShaderToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConverterContext& shaderContext);
bool addTessCtlShaderToPipeline(MTLComputePipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConverterContext& shaderContext, std::vector<SPIRVShaderOutput>& prevOutput);
bool addTessEvalShaderToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConverterContext& shaderContext, std::vector<SPIRVShaderOutput>& prevOutput);
bool addFragmentShaderToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConverterContext& shaderContext);
bool addVertexInputToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkPipelineVertexInputStateCreateInfo* pVI, const SPIRVToMSLConverterContext& shaderContext);
void addTessellationToPipeline(MTLRenderPipelineDescriptor* plDesc, const SPIRVTessReflectionData& reflectData, const VkPipelineTessellationStateCreateInfo* pTS);
void addFragmentOutputToPipeline(MTLRenderPipelineDescriptor* plDesc, const SPIRVTessReflectionData& reflectData, const VkGraphicsPipelineCreateInfo* pCreateInfo);
bool isRenderingPoints(const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData);
const VkPipelineShaderStageCreateInfo* _pVertexSS = nullptr;
const VkPipelineShaderStageCreateInfo* _pTessCtlSS = nullptr;
const VkPipelineShaderStageCreateInfo* _pTessEvalSS = nullptr;
const VkPipelineShaderStageCreateInfo* _pFragmentSS = nullptr;
VkPipelineTessellationStateCreateInfo _tessInfo;
VkPipelineRasterizationStateCreateInfo _rasterInfo;
VkPipelineDepthStencilStateCreateInfo _depthStencilInfo;
MVKVectorInline<MTLViewport, kMVKCachedViewportCount> _mtlViewports;
MVKVectorInline<MTLScissorRect, kMVKCachedScissorCount> _mtlScissors;
MVKVectorInline<MTLViewport, kMVKCachedViewportCount> _mtlViewports;
MVKVectorInline<MTLScissorRect, kMVKCachedScissorCount> _mtlScissors;
id<MTLRenderPipelineState> _mtlPipelineState;
MTLComputePipelineDescriptor* _mtlTessControlStageDesc = nil;
id<MTLRenderPipelineState> _mtlTessVertexStageState = nil;
id<MTLComputePipelineState> _mtlTessControlStageState = nil;
id<MTLComputePipelineState> _mtlTessControlStageIndex16State = nil;
id<MTLComputePipelineState> _mtlTessControlStageIndex32State = nil;
id<MTLRenderPipelineState> _mtlPipelineState = nil;
MTLCullMode _mtlCullMode;
MTLWinding _mtlFrontWinding;
MTLTriangleFillMode _mtlFillMode;
MTLDepthClipMode _mtlDepthClipMode;
MTLPrimitiveType _mtlPrimitiveType;
MTLPrimitiveType _mtlPrimitiveType;
float _blendConstants[4] = { 0.0, 0.0, 0.0, 1.0 };
uint32_t _outputControlPointCount;
MVKShaderImplicitRezBinding _indirectParamsIndex;
MVKShaderImplicitRezBinding _outputBufferIndex;
uint32_t _tessCtlPatchOutputBufferIndex = 0;
uint32_t _tessCtlLevelBufferIndex = 0;
bool _dynamicStateEnabled[VK_DYNAMIC_STATE_RANGE_SIZE];
bool _hasDepthStencilInfo;
bool _needsVertexAuxBuffer = false;
bool _needsVertexOutputBuffer = false;
bool _needsTessCtlAuxBuffer = false;
bool _needsTessCtlOutputBuffer = false;
bool _needsTessCtlPatchOutputBuffer = false;
bool _needsTessCtlInput = false;
bool _needsTessEvalAuxBuffer = false;
bool _needsFragmentAuxBuffer = false;
};
@ -174,8 +274,11 @@ class MVKComputePipeline : public MVKPipeline {
public:
/** Returns the number and order of stages in this pipeline. Draws and dispatches must encode this pipeline once per stage. */
void getStages(MVKVector<uint32_t>& stages) override;
/** Binds this pipeline to the specified command encoder. */
void encode(MVKCommandEncoder* cmdEncoder) override;
void encode(MVKCommandEncoder* cmdEncoder, uint32_t = 0) override;
/** Constructs an instance for the device and parent (which may be NULL). */
MVKComputePipeline(MVKDevice* device,
@ -291,6 +394,14 @@ public:
*/
id<MTLComputePipelineState> newMTLComputePipelineState(id<MTLFunction> mtlFunction);
/**
* Returns a new (retained) MTLComputePipelineState object compiled from the MTLComputePipelineDescriptor.
*
* If the Metal pipeline compiler does not return within MVKConfiguration::metalCompileTimeout
* nanoseconds, an error will be generated and logged, and nil will be returned.
*/
id<MTLComputePipelineState> newMTLComputePipelineState(MTLComputePipelineDescriptor* plDesc);
#pragma mark Construction

File diff suppressed because it is too large Load Diff

View File

@ -151,6 +151,9 @@ public:
/** Convert the SPIR-V to MSL, using the specified shader conversion context. */
bool convert(SPIRVToMSLConverterContext* pContext);
/** Returns the original SPIR-V code that was specified when this object was created. */
inline const std::vector<uint32_t>& getSPIRV() { return _converter.getSPIRV(); }
/**
* Returns the Metal Shading Language source code as converted by the most recent
* call to convert() function, or set directly using the setMSL() function.

View File

@ -98,9 +98,17 @@ typedef enum {
kMVKCommandUseClearColorImage, /**< vkCmdClearColorImage. */
kMVKCommandUseClearDepthStencilImage, /**< vkCmdClearDepthStencilImage. */
kMVKCommandUseResetQueryPool, /**< vkCmdResetQueryPool. */
kMVKCommandUseDispatch, /**< vkCmdDispatch. */
kMVKCommandUseDispatch, /**< vkCmdDispatch. */
kMVKCommandUseTessellationControl /**< vkCmdDraw* - tessellation control stage. */
} MVKCommandUse;
/** Represents a given stage of a graphics pipeline. */
enum MVKGraphicsStage {
kMVKGraphicsStageVertex = 0, /**< The vertex shader stage. */
kMVKGraphicsStageTessControl, /**< The tessellation control shader stage. */
kMVKGraphicsStageRasterization /**< The rest of the pipeline. */
};
/** Returns the name of the result value. */
const char* mvkVkResultName(VkResult vkResult);

View File

@ -21,6 +21,7 @@
#include "MVKFoundation.h"
#include "MVKOSExtensions.h"
#include "MVKLogging.h"
#include <MoltenVKSPIRVToMSLConverter/SPIRVReflection.h>
#include <unordered_map>
#include <string>
#include <limits>
@ -1059,6 +1060,7 @@ MVK_PUBLIC_SYMBOL MTLPrimitiveType mvkMTLPrimitiveTypeFromVkPrimitiveTopology(Vk
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
return MTLPrimitiveTypeTriangle;
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
@ -1066,7 +1068,6 @@ MVK_PUBLIC_SYMBOL MTLPrimitiveType mvkMTLPrimitiveTypeFromVkPrimitiveTopology(Vk
return MTLPrimitiveTypeTriangleStrip;
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
default:
mvkNotifyErrorWithText(VK_ERROR_FORMAT_NOT_SUPPORTED, "VkPrimitiveTopology value %d is not supported for rendering.", vkTopology);
return MTLPrimitiveTypePoint;
@ -1089,9 +1090,9 @@ MVK_PUBLIC_SYMBOL MTLPrimitiveTopologyClass mvkMTLPrimitiveTopologyClassFromVkPr
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
return MTLPrimitiveTopologyClassTriangle;
case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
default:
mvkNotifyErrorWithText(VK_ERROR_FORMAT_NOT_SUPPORTED, "VkPrimitiveTopology value %d is not supported for render pipelines.", vkTopology);
return MTLPrimitiveTopologyClassUnspecified;
@ -1216,6 +1217,56 @@ MVK_PUBLIC_SYMBOL size_t mvkMTLIndexTypeSizeInBytes(MTLIndexType mtlIdxType) {
}
}
/** Maps a Vulkan shader stage flag bit to the corresponding MoltenVK shader stage enum value. */
MVK_PUBLIC_SYMBOL MVKShaderStage mvkShaderStageFromVkShaderStageFlagBits(VkShaderStageFlagBits vkStage) {
	if (vkStage == VK_SHADER_STAGE_VERTEX_BIT) { return kMVKShaderStageVertex; }
	if (vkStage == VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) { return kMVKShaderStageTessCtl; }
	if (vkStage == VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) { return kMVKShaderStageTessEval; }
	/* FIXME: VK_SHADER_STAGE_GEOMETRY_BIT */
	if (vkStage == VK_SHADER_STAGE_FRAGMENT_BIT) { return kMVKShaderStageFragment; }
	if (vkStage == VK_SHADER_STAGE_COMPUTE_BIT) { return kMVKShaderStageCompute; }

	// Any other stage (including geometry, which Metal lacks) is unsupported.
	mvkNotifyErrorWithText(VK_ERROR_FORMAT_NOT_SUPPORTED, "VkShaderStage %x is not supported.", vkStage);
	return kMVKShaderStageMax;
}
/** Maps an MVKShaderStage enum value back to the corresponding Vulkan shader stage flag bit. */
MVK_PUBLIC_SYMBOL VkShaderStageFlagBits mvkVkShaderStageFlagBitsFromMVKShaderStage(MVKShaderStage mvkStage) {
	// Deliberately no default case: the switch stays exhaustive over MVKShaderStage,
	// so the compiler can warn if a new enum value is added without a mapping here.
	switch (mvkStage) {
		case kMVKShaderStageVertex: return VK_SHADER_STAGE_VERTEX_BIT;
		case kMVKShaderStageTessCtl: return VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
		case kMVKShaderStageTessEval: return VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
		/* FIXME: kMVKShaderStageGeometry */
		case kMVKShaderStageFragment: return VK_SHADER_STAGE_FRAGMENT_BIT;
		case kMVKShaderStageCompute: return VK_SHADER_STAGE_COMPUTE_BIT;
		case kMVKShaderStageMax:
			// kMVKShaderStageMax is a count/sentinel, not a real stage.
			assert(!"This function should never be called with kMVKShaderStageMax!");
			return VK_SHADER_STAGE_ALL;
	}
}
/** Converts a SPIR-V vertex-order execution mode to the Metal winding to use for rendering. */
MVK_PUBLIC_SYMBOL MTLWinding mvkMTLWindingFromSpvExecutionMode(uint32_t spvMode) {
	// The mapping is intentionally inverted, because vertex positions are Y-flipped for Metal.
	if (spvMode == spv::ExecutionModeVertexOrderCw) { return MTLWindingCounterClockwise; }
	if (spvMode == spv::ExecutionModeVertexOrderCcw) { return MTLWindingClockwise; }

	mvkNotifyErrorWithText(VK_ERROR_FORMAT_NOT_SUPPORTED, "spv::ExecutionMode %u is not a winding order mode.\n", spvMode);
	return MTLWindingCounterClockwise;
}
/** Converts a SPIR-V tessellation spacing execution mode to the corresponding Metal partition mode. */
MVK_PUBLIC_SYMBOL MTLTessellationPartitionMode mvkMTLTessellationPartitionModeFromSpvExecutionMode(uint32_t spvMode) {
	if (spvMode == spv::ExecutionModeSpacingEqual) { return MTLTessellationPartitionModeInteger; }
	if (spvMode == spv::ExecutionModeSpacingFractionalEven) { return MTLTessellationPartitionModeFractionalEven; }
	if (spvMode == spv::ExecutionModeSpacingFractionalOdd) { return MTLTessellationPartitionModeFractionalOdd; }

	mvkNotifyErrorWithText(VK_ERROR_FORMAT_NOT_SUPPORTED, "spv::ExecutionMode %u is not a tessellation partition mode.\n", spvMode);
	return MTLTessellationPartitionModePow2;
}
#pragma mark -
#pragma mark Memory options

View File

@ -0,0 +1,208 @@
/*
* SPIRVReflection.cpp
*
* Copyright (c) 2019 Chip Davis for Codeweavers
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "SPIRVReflection.h"
#include "../SPIRV-Cross/spirv_parser.hpp"
#include "../SPIRV-Cross/spirv_reflect.hpp"
namespace mvk {
static const char missingPatchInputErr[] = "Neither tessellation shader specifies a patch input mode (Triangles, Quads, or Isolines).";
static const char missingWindingErr[] = "Neither tessellation shader specifies a winding order mode (VertexOrderCw or VertexOrderCcw).";
static const char missingPartitionErr[] = "Neither tessellation shader specifies a partition mode (SpacingEqual, SpacingFractionalOdd, or SpacingFractionalEven).";
static const char missingOutputVerticesErr[] = "Neither tessellation shader specifies the number of output control points.";
/** Given a tessellation control shader and a tessellation evaluation shader, both in SPIR-V format, returns tessellation reflection data. */
bool getTessReflectionData(const std::vector<uint32_t>& tesc, const std::string& tescEntryName, const std::vector<uint32_t>& tese, const std::string& teseEntryName, SPIRVTessReflectionData& reflectData, std::string& errorLog) {
#ifndef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS
try {
#endif
spirv_cross::CompilerReflection tescReflect(tesc);
spirv_cross::CompilerReflection teseReflect(tese);
if (!tescEntryName.empty()) {
tescReflect.set_entry_point(tescEntryName, spv::ExecutionModelTessellationControl);
}
if (!teseEntryName.empty()) {
teseReflect.set_entry_point(teseEntryName, spv::ExecutionModelTessellationEvaluation);
}
tescReflect.compile();
teseReflect.compile();
const spirv_cross::Bitset& tescModes = tescReflect.get_execution_mode_bitset();
const spirv_cross::Bitset& teseModes = teseReflect.get_execution_mode_bitset();
// Extract the parameters from the shaders.
if (tescModes.get(spv::ExecutionModeTriangles)) {
reflectData.patchKind = spv::ExecutionModeTriangles;
} else if (tescModes.get(spv::ExecutionModeQuads)) {
reflectData.patchKind = spv::ExecutionModeQuads;
} else if (tescModes.get(spv::ExecutionModeIsolines)) {
reflectData.patchKind = spv::ExecutionModeIsolines;
} else if (teseModes.get(spv::ExecutionModeTriangles)) {
reflectData.patchKind = spv::ExecutionModeTriangles;
} else if (teseModes.get(spv::ExecutionModeQuads)) {
reflectData.patchKind = spv::ExecutionModeQuads;
} else if (teseModes.get(spv::ExecutionModeIsolines)) {
reflectData.patchKind = spv::ExecutionModeIsolines;
} else {
errorLog = missingPatchInputErr;
return false;
}
if (tescModes.get(spv::ExecutionModeVertexOrderCw)) {
reflectData.windingOrder = spv::ExecutionModeVertexOrderCw;
} else if (tescModes.get(spv::ExecutionModeVertexOrderCcw)) {
reflectData.windingOrder = spv::ExecutionModeVertexOrderCcw;
} else if (teseModes.get(spv::ExecutionModeVertexOrderCw)) {
reflectData.windingOrder = spv::ExecutionModeVertexOrderCw;
} else if (teseModes.get(spv::ExecutionModeVertexOrderCcw)) {
reflectData.windingOrder = spv::ExecutionModeVertexOrderCcw;
} else {
errorLog = missingWindingErr;
return false;
}
reflectData.pointMode = tescModes.get(spv::ExecutionModePointMode) || teseModes.get(spv::ExecutionModePointMode);
if (tescModes.get(spv::ExecutionModeSpacingEqual)) {
reflectData.partitionMode = spv::ExecutionModeSpacingEqual;
} else if (tescModes.get(spv::ExecutionModeSpacingFractionalEven)) {
reflectData.partitionMode = spv::ExecutionModeSpacingFractionalEven;
} else if (tescModes.get(spv::ExecutionModeSpacingFractionalOdd)) {
reflectData.partitionMode = spv::ExecutionModeSpacingFractionalOdd;
} else if (teseModes.get(spv::ExecutionModeSpacingEqual)) {
reflectData.partitionMode = spv::ExecutionModeSpacingEqual;
} else if (teseModes.get(spv::ExecutionModeSpacingFractionalEven)) {
reflectData.partitionMode = spv::ExecutionModeSpacingFractionalEven;
} else if (teseModes.get(spv::ExecutionModeSpacingFractionalOdd)) {
reflectData.partitionMode = spv::ExecutionModeSpacingFractionalOdd;
} else {
errorLog = missingPartitionErr;
return false;
}
if (tescModes.get(spv::ExecutionModeOutputVertices)) {
reflectData.numControlPoints = tescReflect.get_execution_mode_argument(spv::ExecutionModeOutputVertices);
} else if (teseModes.get(spv::ExecutionModeOutputVertices)) {
reflectData.numControlPoints = teseReflect.get_execution_mode_argument(spv::ExecutionModeOutputVertices);
} else {
errorLog = missingOutputVerticesErr;
return false;
}
return true;
#ifndef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS
} catch (spirv_cross::CompilerError& ex) {
errorLog = ex.what();
return false;
}
#endif
}
/** Given a shader in SPIR-V format, returns output reflection data. */
/**
 * Given a shader in SPIR-V format, collects reflection data for the shader's outputs
 * into the 'outputs' vector (cleared first), sorted by ascending location, with
 * locations synthesized for any outputs that lack a Location decoration.
 *
 * Returns false, with a description in errorLog, if SPIRV-Cross reports a compiler error.
 */
bool getShaderOutputs(const std::vector<uint32_t>& spirv, spv::ExecutionModel model, const std::string& entryName, std::vector<SPIRVShaderOutput>& outputs, std::string& errorLog) {
#ifndef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS
	try {
#endif
		spirv_cross::Parser parser(spirv);
		parser.parse();
		spirv_cross::CompilerReflection reflect(parser.get_parsed_ir());
		if (!entryName.empty()) {
			reflect.set_entry_point(entryName, model);
		}
		reflect.compile();

		outputs.clear();

		// Saturating add: an unassigned location (uint32_t(-1)) stays unassigned.
		auto addSat = [](uint32_t a, uint32_t b) { return a == uint32_t(-1) ? a : a + b; };
		parser.get_parsed_ir().for_each_typed_id<spirv_cross::SPIRVariable>([&reflect, &outputs, model, addSat](uint32_t varID, const spirv_cross::SPIRVariable& var) {
			if (var.storage != spv::StorageClassOutput) { return; }

			const auto* type = &reflect.get_type(reflect.get_type_from_variable(varID).parent_type);
			bool patch = reflect.has_decoration(varID, spv::DecorationPatch);
			auto biType = spv::BuiltInMax;
			if (reflect.has_decoration(varID, spv::DecorationBuiltIn)) {
				biType = (spv::BuiltIn)reflect.get_decoration(varID, spv::DecorationBuiltIn);
			}
			uint32_t loc = -1;
			if (reflect.has_decoration(varID, spv::DecorationLocation)) {
				loc = reflect.get_decoration(varID, spv::DecorationLocation);
			}
			// Tessellation-control outputs without the 'patch' decoration are arrayed
			// per-vertex; peel off one level to reach the per-vertex type.
			if (model == spv::ExecutionModelTessellationControl && !patch)
				type = &reflect.get_type(type->parent_type);

			if (type->basetype == spirv_cross::SPIRType::Struct) {
				// Distinct loop counters (mbrIdx/col/elem) here — the original shadowed
				// the member index with the inner column/element counters.
				for (uint32_t mbrIdx = 0; mbrIdx < type->member_types.size(); mbrIdx++) {
					// Each member may have a location decoration. If not, each member
					// gets an incrementing location.
					uint32_t memberLoc = addSat(loc, mbrIdx);
					if (reflect.has_member_decoration(type->self, mbrIdx, spv::DecorationLocation)) {
						memberLoc = reflect.get_member_decoration(type->self, mbrIdx, spv::DecorationLocation);
					}
					patch = reflect.has_member_decoration(type->self, mbrIdx, spv::DecorationPatch);
					// NOTE(review): biType is only overwritten when this member carries a
					// BuiltIn decoration, so an undecorated member inherits the previous
					// member's builtin kind — confirm this carry-over is intentional.
					if (reflect.has_member_decoration(type->self, mbrIdx, spv::DecorationBuiltIn)) {
						biType = (spv::BuiltIn)reflect.get_member_decoration(type->self, mbrIdx, spv::DecorationBuiltIn);
					}
					const spirv_cross::SPIRType& memberType = reflect.get_type(type->member_types[mbrIdx]);
					if (memberType.columns > 1) {
						// A matrix member occupies one location per column.
						for (uint32_t col = 0; col < memberType.columns; col++) {
							outputs.push_back({memberType.basetype, memberType.vecsize, addSat(memberLoc, col), patch, biType});
						}
					} else if (!memberType.array.empty()) {
						// An array member occupies one location per element.
						for (uint32_t elem = 0; elem < memberType.array[0]; elem++) {
							outputs.push_back({memberType.basetype, memberType.vecsize, addSat(memberLoc, elem), patch, biType});
						}
					} else {
						outputs.push_back({memberType.basetype, memberType.vecsize, memberLoc, patch, biType});
					}
				}
			} else if (type->columns > 1) {
				// A matrix output occupies one location per column.
				for (uint32_t col = 0; col < type->columns; col++) {
					outputs.push_back({type->basetype, type->vecsize, addSat(loc, col), patch, biType});
				}
			} else if (!type->array.empty()) {
				// An array output occupies one location per element.
				for (uint32_t elem = 0; elem < type->array[0]; elem++) {
					outputs.push_back({type->basetype, type->vecsize, addSat(loc, elem), patch, biType});
				}
			} else {
				outputs.push_back({type->basetype, type->vecsize, loc, patch, biType});
			}
		});

		// Sort outputs by ascending location.
		std::stable_sort(outputs.begin(), outputs.end(), [](const SPIRVShaderOutput& a, const SPIRVShaderOutput& b) {
			return a.location < b.location;
		});

		// Assign locations to outputs that don't have one: each follows its predecessor.
		uint32_t loc = -1;
		for (SPIRVShaderOutput& out : outputs) {
			if (out.location == uint32_t(-1)) { out.location = loc + 1; }
			loc = out.location;
		}
		return true;
#ifndef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS
	} catch (spirv_cross::CompilerError& ex) {
		errorLog = ex.what();
		return false;
	}
#endif
}
}

View File

@ -0,0 +1,81 @@
/*
* SPIRVReflection.h
*
* Copyright (c) 2019 Chip Davis for Codeweavers
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __SPIRVReflection_h_
#define __SPIRVReflection_h_ 1
#include "../SPIRV-Cross/spirv.hpp"
#include "../SPIRV-Cross/spirv_common.hpp"
#include <string>
#include <vector>
namespace mvk {
#pragma mark -
#pragma mark SPIRVTessReflectionData
/** Reflection data for a pair of tessellation shaders. This contains the information needed to construct a tessellation pipeline. */
struct SPIRVTessReflectionData {
	/** The partition (spacing) mode, one of SpacingEqual, SpacingFractionalEven, or SpacingFractionalOdd; ExecutionModeMax if not yet populated. */
	spv::ExecutionMode partitionMode = spv::ExecutionModeMax;

	/** The winding order of generated triangles, one of VertexOrderCw or VertexOrderCcw; ExecutionModeMax if not yet populated. */
	spv::ExecutionMode windingOrder = spv::ExecutionModeMax;

	/** Whether or not tessellation should produce points instead of lines or triangles. */
	bool pointMode = false;

	/** The kind of patch expected as input, one of Triangles, Quads, or Isolines; ExecutionModeMax if not yet populated. */
	spv::ExecutionMode patchKind = spv::ExecutionModeMax;

	/** The number of control points output by the tessellation control shader. */
	uint32_t numControlPoints = 0;
};
#pragma mark -
#pragma mark SPIRVShaderOutputData
/** Reflection data on a single output of a shader. This contains the information needed to construct a stage-input descriptor for the next stage of a pipeline. */
struct SPIRVShaderOutput {
	/** The base data type of the output. */
	spirv_cross::SPIRType::BaseType baseType;

	/** The vector size, if a vector. */
	uint32_t vecWidth;

	/** The location number of the output. */
	uint32_t location;

	/** Whether this is a per-patch or per-vertex output. Only meaningful for tessellation control shaders. */
	bool perPatch;

	/** If this is a builtin, the kind of builtin this is; BuiltInMax if it is not a builtin. */
	spv::BuiltIn builtin;
};
#pragma mark -
#pragma mark Functions
/** Given a tessellation control shader and a tessellation evaluation shader, both in SPIR-V format, returns tessellation reflection data. */
bool getTessReflectionData(const std::vector<uint32_t>& tesc, const std::string& tescEntryName, const std::vector<uint32_t>& tese, const std::string& teseEntryName, SPIRVTessReflectionData& reflectData, std::string& errorLog);
/** Given a shader in SPIR-V format, returns output reflection data. */
bool getShaderOutputs(const std::vector<uint32_t>& spirv, spv::ExecutionModel model, const std::string& entryName, std::vector<SPIRVShaderOutput>& outputs, std::string& errorLog);
}
#endif

View File

@ -48,9 +48,18 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverterOptions::matches(const SPIRVToMSLConve
if (mslVersion != other.mslVersion) { return false; }
if (texelBufferTextureWidth != other.texelBufferTextureWidth) { return false; }
if (auxBufferIndex != other.auxBufferIndex) { return false; }
if (indirectParamsBufferIndex != other.indirectParamsBufferIndex) { return false; }
if (outputBufferIndex != other.outputBufferIndex) { return false; }
if (patchOutputBufferIndex != other.patchOutputBufferIndex) { return false; }
if (tessLevelBufferIndex != other.tessLevelBufferIndex) { return false; }
if (inputThreadgroupMemIndex != other.inputThreadgroupMemIndex) { return false; }
if (!!shouldFlipVertexY != !!other.shouldFlipVertexY) { return false; }
if (!!isRenderingPoints != !!other.isRenderingPoints) { return false; }
if (!!shouldSwizzleTextureSamples != !!other.shouldSwizzleTextureSamples) { return false; }
if (!!shouldCaptureOutput != !!other.shouldCaptureOutput) { return false; }
if (!!tessDomainOriginInLowerLeft != !!other.tessDomainOriginInLowerLeft) { return false; }
if (tessPatchKind != other.tessPatchKind) { return false; }
if (numTessControlPoints != other.numTessControlPoints) { return false; }
if (entryPointName != other.entryPointName) { return false; }
return true;
}
@ -80,6 +89,7 @@ MVK_PUBLIC_SYMBOL bool MSLVertexAttribute::matches(const MSLVertexAttribute& oth
if (mslOffset != other.mslOffset) { return false; }
if (mslStride != other.mslStride) { return false; }
if (format != other.format) { return false; }
if (builtin != other.builtin) { return false; }
if (!!isPerInstance != !!other.isPerInstance) { return false; }
return true;
}
@ -140,8 +150,13 @@ MVK_PUBLIC_SYMBOL void SPIRVToMSLConverterContext::alignWith(const SPIRVToMSLCon
options.isRasterizationDisabled = srcContext.options.isRasterizationDisabled;
options.needsAuxBuffer = srcContext.options.needsAuxBuffer;
options.needsOutputBuffer = srcContext.options.needsOutputBuffer;
options.needsPatchOutputBuffer = srcContext.options.needsPatchOutputBuffer;
options.needsInputThreadgroupMem = srcContext.options.needsInputThreadgroupMem;
if (options.entryPointStage == spv::ExecutionModelVertex) {
if (options.entryPointStage == spv::ExecutionModelVertex ||
options.entryPointStage == spv::ExecutionModelTessellationControl ||
options.entryPointStage == spv::ExecutionModelTessellationEvaluation) {
for (auto& va : vertexAttributes) {
va.isUsedByShader = false;
for (auto& srcVA : srcContext.vertexAttributes) {
@ -207,6 +222,7 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverter::convert(SPIRVToMSLConverterContext&
va.format = spirv_cross::MSL_VERTEX_FORMAT_UINT16;
break;
}
va.builtin = ctxVA.builtin;
va.used_by_shader = ctxVA.isUsedByShader;
vtxAttrs.push_back(va);
}
@ -236,6 +252,17 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverter::convert(SPIRVToMSLConverterContext&
pMSLCompiler->set_entry_point(context.options.entryPointName, context.options.entryPointStage);
}
// Set up tessellation parameters if needed.
if (context.options.entryPointStage == spv::ExecutionModelTessellationControl ||
context.options.entryPointStage == spv::ExecutionModelTessellationEvaluation) {
if (context.options.tessPatchKind != spv::ExecutionModeMax) {
pMSLCompiler->set_execution_mode(context.options.tessPatchKind);
}
if (context.options.numTessControlPoints != 0) {
pMSLCompiler->set_execution_mode(spv::ExecutionModeOutputVertices, context.options.numTessControlPoints);
}
}
// Establish the MSL options for the compiler
// This needs to be done in two steps...for CompilerMSL and its superclass.
auto mslOpts = pMSLCompiler->get_msl_options();
@ -250,9 +277,16 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverter::convert(SPIRVToMSLConverterContext&
mslOpts.msl_version = context.options.mslVersion;
mslOpts.texel_buffer_texture_width = context.options.texelBufferTextureWidth;
mslOpts.aux_buffer_index = context.options.auxBufferIndex;
mslOpts.indirect_params_buffer_index = context.options.indirectParamsBufferIndex;
mslOpts.shader_output_buffer_index = context.options.outputBufferIndex;
mslOpts.shader_patch_output_buffer_index = context.options.patchOutputBufferIndex;
mslOpts.shader_tess_factor_buffer_index = context.options.tessLevelBufferIndex;
mslOpts.shader_input_wg_index = context.options.inputThreadgroupMemIndex;
mslOpts.enable_point_size_builtin = context.options.isRenderingPoints;
mslOpts.disable_rasterization = context.options.isRasterizationDisabled;
mslOpts.swizzle_texture_samples = context.options.shouldSwizzleTextureSamples;
mslOpts.capture_output_to_buffer = context.options.shouldCaptureOutput;
mslOpts.tess_domain_origin_lower_left = context.options.tessDomainOriginInLowerLeft;
mslOpts.pad_fragment_output_components = true;
pMSLCompiler->set_msl_options(mslOpts);
@ -280,6 +314,9 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverter::convert(SPIRVToMSLConverterContext&
populateEntryPoint(_entryPoint, pMSLCompiler, context.options);
context.options.isRasterizationDisabled = pMSLCompiler && pMSLCompiler->get_is_rasterization_disabled();
context.options.needsAuxBuffer = pMSLCompiler && pMSLCompiler->needs_aux_buffer();
context.options.needsOutputBuffer = pMSLCompiler && pMSLCompiler->needs_output_buffer();
context.options.needsPatchOutputBuffer = pMSLCompiler && pMSLCompiler->needs_patch_output_buffer();
context.options.needsInputThreadgroupMem = pMSLCompiler && pMSLCompiler->needs_input_threadgroup_mem();
delete pMSLCompiler;
// Copy whether the vertex attributes and resource bindings are used by the shader

View File

@ -34,16 +34,28 @@ namespace mvk {
typedef struct SPIRVToMSLConverterOptions {
std::string entryPointName;
spv::ExecutionModel entryPointStage = spv::ExecutionModelMax;
spv::ExecutionMode tessPatchKind = spv::ExecutionModeMax;
uint32_t mslVersion = makeMSLVersion(2);
uint32_t texelBufferTextureWidth = 4096;
uint32_t auxBufferIndex = 0;
uint32_t indirectParamsBufferIndex = 0;
uint32_t outputBufferIndex = 0;
uint32_t patchOutputBufferIndex = 0;
uint32_t tessLevelBufferIndex = 0;
uint32_t inputThreadgroupMemIndex = 0;
uint32_t numTessControlPoints = 0;
bool shouldFlipVertexY = true;
bool isRenderingPoints = false;
bool shouldSwizzleTextureSamples = false;
bool shouldCaptureOutput = false;
bool tessDomainOriginInLowerLeft = false;
bool isRasterizationDisabled = false;
bool needsAuxBuffer = false;
bool needsOutputBuffer = false;
bool needsPatchOutputBuffer = false;
bool needsInputThreadgroupMem = false;
/**
* Returns whether the specified options match this one.
@ -93,6 +105,7 @@ namespace mvk {
uint32_t mslOffset = 0;
uint32_t mslStride = 0;
MSLVertexFormat format = MSLVertexFormat::Other;
spv::BuiltIn builtin = spv::BuiltInMax;
bool isPerInstance = false;
bool isUsedByShader = false;

View File

@ -7,6 +7,10 @@
objects = {
/* Begin PBXBuildFile section */
450A4F5F220CB180007203D7 /* SPIRVReflection.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 450A4F5D220CB180007203D7 /* SPIRVReflection.cpp */; };
450A4F60220CB180007203D7 /* SPIRVReflection.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 450A4F5D220CB180007203D7 /* SPIRVReflection.cpp */; };
450A4F61220CB180007203D7 /* SPIRVReflection.h in Headers */ = {isa = PBXBuildFile; fileRef = 450A4F5E220CB180007203D7 /* SPIRVReflection.h */; };
450A4F62220CB180007203D7 /* SPIRVReflection.h in Headers */ = {isa = PBXBuildFile; fileRef = 450A4F5E220CB180007203D7 /* SPIRVReflection.h */; };
A909408A1C58013E0094110D /* SPIRVToMSLConverter.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A9093F5A1C58013E0094110D /* SPIRVToMSLConverter.cpp */; };
A909408B1C58013E0094110D /* SPIRVToMSLConverter.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A9093F5A1C58013E0094110D /* SPIRVToMSLConverter.cpp */; };
A909408C1C58013E0094110D /* SPIRVToMSLConverter.h in Headers */ = {isa = PBXBuildFile; fileRef = A9093F5B1C58013E0094110D /* SPIRVToMSLConverter.h */; };
@ -80,6 +84,8 @@
/* End PBXContainerItemProxy section */
/* Begin PBXFileReference section */
450A4F5D220CB180007203D7 /* SPIRVReflection.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = SPIRVReflection.cpp; sourceTree = "<group>"; };
450A4F5E220CB180007203D7 /* SPIRVReflection.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SPIRVReflection.h; sourceTree = "<group>"; };
A9093F5A1C58013E0094110D /* SPIRVToMSLConverter.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = SPIRVToMSLConverter.cpp; sourceTree = "<group>"; };
A9093F5B1C58013E0094110D /* SPIRVToMSLConverter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SPIRVToMSLConverter.h; sourceTree = "<group>"; };
A90940A31C5808BB0094110D /* GLSLToSPIRVConverter.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = GLSLToSPIRVConverter.cpp; sourceTree = "<group>"; };
@ -166,6 +172,8 @@
A9093C561C58013D0094110D /* MoltenVKSPIRVToMSLConverter */ = {
isa = PBXGroup;
children = (
450A4F5D220CB180007203D7 /* SPIRVReflection.cpp */,
450A4F5E220CB180007203D7 /* SPIRVReflection.h */,
A925B70B1C7754B2006E7ECD /* FileSupport.h */,
A925B70A1C7754B2006E7ECD /* FileSupport.mm */,
A928C9171D0488DC00071B88 /* SPIRVConversion.h */,
@ -284,6 +292,7 @@
isa = PBXHeadersBuildPhase;
buildActionMask = 2147483647;
files = (
450A4F61220CB180007203D7 /* SPIRVReflection.h in Headers */,
A98149681FB6A98A005F00B4 /* MVKStrings.h in Headers */,
A9C70F61221B321700FBA31A /* SPIRVSupport.h in Headers */,
A928C9191D0488DC00071B88 /* SPIRVConversion.h in Headers */,
@ -297,6 +306,7 @@
isa = PBXHeadersBuildPhase;
buildActionMask = 2147483647;
files = (
450A4F62220CB180007203D7 /* SPIRVReflection.h in Headers */,
A98149691FB6A98A005F00B4 /* MVKStrings.h in Headers */,
A9C70F62221B321700FBA31A /* SPIRVSupport.h in Headers */,
A928C91A1D0488DC00071B88 /* SPIRVConversion.h in Headers */,
@ -641,6 +651,7 @@
A909408A1C58013E0094110D /* SPIRVToMSLConverter.cpp in Sources */,
A9C70F66221B321700FBA31A /* SPIRVSupport.cpp in Sources */,
A928C91B1D0488DC00071B88 /* SPIRVConversion.mm in Sources */,
450A4F5F220CB180007203D7 /* SPIRVReflection.cpp in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@ -652,6 +663,7 @@
A909408B1C58013E0094110D /* SPIRVToMSLConverter.cpp in Sources */,
A9C70F67221B321700FBA31A /* SPIRVSupport.cpp in Sources */,
A928C91C1D0488DC00071B88 /* SPIRVConversion.mm in Sources */,
450A4F60220CB180007203D7 /* SPIRVReflection.cpp in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};