Ensure shaders using PhysicalStorageBufferAddresses encode the associated MTLBuffer.
- MVKDevice tracks VkBuffers marked with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT.
- Add SPIRVToMSLConversionResultInfo::usesPhysicalStorageBufferAddressesCapability to detect shaders that use the PhysicalStorageBufferAddresses capability, and track such shader stages within the pipeline.
- MVKResourcesCommandEncoderState encodes usage of VkBuffers marked with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT when the pipeline uses the PhysicalStorageBufferAddresses capability.
- Rename MVKResourcesCommandEncoderState::encodeArgumentBufferResourceUsage() to encodeResourceUsage().
- MVKDevice: move some functions to public scope and remove friend classes.
- MVKDeviceMemory: ensure _vkMemAllocFlags is always initialized (unrelated).
- Rename MVKFoundation template method contains() to mvkContains() (unrelated).
parent f99ea669ac
commit 13e8103651
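For background: the buffers this commit must track are created by the Vulkan client with the shader-device-address usage flag, and shaders then reach them only through a raw 64-bit address, so no descriptor binding for them is visible to MoltenVK at encoding time. A minimal client-side sketch of that pattern (illustrative only, not part of this commit):

#include <vulkan/vulkan.h>

// Illustrative sketch: query the raw GPU address of a buffer that was created
// with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT and whose memory was
// allocated with VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT.
VkDeviceAddress getShaderDeviceAddress(VkDevice device, VkBuffer buffer) {
    VkBufferDeviceAddressInfo addrInfo = {};
    addrInfo.sType = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO;
    addrInfo.buffer = buffer;
    // The address is typically passed to a shader via a push constant and
    // dereferenced under the PhysicalStorageBufferAddresses capability.
    return vkGetBufferDeviceAddress(device, &addrInfo);
}

Because only the client knows which addresses a shader will dereference, the Metal back end must conservatively make all such buffers resident, which is what this commit implements.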
@@ -22,6 +22,7 @@ Released TBD
 - `VK_KHR_map_memory2`
 - Support BC compression on iOS/tvOS where available (iOS/tvOS 16.4 and above and supported by the GPU).
 - Fix memory leak when waiting on timeline semaphores.
+- Ensure shaders that use `PhysicalStorageBufferAddresses` encode the use of the associated `MTLBuffer`.
 - Add `MVK_ENABLE_EXPLICIT_LOD_WORKAROUND` environment variable to selectively
   disable recent fixes to handling LOD for arrayed depth images in shaders,
   on Apple Silicon, when those fixes cause regression in rendering behavior.

@@ -355,11 +355,11 @@ public:
                          MVKArrayRef<uint32_t> dynamicOffsets,
                          uint32_t& dynamicOffsetIndex);

-    /** Encodes the Metal resource to the Metal command encoder. */
-    virtual void encodeArgumentBufferResourceUsage(MVKShaderStage stage,
+    /** Encodes the indirect use of the Metal resource to the Metal command encoder. */
+    virtual void encodeResourceUsage(MVKShaderStage stage,
                                      id<MTLResource> mtlResource,
                                      MTLResourceUsage mtlUsage,
                                      MTLRenderStages mtlStages) = 0;

     void markDirty() override;

@@ -548,10 +548,10 @@ public:
                       std::function<void(MVKCommandEncoder*, MVKMTLTextureBinding&)> bindTexture,
                       std::function<void(MVKCommandEncoder*, MVKMTLSamplerStateBinding&)> bindSampler);

-    void encodeArgumentBufferResourceUsage(MVKShaderStage stage,
+    void encodeResourceUsage(MVKShaderStage stage,
                              id<MTLResource> mtlResource,
                              MTLResourceUsage mtlUsage,
                              MTLRenderStages mtlStages) override;

     /** Offset all buffers for vertex attribute bindings with zero divisors by the given number of strides. */
     void offsetZeroDivisorVertexBuffers(MVKGraphicsStage stage, MVKGraphicsPipeline* pipeline, uint32_t firstInstance);

@@ -609,10 +609,10 @@ public:
     /** Sets the current dynamic offset buffer state. */
     void bindDynamicOffsetBuffer(const MVKShaderImplicitRezBinding& binding, bool needDynamicOffsetBuffer);

-    void encodeArgumentBufferResourceUsage(MVKShaderStage stage,
+    void encodeResourceUsage(MVKShaderStage stage,
                              id<MTLResource> mtlResource,
                              MTLResourceUsage mtlUsage,
                              MTLRenderStages mtlStages) override;

     /**
      * Marks the buffer binding using the index as having been overridden,

@@ -693,6 +693,11 @@ void MVKGraphicsResourcesCommandEncoderState::encodeBindings(MVKShaderStage stag

     encodeMetalArgumentBuffer(stage);

+    MVKPipeline* pipeline = getPipeline();
+    if (pipeline && pipeline->usesPhysicalStorageBufferAddressesCapability(stage)) {
+        getDevice()->encodeGPUAddressableBuffers(this, stage);
+    }
+
     auto& shaderStage = _shaderStageResourceBindings[stage];

     if (shaderStage.swizzleBufferBinding.isDirty) {

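The encodeGPUAddressableBuffers() call added above exists because of a Metal residency rule: a resource that a shader reaches only indirectly, through an argument buffer or a raw gpuAddress, must be declared to the command encoder or Metal may never make it resident. A minimal sketch of the underlying Metal call (assumed context, not commit code):

#import <Metal/Metal.h>

// Sketch: declaring indirect use of a buffer so Metal makes it resident.
// The encoder and buffer are assumed to come from the surrounding state.
static void declareIndirectUse(id<MTLRenderCommandEncoder> enc, id<MTLBuffer> buf) {
    [enc useResource: buf
               usage: MTLResourceUsageRead | MTLResourceUsageWrite
              stages: MTLRenderStageVertex | MTLRenderStageFragment];
}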
@@ -963,10 +968,10 @@ void MVKGraphicsResourcesCommandEncoderState::bindMetalArgumentBuffer(MVKShaderS
     bindBuffer(stage, buffBind);
 }

-void MVKGraphicsResourcesCommandEncoderState::encodeArgumentBufferResourceUsage(MVKShaderStage stage,
+void MVKGraphicsResourcesCommandEncoderState::encodeResourceUsage(MVKShaderStage stage,
                                   id<MTLResource> mtlResource,
                                   MTLResourceUsage mtlUsage,
                                   MTLRenderStages mtlStages) {
     if (mtlResource && mtlStages) {
         if (stage == kMVKShaderStageTessCtl) {
             auto* mtlCompEnc = _cmdEncoder->getMTLComputeEncoder(kMVKCommandUseTessellationVertexTessCtl);

@@ -1039,8 +1044,10 @@ void MVKComputeResourcesCommandEncoderState::encodeImpl(uint32_t) {

     encodeMetalArgumentBuffer(kMVKShaderStageCompute);

     MVKPipeline* pipeline = getPipeline();
-    bool fullImageViewSwizzle = pipeline ? pipeline->fullImageViewSwizzle() : false;
+    if (pipeline && pipeline->usesPhysicalStorageBufferAddressesCapability(kMVKShaderStageCompute)) {
+        getDevice()->encodeGPUAddressableBuffers(this, kMVKShaderStageCompute);
+    }

     if (_resourceBindings.swizzleBufferBinding.isDirty) {
         for (auto& b : _resourceBindings.textureBindings) {

@@ -1053,6 +1060,7 @@ void MVKComputeResourcesCommandEncoderState::encodeImpl(uint32_t) {
                            _resourceBindings.swizzleBufferBinding.index);

     } else {
+        bool fullImageViewSwizzle = pipeline ? pipeline->fullImageViewSwizzle() : false;
         assertMissingSwizzles(_resourceBindings.needsSwizzle && !fullImageViewSwizzle, "compute", _resourceBindings.textureBindings.contents());
     }

@@ -1116,10 +1124,10 @@ void MVKComputeResourcesCommandEncoderState::bindMetalArgumentBuffer(MVKShaderSt
     bindBuffer(buffBind);
 }

-void MVKComputeResourcesCommandEncoderState::encodeArgumentBufferResourceUsage(MVKShaderStage stage,
+void MVKComputeResourcesCommandEncoderState::encodeResourceUsage(MVKShaderStage stage,
                                  id<MTLResource> mtlResource,
                                  MTLResourceUsage mtlUsage,
                                  MTLRenderStages mtlStages) {
     if (mtlResource) {
         auto* mtlCompEnc = _cmdEncoder->getMTLComputeEncoder(kMVKCommandUseDispatch);
         [mtlCompEnc useResource: mtlResource usage: mtlUsage];

@@ -786,10 +786,8 @@ void MVKBufferDescriptor::encodeToMetalArgumentBuffer(MVKResourcesCommandEncoder
                        atIndex: argIdx];
     }
     if (encodeUsage) {
-        rezEncState->encodeArgumentBufferResourceUsage(stage,
-                                                       _mvkBuffer ? _mvkBuffer->getMTLBuffer() : nil,
-                                                       getMTLResourceUsage(),
-                                                       mvkDSLBind->getMTLRenderStages());
+        id<MTLBuffer> mtlBuffer = _mvkBuffer ? _mvkBuffer->getMTLBuffer() : nil;
+        rezEncState->encodeResourceUsage(stage, mtlBuffer, getMTLResourceUsage(), mvkDSLBind->getMTLRenderStages());
     }
 }

@@ -876,10 +874,8 @@ void MVKInlineUniformBlockDescriptor::encodeToMetalArgumentBuffer(MVKResourcesCo
                        atIndex: argIdx];
     }
     if (encodeUsage) {
-        rezEncState->encodeArgumentBufferResourceUsage(stage,
-                                                       _mvkMTLBufferAllocation ? _mvkMTLBufferAllocation->_mtlBuffer : nil,
-                                                       getMTLResourceUsage(),
-                                                       mvkDSLBind->getMTLRenderStages());
+        id<MTLBuffer> mtlBuffer = _mvkMTLBufferAllocation ? _mvkMTLBufferAllocation->_mtlBuffer : nil;
+        rezEncState->encodeResourceUsage(stage, mtlBuffer, getMTLResourceUsage(), mvkDSLBind->getMTLRenderStages());
     }
 }

@@ -994,7 +990,7 @@ void MVKImageDescriptor::encodeToMetalArgumentBuffer(MVKResourcesCommandEncoderS
         [mtlArgEncoder setTexture: mtlTexture atIndex: argIdx];
     }
     if (encodeUsage) {
-        rezEncState->encodeArgumentBufferResourceUsage(stage, mtlTexture, getMTLResourceUsage(), mvkDSLBind->getMTLRenderStages());
+        rezEncState->encodeResourceUsage(stage, mtlTexture, getMTLResourceUsage(), mvkDSLBind->getMTLRenderStages());
     }
     if (descType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
         id<MTLTexture> mtlTex = mtlTexture.parentTexture ? mtlTexture.parentTexture : mtlTexture;

@@ -1005,7 +1001,7 @@ void MVKImageDescriptor::encodeToMetalArgumentBuffer(MVKResourcesCommandEncoderS
             [mtlArgEncoder setBuffer: mtlBuff offset: mtlTex.bufferOffset atIndex: argIdx];
         }
         if (encodeUsage) {
-            rezEncState->encodeArgumentBufferResourceUsage(stage, mtlBuff, getMTLResourceUsage(), mvkDSLBind->getMTLRenderStages());
+            rezEncState->encodeResourceUsage(stage, mtlBuff, getMTLResourceUsage(), mvkDSLBind->getMTLRenderStages());
         }
     }
 }

@@ -1294,7 +1290,7 @@ void MVKTexelBufferDescriptor::encodeToMetalArgumentBuffer(MVKResourcesCommandEn
         [mtlArgEncoder setTexture: mtlTexture atIndex: argIdx];
     }
     if (encodeUsage) {
-        rezEncState->encodeArgumentBufferResourceUsage(stage, mtlTexture, getMTLResourceUsage(), mvkDSLBind->getMTLRenderStages());
+        rezEncState->encodeResourceUsage(stage, mtlTexture, getMTLResourceUsage(), mvkDSLBind->getMTLRenderStages());
     }

     if (descType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {

@@ -1305,7 +1301,7 @@ void MVKTexelBufferDescriptor::encodeToMetalArgumentBuffer(MVKResourcesCommandEn
             [mtlArgEncoder setBuffer: mtlBuff offset: mtlTexture.bufferOffset atIndex: argIdx];
         }
         if (encodeUsage) {
-            rezEncState->encodeArgumentBufferResourceUsage(stage, mtlBuff, getMTLResourceUsage(), mvkDSLBind->getMTLRenderStages());
+            rezEncState->encodeResourceUsage(stage, mtlBuff, getMTLResourceUsage(), mvkDSLBind->getMTLRenderStages());
         }
     }
 }

@@ -63,6 +63,7 @@ class MVKSamplerYcbcrConversion;
 class MVKDescriptorSetLayout;
 class MVKDescriptorPool;
 class MVKDescriptorUpdateTemplate;
+class MVKResourcesCommandEncoderState;
 class MVKFramebuffer;
 class MVKRenderPass;
 class MVKCommandPool;

@@ -660,6 +661,22 @@ public:

 #pragma mark Operations

+    /** Tell the GPU to be ready to use any of the GPU-addressable buffers. */
+    void encodeGPUAddressableBuffers(MVKResourcesCommandEncoderState* rezEncState,
+                                     MVKShaderStage stage);
+
+    /** Adds the specified host semaphore to be woken upon device loss. */
+    void addSemaphore(MVKSemaphoreImpl* sem4);
+
+    /** Removes the specified host semaphore. */
+    void removeSemaphore(MVKSemaphoreImpl* sem4);
+
+    /** Adds the specified timeline semaphore to be woken at the specified value upon device loss. */
+    void addTimelineSemaphore(MVKTimelineSemaphore* sem4, uint64_t value);
+
+    /** Removes the specified timeline semaphore. */
+    void removeTimelineSemaphore(MVKTimelineSemaphore* sem4, uint64_t value);
+
     /** Applies the specified global memory barrier to all resource issued by this device. */
     void applyMemoryBarrier(VkPipelineStageFlags srcStageMask,
                             VkPipelineStageFlags dstStageMask,

@@ -855,19 +872,11 @@ public:
     }

 protected:
-    friend class MVKSemaphoreEmulated;
-    friend class MVKTimelineSemaphoreMTLEvent;
-    friend class MVKTimelineSemaphoreEmulated;
-    friend class MVKFence;
-    friend class MVKEventEmulated;
-
     void propagateDebugName() override {}
-    MVKResource* addResource(MVKResource* rez);
-    MVKResource* removeResource(MVKResource* rez);
-    void addSemaphore(MVKSemaphoreImpl* sem4);
-    void removeSemaphore(MVKSemaphoreImpl* sem4);
-    void addTimelineSemaphore(MVKTimelineSemaphore* sem4, uint64_t value);
-    void removeTimelineSemaphore(MVKTimelineSemaphore* sem4, uint64_t value);
+    MVKBuffer* addBuffer(MVKBuffer* mvkBuff);
+    MVKBuffer* removeBuffer(MVKBuffer* mvkBuff);
+    MVKImage* addImage(MVKImage* mvkImg);
+    MVKImage* removeImage(MVKImage* mvkImg);
     void initPerformanceTracking();
     void initPhysicalDevice(MVKPhysicalDevice* physicalDevice, const VkDeviceCreateInfo* pCreateInfo);
     void initQueues(const VkDeviceCreateInfo* pCreateInfo);

@@ -887,6 +896,7 @@ protected:
     MVKCommandResourceFactory* _commandResourceFactory = nullptr;
     MVKSmallVector<MVKSmallVector<MVKQueue*, kMVKQueueCountPerQueueFamily>, kMVKQueueFamilyCount> _queuesByQueueFamilyIndex;
     MVKSmallVector<MVKResource*, 256> _resources;
+    MVKSmallVector<MVKBuffer*, 8> _gpuAddressableBuffers;
     MVKSmallVector<MVKPrivateDataSlot*> _privateDataSlots;
     MVKSmallVector<bool> _privateDataSlotsAvailability;
     MVKSmallVector<MVKSemaphoreImpl*> _awaitingSemaphores;

@@ -3545,15 +3545,14 @@ uint32_t MVKDevice::getVulkanMemoryTypeIndex(MTLStorageMode mtlStorageMode) {

 MVKBuffer* MVKDevice::createBuffer(const VkBufferCreateInfo* pCreateInfo,
                                    const VkAllocationCallbacks* pAllocator) {
-    return (MVKBuffer*)addResource(new MVKBuffer(this, pCreateInfo));
+    return addBuffer(new MVKBuffer(this, pCreateInfo));
 }

 void MVKDevice::destroyBuffer(MVKBuffer* mvkBuff,
                               const VkAllocationCallbacks* pAllocator) {
-    if (mvkBuff) {
-        removeResource(mvkBuff);
-        mvkBuff->destroy();
-    }
+    if ( !mvkBuff ) { return; }
+    removeBuffer(mvkBuff);
+    mvkBuff->destroy();
 }

 MVKBufferView* MVKDevice::createBufferView(const VkBufferViewCreateInfo* pCreateInfo,

@@ -3582,20 +3581,14 @@ MVKImage* MVKDevice::createImage(const VkImageCreateInfo* pCreateInfo,
     MVKImage* mvkImg = (swapchainInfo)
         ? new MVKPeerSwapchainImage(this, pCreateInfo, (MVKSwapchain*)swapchainInfo->swapchain, uint32_t(-1))
         : new MVKImage(this, pCreateInfo);
-    for (auto& memoryBinding : mvkImg->_memoryBindings) {
-        addResource(memoryBinding);
-    }
-    return mvkImg;
+    return addImage(mvkImg);
 }

 void MVKDevice::destroyImage(MVKImage* mvkImg,
                              const VkAllocationCallbacks* pAllocator) {
-    if (mvkImg) {
-        for (auto& memoryBinding : mvkImg->_memoryBindings) {
-            removeResource(memoryBinding);
-        }
-        mvkImg->destroy();
-    }
+    if ( !mvkImg ) { return; }
+    removeImage(mvkImg);
+    mvkImg->destroy();
 }

 MVKImageView* MVKDevice::createImageView(const VkImageViewCreateInfo* pCreateInfo,

@@ -3636,22 +3629,16 @@ MVKPresentableSwapchainImage* MVKDevice::createPresentableSwapchainImage(const V
                                          MVKSwapchain* swapchain,
                                          uint32_t swapchainIndex,
                                          const VkAllocationCallbacks* pAllocator) {
-    MVKPresentableSwapchainImage* mvkImg = new MVKPresentableSwapchainImage(this, pCreateInfo,
-                                                                            swapchain, swapchainIndex);
-    for (auto& memoryBinding : mvkImg->_memoryBindings) {
-        addResource(memoryBinding);
-    }
-    return mvkImg;
+    auto* pImg = new MVKPresentableSwapchainImage(this, pCreateInfo, swapchain, swapchainIndex);
+    addImage(pImg);
+    return pImg;
 }

 void MVKDevice::destroyPresentableSwapchainImage(MVKPresentableSwapchainImage* mvkImg,
                                                  const VkAllocationCallbacks* pAllocator) {
-    if (mvkImg) {
-        for (auto& memoryBinding : mvkImg->_memoryBindings) {
-            removeResource(memoryBinding);
-        }
-        mvkImg->destroy();
-    }
+    if ( !mvkImg ) { return; }
+    removeImage(mvkImg);
+    mvkImg->destroy();
 }

 MVKFence* MVKDevice::createFence(const VkFenceCreateInfo* pCreateInfo,

@@ -3987,42 +3974,79 @@ void MVKDevice::destroyPrivateDataSlot(VkPrivateDataSlotEXT privateDataSlot,
     mvkPDS->destroy();
 }


 #pragma mark Operations

-// Adds the specified resource for tracking, and returns the added resource.
-MVKResource* MVKDevice::addResource(MVKResource* rez) {
+// If the underlying MTLBuffer is referenced in a shader only via its gpuAddress,
+// the GPU might not be aware that the MTLBuffer needs to be made resident.
+// Track the buffer as needing to be made resident if a shader is bound that uses
+// PhysicalStorageBufferAddresses to access the contents of the underlying MTLBuffer.
+MVKBuffer* MVKDevice::addBuffer(MVKBuffer* mvkBuff) {
+    if ( !mvkBuff ) { return mvkBuff; }
+
     lock_guard<mutex> lock(_rezLock);
-    _resources.push_back(rez);
-    return rez;
+    _resources.push_back(mvkBuff);
+    if (mvkIsAnyFlagEnabled(mvkBuff->getUsage(), VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT)) {
+        _gpuAddressableBuffers.push_back(mvkBuff);
+    }
+    return mvkBuff;
 }

-// Removes the specified resource for tracking and returns the removed resource.
-MVKResource* MVKDevice::removeResource(MVKResource* rez) {
+MVKBuffer* MVKDevice::removeBuffer(MVKBuffer* mvkBuff) {
+    if ( !mvkBuff ) { return mvkBuff; }
+
     lock_guard<mutex> lock(_rezLock);
-    mvkRemoveFirstOccurance(_resources, rez);
-    return rez;
+    mvkRemoveFirstOccurance(_resources, mvkBuff);
+    if (mvkIsAnyFlagEnabled(mvkBuff->getUsage(), VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT)) {
+        mvkRemoveFirstOccurance(_gpuAddressableBuffers, mvkBuff);
+    }
+    return mvkBuff;
+}
+
+void MVKDevice::encodeGPUAddressableBuffers(MVKResourcesCommandEncoderState* rezEncState, MVKShaderStage stage) {
+    MTLResourceUsage mtlUsage = MTLResourceUsageRead | MTLResourceUsageWrite;
+    MTLRenderStages mtlRendStage = (stage == kMVKShaderStageFragment) ? MTLRenderStageFragment : MTLRenderStageVertex;
+
+    lock_guard<mutex> lock(_rezLock);
+    for (auto& buff : _gpuAddressableBuffers) {
+        rezEncState->encodeResourceUsage(stage, buff->getMTLBuffer(), mtlUsage, mtlRendStage);
+    }
+}
+
+MVKImage* MVKDevice::addImage(MVKImage* mvkImg) {
+    if ( !mvkImg ) { return mvkImg; }
+
+    lock_guard<mutex> lock(_rezLock);
+    for (auto& mb : mvkImg->_memoryBindings) {
+        _resources.push_back(mb);
+    }
+    return mvkImg;
+}
+
+MVKImage* MVKDevice::removeImage(MVKImage* mvkImg) {
+    if ( !mvkImg ) { return mvkImg; }
+
+    lock_guard<mutex> lock(_rezLock);
+    for (auto& mb : mvkImg->_memoryBindings) {
+        mvkRemoveFirstOccurance(_resources, mb);
+    }
+    return mvkImg;
 }

-// Adds the specified host semaphore to be woken upon device loss.
 void MVKDevice::addSemaphore(MVKSemaphoreImpl* sem4) {
     lock_guard<mutex> lock(_sem4Lock);
     _awaitingSemaphores.push_back(sem4);
 }

-// Removes the specified host semaphore.
 void MVKDevice::removeSemaphore(MVKSemaphoreImpl* sem4) {
     lock_guard<mutex> lock(_sem4Lock);
     mvkRemoveFirstOccurance(_awaitingSemaphores, sem4);
 }

-// Adds the specified timeline semaphore to be woken at the specified value upon device loss.
 void MVKDevice::addTimelineSemaphore(MVKTimelineSemaphore* sem4, uint64_t value) {
     lock_guard<mutex> lock(_sem4Lock);
     _awaitingTimelineSem4s.emplace_back(sem4, value);
 }

-// Removes the specified timeline semaphore.
 void MVKDevice::removeTimelineSemaphore(MVKTimelineSemaphore* sem4, uint64_t value) {
     lock_guard<mutex> lock(_sem4Lock);
     mvkRemoveFirstOccurance(_awaitingTimelineSem4s, make_pair(sem4, value));

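Design note on the hunk above: a raw device address gives no hint of which MTLBuffer it points into, so encodeGPUAddressableBuffers() conservatively declares every tracked buffer, with both read and write usage, whenever a bound pipeline declares the capability. A distilled, self-contained sketch of the pattern (illustrative types, not MoltenVK's):

#include <functional>
#include <mutex>
#include <vector>

// Illustrative reduction of the tracking pattern above: buffers that may be
// reached via a raw GPU address go into a side list, and all of them are
// declared to the encoder when a pipeline using PhysicalStorageBufferAddresses
// is bound, since the driver cannot know which addresses will be dereferenced.
class GPUAddressableBufferTracker {
public:
    void add(void* mtlBuffer) {                     // stand-in for id<MTLBuffer>
        std::lock_guard<std::mutex> lock(_lock);
        _buffers.push_back(mtlBuffer);
    }
    void encodeAll(const std::function<void(void*)>& encodeUsage) {
        std::lock_guard<std::mutex> lock(_lock);    // encodeUsage would call useResource:usage:
        for (void* buf : _buffers) { encodeUsage(buf); }
    }
private:
    std::mutex _lock;
    std::vector<void*> _buffers;
};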
@@ -160,7 +160,7 @@ VkResult MVKDeviceMemory::addImageMemoryBinding(MVKImageMemoryBinding* mvkImg) {
     // If a dedicated alloc, ensure this image is the one and only image
     // I am dedicated to. If my image is aliasable, though, allow other aliasable
     // images to bind to me.
-    if (_isDedicated && (_imageMemoryBindings.empty() || !(contains(_imageMemoryBindings, mvkImg) || (_imageMemoryBindings[0]->_image->getIsAliasable() && mvkImg->_image->getIsAliasable()))) ) {
+    if (_isDedicated && (_imageMemoryBindings.empty() || !(mvkContains(_imageMemoryBindings, mvkImg) || (_imageMemoryBindings[0]->_image->getIsAliasable() && mvkImg->_image->getIsAliasable()))) ) {
         return reportError(VK_ERROR_OUT_OF_DEVICE_MEMORY, "Could not bind VkImage %p to a VkDeviceMemory dedicated to resource %p. A dedicated allocation may only be used with the resource it was dedicated to.", mvkImg, getDedicatedResource() );
     }

@@ -180,7 +180,7 @@ bool MVKDeviceMemory::ensureMTLHeap() {

     if (_mtlHeap) { return true; }

-    // Can't create a MTLHeap on a imported memory
+    // Can't create a MTLHeap on imported memory
     if (_isHostMemImported) { return true; }

     // Don't bother if we don't have placement heaps.

@@ -284,6 +284,7 @@ MVKDeviceMemory::MVKDeviceMemory(MVKDevice* device,
                                  const VkMemoryAllocateInfo* pAllocateInfo,
                                  const VkAllocationCallbacks* pAllocator) : MVKVulkanAPIDeviceObject(device) {
     // Set Metal memory parameters
+    _vkMemAllocFlags = 0;
     _vkMemPropFlags = _device->_pMemoryProperties->memoryTypes[pAllocateInfo->memoryTypeIndex].propertyFlags;
     _mtlStorageMode = mvkMTLStorageModeFromVkMemoryPropertyFlags(_vkMemPropFlags);
     _mtlCPUCacheMode = mvkMTLCPUCacheModeFromVkMemoryPropertyFlags(_vkMemPropFlags);

@@ -63,7 +63,7 @@ id<MTLTexture> MVKImagePlane::getMTLTexture() {
                                      offset: memoryBinding->getDeviceMemoryOffset() + _subresources[0].layout.offset];
             if (_image->_isAliasable) { [_mtlTexture makeAliasable]; }
         } else if (_image->_isAliasable && dvcMem && dvcMem->isDedicatedAllocation() &&
-                   !contains(dvcMem->_imageMemoryBindings, memoryBinding)) {
+                   !mvkContains(dvcMem->_imageMemoryBindings, memoryBinding)) {
             // This is a dedicated allocation, but it belongs to another aliasable image.
             // In this case, use the MTLTexture from the memory's dedicated image.
             // We know the other image must be aliasable, or I couldn't have been bound

@@ -164,6 +164,9 @@ public:
                                 mvkIsAnyFlagEnabled(_flags, VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT));
     }

+    /** Returns whether the shader for the stage uses physical storage buffer addresses. */
+    virtual bool usesPhysicalStorageBufferAddressesCapability(MVKShaderStage stage) = 0;
+
     /** Constructs an instance for the device. layout, and parent (which may be NULL). */
     MVKPipeline(MVKDevice* device, MVKPipelineCache* pipelineCache, MVKPipelineLayout* layout,
                 VkPipelineCreateFlags flags, MVKPipeline* parent);

@@ -270,6 +273,8 @@ public:
     /** Returns whether this pipeline has custom sample positions enabled. */
     bool isUsingCustomSamplePositions() { return _isUsingCustomSamplePositions; }

+    bool usesPhysicalStorageBufferAddressesCapability(MVKShaderStage stage) override;
+
     /**
      * Returns whether the MTLBuffer vertex shader buffer index is valid for a stage of this pipeline.
      * It is if it is a descriptor binding within the descriptor binding range,

@@ -338,6 +343,8 @@ protected:
     MVKMTLFunction getMTLFunction(SPIRVToMSLConversionConfiguration& shaderConfig,
                                   const VkPipelineShaderStageCreateInfo* pShaderStage,
                                   const char* pStageName);
+    void markIfUsingPhysicalStorageBufferAddressesCapability(SPIRVToMSLConversionResultInfo& resultsInfo,
+                                                             MVKShaderStage stage);

     const VkPipelineShaderStageCreateInfo* _pVertexSS = nullptr;
     const VkPipelineShaderStageCreateInfo* _pTessCtlSS = nullptr;

@@ -356,6 +363,7 @@ protected:
     MVKSmallVector<MVKZeroDivisorVertexBinding> _zeroDivisorVertexBindings;
     MVKSmallVector<MVKStagedMTLArgumentEncoders> _mtlArgumentEncoders;
     MVKSmallVector<MVKStagedDescriptorBindingUse> _descriptorBindingUse;
+    MVKSmallVector<MVKShaderStage> _stagesUsingPhysicalStorageBufferAddressesCapability;

     MTLComputePipelineDescriptor* _mtlTessVertexStageDesc = nil;
     id<MTLFunction> _mtlTessVertexFunctions[3] = {nil, nil, nil};

@@ -425,6 +433,8 @@ public:
     /** Returns the array of descriptor binding use for the descriptor set. */
     MVKBitArray& getDescriptorBindingUse(uint32_t descSetIndex, MVKShaderStage stage) override { return _descriptorBindingUse[descSetIndex]; }

+    bool usesPhysicalStorageBufferAddressesCapability(MVKShaderStage stage) override;
+
     /** Constructs an instance for the device and parent (which may be NULL). */
     MVKComputePipeline(MVKDevice* device,
                        MVKPipelineCache* pipelineCache,

@@ -446,6 +456,7 @@ protected:
     bool _needsDynamicOffsetBuffer = false;
     bool _needsDispatchBaseBuffer = false;
    bool _allowsDispatchBase = false;
+    bool _usesPhysicalStorageBufferAddressesCapability = false;
 };

@@ -929,6 +929,7 @@ bool MVKGraphicsPipeline::addVertexShaderToPipeline(MTLRenderPipelineDescriptor*
         _needsVertexDynamicOffsetBuffer = funcRslts.needsDynamicOffsetBuffer;
         _needsVertexViewRangeBuffer = funcRslts.needsViewRangeBuffer;
         _needsVertexOutputBuffer = funcRslts.needsOutputBuffer;
+        markIfUsingPhysicalStorageBufferAddressesCapability(funcRslts, kMVKShaderStageVertex);

         addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageVertex);

@@ -998,6 +999,7 @@ bool MVKGraphicsPipeline::addVertexShaderToPipeline(MTLComputePipelineDescriptor
         _needsVertexBufferSizeBuffer = funcRslts.needsBufferSizeBuffer;
         _needsVertexDynamicOffsetBuffer = funcRslts.needsDynamicOffsetBuffer;
         _needsVertexOutputBuffer = funcRslts.needsOutputBuffer;
+        markIfUsingPhysicalStorageBufferAddressesCapability(funcRslts, kMVKShaderStageVertex);
     }

     addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageVertex);

@@ -1057,6 +1059,7 @@ bool MVKGraphicsPipeline::addTessCtlShaderToPipeline(MTLComputePipelineDescripto
     _needsTessCtlOutputBuffer = funcRslts.needsOutputBuffer;
     _needsTessCtlPatchOutputBuffer = funcRslts.needsPatchOutputBuffer;
     _needsTessCtlInputBuffer = funcRslts.needsInputThreadgroupMem;
+    markIfUsingPhysicalStorageBufferAddressesCapability(funcRslts, kMVKShaderStageTessCtl);

     addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageTessCtl);

@@ -1113,6 +1116,7 @@ bool MVKGraphicsPipeline::addTessEvalShaderToPipeline(MTLRenderPipelineDescripto
     _needsTessEvalSwizzleBuffer = funcRslts.needsSwizzleBuffer;
     _needsTessEvalBufferSizeBuffer = funcRslts.needsBufferSizeBuffer;
     _needsTessEvalDynamicOffsetBuffer = funcRslts.needsDynamicOffsetBuffer;
+    markIfUsingPhysicalStorageBufferAddressesCapability(funcRslts, kMVKShaderStageTessEval);

     addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageTessEval);

@@ -1170,6 +1174,7 @@ bool MVKGraphicsPipeline::addFragmentShaderToPipeline(MTLRenderPipelineDescripto
         _needsFragmentBufferSizeBuffer = funcRslts.needsBufferSizeBuffer;
         _needsFragmentDynamicOffsetBuffer = funcRslts.needsDynamicOffsetBuffer;
         _needsFragmentViewRangeBuffer = funcRslts.needsViewRangeBuffer;
+        markIfUsingPhysicalStorageBufferAddressesCapability(funcRslts, kMVKShaderStageFragment);

         addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageFragment);

@@ -1804,6 +1809,17 @@ MVKMTLFunction MVKGraphicsPipeline::getMTLFunction(SPIRVToMSLConversionConfigura
     return func;
 }

+void MVKGraphicsPipeline::markIfUsingPhysicalStorageBufferAddressesCapability(SPIRVToMSLConversionResultInfo& resultsInfo,
+                                                                              MVKShaderStage stage) {
+    if (resultsInfo.usesPhysicalStorageBufferAddressesCapability) {
+        _stagesUsingPhysicalStorageBufferAddressesCapability.push_back(stage);
+    }
+}
+
+bool MVKGraphicsPipeline::usesPhysicalStorageBufferAddressesCapability(MVKShaderStage stage) {
+    return mvkContains(_stagesUsingPhysicalStorageBufferAddressesCapability, stage);
+}
+
 MVKGraphicsPipeline::~MVKGraphicsPipeline() {
     @synchronized (getMTLDevice()) {
         [_mtlTessVertexStageDesc release];

@@ -1952,6 +1968,7 @@ MVKMTLFunction MVKComputePipeline::getMTLFunction(const VkComputePipelineCreateI
     _needsBufferSizeBuffer = funcRslts.needsBufferSizeBuffer;
     _needsDynamicOffsetBuffer = funcRslts.needsDynamicOffsetBuffer;
     _needsDispatchBaseBuffer = funcRslts.needsDispatchBaseBuffer;
+    _usesPhysicalStorageBufferAddressesCapability = funcRslts.usesPhysicalStorageBufferAddressesCapability;

     addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageCompute);

@@ -1962,6 +1979,10 @@ uint32_t MVKComputePipeline::getImplicitBufferIndex(uint32_t bufferIndexOffset)
     return _device->_pMetalFeatures->maxPerStageBufferCount - (bufferIndexOffset + 1);
 }

+bool MVKComputePipeline::usesPhysicalStorageBufferAddressesCapability(MVKShaderStage stage) {
+    return _usesPhysicalStorageBufferAddressesCapability;
+}
+
 MVKComputePipeline::~MVKComputePipeline() {
     @synchronized (getMTLDevice()) {
         [_mtlPipelineState release];

@@ -2428,7 +2449,8 @@ namespace mvk {
                      scr.needsDynamicOffsetBuffer,
                      scr.needsInputThreadgroupMem,
                      scr.needsDispatchBaseBuffer,
-                     scr.needsViewRangeBuffer);
+                     scr.needsViewRangeBuffer,
+                     scr.usesPhysicalStorageBufferAddressesCapability);
     }

 }

@@ -484,7 +484,7 @@ void mvkReleaseContainerContents(C& container) {

 /** Returns whether the container contains an item equal to the value. */
 template<class C, class T>
-bool contains(C& container, const T& val) {
+bool mvkContains(C& container, const T& val) {
     for (const T& cVal : container) { if (cVal == val) { return true; } }
     return false;
 }

@@ -366,6 +366,7 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverter::convert(SPIRVToMSLConversionConfigur
     conversionResult.resultInfo.needsInputThreadgroupMem = pMSLCompiler && pMSLCompiler->needs_input_threadgroup_mem();
     conversionResult.resultInfo.needsDispatchBaseBuffer = pMSLCompiler && pMSLCompiler->needs_dispatch_base_buffer();
     conversionResult.resultInfo.needsViewRangeBuffer = pMSLCompiler && pMSLCompiler->needs_view_mask_buffer();
+    conversionResult.resultInfo.usesPhysicalStorageBufferAddressesCapability = usesPhysicalStorageBufferAddressesCapability(pMSLCompiler);

     // When using Metal argument buffers, if the shader is provided with dynamic buffer offsets,
     // then it needs a buffer to hold these dynamic offsets.

@@ -533,3 +534,15 @@ void SPIRVToMSLConverter::populateEntryPoint(Compiler* pCompiler,
     populateWorkgroupDimension(wgSize.height, spvEP.workgroup_size.y, heightSC);
     populateWorkgroupDimension(wgSize.depth, spvEP.workgroup_size.z, depthSC);
 }
+
+bool SPIRVToMSLConverter::usesPhysicalStorageBufferAddressesCapability(Compiler* pCompiler) {
+    if (pCompiler) {
+        auto& declaredCapabilities = pCompiler->get_declared_capabilities();
+        for (auto dc : declaredCapabilities) {
+            if (dc == CapabilityPhysicalStorageBufferAddresses) {
+                return true;
+            }
+        }
+    }
+    return false;
+}

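The new converter method above scans the capabilities that the SPIR-V module declared via OpCapability. A standalone sketch of the same check against the SPIRV-Cross API (mirrors the commit code; the header path may vary by packaging):

#include "spirv_cross.hpp"

// Sketch: a SPIR-V module that dereferences raw device addresses must declare
// OpCapability PhysicalStorageBufferAddresses, so detection is a simple scan.
static bool usesPhysStorageBufferAddrs(const spirv_cross::Compiler& compiler) {
    for (spv::Capability cap : compiler.get_declared_capabilities()) {
        if (cap == spv::CapabilityPhysicalStorageBufferAddresses) { return true; }
    }
    return false;
}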
@@ -244,6 +244,7 @@ namespace mvk {
         bool needsInputThreadgroupMem = false;
         bool needsDispatchBaseBuffer = false;
         bool needsViewRangeBuffer = false;
+        bool usesPhysicalStorageBufferAddressesCapability = false;

     } SPIRVToMSLConversionResultInfo;

@@ -300,6 +301,7 @@ namespace mvk {
         void writeSPIRVToFile(std::string spvFilepath, std::string& log);
         void populateWorkgroupDimension(SPIRVWorkgroupSizeDimension& wgDim, uint32_t size, SPIRV_CROSS_NAMESPACE::SpecializationConstant& spvSpecConst);
         void populateEntryPoint(SPIRV_CROSS_NAMESPACE::Compiler* pCompiler, SPIRVToMSLConversionOptions& options, SPIRVEntryPoint& entryPoint);
+        bool usesPhysicalStorageBufferAddressesCapability(SPIRV_CROSS_NAMESPACE::Compiler* pCompiler);

         std::vector<uint32_t> _spirv;
     };