Support using Metal texel buffer for linear images to increase host coherency.
MVKDeviceMemory tracks MVKImages and MVKBuffers separately. Per the Vulkan spec, restrict linear images to 2D, non-array, single-mipmap. Use a texel buffer when possible for a texture on coherent device memory. Only flush MVKImages (not MVKBuffers) when device memory is mapped. Do not flush texel-buffer-backed images.
This commit is contained in:
parent
7693ebe313
commit
0c854c508d
@ -36,6 +36,9 @@ public:
|
||||
/** Returns the memory requirements of this resource by populating the specified structure. */
|
||||
VkResult getMemoryRequirements(VkMemoryRequirements* pMemoryRequirements) override;
|
||||
|
||||
/** Binds this resource to the specified offset within the specified memory allocation. */
|
||||
VkResult bindDeviceMemory(MVKDeviceMemory* mvkMem, VkDeviceSize memOffset) override;
|
||||
|
||||
/** Applies the specified global memory barrier. */
|
||||
void applyMemoryBarrier(VkPipelineStageFlags srcStageMask,
|
||||
VkPipelineStageFlags dstStageMask,
|
||||
@ -64,6 +67,8 @@ public:
|
||||
|
||||
MVKBuffer(MVKDevice* device, const VkBufferCreateInfo* pCreateInfo);
|
||||
|
||||
~MVKBuffer() override;
|
||||
|
||||
protected:
|
||||
using MVKResource::needsHostReadSync;
|
||||
|
||||
|
@ -36,6 +36,14 @@ VkResult MVKBuffer::getMemoryRequirements(VkMemoryRequirements* pMemoryRequireme
|
||||
return VK_SUCCESS;
|
||||
}
|
||||
|
||||
VkResult MVKBuffer::bindDeviceMemory(MVKDeviceMemory* mvkMem, VkDeviceSize memOffset) {
|
||||
if (_deviceMemory) { _deviceMemory->removeBuffer(this); }
|
||||
|
||||
MVKResource::bindDeviceMemory(mvkMem, memOffset);
|
||||
|
||||
return _deviceMemory ? _deviceMemory->addBuffer(this) : VK_SUCCESS;
|
||||
}
|
||||
|
||||
void MVKBuffer::applyMemoryBarrier(VkPipelineStageFlags srcStageMask,
|
||||
VkPipelineStageFlags dstStageMask,
|
||||
VkMemoryBarrier* pMemoryBarrier,
|
||||
@ -83,7 +91,10 @@ bool MVKBuffer::needsHostReadSync(VkPipelineStageFlags srcStageMask,
|
||||
MVKBuffer::MVKBuffer(MVKDevice* device, const VkBufferCreateInfo* pCreateInfo) : MVKResource(device) {
|
||||
_byteAlignment = _device->_pMetalFeatures->mtlBufferAlignment;
|
||||
_byteCount = pCreateInfo->size;
|
||||
_isBuffer = true;
|
||||
}
|
||||
|
||||
MVKBuffer::~MVKBuffer() {
|
||||
if (_deviceMemory) { _deviceMemory->removeBuffer(this); }
|
||||
}
|
||||
|
||||
|
||||
|
@ -23,7 +23,8 @@
|
||||
|
||||
#import <Metal/Metal.h>
|
||||
|
||||
class MVKResource;
|
||||
class MVKBuffer;
|
||||
class MVKImage;
|
||||
|
||||
|
||||
#pragma mark MVKDeviceMemory
|
||||
@ -97,25 +98,28 @@ public:
|
||||
~MVKDeviceMemory() override;
|
||||
|
||||
protected:
|
||||
friend MVKResource;
|
||||
friend MVKBuffer;
|
||||
friend MVKImage;
|
||||
|
||||
VkDeviceSize adjustMemorySize(VkDeviceSize size, VkDeviceSize offset);
|
||||
VkResult addResource(MVKResource* rez);
|
||||
void removeResource(MVKResource* rez);
|
||||
VkResult addBuffer(MVKBuffer* mvkBuff);
|
||||
void removeBuffer(MVKBuffer* mvkBuff);
|
||||
VkResult addImage(MVKImage* mvkImg);
|
||||
void removeImage(MVKImage* mvkImg);
|
||||
bool ensureMTLBuffer();
|
||||
bool ensureHostMemory();
|
||||
void freeHostMemory();
|
||||
|
||||
std::vector<MVKResource*> _resources;
|
||||
std::vector<MVKBuffer*> _buffers;
|
||||
std::vector<MVKImage*> _images;
|
||||
std::mutex _rezLock;
|
||||
VkDeviceSize _allocationSize;
|
||||
VkDeviceSize _mapOffset;
|
||||
VkDeviceSize _mapSize;
|
||||
id<MTLBuffer> _mtlBuffer;
|
||||
void* _pMemory;
|
||||
void* _pHostMemory;
|
||||
bool _isMapped;
|
||||
std::mutex _lock;
|
||||
VkDeviceSize _allocationSize = 0;
|
||||
VkDeviceSize _mapOffset = 0;
|
||||
VkDeviceSize _mapSize = 0;
|
||||
id<MTLBuffer> _mtlBuffer = nil;
|
||||
void* _pMemory = nullptr;
|
||||
void* _pHostMemory = nullptr;
|
||||
bool _isMapped = false;
|
||||
MTLResourceOptions _mtlResourceOptions;
|
||||
MTLStorageMode _mtlStorageMode;
|
||||
MTLCPUCacheMode _mtlCPUCacheMode;
|
||||
|
@ -17,6 +17,7 @@
|
||||
*/
|
||||
|
||||
#include "MVKDeviceMemory.h"
|
||||
#include "MVKBuffer.h"
|
||||
#include "MVKImage.h"
|
||||
#include "mvk_datatypes.h"
|
||||
#include "MVKFoundation.h"
|
||||
@ -48,7 +49,7 @@ VkResult MVKDeviceMemory::map(VkDeviceSize offset, VkDeviceSize size, VkMemoryMa
|
||||
*ppData = (void*)((uintptr_t)_pMemory + offset);
|
||||
|
||||
// Coherent memory does not require flushing by app, so we must flush now, to handle any texture updates.
|
||||
if (isMemoryHostCoherent()) { pullFromDevice(offset, size, true); }
|
||||
pullFromDevice(offset, size, isMemoryHostCoherent());
|
||||
|
||||
return VK_SUCCESS;
|
||||
}
|
||||
@ -61,7 +62,7 @@ void MVKDeviceMemory::unmap() {
|
||||
}
|
||||
|
||||
// Coherent memory does not require flushing by app, so we must flush now.
|
||||
if (isMemoryHostCoherent()) { flushToDevice(_mapOffset, _mapSize, true); }
|
||||
flushToDevice(_mapOffset, _mapSize, isMemoryHostCoherent());
|
||||
|
||||
_mapOffset = 0;
|
||||
_mapSize = 0;
|
||||
@ -72,14 +73,15 @@ VkResult MVKDeviceMemory::flushToDevice(VkDeviceSize offset, VkDeviceSize size,
|
||||
// Coherent memory is flushed on unmap(), so it is only flushed if forced
|
||||
VkDeviceSize memSize = adjustMemorySize(size, offset);
|
||||
if (memSize > 0 && isMemoryHostAccessible() && (evenIfCoherent || !isMemoryHostCoherent()) ) {
|
||||
lock_guard<mutex> lock(_rezLock);
|
||||
for (auto& rez : _resources) { rez->flushToDevice(offset, memSize); }
|
||||
|
||||
#if MVK_MACOS
|
||||
if (_mtlBuffer && _mtlStorageMode == MTLStorageModeManaged) {
|
||||
[_mtlBuffer didModifyRange: NSMakeRange(offset, memSize)];
|
||||
}
|
||||
#endif
|
||||
|
||||
lock_guard<mutex> lock(_rezLock);
|
||||
for (auto& img : _images) { img->flushToDevice(offset, memSize); }
|
||||
}
|
||||
return VK_SUCCESS;
|
||||
}
|
||||
@ -89,7 +91,7 @@ VkResult MVKDeviceMemory::pullFromDevice(VkDeviceSize offset, VkDeviceSize size,
|
||||
VkDeviceSize memSize = adjustMemorySize(size, offset);
|
||||
if (memSize > 0 && isMemoryHostAccessible() && (evenIfCoherent || !isMemoryHostCoherent()) ) {
|
||||
lock_guard<mutex> lock(_rezLock);
|
||||
for (auto& rez : _resources) { rez->pullFromDevice(offset, memSize); }
|
||||
for (auto& img : _images) { img->pullFromDevice(offset, memSize); }
|
||||
}
|
||||
return VK_SUCCESS;
|
||||
}
|
||||
@ -100,21 +102,34 @@ VkDeviceSize MVKDeviceMemory::adjustMemorySize(VkDeviceSize size, VkDeviceSize o
|
||||
return (size == VK_WHOLE_SIZE) ? (_allocationSize - offset) : size;
|
||||
}
|
||||
|
||||
VkResult MVKDeviceMemory::addResource(MVKResource* rez) {
|
||||
VkResult MVKDeviceMemory::addBuffer(MVKBuffer* mvkBuff) {
|
||||
lock_guard<mutex> lock(_rezLock);
|
||||
|
||||
if (rez->_isBuffer && !ensureMTLBuffer() ) {
|
||||
if (!ensureMTLBuffer() ) {
|
||||
return mvkNotifyErrorWithText(VK_ERROR_OUT_OF_DEVICE_MEMORY, "Could not bind a VkBuffer to a VkDeviceMemory of size %llu bytes. The maximum memory-aligned size of a VkDeviceMemory that supports a VkBuffer is %llu bytes.", _allocationSize, _device->_pMetalFeatures->maxMTLBufferSize);
|
||||
}
|
||||
|
||||
_resources.push_back(rez);
|
||||
_buffers.push_back(mvkBuff);
|
||||
|
||||
return VK_SUCCESS;
|
||||
}
|
||||
|
||||
void MVKDeviceMemory::removeResource(MVKResource* rez) {
|
||||
void MVKDeviceMemory::removeBuffer(MVKBuffer* mvkBuff) {
|
||||
lock_guard<mutex> lock(_rezLock);
|
||||
mvkRemoveAllOccurances(_resources, rez);
|
||||
mvkRemoveAllOccurances(_buffers, mvkBuff);
|
||||
}
|
||||
|
||||
VkResult MVKDeviceMemory::addImage(MVKImage* mvkImg) {
|
||||
lock_guard<mutex> lock(_rezLock);
|
||||
|
||||
_images.push_back(mvkImg);
|
||||
|
||||
return VK_SUCCESS;
|
||||
}
|
||||
|
||||
void MVKDeviceMemory::removeImage(MVKImage* mvkImg) {
|
||||
lock_guard<mutex> lock(_rezLock);
|
||||
mvkRemoveAllOccurances(_images, mvkImg);
|
||||
}
|
||||
|
||||
// Ensures that this instance is backed by a MTLBuffer object,
|
||||
@ -171,12 +186,6 @@ MVKDeviceMemory::MVKDeviceMemory(MVKDevice* device,
|
||||
_mtlCPUCacheMode = mvkMTLCPUCacheModeFromVkMemoryPropertyFlags(vkMemProps);
|
||||
|
||||
_allocationSize = pAllocateInfo->allocationSize;
|
||||
_mtlBuffer = nil;
|
||||
_pMemory = nullptr;
|
||||
_pHostMemory = nullptr;
|
||||
_isMapped = false;
|
||||
_mapOffset = 0;
|
||||
_mapSize = 0;
|
||||
|
||||
// If memory needs to be coherent it must reside in an MTLBuffer, since an open-ended map() must work.
|
||||
if (isMemoryHostCoherent() && !ensureMTLBuffer() ) {
|
||||
@ -187,8 +196,10 @@ MVKDeviceMemory::MVKDeviceMemory(MVKDevice* device,
|
||||
MVKDeviceMemory::~MVKDeviceMemory() {
|
||||
// Unbind any resources that are using me. Iterate a copy of the collection,
|
||||
// to allow the resource to callback to remove itself from the collection.
|
||||
auto rezCopies = _resources;
|
||||
for (auto& rez : rezCopies) { rez->bindDeviceMemory(nullptr, 0); }
|
||||
auto buffCopies = _buffers;
|
||||
for (auto& buf : buffCopies) { buf->bindDeviceMemory(nullptr, 0); }
|
||||
auto imgCopies = _images;
|
||||
for (auto& img : imgCopies) { img->bindDeviceMemory(nullptr, 0); }
|
||||
|
||||
[_mtlBuffer release];
|
||||
_mtlBuffer = nil;
|
||||
|
@ -194,11 +194,13 @@ public:
|
||||
~MVKImage() override;
|
||||
|
||||
protected:
|
||||
friend class MVKDeviceMemory;
|
||||
friend class MVKImageView;
|
||||
using MVKResource::needsHostReadSync;
|
||||
|
||||
MVKImageSubresource* getSubresource(uint32_t mipLevel, uint32_t arrayLayer);
|
||||
void initMTLTextureViewSupport();
|
||||
bool validateLinear(const VkImageCreateInfo* pCreateInfo);
|
||||
bool validateUseTexelBuffer();
|
||||
void initSubresources(const VkImageCreateInfo* pCreateInfo);
|
||||
void initSubresourceLayout(MVKImageSubresource& imgSubRez);
|
||||
virtual id<MTLTexture> newMTLTexture();
|
||||
@ -207,8 +209,9 @@ protected:
|
||||
MTLTextureDescriptor* getMTLTextureDescriptor();
|
||||
void updateMTLTextureContent(MVKImageSubresource& subresource, VkDeviceSize offset, VkDeviceSize size);
|
||||
void getMTLTextureContent(MVKImageSubresource& subresource, VkDeviceSize offset, VkDeviceSize size);
|
||||
VkResult flushToDevice(VkDeviceSize offset, VkDeviceSize size) override;
|
||||
VkResult pullFromDevice(VkDeviceSize offset, VkDeviceSize size) override;
|
||||
bool shouldFlushHostMemory();
|
||||
VkResult flushToDevice(VkDeviceSize offset, VkDeviceSize size);
|
||||
VkResult pullFromDevice(VkDeviceSize offset, VkDeviceSize size);
|
||||
bool needsHostReadSync(VkPipelineStageFlags srcStageMask,
|
||||
VkPipelineStageFlags dstStageMask,
|
||||
VkImageMemoryBarrier* pImageMemoryBarrier);
|
||||
@ -227,6 +230,8 @@ protected:
|
||||
bool _isDepthStencilAttachment;
|
||||
bool _canSupportMTLTextureView;
|
||||
bool _hasExpectedTexelSize;
|
||||
bool _usesTexelBuffer;
|
||||
bool _isLinear;
|
||||
};
|
||||
|
||||
|
||||
|
@ -165,17 +165,41 @@ VkResult MVKImage::getMemoryRequirements(VkMemoryRequirements* pMemoryRequiremen
|
||||
|
||||
// Memory may have been mapped before image was bound, and needs to be loaded into the MTLTexture.
|
||||
VkResult MVKImage::bindDeviceMemory(MVKDeviceMemory* mvkMem, VkDeviceSize memOffset) {
|
||||
VkResult rslt = MVKResource::bindDeviceMemory(mvkMem, memOffset);
|
||||
if (isMemoryHostAccessible()) { flushToDevice(getDeviceMemoryOffset(), getByteCount()); }
|
||||
return rslt;
|
||||
if (_deviceMemory) { _deviceMemory->removeImage(this); }
|
||||
|
||||
MVKResource::bindDeviceMemory(mvkMem, memOffset);
|
||||
|
||||
_usesTexelBuffer = validateUseTexelBuffer();
|
||||
|
||||
flushToDevice(getDeviceMemoryOffset(), getByteCount());
|
||||
|
||||
return _deviceMemory ? _deviceMemory->addImage(this) : VK_SUCCESS;
|
||||
}
|
||||
|
||||
bool MVKImage::validateUseTexelBuffer() {
|
||||
VkExtent2D blockExt = mvkMTLPixelFormatBlockTexelSize(_mtlPixelFormat);
|
||||
bool isUncompressed = blockExt.width == 1 && blockExt.height == 1;
|
||||
|
||||
bool useTexelBuffer = _device->_pMetalFeatures->texelBuffers; // Texel buffers available
|
||||
useTexelBuffer = useTexelBuffer && isMemoryHostAccessible() && _isLinear && isUncompressed; // Applicable memory layout
|
||||
useTexelBuffer = useTexelBuffer && _deviceMemory && _deviceMemory->_mtlBuffer; // Buffer is available to overlay
|
||||
|
||||
#if MVK_MACOS
|
||||
useTexelBuffer = useTexelBuffer && !isMemoryHostCoherent(); // macOS cannot use shared memory for texel buffers
|
||||
#endif
|
||||
|
||||
return useTexelBuffer;
|
||||
}
|
||||
|
||||
bool MVKImage::shouldFlushHostMemory() { return isMemoryHostAccessible() && !_usesTexelBuffer; }
|
||||
|
||||
// Flushes the device memory at the specified memory range into the MTLTexture. Updates
|
||||
// all subresources that overlap the specified range and are in an updatable layout state.
|
||||
VkResult MVKImage::flushToDevice(VkDeviceSize offset, VkDeviceSize size) {
|
||||
if (shouldFlushHostMemory()) {
|
||||
for (auto& subRez : _subresources) {
|
||||
switch (subRez.layoutState) {
|
||||
case VK_IMAGE_LAYOUT_UNDEFINED: // TODO: VK_IMAGE_LAYOUT_UNDEFINED should be illegal
|
||||
case VK_IMAGE_LAYOUT_UNDEFINED:
|
||||
case VK_IMAGE_LAYOUT_PREINITIALIZED:
|
||||
case VK_IMAGE_LAYOUT_GENERAL: {
|
||||
updateMTLTextureContent(subRez, offset, size);
|
||||
@ -185,12 +209,14 @@ VkResult MVKImage::flushToDevice(VkDeviceSize offset, VkDeviceSize size) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return VK_SUCCESS;
|
||||
}
|
||||
|
||||
// Pulls content from the MTLTexture into the device memory at the specified memory range.
|
||||
// Pulls from all subresources that overlap the specified range and are in an updatable layout state.
|
||||
VkResult MVKImage::pullFromDevice(VkDeviceSize offset, VkDeviceSize size) {
|
||||
if (shouldFlushHostMemory()) {
|
||||
for (auto& subRez : _subresources) {
|
||||
switch (subRez.layoutState) {
|
||||
case VK_IMAGE_LAYOUT_GENERAL: {
|
||||
@ -201,6 +227,7 @@ VkResult MVKImage::pullFromDevice(VkDeviceSize offset, VkDeviceSize size) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return VK_SUCCESS;
|
||||
}
|
||||
|
||||
@ -249,6 +276,10 @@ VkResult MVKImage::setMTLTexture(id<MTLTexture> mtlTexture) {
|
||||
id<MTLTexture> MVKImage::newMTLTexture() {
|
||||
if (_ioSurface) {
|
||||
return [getMTLDevice() newTextureWithDescriptor: getMTLTextureDescriptor() iosurface: _ioSurface plane: 0];
|
||||
} else if (_usesTexelBuffer) {
|
||||
return [_deviceMemory->_mtlBuffer newTextureWithDescriptor: getMTLTextureDescriptor()
|
||||
offset: getDeviceMemoryOffset()
|
||||
bytesPerRow: _subresources[0].layout.rowPitch];
|
||||
} else {
|
||||
return [getMTLDevice() newTextureWithDescriptor: getMTLTextureDescriptor()];
|
||||
}
|
||||
@ -337,24 +368,22 @@ MTLStorageMode MVKImage::getMTLStorageMode() {
|
||||
//specified subresource definition, from the underlying memory buffer.
|
||||
void MVKImage::updateMTLTextureContent(MVKImageSubresource& subresource,
|
||||
VkDeviceSize offset, VkDeviceSize size) {
|
||||
|
||||
VkImageSubresource& imgSubRez = subresource.subresource;
|
||||
VkSubresourceLayout& imgLayout = subresource.layout;
|
||||
|
||||
// Check if subresource overlaps the memory range.
|
||||
VkDeviceSize memStart = offset;
|
||||
VkDeviceSize memEnd = offset + size;
|
||||
VkDeviceSize imgStart = subresource.layout.offset;
|
||||
VkDeviceSize imgEnd = subresource.layout.offset + subresource.layout.size;
|
||||
VkDeviceSize imgStart = imgLayout.offset;
|
||||
VkDeviceSize imgEnd = imgLayout.offset + imgLayout.size;
|
||||
if (imgStart >= memEnd || imgEnd <= memStart) { return; }
|
||||
|
||||
// Don't update if host memory has not been mapped yet.
|
||||
void* pHostMem = getHostMemoryAddress();
|
||||
if ( !pHostMem ) { return; }
|
||||
|
||||
VkImageSubresource& imgSubRez = subresource.subresource;
|
||||
VkSubresourceLayout& imgLayout = subresource.layout;
|
||||
|
||||
uint32_t mipLvl = imgSubRez.mipLevel;
|
||||
uint32_t layer = imgSubRez.arrayLayer;
|
||||
|
||||
VkExtent3D mipExtent = getExtent3D(mipLvl);
|
||||
VkExtent3D mipExtent = getExtent3D(imgSubRez.mipLevel);
|
||||
VkImageType imgType = getImageType();
|
||||
void* pImgBytes = (void*)((uintptr_t)pHostMem + imgLayout.offset);
|
||||
|
||||
@ -363,8 +392,8 @@ void MVKImage::updateMTLTextureContent(MVKImageSubresource& subresource,
|
||||
mtlRegion.size = mvkMTLSizeFromVkExtent3D(mipExtent);
|
||||
|
||||
[getMTLTexture() replaceRegion: mtlRegion
|
||||
mipmapLevel: mipLvl
|
||||
slice: layer
|
||||
mipmapLevel: imgSubRez.mipLevel
|
||||
slice: imgSubRez.arrayLayer
|
||||
withBytes: pImgBytes
|
||||
bytesPerRow: (imgType != VK_IMAGE_TYPE_1D ? imgLayout.rowPitch : 0)
|
||||
bytesPerImage: (imgType == VK_IMAGE_TYPE_3D ? imgLayout.depthPitch : 0)];
|
||||
@ -374,24 +403,22 @@ void MVKImage::updateMTLTextureContent(MVKImageSubresource& subresource,
|
||||
// the underlying MTLTexture, corresponding to the specified subresource definition.
|
||||
void MVKImage::getMTLTextureContent(MVKImageSubresource& subresource,
|
||||
VkDeviceSize offset, VkDeviceSize size) {
|
||||
|
||||
VkImageSubresource& imgSubRez = subresource.subresource;
|
||||
VkSubresourceLayout& imgLayout = subresource.layout;
|
||||
|
||||
// Check if subresource overlaps the memory range.
|
||||
VkDeviceSize memStart = offset;
|
||||
VkDeviceSize memEnd = offset + size;
|
||||
VkDeviceSize imgStart = subresource.layout.offset;
|
||||
VkDeviceSize imgEnd = subresource.layout.offset + subresource.layout.size;
|
||||
VkDeviceSize imgStart = imgLayout.offset;
|
||||
VkDeviceSize imgEnd = imgLayout.offset + imgLayout.size;
|
||||
if (imgStart >= memEnd || imgEnd <= memStart) { return; }
|
||||
|
||||
// Don't update if host memory has not been mapped yet.
|
||||
void* pHostMem = getHostMemoryAddress();
|
||||
if ( !pHostMem ) { return; }
|
||||
|
||||
VkImageSubresource& imgSubRez = subresource.subresource;
|
||||
VkSubresourceLayout& imgLayout = subresource.layout;
|
||||
|
||||
uint32_t mipLvl = imgSubRez.mipLevel;
|
||||
uint32_t layer = imgSubRez.arrayLayer;
|
||||
|
||||
VkExtent3D mipExtent = getExtent3D(mipLvl);
|
||||
VkExtent3D mipExtent = getExtent3D(imgSubRez.mipLevel);
|
||||
VkImageType imgType = getImageType();
|
||||
void* pImgBytes = (void*)((uintptr_t)pHostMem + imgLayout.offset);
|
||||
|
||||
@ -403,8 +430,8 @@ void MVKImage::getMTLTextureContent(MVKImageSubresource& subresource,
|
||||
bytesPerRow: (imgType != VK_IMAGE_TYPE_1D ? imgLayout.rowPitch : 0)
|
||||
bytesPerImage: (imgType == VK_IMAGE_TYPE_3D ? imgLayout.depthPitch : 0)
|
||||
fromRegion: mtlRegion
|
||||
mipmapLevel: mipLvl
|
||||
slice: layer];
|
||||
mipmapLevel: imgSubRez.mipLevel
|
||||
slice: imgSubRez.arrayLayer];
|
||||
}
|
||||
|
||||
|
||||
@ -444,8 +471,10 @@ MVKImage::MVKImage(MVKDevice* device, const VkImageCreateInfo* pCreateInfo) : MV
|
||||
|
||||
_isDepthStencilAttachment = (mvkAreFlagsEnabled(pCreateInfo->usage, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) ||
|
||||
mvkAreFlagsEnabled(mvkVkFormatProperties(pCreateInfo->format).optimalTilingFeatures, VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT));
|
||||
|
||||
_canSupportMTLTextureView = !_isDepthStencilAttachment;
|
||||
_hasExpectedTexelSize = (mvkMTLPixelFormatBytesPerBlock(_mtlPixelFormat) == mvkVkFormatBytesPerBlock(pCreateInfo->format));
|
||||
_isLinear = validateLinear(pCreateInfo);
|
||||
_usesTexelBuffer = false;
|
||||
|
||||
// Calc _byteCount after _mtlTexture & _byteAlignment
|
||||
for (uint32_t mipLvl = 0; mipLvl < _mipLevels; mipLvl++) {
|
||||
@ -453,9 +482,45 @@ MVKImage::MVKImage(MVKDevice* device, const VkImageCreateInfo* pCreateInfo) : MV
|
||||
}
|
||||
|
||||
initSubresources(pCreateInfo);
|
||||
initMTLTextureViewSupport();
|
||||
}
|
||||
|
||||
bool MVKImage::validateLinear(const VkImageCreateInfo* pCreateInfo) {
|
||||
if (pCreateInfo->tiling != VK_IMAGE_TILING_LINEAR) { return false; }
|
||||
|
||||
if (pCreateInfo->imageType != VK_IMAGE_TYPE_2D) {
|
||||
setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : If tiling is VK_IMAGE_TILING_LINEAR, imageType must be VK_IMAGE_TYPE_2D."));
|
||||
return false;
|
||||
}
|
||||
|
||||
if (_isDepthStencilAttachment) {
|
||||
setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : If tiling is VK_IMAGE_TILING_LINEAR, format must not be a depth/stencil format."));
|
||||
return false;
|
||||
}
|
||||
|
||||
if (_mipLevels > 1) {
|
||||
setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : If tiling is VK_IMAGE_TILING_LINEAR, mipLevels must be 1."));
|
||||
return false;
|
||||
}
|
||||
|
||||
if (_arrayLayers > 1) {
|
||||
setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : If tiling is VK_IMAGE_TILING_LINEAR, arrayLayers must be 1."));
|
||||
return false;
|
||||
}
|
||||
|
||||
if (_samples > 1) {
|
||||
setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : If tiling is VK_IMAGE_TILING_LINEAR, samples must be VK_SAMPLE_COUNT_1_BIT."));
|
||||
return false;
|
||||
}
|
||||
|
||||
if (mvkAreOnlyAnyFlagsEnabled(_usage, (VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT))) {
|
||||
setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : If tiling is VK_IMAGE_TILING_LINEAR, usage must only include VK_IMAGE_USAGE_TRANSFER_SRC_BIT and/or VK_IMAGE_USAGE_TRANSFER_DST_BIT."));
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
// Initializes the subresource definitions.
|
||||
void MVKImage::initSubresources(const VkImageCreateInfo* pCreateInfo) {
|
||||
_subresources.reserve(_mipLevels * _arrayLayers);
|
||||
@ -498,13 +563,8 @@ void MVKImage::initSubresourceLayout(MVKImageSubresource& imgSubRez) {
|
||||
layout.depthPitch = bytesPerLayerCurrLevel;
|
||||
}
|
||||
|
||||
// Determines whether this image can support Metal texture views,
|
||||
// and sets the _canSupportMTLTextureView variable appropriately.
|
||||
void MVKImage::initMTLTextureViewSupport() {
|
||||
_canSupportMTLTextureView = !_isDepthStencilAttachment;
|
||||
}
|
||||
|
||||
MVKImage::~MVKImage() {
|
||||
if (_deviceMemory) { _deviceMemory->removeImage(this); }
|
||||
resetMTLTexture();
|
||||
resetIOSurface();
|
||||
}
|
||||
|
@ -71,17 +71,9 @@ public:
|
||||
|
||||
#pragma mark Construction
|
||||
|
||||
/** Constructs an instance for the specified device. */
|
||||
MVKResource(MVKDevice* device) : MVKBaseDeviceObject(device) {}
|
||||
|
||||
/** Destructor. */
|
||||
~MVKResource() override;
|
||||
|
||||
protected:
|
||||
friend MVKDeviceMemory;
|
||||
|
||||
virtual VkResult flushToDevice(VkDeviceSize offset, VkDeviceSize size) { return VK_SUCCESS; };
|
||||
virtual VkResult pullFromDevice(VkDeviceSize offset, VkDeviceSize size) { return VK_SUCCESS; };
|
||||
virtual bool needsHostReadSync(VkPipelineStageFlags srcStageMask,
|
||||
VkPipelineStageFlags dstStageMask,
|
||||
VkMemoryBarrier* pMemoryBarrier);
|
||||
@ -90,5 +82,4 @@ protected:
|
||||
VkDeviceSize _deviceMemoryOffset = 0;
|
||||
VkDeviceSize _byteCount = 0;
|
||||
VkDeviceSize _byteAlignment = 0;
|
||||
bool _isBuffer = false;
|
||||
};
|
||||
|
@ -23,18 +23,13 @@
|
||||
#pragma mark MVKResource
|
||||
|
||||
VkResult MVKResource::bindDeviceMemory(MVKDeviceMemory* mvkMem, VkDeviceSize memOffset) {
|
||||
if (_deviceMemory) { _deviceMemory->removeResource(this); }
|
||||
|
||||
_deviceMemory = mvkMem;
|
||||
_deviceMemoryOffset = memOffset;
|
||||
|
||||
return _deviceMemory ? _deviceMemory->addResource(this) : VK_SUCCESS;
|
||||
return VK_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns whether the specified global memory barrier requires a sync between this
|
||||
* texture and host memory for the purpose of the host reading texture memory.
|
||||
*/
|
||||
// Returns whether the specified global memory barrier requires a sync between this
|
||||
// texture and host memory for the purpose of the host reading texture memory.
|
||||
bool MVKResource::needsHostReadSync(VkPipelineStageFlags srcStageMask,
|
||||
VkPipelineStageFlags dstStageMask,
|
||||
VkMemoryBarrier* pMemoryBarrier) {
|
||||
@ -48,10 +43,3 @@ bool MVKResource::needsHostReadSync(VkPipelineStageFlags srcStageMask,
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
#pragma mark Construction
|
||||
|
||||
MVKResource::~MVKResource() {
|
||||
if (_deviceMemory) { _deviceMemory->removeResource(this); }
|
||||
};
|
||||
|
||||
|
@ -363,7 +363,11 @@ bool mvkAreFlagsEnabled(T1 value, const T2 bitMask) { return ((value & bitMask)
|
||||
template<typename T1, typename T2>
|
||||
bool mvkIsAnyFlagEnabled(T1 value, const T2 bitMask) { return !!(value & bitMask); }
|
||||
|
||||
/** Returns whether the specified value has ONLY of the flags specified in bitMask enabled (set to 1). */
|
||||
/** Returns whether the specified value has ONLY ALL of the flags specified in bitMask enabled (set to 1), and none others. */
|
||||
template<typename T1, typename T2>
|
||||
bool mvkAreOnlyFlagsEnabled(T1 value, const T2 bitMask) { return (value == bitMask); }
|
||||
bool mvkAreOnlyAllFlagsEnabled(T1 value, const T2 bitMask) { return (value == bitMask); }
|
||||
|
||||
/** Returns whether the specified value has ONLY one or more of the flags specified in bitMask enabled (set to 1), and none others. */
|
||||
template<typename T1, typename T2>
|
||||
bool mvkAreOnlyAnyFlagsEnabled(T1 value, const T2 bitMask) { return (mvkIsAnyFlagEnabled(value, bitMask) && ((value | bitMask) == bitMask)); }
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user