Merge pull request #1381 from js6i/volatile_buffer_locking
MVKMTLBufferAllocator: Make sure temporary buffers stay mlocked() until they're encoded.
Commit d5b4ef1b19
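The change replaces per-copy mlock()/munlock() calls with reference-counted purgeable-state tracking on each backing MTLBuffer. Previously, acquireMTLBufferRegion() marked the backing MTLBuffer MTLPurgeableStateVolatile as soon as a region was handed out, and mlock()/munlock() only protected the pages for the duration of the memcpy(), so the buffer contents could be discarded before the command buffer was encoded and executed. The pool now counts outstanding allocations per MTLBuffer, keeps a buffer non-volatile while any allocation from it is live, and only drops it back to volatile when the last allocation is returned, which for temporary encoder buffers happens in the MTLCommandBuffer completion handler. A minimal sketch of that residency pattern, using a hypothetical BufferResidencyTracker type rather than MoltenVK's MVKMTLBufferAllocationPool:

// Simplified sketch of per-buffer purgeable-state reference counting (Objective-C++).
// BufferResidencyTracker and its acquire()/release() methods are illustrative names, not MoltenVK API.
#import <Metal/Metal.h>

struct BufferResidencyTracker {
    id<MTLBuffer> mtlBuffer = nil;   // backing buffer; volatile while unused
    uint64_t allocationCount = 0;    // live sub-allocations handed out from it

    void acquire() {
        // First outstanding allocation: pin the contents so they cannot be discarded.
        if (!allocationCount++) {
            [mtlBuffer setPurgeableState: MTLPurgeableStateNonVolatile];
        }
    }

    void release() {
        // Last allocation returned: allow the memory to be reclaimed under pressure.
        if (!--allocationCount) {
            [mtlBuffer setPurgeableState: MTLPurgeableStateVolatile];
        }
    }
};

// Hypothetical usage, mirroring getTempMTLBuffer()/returnToPool() in the diff below:
//     tracker->acquire();                                   // before writing staging data
//     memcpy(tracker->mtlBuffer.contents, srcData, length);
//     [mtlCmdBuffer addCompletedHandler: ^(id<MTLCommandBuffer> mcb) { tracker->release(); }];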
@@ -1612,11 +1612,9 @@ void MVKCmdUpdateBuffer::encode(MVKCommandEncoder* cmdEncoder) {
     NSUInteger dstMTLBuffOffset = _dstBuffer->getMTLBufferOffset() + _dstOffset;
 
     // Copy data to the source MTLBuffer
-    MVKMTLBufferAllocation* srcMTLBufferAlloc = (MVKMTLBufferAllocation*)cmdEncoder->getCommandEncodingPool()->acquireMTLBufferAllocation(_dataSize);
+    MVKMTLBufferAllocation* srcMTLBufferAlloc = cmdEncoder->getCommandEncodingPool()->acquireMTLBufferAllocation(_dataSize);
     void* pBuffData = srcMTLBufferAlloc->getContents();
-    mlock(pBuffData, _dataSize);
     memcpy(pBuffData, _srcDataCache.data(), _dataSize);
-    munlock(pBuffData, _dataSize);
 
     [mtlBlitEnc copyFromBuffer: srcMTLBufferAlloc->_mtlBuffer
                   sourceOffset: srcMTLBufferAlloc->_offset
@@ -695,7 +695,7 @@ void MVKCommandEncoder::setComputeBytes(id<MTLComputeCommandEncoder> mtlEncoder,
 
 // Return the MTLBuffer allocation to the pool once the command buffer is done with it
 const MVKMTLBufferAllocation* MVKCommandEncoder::getTempMTLBuffer(NSUInteger length, bool isPrivate, bool isDedicated) {
-    const MVKMTLBufferAllocation* mtlBuffAlloc = getCommandEncodingPool()->acquireMTLBufferAllocation(length, isPrivate, isDedicated);
+    MVKMTLBufferAllocation* mtlBuffAlloc = getCommandEncodingPool()->acquireMTLBufferAllocation(length, isPrivate, isDedicated);
     [_mtlCmdBuffer addCompletedHandler: ^(id<MTLCommandBuffer> mcb) { mtlBuffAlloc->returnToPool(); }];
     return mtlBuffAlloc;
 }
@@ -708,9 +708,7 @@ MVKCommandEncodingPool* MVKCommandEncoder::getCommandEncodingPool() {
 const MVKMTLBufferAllocation* MVKCommandEncoder::copyToTempMTLBufferAllocation(const void* bytes, NSUInteger length, bool isDedicated) {
     const MVKMTLBufferAllocation* mtlBuffAlloc = getTempMTLBuffer(length, false, isDedicated);
     void* pBuffData = mtlBuffAlloc->getContents();
-    mlock(pBuffData, length);
     memcpy(pBuffData, bytes, length);
-    munlock(pBuffData, length);
 
     return mtlBuffAlloc;
 }
@@ -66,7 +66,7 @@ public:
      * To return the returned allocation back to the pool to be reused,
      * call the returnToPool() function on the returned allocation.
      */
-    const MVKMTLBufferAllocation* acquireMTLBufferAllocation(NSUInteger length, bool isPrivate = false, bool isDedicated = false);
+    MVKMTLBufferAllocation* acquireMTLBufferAllocation(NSUInteger length, bool isPrivate = false, bool isDedicated = false);
 
     /**
      * Returns a MTLRenderPipelineState dedicated to rendering to several attachments
@@ -77,7 +77,7 @@ id<MTLDepthStencilState> MVKCommandEncodingPool::getMTLDepthStencilState(bool us
     MVK_ENC_REZ_ACCESS(_cmdClearDefaultDepthStencilState, newMTLDepthStencilState(useDepth, useStencil));
 }
 
-const MVKMTLBufferAllocation* MVKCommandEncodingPool::acquireMTLBufferAllocation(NSUInteger length, bool isPrivate, bool isDedicated) {
+MVKMTLBufferAllocation* MVKCommandEncodingPool::acquireMTLBufferAllocation(NSUInteger length, bool isPrivate, bool isDedicated) {
     MVKAssert(isPrivate || !isDedicated, "Dedicated, host-shared temporary buffers are not supported.");
     if (isDedicated) {
         return _dedicatedMtlBufferAllocator.acquireMTLBufferRegion(length);
@@ -51,17 +51,20 @@ public:
     MVKMTLBufferAllocationPool* getPool() const { return _pool; }
 
     /** Returns this object back to the pool that created it. */
-    void returnToPool() const;
+    void returnToPool();
 
     /** Constructs this instance with the specified pool as its origin. */
     MVKMTLBufferAllocation(MVKMTLBufferAllocationPool* pool,
                            id<MTLBuffer> mtlBuffer,
                            NSUInteger offset,
-                           NSUInteger length) : _pool(pool), _mtlBuffer(mtlBuffer), _offset(offset), _length(length) {}
+                           NSUInteger length,
+                           uint64_t poolIndex) : _pool(pool), _mtlBuffer(mtlBuffer), _offset(offset), _length(length), _poolIndex(poolIndex) {}
 
 protected:
-    MVKMTLBufferAllocationPool* _pool;
+    friend class MVKMTLBufferAllocationPool;
 
+    MVKMTLBufferAllocationPool* _pool;
+    uint64_t _poolIndex;
 };
 
 
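The header changes above give each MVKMTLBufferAllocation a _poolIndex recording which backing MTLBuffer in the pool it was carved from, so the pool can keep a per-buffer count of live allocations; MVKMTLBufferAllocationPool is made a friend to read that index, and returnToPool() loses its const qualifier because returning an allocation now mutates pool state.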
@@ -78,6 +81,11 @@ protected:
 class MVKMTLBufferAllocationPool : public MVKObjectPool<MVKMTLBufferAllocation>, public MVKDeviceTrackingMixin {
 
 public:
+    /** Returns a new allocation. */
+    MVKMTLBufferAllocation* acquireAllocation();
+
+    /** Returns a new allocation (without mutual exclusion). */
+    MVKMTLBufferAllocation* acquireAllocationUnlocked();
 
     /** Returns the Vulkan API opaque object controlling this object. */
     MVKVulkanAPIObject* getVulkanAPIObject() override { return _device->getVulkanAPIObject(); };
@@ -93,7 +101,8 @@ protected:
 
     MVKBaseObject* getBaseObject() override { return this; };
     MVKMTLBufferAllocation* newObject() override;
-    void returnAllocation(MVKMTLBufferAllocation* ba) { _isThreadSafe ? returnObjectSafely(ba) : returnObject(ba); }
+    void returnAllocationUnlocked(MVKMTLBufferAllocation* ba);
+    void returnAllocation(MVKMTLBufferAllocation* ba);
     uint32_t calcMTLBufferAllocationCount();
     void addMTLBuffer();
 
@@ -101,8 +110,9 @@ protected:
     NSUInteger _allocationLength;
     NSUInteger _mtlBufferLength;
     MTLStorageMode _mtlStorageMode;
-    MVKSmallVector<id<MTLBuffer>, 64> _mtlBuffers;
-    bool _isThreadSafe;
+    struct MTLBufferTracker { id<MTLBuffer> mtlBuffer; uint64_t allocationCount; };
+    MVKSmallVector<MTLBufferTracker, 64> _mtlBuffers;
+    bool _isThreadSafe;
 };
 
 
@@ -132,7 +142,7 @@ public:
      * To return the MVKMTLBufferAllocation back to the pool, call
      * the returnToPool() function on the returned instance.
      */
-    const MVKMTLBufferAllocation* acquireMTLBufferRegion(NSUInteger length);
+    MVKMTLBufferAllocation* acquireMTLBufferRegion(NSUInteger length);
 
     /**
      * Configures this instance to dispense MVKMTLBufferAllocation up to the specified
@@ -24,7 +24,7 @@
 
 MVKVulkanAPIObject* MVKMTLBufferAllocation::getVulkanAPIObject() { return _pool->getVulkanAPIObject(); };
 
-void MVKMTLBufferAllocation::returnToPool() const { _pool->returnAllocation((MVKMTLBufferAllocation*)this); }
+void MVKMTLBufferAllocation::returnToPool() { _pool->returnAllocation(this); }
 
 
 #pragma mark -
@@ -39,16 +39,49 @@ MVKMTLBufferAllocation* MVKMTLBufferAllocationPool::newObject() {
     // of future allocation to beyond this allocation.
     NSUInteger offset = _nextOffset;
     _nextOffset += _allocationLength;
-    return new MVKMTLBufferAllocation(this, _mtlBuffers.back(), offset, _allocationLength);
+    return new MVKMTLBufferAllocation(this, _mtlBuffers.back().mtlBuffer, offset, _allocationLength, _mtlBuffers.size() - 1);
 }
 
 // Adds a new MTLBuffer to the buffer pool and resets the next offset to the start of it
 void MVKMTLBufferAllocationPool::addMTLBuffer() {
     MTLResourceOptions mbOpts = (_mtlStorageMode << MTLResourceStorageModeShift) | MTLResourceCPUCacheModeDefaultCache;
-    _mtlBuffers.push_back([_device->getMTLDevice() newBufferWithLength: _mtlBufferLength options: mbOpts]);
+    _mtlBuffers.push_back({ [_device->getMTLDevice() newBufferWithLength: _mtlBufferLength options: mbOpts], 0 });
     _nextOffset = 0;
 }
 
+MVKMTLBufferAllocation* MVKMTLBufferAllocationPool::acquireAllocationUnlocked() {
+    MVKMTLBufferAllocation* ba = acquireObject();
+    if (!_mtlBuffers[ba->_poolIndex].allocationCount++) {
+        [ba->_mtlBuffer setPurgeableState: MTLPurgeableStateNonVolatile];
+    }
+    return ba;
+}
+
+MVKMTLBufferAllocation* MVKMTLBufferAllocationPool::acquireAllocation() {
+    if (_isThreadSafe) {
+        std::lock_guard<std::mutex> lock(_lock);
+        return acquireAllocationUnlocked();
+    } else {
+        return acquireAllocationUnlocked();
+    }
+}
+
+void MVKMTLBufferAllocationPool::returnAllocationUnlocked(MVKMTLBufferAllocation* ba) {
+    if (!--_mtlBuffers[ba->_poolIndex].allocationCount) {
+        [ba->_mtlBuffer setPurgeableState: MTLPurgeableStateVolatile];
+    }
+    returnObject(ba);
+}
+
+void MVKMTLBufferAllocationPool::returnAllocation(MVKMTLBufferAllocation* ba) {
+    if (_isThreadSafe) {
+        std::lock_guard<std::mutex> lock(_lock);
+        returnAllocationUnlocked(ba);
+    } else {
+        returnAllocationUnlocked(ba);
+    }
+}
+
 
 MVKMTLBufferAllocationPool::MVKMTLBufferAllocationPool(MVKDevice* device, NSUInteger allocationLength, bool makeThreadSafe,
                                                        bool isDedicated, MTLStorageMode mtlStorageMode) :
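The new acquireAllocationUnlocked()/returnAllocationUnlocked() pair implements the purgeable-state bookkeeping: the first allocation taken from a backing MTLBuffer switches it to MTLPurgeableStateNonVolatile so its contents cannot be discarded while in use, and returning the last allocation switches it back to MTLPurgeableStateVolatile so the memory may be reclaimed under pressure. The locking acquireAllocation()/returnAllocation() wrappers let thread-safe and non-thread-safe pools share the same implementation.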
@@ -73,14 +106,17 @@ uint32_t MVKMTLBufferAllocationPool::calcMTLBufferAllocationCount() {
 }
 
 MVKMTLBufferAllocationPool::~MVKMTLBufferAllocationPool() {
-    mvkReleaseContainerContents(_mtlBuffers);
+    for (uint32_t bufferIndex = 0; bufferIndex < _mtlBuffers.size(); ++bufferIndex) {
+        [_mtlBuffers[bufferIndex].mtlBuffer release];
+    }
+    _mtlBuffers.clear();
 }
 
 
 #pragma mark -
 #pragma mark MVKMTLBufferAllocator
 
-const MVKMTLBufferAllocation* MVKMTLBufferAllocator::acquireMTLBufferRegion(NSUInteger length) {
+MVKMTLBufferAllocation* MVKMTLBufferAllocator::acquireMTLBufferRegion(NSUInteger length) {
     MVKAssert(length <= _maxAllocationLength, "This MVKMTLBufferAllocator has been configured to dispense MVKMTLBufferRegions no larger than %lu bytes.", (unsigned long)_maxAllocationLength);
 
     // Can't allocate a segment smaller than the minimum MTLBuffer alignment.
@@ -88,12 +124,7 @@ const MVKMTLBufferAllocation* MVKMTLBufferAllocator::acquireMTLBufferRegion(NSUI
 
     // Convert max length to the next power-of-two exponent to use as a lookup
     NSUInteger p2Exp = mvkPowerOfTwoExponent(length);
-    MVKMTLBufferAllocationPool* pRP = _regionPools[p2Exp];
-    const MVKMTLBufferAllocation* region = _isThreadSafe ? pRP->acquireObjectSafely() : pRP->acquireObject();
-    if (region) {
-        [region->_mtlBuffer setPurgeableState: MTLPurgeableStateVolatile];
-    }
-    return region;
+    return _regionPools[p2Exp]->acquireAllocation();
 }
 
 MVKMTLBufferAllocator::MVKMTLBufferAllocator(MVKDevice* device, NSUInteger maxRegionLength, bool makeThreadSafe, bool isDedicated, MTLStorageMode mtlStorageMode) : MVKBaseDeviceObject(device) {
@@ -397,7 +397,7 @@ public:
 protected:
     inline uint8_t* getData() { return _mvkMTLBufferAllocation ? (uint8_t*)_mvkMTLBufferAllocation->getContents() : nullptr; }
 
-    const MVKMTLBufferAllocation* _mvkMTLBufferAllocation = nullptr;
+    MVKMTLBufferAllocation* _mvkMTLBufferAllocation = nullptr;
 };
 
 
@@ -180,7 +180,7 @@ public:
                                VkWriteDescriptorSetInlineUniformBlockEXT* pInlineUniformBlock);
 
     /** Returns an MTLBuffer region allocation. */
-    const MVKMTLBufferAllocation* acquireMTLBufferRegion(NSUInteger length);
+    MVKMTLBufferAllocation* acquireMTLBufferRegion(NSUInteger length);
     /**
      * Returns the Metal argument buffer to which resources are written,
      * or return nil if Metal argument buffers are not being used.
@@ -339,7 +339,7 @@ void MVKDescriptorSet::read(const VkCopyDescriptorSet* pDescriptorCopy,
     }
 }
 
-const MVKMTLBufferAllocation* MVKDescriptorSet::acquireMTLBufferRegion(NSUInteger length) {
+MVKMTLBufferAllocation* MVKDescriptorSet::acquireMTLBufferRegion(NSUInteger length) {
     return _pool->_inlineBlockMTLBufferAllocator.acquireMTLBufferRegion(length);
 }
 
@@ -223,9 +223,7 @@ id<MTLBuffer> MVKTimestampQueryPool::getResultBuffer(MVKCommandEncoder* cmdEncod
     const MVKMTLBufferAllocation* tempBuff = cmdEncoder->getTempMTLBuffer(queryCount * _queryElementCount * sizeof(uint64_t));
     void* pBuffData = tempBuff->getContents();
     size_t size = queryCount * _queryElementCount * sizeof(uint64_t);
-    mlock(pBuffData, size);
     memcpy(pBuffData, &_timestamps[firstQuery], size);
-    munlock(pBuffData, size);
     offset = tempBuff->_offset;
     return tempBuff->_mtlBuffer;
 }
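With residency now guaranteed by the pool's purgeable-state tracking, the mlock()/munlock() bracketing around these staging memcpy() calls is no longer needed, here as well as in MVKCmdUpdateBuffer::encode() and MVKCommandEncoder::copyToTempMTLBufferAllocation() above.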