Merge pull request #1323 from billhollings/tmp-buff-alloc
Enhancements and fixes to temporary MTLBuffer allocation.
Commit 79ba1d8e1e
@@ -38,11 +38,11 @@ public:
    /** Returns the Vulkan API opaque object controlling this object. */
    MVKVulkanAPIObject* getVulkanAPIObject() override { return nullptr; }

-   /** Returns a new command instance. */
-   T* newObject() override { return new T(); }
-
    MVKCommandTypePool(bool isPooling = true) : MVKObjectPool<T>(isPooling) {}

protected:
+   T* newObject() override { return new T(); }
+
};
@@ -372,6 +372,9 @@ public:
    /** Get a temporary MTLBuffer that will be returned to a pool after the command buffer is finished. */
    const MVKMTLBufferAllocation* getTempMTLBuffer(NSUInteger length, bool isPrivate = false, bool isDedicated = false);

+   /** Copy the bytes to a temporary MTLBuffer that will be returned to a pool after the command buffer is finished. */
+   const MVKMTLBufferAllocation* copyToTempMTLBufferAllocation(const void* bytes, NSUInteger length, bool isDedicated = false);
+
    /** Returns the command encoding pool. */
    MVKCommandEncodingPool* getCommandEncodingPool();
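As a rough illustration of the pattern behind these declarations, here is a self-contained C++ sketch of copying transient data into a pooled temporary allocation instead of creating a fresh buffer for each use. TempAllocation, copyToTemp, and the free-list handling are hypothetical stand-ins, not MoltenVK types; a real implementation wraps a MTLBuffer region.

// Hypothetical stand-ins; not MoltenVK API.
#include <cstddef>
#include <cstring>
#include <vector>

struct TempAllocation {
    std::vector<unsigned char> storage;              // backing bytes
    void* contents() { return storage.data(); }      // analogous role to getContents()
};

// Draw an allocation from the free list (or create one), then copy the caller's bytes into it.
TempAllocation* copyToTemp(std::vector<TempAllocation*>& freeList, const void* bytes, std::size_t length) {
    TempAllocation* alloc;
    if (freeList.empty()) {
        alloc = new TempAllocation();
    } else {
        alloc = freeList.back();
        freeList.pop_back();
    }
    alloc->storage.resize(length);
    std::memcpy(alloc->contents(), bytes, length);
    return alloc;    // pushed back onto freeList only after the work that reads it has finished
}

The timing promised by the header comments, returning the allocation only after the command buffer finishes, is handled with a completion handler in the .mm hunk further below.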
@@ -470,7 +473,6 @@ protected:
    void finishQueries();
    void setSubpass(MVKCommand* passCmd, VkSubpassContents subpassContents, uint32_t subpassIndex);
    void clearRenderArea();
-   const MVKMTLBufferAllocation* copyToTempMTLBufferAllocation(const void* bytes, NSUInteger length);
    NSString* getMTLRenderCommandEncoderName();

    VkSubpassContents _subpassContents;
@@ -647,15 +647,10 @@ void MVKCommandEncoder::setComputeBytes(id<MTLComputeCommandEncoder> mtlEncoder,
    }
}

+// Return the MTLBuffer allocation to the pool once the command buffer is done with it
const MVKMTLBufferAllocation* MVKCommandEncoder::getTempMTLBuffer(NSUInteger length, bool isPrivate, bool isDedicated) {
    const MVKMTLBufferAllocation* mtlBuffAlloc = getCommandEncodingPool()->acquireMTLBufferAllocation(length, isPrivate, isDedicated);
-   MVKMTLBufferAllocationPool* pool = mtlBuffAlloc->getPool();
-
-   // Return the MTLBuffer allocation to the pool once the command buffer is done with it
-   [_mtlCmdBuffer addCompletedHandler: ^(id<MTLCommandBuffer> mcb) {
-       pool->returnObjectSafely((MVKMTLBufferAllocation*)mtlBuffAlloc);
-   }];
-
+   [_mtlCmdBuffer addCompletedHandler: ^(id<MTLCommandBuffer> mcb) { mtlBuffAlloc->returnToPool(); }];
    return mtlBuffAlloc;
}
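A minimal plain-C++ model of the deferred-return pattern in this hunk, assuming a fake command buffer that runs registered handlers when it completes; the names are illustrative only, not MoltenVK or Metal API.

// Illustrative only: FakeCommandBuffer stands in for a MTLCommandBuffer and its
// completed handlers; Pool and PooledAlloc stand in for the MoltenVK pool classes.
#include <functional>
#include <utility>
#include <vector>

struct PooledAlloc;

struct Pool {
    std::vector<PooledAlloc*> freeList;
    void returnAllocation(PooledAlloc* a) { freeList.push_back(a); }
};

struct PooledAlloc {
    Pool* pool;
    void returnToPool() { pool->returnAllocation(this); }    // knows its own pool, like returnToPool() above
};

struct FakeCommandBuffer {
    std::vector<std::function<void()>> completedHandlers;
    void addCompletedHandler(std::function<void()> h) { completedHandlers.push_back(std::move(h)); }
    void finish() { for (auto& h : completedHandlers) h(); } // simulates command-buffer completion
};

// Acquire, schedule the return, keep using the allocation until the command buffer finishes.
void useTempAllocation(Pool& pool, FakeCommandBuffer& cmdBuf) {
    PooledAlloc* alloc = new PooledAlloc{&pool};             // a real pool would recycle, not new
    cmdBuf.addCompletedHandler([alloc] { alloc->returnToPool(); });
}

Capturing only the allocation, which already knows its pool, is what lets the handler shrink to the one-liner in the new code.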
@@ -664,8 +659,8 @@ MVKCommandEncodingPool* MVKCommandEncoder::getCommandEncodingPool() {
}

// Copies the specified bytes into a temporary allocation within a pooled MTLBuffer, and returns the MTLBuffer allocation.
-const MVKMTLBufferAllocation* MVKCommandEncoder::copyToTempMTLBufferAllocation(const void* bytes, NSUInteger length) {
-   const MVKMTLBufferAllocation* mtlBuffAlloc = getTempMTLBuffer(length);
+const MVKMTLBufferAllocation* MVKCommandEncoder::copyToTempMTLBufferAllocation(const void* bytes, NSUInteger length, bool isDedicated) {
+   const MVKMTLBufferAllocation* mtlBuffAlloc = getTempMTLBuffer(length, false, isDedicated);
    void* pBuffData = mtlBuffAlloc->getContents();
    mlock(pBuffData, length);
    memcpy(pBuffData, bytes, length);
@@ -51,7 +51,7 @@ public:
    MVKMTLBufferAllocationPool* getPool() const { return _pool; }

    /** Returns this object back to the pool that created it. */
-   void returnToPool();
+   void returnToPool() const;

    /** Constructs this instance with the specified pool as its origin. */
    MVKMTLBufferAllocation(MVKMTLBufferAllocationPool* pool,
@@ -75,23 +75,26 @@ protected:
 * To return a MVKMTLBufferAllocation retrieved from this pool, back to this pool,
 * call the returnToPool() function on the MVKMTLBufferAllocation instance.
 */
-class MVKMTLBufferAllocationPool : public MVKObjectPool<MVKMTLBufferAllocation> {
+class MVKMTLBufferAllocationPool : public MVKObjectPool<MVKMTLBufferAllocation>, public MVKDeviceTrackingMixin {

public:

    /** Returns the Vulkan API opaque object controlling this object. */
    MVKVulkanAPIObject* getVulkanAPIObject() override { return _device->getVulkanAPIObject(); };

-   /** Returns a new MVKMTLBufferAllocation instance. */
-   MVKMTLBufferAllocation* newObject() override;
-
    /** Configures this instance to dispense MVKMTLBufferAllocation instances of the specified size. */
-   MVKMTLBufferAllocationPool(MVKDevice* device, NSUInteger allocationLength, MTLStorageMode mtlStorageMode, bool isDedicated);
+   MVKMTLBufferAllocationPool(MVKDevice* device, NSUInteger allocationLength, bool makeThreadSafe,
+                              bool isDedicated, MTLStorageMode mtlStorageMode);

    ~MVKMTLBufferAllocationPool() override;

protected:
-   uint32_t calcMTLBufferAllocationCount();
+   friend class MVKMTLBufferAllocation;
+
+   MVKBaseObject* getBaseObject() override { return this; };
+   MVKMTLBufferAllocation* newObject() override;
+   void returnAllocation(MVKMTLBufferAllocation* ba) { _isThreadSafe ? returnObjectSafely(ba) : returnObject(ba); }
+   uint32_t calcMTLBufferAllocationCount();
    void addMTLBuffer();

    NSUInteger _nextOffset;
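The returnAllocation() one-liner above picks a locked or an unlocked return path depending on how the pool was configured. A generic standalone sketch of that idea, with illustrative names and a std::mutex standing in for whatever locking MVKObjectPool actually uses:

// Illustrative generic pool; not the MoltenVK implementation.
#include <mutex>
#include <vector>

template <typename T>
class OptionallyLockedPool {
public:
    explicit OptionallyLockedPool(bool makeThreadSafe) : _isThreadSafe(makeThreadSafe) {}

    // Unsynchronized return: valid only when the pool is used from a single thread.
    void returnObject(T* obj) { _free.push_back(obj); }

    // Synchronized return: safe to call from completion handlers on other threads.
    void returnObjectSafely(T* obj) {
        std::lock_guard<std::mutex> lock(_lock);
        _free.push_back(obj);
    }

    // Pick the cheapest correct path based on how the pool was configured.
    void returnAllocation(T* obj) { _isThreadSafe ? returnObjectSafely(obj) : returnObject(obj); }

private:
    std::vector<T*> _free;
    std::mutex _lock;
    bool _isThreadSafe;
};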
@@ -99,7 +102,7 @@ protected:
    NSUInteger _mtlBufferLength;
    MTLStorageMode _mtlStorageMode;
    MVKSmallVector<id<MTLBuffer>, 64> _mtlBuffers;
-   MVKDevice* _device;
+   bool _isThreadSafe;
};
@@ -138,14 +141,15 @@ public:
     * next power-of-two value that is at least as big as the specified maximum size.
     * If makeThreadSafe is true, a lock will be applied when an allocation is acquired.
     */
-   MVKMTLBufferAllocator(MVKDevice* device, NSUInteger maxRegionLength, bool makeThreadSafe = false, bool isDedicated = false, MTLStorageMode mtlStorageMode = MTLStorageModeShared);
+   MVKMTLBufferAllocator(MVKDevice* device, NSUInteger maxRegionLength, bool makeThreadSafe = false,
+                         bool isDedicated = false, MTLStorageMode mtlStorageMode = MTLStorageModeShared);

    ~MVKMTLBufferAllocator() override;

protected:
    MVKSmallVector<MVKMTLBufferAllocationPool*, 32> _regionPools;
    NSUInteger _maxAllocationLength;
-   bool _makeThreadSafe;
+   bool _isThreadSafe;

};
@@ -24,7 +24,7 @@

MVKVulkanAPIObject* MVKMTLBufferAllocation::getVulkanAPIObject() { return _pool->getVulkanAPIObject(); };

-void MVKMTLBufferAllocation::returnToPool() { _pool->returnObjectSafely(this); }
+void MVKMTLBufferAllocation::returnToPool() const { _pool->returnAllocation((MVKMTLBufferAllocation*)this); }


#pragma mark -
@@ -50,10 +50,13 @@ void MVKMTLBufferAllocationPool::addMTLBuffer() {
}


-MVKMTLBufferAllocationPool::MVKMTLBufferAllocationPool(MVKDevice* device, NSUInteger allocationLength, MTLStorageMode mtlStorageMode, bool isDedicated)
-   : MVKObjectPool<MVKMTLBufferAllocation>(true) {
-   _device = device;
+MVKMTLBufferAllocationPool::MVKMTLBufferAllocationPool(MVKDevice* device, NSUInteger allocationLength, bool makeThreadSafe,
+                                                       bool isDedicated, MTLStorageMode mtlStorageMode) :
+   MVKObjectPool<MVKMTLBufferAllocation>(true),
+   MVKDeviceTrackingMixin(device) {
+
    _allocationLength = allocationLength;
+   _isThreadSafe = makeThreadSafe;
    _mtlBufferLength = _allocationLength * (isDedicated ? 1 : calcMTLBufferAllocationCount());
    _mtlStorageMode = mtlStorageMode;
    _nextOffset = _mtlBufferLength;     // Force a MTLBuffer to be added on first access
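The _mtlBufferLength line above sizes the backing MTLBuffer: one allocation per buffer when the pool is dedicated, many suballocations per buffer otherwise. A sketch of that arithmetic, where allocationsPerBuffer() is a made-up heuristic standing in for calcMTLBufferAllocationCount():

// Illustrative arithmetic only; the capacity heuristic is assumed, not MoltenVK's.
#include <cstddef>

// Assumed heuristic: pack allocations into roughly 256 KB backing buffers (allocationLength > 0).
std::size_t allocationsPerBuffer(std::size_t allocationLength) {
    const std::size_t targetBufferSize = 256 * 1024;
    std::size_t count = targetBufferSize / allocationLength;
    return count > 0 ? count : 1;
}

// Mirrors: _mtlBufferLength = _allocationLength * (isDedicated ? 1 : calcMTLBufferAllocationCount());
std::size_t backingBufferLength(std::size_t allocationLength, bool isDedicated) {
    return allocationLength * (isDedicated ? 1 : allocationsPerBuffer(allocationLength));
}

Setting _nextOffset to the full buffer length in the constructor then guarantees the first acquisition sees no remaining space and triggers addMTLBuffer(), per the inline comment above.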
@@ -80,10 +83,13 @@ MVKMTLBufferAllocationPool::~MVKMTLBufferAllocationPool() {
const MVKMTLBufferAllocation* MVKMTLBufferAllocator::acquireMTLBufferRegion(NSUInteger length) {
    MVKAssert(length <= _maxAllocationLength, "This MVKMTLBufferAllocator has been configured to dispense MVKMTLBufferRegions no larger than %lu bytes.", (unsigned long)_maxAllocationLength);

+   // Can't allocate a segment smaller than the minimum MTLBuffer alignment.
+   length = std::max<NSUInteger>(length, _device->_pMetalFeatures->mtlBufferAlignment);
+
    // Convert max length to the next power-of-two exponent to use as a lookup
    NSUInteger p2Exp = mvkPowerOfTwoExponent(length);
    MVKMTLBufferAllocationPool* pRP = _regionPools[p2Exp];
-   const MVKMTLBufferAllocation* region = _makeThreadSafe ? pRP->acquireObjectSafely() : pRP->acquireObject();
+   const MVKMTLBufferAllocation* region = _isThreadSafe ? pRP->acquireObjectSafely() : pRP->acquireObject();
    if (region) {
        [region->_mtlBuffer setPurgeableState: MTLPurgeableStateVolatile];
    }
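A standalone sketch of the lookup above: clamp the requested length to the minimum MTLBuffer alignment, then index a vector of pools by power-of-two exponent. powerOfTwoExponent() here approximates mvkPowerOfTwoExponent() as "smallest e with (1 << e) >= length", matching the "next power-of-two value that is at least as big" wording in the header comment; the real helper may differ in edge cases.

// Standalone approximation; not MoltenVK's mvkPowerOfTwoExponent().
#include <cstddef>
#include <cstdint>

// Smallest e such that (1 << e) >= length; returns 0 for length <= 1.
uint32_t powerOfTwoExponent(std::size_t length) {
    uint32_t e = 0;
    while ((std::size_t(1) << e) < length) { e++; }
    return e;
}

// Clamp to the minimum alignment, then map the request to a pool bucket index.
std::size_t poolIndexFor(std::size_t length, std::size_t minAlignment) {
    if (length < minAlignment) { length = minAlignment; }
    return powerOfTwoExponent(length);
}

With one pool per exponent, and allocLen doubling each iteration in the allocator constructor that follows, every request of a given size class is served from, and returned to, the same fixed-size pool.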
@@ -91,8 +97,8 @@ const MVKMTLBufferAllocation* MVKMTLBufferAllocator::acquireMTLBufferRegion(NSUInteger length) {
}

MVKMTLBufferAllocator::MVKMTLBufferAllocator(MVKDevice* device, NSUInteger maxRegionLength, bool makeThreadSafe, bool isDedicated, MTLStorageMode mtlStorageMode) : MVKBaseDeviceObject(device) {
-   _maxAllocationLength = maxRegionLength;
-   _makeThreadSafe = makeThreadSafe;
+   _maxAllocationLength = std::max<NSUInteger>(maxRegionLength, _device->_pMetalFeatures->mtlBufferAlignment);
+   _isThreadSafe = makeThreadSafe;

    // Convert max length to the next power-of-two exponent
    NSUInteger maxP2Exp = mvkPowerOfTwoExponent(_maxAllocationLength);
@@ -101,7 +107,7 @@ MVKMTLBufferAllocator::MVKMTLBufferAllocator(MVKDevice* device, NSUInteger maxRegionLength, bool makeThreadSafe, bool isDedicated, MTLStorageMode mtlStorageMode) : MVKBaseDeviceObject(device) {
    _regionPools.reserve(maxP2Exp + 1);
    NSUInteger allocLen = 1;
    for (uint32_t p2Exp = 0; p2Exp <= maxP2Exp; p2Exp++) {
-       _regionPools.push_back(new MVKMTLBufferAllocationPool(device, allocLen, mtlStorageMode, isDedicated));
+       _regionPools.push_back(new MVKMTLBufferAllocationPool(device, allocLen, makeThreadSafe, isDedicated, mtlStorageMode));
        allocLen <<= 1;
    }
}
@@ -892,6 +892,7 @@ public:

protected:
+   MVKBaseObject* getBaseObject() override { return this; };

};
@@ -929,25 +930,23 @@ protected:
/** Manages a pool of instances of a particular object type that requires an MVKDevice during construction. */
template <class T>
-class MVKDeviceObjectPool : public MVKObjectPool<T> {
+class MVKDeviceObjectPool : public MVKObjectPool<T>, public MVKDeviceTrackingMixin {

public:

    /** Returns the Vulkan API opaque object controlling this object. */
    MVKVulkanAPIObject* getVulkanAPIObject() override { return _device; };

-   /** Returns a new instance. */
-   T* newObject() override { return new T(_device); }
-
    /**
     * Configures this instance for the device, and either use pooling, or not, depending
     * on the value of isPooling, which defaults to true if not indicated explicitly.
     */
-   MVKDeviceObjectPool(MVKDevice* device, bool isPooling = true) : MVKObjectPool<T>(isPooling), _device(device) {}
+   MVKDeviceObjectPool(MVKDevice* device, bool isPooling = true) : MVKObjectPool<T>(isPooling), MVKDeviceTrackingMixin(device) {}

protected:
-   MVKDevice* _device;
+   T* newObject() override { return new T(_device); }
+   MVKBaseObject* getBaseObject() override { return this; };

};
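The last two hunks fold the per-pool MVKDevice* member into a shared device-tracking base class. A compilable sketch of that multiple-inheritance arrangement, using simplified stand-in names (ObjectPool, DeviceTrackingMixin, DeviceObjectPool) rather than the MoltenVK classes:

// Simplified stand-ins; not the MoltenVK class hierarchy.
template <typename T>
class ObjectPool {
public:
    explicit ObjectPool(bool isPooling) : _isPooling(isPooling) {}
    virtual ~ObjectPool() {}
protected:
    virtual T* newObject() = 0;      // subclasses decide how pooled instances are constructed
    bool _isPooling;
};

struct Device { /* stand-in for MVKDevice */ };

class DeviceTrackingMixin {
public:
    explicit DeviceTrackingMixin(Device* device) : _device(device) {}
    virtual ~DeviceTrackingMixin() {}
protected:
    Device* _device;                 // shared by every class that mixes this in
};

template <typename T>
class DeviceObjectPool : public ObjectPool<T>, public DeviceTrackingMixin {
public:
    DeviceObjectPool(Device* device, bool isPooling = true)
        : ObjectPool<T>(isPooling), DeviceTrackingMixin(device) {}
protected:
    T* newObject() override { return new T(_device); }   // construct with the tracked device
};

// Example use (Widget is any type constructible from Device*):
// struct Widget { explicit Widget(Device*) {} };
// Device dev; DeviceObjectPool<Widget> pool(&dev);

Keeping newObject() in the protected section, here and in MVKCommandTypePool above, keeps instance creation an internal detail of the pool rather than part of its public interface.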