Try to capture only the work queued up to this point in a fence.

This commit is contained in:
Chip Davis 2019-04-25 12:41:36 -05:00
parent 4591cac69b
commit 256ffc6d31
4 changed files with 54 additions and 13 deletions

View File

@ -127,6 +127,9 @@ protected:
void initMTLCommandQueue();
void initGPUCaptureScopes();
void destroyExecQueue();
void unlockQueue();
MVKSemaphoreImpl* addNewEvent(uint32_t submitCount);
void removeEvent(MVKSemaphoreImpl* event);
VkResult submit(MVKQueueSubmission* qSubmit);
MVKQueueFamily* _queueFamily;
@ -139,7 +142,9 @@ protected:
MVKMTLCommandBufferID _nextMTLCmdBuffID;
MVKGPUCaptureScope* _submissionCaptureScope;
MVKGPUCaptureScope* _presentationCaptureScope;
MVKSemaphoreImpl _queueIdleEvent;
std::mutex _activeCountLock;
std::atomic<uint32_t> _activeCount;
std::unordered_set<MVKSemaphoreImpl*> _pendingSubmitDoneEvents;
};

View File

@ -86,7 +86,8 @@ VkResult MVKQueue::submit(MVKQueueSubmission* qSubmit) {
VkResult MVKQueue::submit(uint32_t submitCount, const VkSubmitInfo* pSubmits,
VkFence fence, MVKCommandUse cmdBuffUse) {
VkResult rslt = VK_SUCCESS;
VkResult rslt = VK_SUCCESS;
MVKSemaphoreImpl* submitDoneEvent = addNewEvent(submitCount);
for (uint32_t sIdx = 0; sIdx < submitCount; sIdx++) {
VkResult subRslt = submit(new MVKQueueCommandBufferSubmission(_device, this, &pSubmits[sIdx], cmdBuffUse));
if (rslt == VK_SUCCESS) { rslt = subRslt; }
@ -94,8 +95,10 @@ VkResult MVKQueue::submit(uint32_t submitCount, const VkSubmitInfo* pSubmits,
if (rslt == VK_SUCCESS && fence) {
// Fence must wait for all completion blocks to finish.
dispatch_async(_execQueue ? _execQueue : _fenceQueue, ^{
_queueIdleEvent.wait();
submitDoneEvent->wait();
((MVKFence*)fence)->signal();
this->removeEvent(submitDoneEvent);
delete submitDoneEvent;
});
}
return rslt;
@ -105,6 +108,34 @@ VkResult MVKQueue::submit(const VkPresentInfoKHR* pPresentInfo) {
return submit(new MVKQueuePresentSurfaceSubmission(_device, this, pPresentInfo));
}
// Called when one queue submission finishes: releases one reservation on every
// pending submit-done event, then decrements the queue's active-submission count.
void MVKQueue::unlockQueue() {
lock_guard<mutex> lock(_activeCountLock);
// Release each pending event; release() returning true means that event has no
// reservations left, so stop tracking it here (the waiter that created it is
// responsible for deleting it — see the fence-wait block in submit()).
for (auto iter = _pendingSubmitDoneEvents.begin(), end = _pendingSubmitDoneEvents.end(); iter != end; ) {
if ((*iter)->release()) {
iter = _pendingSubmitDoneEvents.erase(iter);
} else {
++iter;
}
}
--_activeCount;
}
// Creates, registers, and returns a new submit-done event whose reservation count
// equals the current active-submission count plus the submitCount new submissions.
// The event therefore fires only once all work queued up to this point (including
// the new submissions) has completed. Caller owns the returned event and must
// delete it after waiting on it.
MVKSemaphoreImpl* MVKQueue::addNewEvent(uint32_t submitCount) {
lock_guard<mutex> lock(_activeCountLock);
_activeCount += submitCount;
auto* submitDoneEvent = new MVKSemaphoreImpl(true, _activeCount);
_pendingSubmitDoneEvents.insert(submitDoneEvent);
return submitDoneEvent;
}
// Stops tracking an event so unlockQueue() will no longer release it.
// Does not delete the event; a no-op if unlockQueue() already erased it.
void MVKQueue::removeEvent(MVKSemaphoreImpl* event) {
lock_guard<mutex> lock(_activeCountLock);
_pendingSubmitDoneEvents.erase(event);
}
// Create an empty submit struct and fence, submit to queue and wait on fence.
VkResult MVKQueue::waitIdle(MVKCommandUse cmdBuffUse) {
@ -121,9 +152,6 @@ VkResult MVKQueue::waitIdle(MVKCommandUse cmdBuffUse) {
if (rslt != VK_SUCCESS)
return rslt;
// Wait for all completion routines across all submissions to execute.
_queueIdleEvent.wait();
return VK_SUCCESS;
}
@ -133,7 +161,7 @@ VkResult MVKQueue::waitIdle(MVKCommandUse cmdBuffUse) {
#define MVK_DISPATCH_QUEUE_QOS_CLASS QOS_CLASS_USER_INITIATED
MVKQueue::MVKQueue(MVKDevice* device, MVKQueueFamily* queueFamily, uint32_t index, float priority)
: MVKDispatchableDeviceObject(device) {
: MVKDispatchableDeviceObject(device), _activeCount(0) {
_queueFamily = queueFamily;
_index = index;
@ -328,8 +356,6 @@ MVKQueueCommandBufferSubmission::MVKQueueCommandBufferSubmission(MVKDevice* devi
(pSubmit ? pSubmit->waitSemaphoreCount : 0),
(pSubmit ? pSubmit->pWaitSemaphores : nullptr)) {
_queue->_queueIdleEvent.reserve();
// pSubmit can be null if just tracking the fence alone
if (pSubmit) {
uint32_t cbCnt = pSubmit->commandBufferCount;
@ -355,7 +381,7 @@ MVKQueueCommandBufferSubmission::MVKQueueCommandBufferSubmission(MVKDevice* devi
}
MVKQueueCommandBufferSubmission::~MVKQueueCommandBufferSubmission() {
_queue->_queueIdleEvent.release();
_queue->unlockQueue();
}

View File

@ -53,8 +53,9 @@ public:
/**
* Depending on configuration, releases one or all reservations. When all reservations
* have been released, unblocks all waiting threads to continue processing.
* Returns true if no reservations remain outstanding after this call.
*/
void release();
bool release();
/**
* Blocks processing on the current thread until any or all (depending on configuration) outstanding
@ -84,6 +85,9 @@ public:
/**
 * Constructs an instance with the specified number of initial reservations.
 * If waitAll is true, wait() blocks until all reservations are released;
 * otherwise releasing any one reservation unblocks waiters.
 */
MVKSemaphoreImpl(bool waitAll = true, uint32_t reservationCount = 0)
: _shouldWaitAll(waitAll), _reservationCount(reservationCount) {}
/** Destructor. */
~MVKSemaphoreImpl();
private:
bool operator()();

View File

@ -25,9 +25,9 @@ using namespace std;
#pragma mark -
#pragma mark MVKSemaphoreImpl
void MVKSemaphoreImpl::release() {
bool MVKSemaphoreImpl::release() {
lock_guard<mutex> lock(_lock);
if (isClear()) { return; }
if (isClear()) { return true; }
// Either decrement the reservation counter, or clear it altogether
if (_shouldWaitAll) {
@ -37,6 +37,7 @@ void MVKSemaphoreImpl::release() {
}
// If all reservations have been released, unblock all waiting threads
if ( isClear() ) { _blocker.notify_all(); }
return isClear();
}
void MVKSemaphoreImpl::reserve() {
@ -64,6 +65,11 @@ bool MVKSemaphoreImpl::wait(uint64_t timeout, bool reserveAgain) {
return isDone;
}
MVKSemaphoreImpl::~MVKSemaphoreImpl() {
// Acquire and release the lock so destruction cannot proceed while another
// thread still holds _lock inside release()/wait() — NOTE(review): this
// narrows, but does not fully eliminate, the destroy-while-in-use race;
// callers must still ensure no new operations start after the final wait.
lock_guard<mutex> lock(_lock);
}
#pragma mark -
#pragma mark MVKSemaphore