mvk_vector

aerofly 2018-12-09 16:34:30 +01:00
parent ffb4406388
commit f2bac60be4
15 changed files with 1129 additions and 56 deletions

View File

@@ -20,8 +20,7 @@
 #include "MVKCommand.h"
 #include "MVKMTLResourceBindings.h"
-#include <vector>
+#include "MVKVector.h"
 #import <Metal/Metal.h>
 class MVKDevice;
@@ -44,7 +43,7 @@ public:
 MVKCmdBindVertexBuffers(MVKCommandTypePool<MVKCmdBindVertexBuffers>* pool);
 protected:
-std::vector<MVKMTLBufferBinding> _bindings;
+MVKVector<MVKMTLBufferBinding> _bindings;
 };

View File

@@ -19,8 +19,8 @@
 #pragma once
 #include "MVKCommand.h"
+#include "MVKVector.h"
 #include <vector>
 class MVKCommandBuffer;
 class MVKPipeline;
 class MVKPipelineLayout;
@@ -101,8 +101,8 @@ public:
 private:
 VkPipelineBindPoint _pipelineBindPoint;
 MVKPipelineLayout* _pipelineLayout;
-std::vector<MVKDescriptorSet*> _descriptorSets;
-std::vector<uint32_t> _dynamicOffsets;
+MVKVector<MVKDescriptorSet*> _descriptorSets;
+MVKVector<uint32_t> _dynamicOffsets;
 uint32_t _firstSet;
 };
@@ -128,7 +128,7 @@ private:
 MVKPipelineLayout* _pipelineLayout;
 VkShaderStageFlags _stageFlags;
 uint32_t _offset;
-std::vector<char> _pushConstants;
+MVKVector<char> _pushConstants;
 };

View File

@@ -159,8 +159,13 @@ void MVKCmdPushConstants::setContent(VkPipelineLayout layout,
 _stageFlags = stageFlags;
 _offset = offset;
-_pushConstants.resize(size);
-copy_n((char*)pValues, size, _pushConstants.begin());
+_pushConstants.clear();
+for( uint32_t i=0; i<size; ++i)
+{
+_pushConstants.push_back( reinterpret_cast<const char*>( pValues)[i] );
+}
+//_pushConstants.resize(size);
+//copy_n((char*)pValues, size, _pushConstants.begin());
 }
 void MVKCmdPushConstants::encode(MVKCommandEncoder* cmdEncoder) {

View File

@@ -19,6 +19,7 @@
 #pragma once
 #include "MVKCommand.h"
+#include "MVKVector.h"
 #include <vector>
 #import <Metal/Metal.h>
@@ -47,7 +48,7 @@ private:
 VkSubpassContents _contents;
 MVKRenderPass* _renderPass;
 MVKFramebuffer* _framebuffer;
-std::vector<VkClearValue> _clearValues;
+MVKVector<VkClearValue> _clearValues;
 };
@@ -114,7 +115,7 @@ public:
 private:
 uint32_t _firstViewport;
-std::vector<MTLViewport> _mtlViewports;
+MVKVector<MTLViewport> _mtlViewports;
 };
@@ -133,7 +134,7 @@ public:
 private:
 uint32_t _firstScissor;
-std::vector<MTLScissorRect> _mtlScissors;
+MVKVector<MTLScissorRect> _mtlScissors;
 };

View File

@@ -23,6 +23,7 @@
 #include "MVKCommandEncoderState.h"
 #include "MVKMTLBufferAllocation.h"
 #include "MVKCmdPipeline.h"
+#include "MVKVector.h"
 #include <vector>
 #include <unordered_map>
@@ -239,7 +240,7 @@ public:
 MVKRenderPass* renderPass,
 MVKFramebuffer* framebuffer,
 VkRect2D& renderArea,
-std::vector<VkClearValue>* clearValues);
+MVKVector<VkClearValue>* clearValues);
 /** Begins the next render subpass. */
 void beginNextSubpass(VkSubpassContents renderpassContents);

View File

@@ -219,13 +219,19 @@ void MVKCommandEncoder::beginRenderpass(VkSubpassContents subpassContents,
 MVKRenderPass* renderPass,
 MVKFramebuffer* framebuffer,
 VkRect2D& renderArea,
-vector<VkClearValue>* clearValues) {
+MVKVector<VkClearValue>* clearValues) {
 _renderPass = renderPass;
 _framebuffer = framebuffer;
 _renderArea = renderArea;
 _isRenderingEntireAttachment = (mvkVkOffset2DsAreEqual(_renderArea.offset, {0,0}) &&
 mvkVkExtent2DsAreEqual(_renderArea.extent, _framebuffer->getExtent2D()));
-_clearValues.assign(clearValues->begin(), clearValues->end());
+_clearValues.clear();
+for( auto cv : *clearValues )
+{
+_clearValues.push_back( cv );
+}
+//_clearValues.assign(clearValues->begin(), clearValues->end());
 setSubpass(subpassContents, 0);
 }

View File

@@ -18,9 +18,9 @@
 #pragma once
 #include "MVKMTLResourceBindings.h"
 #include "MVKCommandResourceFactory.h"
-#include <vector>
+#include "MVKVector.h"
 class MVKCommandEncoder;
 class MVKOcclusionQueryPool;
@@ -135,7 +135,7 @@ public:
 * The isSettingDynamically indicates that the scissor is being changed dynamically,
 * which is only allowed if the pipeline was created as VK_DYNAMIC_STATE_SCISSOR.
 */
-void setViewports(std::vector<MTLViewport> mtlViewports,
+void setViewports(const MVKVector<MTLViewport> &mtlViewports,
 uint32_t firstViewport,
 bool isSettingDynamically);
@@ -147,7 +147,7 @@ protected:
 void encodeImpl() override;
 void resetImpl() override;
-std::vector<MTLViewport> _mtlViewports;
+MVKVector<MTLViewport> _mtlViewports;
 };
@@ -164,7 +164,7 @@ public:
 * The isSettingDynamically indicates that the scissor is being changed dynamically,
 * which is only allowed if the pipeline was created as VK_DYNAMIC_STATE_SCISSOR.
 */
-void setScissors(std::vector<MTLScissorRect> mtlScissors,
+void setScissors(const MVKVector<MTLScissorRect> &mtlScissors,
 uint32_t firstScissor,
 bool isSettingDynamically);
@@ -176,7 +176,7 @@ protected:
 void encodeImpl() override;
 void resetImpl() override;
-std::vector<MTLScissorRect> _mtlScissors;
+MVKVector<MTLScissorRect> _mtlScissors;
 };
@@ -189,7 +189,7 @@ class MVKPushConstantsCommandEncoderState : public MVKCommandEncoderState {
 public:
 /** Sets the specified push constants. */
-void setPushConstants(uint32_t offset, std::vector<char>& pushConstants);
+void setPushConstants(uint32_t offset, MVKVector<char>& pushConstants);
 /** Sets the index of the Metal buffer used to hold the push constants. */
 void setMTLBufferIndex(uint32_t mtlBufferIndex);
@@ -203,7 +203,7 @@ protected:
 void encodeImpl() override;
 void resetImpl() override;
-std::vector<char> _pushConstants;
+MVKVector<char> _pushConstants;
 VkShaderStageFlagBits _shaderStage;
 uint32_t _mtlBufferIndex = 0;
 };
@@ -348,15 +348,15 @@ protected:
 // Template function that marks both the vector and all binding elements in the vector as dirty.
 template<class T>
-void markDirty(std::vector<T>& bindings, bool& bindingsDirtyFlag) {
+void markDirty(T& bindings, bool& bindingsDirtyFlag) {
 for (auto& b : bindings) { b.isDirty = true; }
 bindingsDirtyFlag = true;
 }
 // Template function that updates an existing binding or adds a new binding to a vector
 // of bindings, and marks the binding, the vector, and this instance as dirty
-template<class T>
-void bind(const T& b, std::vector<T>& bindings, bool& bindingsDirtyFlag) {
+template<class T, class U>
+void bind(const T& b, U& bindings, bool& bindingsDirtyFlag) {
 if ( !b.mtlResource ) { return; }
@@ -365,7 +365,7 @@ protected:
 bindingsDirtyFlag = true;
 db.isDirty = true;
-for (auto iter = bindings.begin(), end = bindings.end(); iter != end; iter++) {
+for (auto iter = bindings.begin(), end = bindings.end(); iter != end; ++iter) {
 if( iter->index == db.index ) {
 *iter = db;
 return;
@@ -377,7 +377,7 @@ protected:
 // Template function that executes a lambda expression on each dirty element of
 // a vector of bindings, and marks the bindings and the vector as no longer dirty.
 template<class T>
-void encodeBinding(std::vector<T>& bindings,
+void encodeBinding(MVKVector<T>& bindings,
 bool& bindingsDirtyFlag,
 std::function<void(MVKCommandEncoder* cmdEncoder, T& b)> mtlOperation) {
 if (bindingsDirtyFlag) {
@@ -451,12 +451,12 @@ protected:
 void resetImpl() override;
 void markDirty() override;
-std::vector<MVKMTLBufferBinding> _vertexBufferBindings;
-std::vector<MVKMTLBufferBinding> _fragmentBufferBindings;
-std::vector<MVKMTLTextureBinding> _vertexTextureBindings;
-std::vector<MVKMTLTextureBinding> _fragmentTextureBindings;
-std::vector<MVKMTLSamplerStateBinding> _vertexSamplerStateBindings;
-std::vector<MVKMTLSamplerStateBinding> _fragmentSamplerStateBindings;
+MVKVector<MVKMTLBufferBinding> _vertexBufferBindings;
+MVKVector<MVKMTLBufferBinding> _fragmentBufferBindings;
+MVKVector<MVKMTLTextureBinding> _vertexTextureBindings;
+MVKVector<MVKMTLTextureBinding> _fragmentTextureBindings;
+MVKVector<MVKMTLSamplerStateBinding> _vertexSamplerStateBindings;
+MVKVector<MVKMTLSamplerStateBinding> _fragmentSamplerStateBindings;
 MVKMTLBufferBinding _vertexAuxBufferBinding;
 MVKMTLBufferBinding _fragmentAuxBufferBinding;
@@ -499,9 +499,9 @@ protected:
 void resetImpl() override;
 void markDirty() override;
-std::vector<MVKMTLBufferBinding> _bufferBindings;
-std::vector<MVKMTLTextureBinding> _textureBindings;
-std::vector<MVKMTLSamplerStateBinding> _samplerStateBindings;
+MVKVector<MVKMTLBufferBinding> _bufferBindings;
+MVKVector<MVKMTLTextureBinding> _textureBindings;
+MVKVector<MVKMTLSamplerStateBinding> _samplerStateBindings;
 MVKMTLBufferBinding _auxBufferBinding;
 bool _areBufferBindingsDirty = false;

View File

@@ -50,7 +50,7 @@ void MVKPipelineCommandEncoderState::resetImpl() {
 #pragma mark -
 #pragma mark MVKViewportCommandEncoderState
-void MVKViewportCommandEncoderState::setViewports(vector<MTLViewport> mtlViewports,
+void MVKViewportCommandEncoderState::setViewports(const MVKVector<MTLViewport> &mtlViewports,
 uint32_t firstViewport,
 bool isSettingDynamically) {
@@ -91,7 +91,7 @@ void MVKViewportCommandEncoderState::resetImpl() {
 #pragma mark -
 #pragma mark MVKScissorCommandEncoderState
-void MVKScissorCommandEncoderState::setScissors(vector<MTLScissorRect> mtlScissors,
+void MVKScissorCommandEncoderState::setScissors(const MVKVector<MTLScissorRect> &mtlScissors,
 uint32_t firstScissor,
 bool isSettingDynamically) {
@@ -113,7 +113,11 @@ void MVKScissorCommandEncoderState::setScissors(vector<MTLScissorRect> mtlScisso
 void MVKScissorCommandEncoderState::encodeImpl() {
 MVKAssert(!_mtlScissors.empty(), "Must specify at least one scissor rect");
-std::vector<MTLScissorRect> clippedScissors(_mtlScissors);
+MVKVector<MTLScissorRect> clippedScissors;
+for ( const auto &scissor : _mtlScissors)
+{
+clippedScissors.emplace_back( scissor );
+}
 std::for_each(clippedScissors.begin(), clippedScissors.end(), [this](MTLScissorRect& scissor) {
 scissor = _cmdEncoder->clipToRenderArea(scissor);
 });
@@ -136,7 +140,7 @@ void MVKScissorCommandEncoderState::resetImpl() {
 #pragma mark -
 #pragma mark MVKPushConstantsCommandEncoderState
-void MVKPushConstantsCommandEncoderState:: setPushConstants(uint32_t offset, vector<char>& pushConstants) {
+void MVKPushConstantsCommandEncoderState:: setPushConstants(uint32_t offset, MVKVector<char>& pushConstants) {
 uint32_t pcCnt = (uint32_t)pushConstants.size();
 mvkEnsureSize(_pushConstants, offset + pcCnt);
 copy(pushConstants.begin(), pushConstants.end(), _pushConstants.begin() + offset);

View File

@@ -20,6 +20,7 @@
 #include "MVKDevice.h"
 #include "MVKImage.h"
+#include "MVKVector.h"
 #include <MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.h>
 #include <unordered_set>
 #include <unordered_map>
@@ -80,7 +81,7 @@ public:
 void bind(MVKCommandEncoder* cmdEncoder,
 MVKDescriptorBinding& descBinding,
 MVKShaderResourceBinding& dslMTLRezIdxOffsets,
-std::vector<uint32_t>& dynamicOffsets,
+MVKVector<uint32_t>& dynamicOffsets,
 uint32_t* pDynamicOffsetIndex);
 /** Encodes this binding layout and the specified descriptor binding on the specified command encoder immediately. */
@@ -131,7 +132,7 @@ public:
 void bindDescriptorSet(MVKCommandEncoder* cmdEncoder,
 MVKDescriptorSet* descSet,
 MVKShaderResourceBinding& dslMTLRezIdxOffsets,
-std::vector<uint32_t>& dynamicOffsets,
+MVKVector<uint32_t>& dynamicOffsets,
 uint32_t* pDynamicOffsetIndex);
@@ -165,7 +166,7 @@ protected:
 friend class MVKPipelineLayout;
 friend class MVKDescriptorSet;
-std::vector<MVKDescriptorSetLayoutBinding> _bindings;
+MVKVector<MVKDescriptorSetLayoutBinding> _bindings;
 MVKShaderResourceBinding _mtlResourceCounts;
 bool _isPushDescriptorLayout : 1;
 };

View File

@@ -80,7 +80,7 @@ MVK_PUBLIC_SYMBOL MVKShaderResourceBinding& MVKShaderResourceBinding::operator+=
 void MVKDescriptorSetLayoutBinding::bind(MVKCommandEncoder* cmdEncoder,
 MVKDescriptorBinding& descBinding,
 MVKShaderResourceBinding& dslMTLRezIdxOffsets,
-vector<uint32_t>& dynamicOffsets,
+MVKVector<uint32_t>& dynamicOffsets,
 uint32_t* pDynamicOffsetIndex) {
 MVKMTLBufferBinding bb;
 MVKMTLTextureBinding tb;
@@ -494,7 +494,7 @@ VkResult MVKDescriptorSetLayoutBinding::initMetalResourceIndexOffsets(MVKShaderS
 void MVKDescriptorSetLayout::bindDescriptorSet(MVKCommandEncoder* cmdEncoder,
 MVKDescriptorSet* descSet,
 MVKShaderResourceBinding& dslMTLRezIdxOffsets,
-vector<uint32_t>& dynamicOffsets,
+MVKVector<uint32_t>& dynamicOffsets,
 uint32_t* pDynamicOffsetIndex) {
 if (_isPushDescriptorLayout) return;

View File

@@ -22,6 +22,7 @@
 #include "MVKDescriptorSet.h"
 #include "MVKShaderModule.h"
 #include "MVKSync.h"
+#include "MVKVector.h"
 #include <MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.h>
 #include <unordered_set>
 #include <vector>
@@ -53,9 +54,9 @@ public:
 /** Binds descriptor sets to a command encoder. */
 void bindDescriptorSets(MVKCommandEncoder* cmdEncoder,
-std::vector<MVKDescriptorSet*>& descriptorSets,
+MVKVector<MVKDescriptorSet*>& descriptorSets,
 uint32_t firstSet,
-std::vector<uint32_t>& dynamicOffsets);
+MVKVector<uint32_t>& dynamicOffsets);
 /** Populates the specified shader converter context. */
 void populateShaderConverterContext(SPIRVToMSLConverterContext& context);
@@ -157,8 +158,8 @@ protected:
 VkPipelineRasterizationStateCreateInfo _rasterInfo;
 VkPipelineDepthStencilStateCreateInfo _depthStencilInfo;
-std::vector<MTLViewport> _mtlViewports;
-std::vector<MTLScissorRect> _mtlScissors;
+MVKVector<MTLViewport> _mtlViewports;
+MVKVector<MTLScissorRect> _mtlScissors;
 id<MTLRenderPipelineState> _mtlPipelineState;
 MTLCullMode _mtlCullMode;

View File

@@ -36,9 +36,9 @@ using namespace std;
 #pragma mark MVKPipelineLayout
 void MVKPipelineLayout::bindDescriptorSets(MVKCommandEncoder* cmdEncoder,
-vector<MVKDescriptorSet*>& descriptorSets,
+MVKVector<MVKDescriptorSet*>& descriptorSets,
 uint32_t firstSet,
-vector<uint32_t>& dynamicOffsets) {
+MVKVector<uint32_t>& dynamicOffsets) {
 uint32_t pDynamicOffsetIndex = 0;
 uint32_t dsCnt = (uint32_t)descriptorSets.size();
@@ -343,6 +343,7 @@ MTLRenderPipelineDescriptor* MVKGraphicsPipeline::getMTLRenderPipelineDescriptor
 }
 }
+_needsFragmentAuxBuffer = false;
 // Fragment shader
 if (mvkAreFlagsEnabled(pSS->stage, VK_SHADER_STAGE_FRAGMENT_BIT)) {
 shaderContext.options.entryPointStage = spv::ExecutionModelFragment;

View File

@@ -22,6 +22,7 @@
 #include "MVKCommandBuffer.h"
 #include "MVKImage.h"
 #include "MVKSync.h"
+#include "MVKVector.h"
 #include <vector>
 #include <mutex>
@@ -169,7 +170,7 @@ protected:
 MVKQueueSubmission* _prev;
 MVKQueueSubmission* _next;
 VkResult _submissionResult;
-std::vector<MVKSemaphore*> _waitSemaphores;
+MVKVector<MVKSemaphore*> _waitSemaphores;
 bool _isAwaitingSemaphores;
 };
@@ -204,8 +205,8 @@ protected:
 void commitActiveMTLCommandBuffer(bool signalCompletion = false);
 void finish();
-std::vector<MVKCommandBuffer*> _cmdBuffers;
-std::vector<MVKSemaphore*> _signalSemaphores;
+MVKVector<MVKCommandBuffer*> _cmdBuffers;
+MVKVector<MVKSemaphore*> _signalSemaphores;
 MVKFence* _fence;
 MVKCommandUse _cmdBuffUse;
 id<MTLCommandBuffer> _activeMTLCommandBuffer;
@@ -227,6 +228,6 @@ public:
 const VkPresentInfoKHR* pPresentInfo);
 protected:
-std::vector<MVKSwapchainImage*> _surfaceImages;
+MVKVector<MVKSwapchainImage*> _surfaceImages;
 };

View File

@@ -0,0 +1,504 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// MVKVector.h - similar to std::vector
//
// 2017/01/26 - th/mb
//
// ---------------------------------------------------------------------------
//
// copyright (C) 2005-2017, Dr. Torsten Hans / Dr. Marc Borchers
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the disclaimer below.
// - Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the disclaimer (as noted below) in the
// documentation and/or other materials provided with the distribution.
// - Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
#ifndef MVK_VECTOR_H
#define MVK_VECTOR_H
#include "MVKVectorAllocator.h"
#include <type_traits>
#include <initializer_list>
#include <utility>
template<class Type, class Allocator = mvk_vector_allocator_with_stack<Type, 8>> class MVKVector
{
Allocator alc;
public:
class iterator
{
const MVKVector *vector;
size_t index;
public:
iterator() = delete;
iterator( const size_t _index, const MVKVector &_vector ) : vector{ &_vector }, index{ _index } { }
iterator &operator=( const iterator &it )
{
vector = it.vector;
index = it.index;
return *this;
}
Type *operator->() const
{
return &vector->alc.ptr[index];
}
operator Type*( ) const
{
return &vector->alc.ptr[index];
}
bool operator==( const iterator &it ) const
{
return ( vector == it.vector ) && ( index == it.index );
}
bool operator!=( const iterator &it ) const
{
return ( vector != it.vector ) || ( index != it.index );
}
iterator& operator++() { ++index; return *this; }
bool is_valid() const { return index < vector->alc.num_elements_used; }
size_t get_position() const { return index; }
};
class reverse_iterator
{
const MVKVector *vector;
size_t index;
public:
reverse_iterator() = delete;
reverse_iterator( const size_t _index, const MVKVector &_vector ) : vector{ &_vector }, index{ _index } { }
reverse_iterator &operator=( const reverse_iterator & ) = delete;
Type *operator->() const
{
return &vector->alc.ptr[index];
}
operator Type*( ) const
{
return &vector->alc.ptr[index];
}
bool operator==( const reverse_iterator &it ) const
{
return vector == it.vector && index == it.index;
}
bool operator!=( const reverse_iterator &it ) const
{
return vector != it.vector || index != it.index;
}
reverse_iterator& operator++() { --index; return *this; }
bool is_valid() const { return index < vector->alc.num_elements_used; }
size_t get_position() const { return index; }
};
private:
size_t vector_GetNextCapacity() const
{
constexpr auto ELEMENTS_FOR_64_BYTES = 64 / sizeof( Type );
constexpr auto MINIMUM_CAPACITY = ELEMENTS_FOR_64_BYTES > 4 ? ELEMENTS_FOR_64_BYTES : 4;
const auto current_capacity = capacity();
//if( current_capacity < 256 )
// return MINIMUM_CAPACITY + 2 * current_capacity;
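// growth policy: roughly 1.5x the current capacity, plus at least one 64-byte cache line worth of elements (minimum 4)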
return MINIMUM_CAPACITY + ( 3 * current_capacity ) / 2;
}
void vector_Allocate( const size_t s )
{
const auto new_reserved_size = ( s > alc.num_elements_used ) ? s : alc.num_elements_used;
alc.allocate( new_reserved_size );
}
void vector_ReAllocate( const size_t s )
{
alc.re_allocate( s );
}
public:
MVKVector()
{
}
MVKVector( const size_t n, const Type t )
{
if( n > 0 )
{
alc.allocate( n );
for( size_t i = 0; i < n; ++i )
{
alc.construct( &alc.ptr[i], t );
}
alc.num_elements_used = n;
}
}
MVKVector( const MVKVector &a )
{
const size_t n = a.size();
if( n > 0 )
{
alc.allocate( n );
for( size_t i = 0; i < n; ++i )
{
alc.construct( &alc.ptr[i], a.alc.ptr[i] );
}
alc.num_elements_used = n;
}
}
MVKVector( MVKVector &&a ) : alc{ std::move( a.alc ) }
{
}
MVKVector( std::initializer_list<Type> vector )
{
if( vector.size() > capacity() )
{
vector_Allocate( vector.size() );
}
// std::initializer_list only exposes const elements, so the std::move below degrades to a copy; it is kept for consistency
for( auto &&element : vector )
{
alc.construct( &alc.ptr[alc.num_elements_used], std::move( element ) );
++alc.num_elements_used;
}
}
~MVKVector()
{
}
MVKVector& operator=( const MVKVector &a )
{
if( this != &a )
{
const auto n = a.alc.num_elements_used;
if( alc.num_elements_used == n )
{
for( size_t i = 0; i < n; ++i )
{
alc.ptr[i] = a.alc.ptr[i];
}
}
else
{
alc.template destruct_all<Type>();
if( n > capacity() )
{
vector_ReAllocate( n );
}
for( size_t i = 0; i < n; ++i )
{
alc.construct( &alc.ptr[i], a.alc.ptr[i] );
}
alc.num_elements_used = n;
}
}
return *this;
}
MVKVector& operator=( MVKVector &&a )
{
alc.swap( a.alc );
return *this;
}
bool operator==( const MVKVector &a ) const
{
if( alc.num_elements_used != a.alc.num_elements_used )
return false;
for( size_t i = 0; i < alc.num_elements_used; ++i )
{
if( alc.ptr[i] != a.alc.ptr[i] )
return false;
}
return true;
}
bool operator!=( const MVKVector &a ) const
{
if( alc.num_elements_used != a.alc.num_elements_used )
return true;
for( size_t i = 0; i < alc.num_elements_used; ++i )
{
if( alc.ptr[i] != a.alc.ptr[i] )
return true;
}
return false;
}
void swap( MVKVector &a )
{
alc.swap( a.alc );
}
void clear()
{
alc.template destruct_all<Type>();
}
void reset()
{
alc.deallocate();
}
iterator begin() const { return iterator( 0, *this ); }
iterator end() const { return iterator( alc.num_elements_used, *this ); }
reverse_iterator rbegin() const { return reverse_iterator( alc.num_elements_used - 1, *this ); }
reverse_iterator rend() const { return reverse_iterator( size_t( -1 ), *this ); }
size_t size() const { return alc.num_elements_used; }
bool empty() const { return alc.num_elements_used == 0; }
Type &at( const size_t i ) const
{
return alc.ptr[i];
}
const Type &operator[]( const size_t i ) const
{
return alc.ptr[i];
}
Type &operator[]( const size_t i )
{
return alc.ptr[i];
}
const Type *data() const
{
return alc.ptr;
}
Type *data()
{
return alc.ptr;
}
size_t capacity() const
{
return alc.get_capacity();
}
const Type &front() const
{
return alc.ptr[0];
}
Type &front()
{
return alc.ptr[0];
}
const Type &back() const
{
return alc.ptr[alc.num_elements_used - 1];
}
Type &back()
{
return alc.ptr[alc.num_elements_used - 1];
}
void pop_back()
{
if( alc.num_elements_used > 0 )
{
--alc.num_elements_used;
alc.destruct( &alc.ptr[alc.num_elements_used] );
}
}
void reserve( const size_t new_size )
{
if( new_size > capacity() )
{
vector_ReAllocate( new_size );
}
}
void assign( const size_t new_size, const Type &t )
{
if( new_size <= capacity() )
{
clear();
}
else
{
vector_Allocate( new_size );
}
for( size_t i = 0; i < new_size; ++i )
{
alc.construct( &alc.ptr[i], t );
}
alc.num_elements_used = new_size;
}
void resize( const size_t new_size, const Type t = { } )
{
if( new_size == alc.num_elements_used )
{
return;
}
if( new_size == 0 )
{
clear();
return;
}
if( new_size > alc.num_elements_used )
{
if( new_size > capacity() )
{
vector_ReAllocate( new_size );
}
while( alc.num_elements_used < new_size )
{
alc.construct( &alc.ptr[alc.num_elements_used], t );
++alc.num_elements_used;
}
}
else
{
//if constexpr( !std::is_trivially_destructible<Type>::value )
{
while( alc.num_elements_used > new_size )
{
--alc.num_elements_used;
alc.destruct( &alc.ptr[alc.num_elements_used] );
}
}
//else
//{
// alc.num_elements_used = new_size;
//}
}
}
// trims the capacity of the vector down to the number of elements currently in use
void shrink_to_fit()
{
alc.shrink_to_fit();
}
void erase( const iterator it )
{
if( it.is_valid() )
{
--alc.num_elements_used;
for( size_t i = it.get_position(); i < alc.num_elements_used; ++i )
{
alc.ptr[i] = std::move( alc.ptr[i + 1] );
}
// this is required for types with a destructor
alc.destruct( &alc.ptr[alc.num_elements_used] );
}
}
// adds t before it and automatically resizes vector if necessary
void insert( const iterator it, Type t )
{
if( !it.is_valid() || alc.num_elements_used == 0 )
{
push_back( std::move( t ) );
}
else
{
if( alc.num_elements_used == capacity() )
vector_ReAllocate( vector_GetNextCapacity() );
// move construct last element
alc.construct( &alc.ptr[alc.num_elements_used], std::move( alc.ptr[alc.num_elements_used - 1] ) );
// move the remaining elements
const size_t it_position = it.get_position();
for( size_t i = alc.num_elements_used - 1; i > it_position; --i )
{
alc.ptr[i] = std::move( alc.ptr[i - 1] );
}
alc.ptr[it_position] = std::move( t );
++alc.num_elements_used;
}
}
void push_back( const Type &t )
{
if( alc.num_elements_used == capacity() )
vector_ReAllocate( vector_GetNextCapacity() );
alc.construct( &alc.ptr[alc.num_elements_used], t );
++alc.num_elements_used;
}
void push_back( Type &&t )
{
if( alc.num_elements_used == capacity() )
vector_ReAllocate( vector_GetNextCapacity() );
alc.construct( &alc.ptr[alc.num_elements_used], std::forward<Type>( t ) );
++alc.num_elements_used;
}
template<class... Args>
Type &emplace_back( Args&&... args )
{
if( alc.num_elements_used == capacity() )
vector_ReAllocate( vector_GetNextCapacity() );
alc.construct( &alc.ptr[alc.num_elements_used], std::forward<Args>( args )... );
++alc.num_elements_used;
return alc.ptr[alc.num_elements_used - 1];
}
};
#endif // MVK_VECTOR_H
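Usage sketch (illustration only, not part of the commit; the helper name is hypothetical). MVKVector mirrors the std::vector operations used in the hunks above, so the member substitutions are largely drop-in; with the default allocator the first 8 elements live inline before the vector spills to the heap:

#include "MVKVector.h"
#include <cstdint>

// hypothetical helper, for illustration only
static uint32_t mvkSumOffsets() {
  MVKVector<uint32_t> offsets;                 // inline storage for up to 8 elements
  for (uint32_t i = 0; i < 16; ++i) {
    offsets.push_back(i * 4);                  // moves to heap storage past 8 elements
  }
  uint32_t total = 0;
  for (auto off : offsets) { total += off; }   // range-for works via MVKVector::iterator
  offsets.clear();                             // destroys elements, keeps the capacity
  return total;
}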

View File

@@ -0,0 +1,549 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// MVKVectorAllocator.h - allocator classes for MVKVector
//
// 2017/01/26 - th/mb
//
// ---------------------------------------------------------------------------
//
// copyright (C) 2005-2017, Dr. Torsten Hans / Dr. Marc Borchers
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the disclaimer below.
// - Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the disclaimer (as noted below) in the
// documentation and/or other materials provided with the distribution.
// - Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
#ifndef MVK_VECTOR_ALLOCATOR_H
#define MVK_VECTOR_ALLOCATOR_H
#include <new>
#include <type_traits>
namespace mvk_memory_allocator
{
inline char *alloc( size_t num_bytes )
{
return new char[num_bytes];
}
inline void free( void *ptr )
{
delete[] (char*)ptr;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// mvk_vector_allocator_default -> always heap-allocated storage for MVKVector
//
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
class mvk_vector_allocator_default final
{
public:
T *ptr;
size_t num_elements_used;
private:
size_t num_elements_reserved;
public:
template<class S, class... Args> typename std::enable_if< !std::is_trivially_constructible<S>::value >::type
construct( S *_ptr, Args&&... _args )
{
new ( _ptr ) S( std::forward<Args>( _args )... );
}
template<class S, class... Args> typename std::enable_if< std::is_trivially_constructible<S>::value >::type
construct( S *_ptr, Args&&... _args )
{
*_ptr = S( std::forward<Args>( _args )... );
}
template<class S> typename std::enable_if< !std::is_trivially_destructible<S>::value >::type
destruct( S *_ptr )
{
_ptr->~S();
}
template<class S> typename std::enable_if< std::is_trivially_destructible<S>::value >::type
destruct( S *_ptr )
{
}
template<class S> typename std::enable_if< !std::is_trivially_destructible<S>::value >::type
destruct_all()
{
for( size_t i = 0; i < num_elements_used; ++i )
{
ptr[i].~S();
}
num_elements_used = 0;
}
template<class S> typename std::enable_if< std::is_trivially_destructible<S>::value >::type
destruct_all()
{
num_elements_used = 0;
}
public:
constexpr mvk_vector_allocator_default() : ptr{ nullptr }, num_elements_used{ 0 }, num_elements_reserved{ 0 }
{
}
mvk_vector_allocator_default( mvk_vector_allocator_default &&a ) : ptr{ a.ptr }, num_elements_used{ a.num_elements_used }, num_elements_reserved{ a.num_elements_reserved }
{
a.ptr = nullptr;
a.num_elements_used = 0;
a.num_elements_reserved = 0;
}
~mvk_vector_allocator_default()
{
deallocate();
}
size_t get_capacity() const
{
return num_elements_reserved;
}
void swap( mvk_vector_allocator_default &a )
{
const auto copy_ptr = a.ptr;
const auto copy_num_elements_used = a.num_elements_used;
const auto copy_num_elements_reserved = a.num_elements_reserved;
a.ptr = ptr;
a.num_elements_used = num_elements_used;
a.num_elements_reserved = num_elements_reserved;
ptr = copy_ptr;
num_elements_used = copy_num_elements_used;
num_elements_reserved = copy_num_elements_reserved;
}
void allocate( const size_t num_elements_to_reserve )
{
deallocate();
ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( num_elements_to_reserve * sizeof( T ) ) );
num_elements_used = 0;
num_elements_reserved = num_elements_to_reserve;
}
void re_allocate( const size_t num_elements_to_reserve )
{
//if constexpr( std::is_trivially_copyable<T>::value )
//{
// ptr = reinterpret_cast< T* >( mvk_memory_allocator::tm_memrealloc( ptr, num_elements_to_reserve * sizeof( T ) );
//}
//else
{
auto *new_ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( num_elements_to_reserve * sizeof( T ) ) );
for( size_t i = 0; i < num_elements_used; ++i )
{
construct( &new_ptr[i], std::move( ptr[i] ) );
destruct( &ptr[i] );
}
//if ( ptr != nullptr )
{
mvk_memory_allocator::free( ptr );
}
ptr = new_ptr;
}
num_elements_reserved = num_elements_to_reserve;
}
void shrink_to_fit()
{
if( num_elements_used == 0 )
{
deallocate();
}
else
{
auto *new_ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( num_elements_used * sizeof( T ) ) );
for( size_t i = 0; i < num_elements_used; ++i )
{
construct( &new_ptr[i], std::move( ptr[i] ) );
destruct( &ptr[i] );
}
mvk_memory_allocator::free( ptr );
ptr = new_ptr;
num_elements_reserved = num_elements_used;
}
}
void deallocate()
{
destruct_all<T>();
mvk_memory_allocator::free( ptr );
ptr = nullptr;
num_elements_reserved = 0;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// mvk_vector_allocator_with_stack -> heap-allocated storage for MVKVector, with inline stack storage for small element counts
//
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T, int N>
class mvk_vector_allocator_with_stack
{
public:
T *ptr;
size_t num_elements_used;
private:
//size_t num_elements_reserved;   // not stored separately: once the data moves to the heap, the reserved element count is kept in the otherwise unused stack buffer below
alignas( alignof( T ) ) unsigned char elements_stack[N * sizeof( T )];
static_assert( N * sizeof( T ) >= sizeof( size_t ), "Bummer, TH's nasty optimization doesn't work" );
void set_num_elements_reserved( const size_t num_elements_reserved )
{
*reinterpret_cast< size_t* >( &elements_stack[0] ) = num_elements_reserved;
}
//
// faster element construction and destruction using type traits
//
public:
//
// element creation and destruction
//
template<class S, class... Args> typename std::enable_if< !std::is_trivially_constructible<S, Args...>::value >::type
construct( S *_ptr, Args&&... _args )
{
new ( _ptr ) S( std::forward<Args>( _args )... );
}
template<class S, class... Args> typename std::enable_if< std::is_trivially_constructible<S, Args...>::value >::type
construct( S *_ptr, Args&&... _args )
{
*_ptr = S( std::forward<Args>( _args )... );
}
template<class S> typename std::enable_if< !std::is_trivially_destructible<S>::value >::type
destruct( S *_ptr )
{
_ptr->~S();
}
template<class S> typename std::enable_if< std::is_trivially_destructible<S>::value >::type
destruct( S *_ptr )
{
}
template<class S> typename std::enable_if< !std::is_trivially_destructible<S>::value >::type
destruct_all()
{
for( size_t i = 0; i < num_elements_used; ++i )
{
ptr[i].~S();
}
num_elements_used = 0;
}
template<class S> typename std::enable_if< std::is_trivially_destructible<S>::value >::type
destruct_all()
{
num_elements_used = 0;
}
template<class S> typename std::enable_if< !std::is_trivially_destructible<S>::value >::type
swap_stack( mvk_vector_allocator_with_stack &a )
{
T stack_copy[N];
for( size_t i = 0; i < num_elements_used; ++i )
{
construct( &stack_copy[i], std::move( ptr[i] ) );
destruct( &ptr[i] );
}
for( size_t i = 0; i < a.num_elements_used; ++i )
{
construct( &ptr[i], std::move( a.ptr[i] ) );
destruct( &a.ptr[i] );   // destroy the moved-from source in a, not the element just constructed
}
for( size_t i = 0; i < num_elements_used; ++i )
{
construct( &a.ptr[i], std::move( stack_copy[i] ) );
destruct( &stack_copy[i] );
}
}
template<class S> typename std::enable_if< std::is_trivially_destructible<S>::value >::type
swap_stack( mvk_vector_allocator_with_stack &a )
{
constexpr int STACK_SIZE = N * sizeof( T );
for( int i = 0; i < STACK_SIZE; ++i )
{
const auto v = elements_stack[i];
elements_stack[i] = a.elements_stack[i];
a.elements_stack[i] = v;
}
}
public:
mvk_vector_allocator_with_stack() : ptr{ reinterpret_cast< T* >( &elements_stack[0] ) }, num_elements_used{ 0 }
{
}
mvk_vector_allocator_with_stack( mvk_vector_allocator_with_stack &&a ) : num_elements_used{ a.num_elements_used }
{
// a is heap-based -> steal its ptr
if( !a.get_data_on_stack() )
{
ptr = a.ptr;
set_num_elements_reserved( a.get_capacity() );
a.ptr = a.get_default_ptr();
}
else
{
ptr = get_default_ptr();
for( size_t i = 0; i < a.num_elements_used; ++i )
{
construct( &ptr[i], std::move( a.ptr[i] ) );
destruct( &a.ptr[i] );
}
}
a.num_elements_used = 0;
}
~mvk_vector_allocator_with_stack()
{
deallocate();
}
size_t get_capacity() const
{
return get_data_on_stack() ? N : *reinterpret_cast< const size_t* >( &elements_stack[0] );
}
constexpr T *get_default_ptr() const
{
return reinterpret_cast< T* >( const_cast< unsigned char * >( &elements_stack[0] ) );
}
bool get_data_on_stack() const
{
return ptr == get_default_ptr();
}
void swap( mvk_vector_allocator_with_stack &a )
{
// both allocators on heap -> easy case
if( !get_data_on_stack() && !a.get_data_on_stack() )
{
auto copy_ptr = ptr;
auto copy_num_elements_reserved = get_capacity();
ptr = a.ptr;
set_num_elements_reserved( a.get_capacity() );
a.ptr = copy_ptr;
a.set_num_elements_reserved( copy_num_elements_reserved );
}
// both allocators on stack -> just switch the stack contents
else if( get_data_on_stack() && a.get_data_on_stack() )
{
swap_stack<T>( a );
}
else if( get_data_on_stack() && !a.get_data_on_stack() )
{
auto copy_ptr = a.ptr;
auto copy_num_elements_reserved = a.get_capacity();
a.ptr = a.get_default_ptr();
for( size_t i = 0; i < num_elements_used; ++i )
{
construct( &a.ptr[i], std::move( ptr[i] ) );
destruct( &ptr[i] );
}
ptr = copy_ptr;
set_num_elements_reserved( copy_num_elements_reserved );
}
else if( !get_data_on_stack() && a.get_data_on_stack() )
{
auto copy_ptr = ptr;
auto copy_num_elements_reserved = get_capacity();
ptr = get_default_ptr();
for( size_t i = 0; i < a.num_elements_used; ++i )
{
construct( &ptr[i], std::move( a.ptr[i] ) );
destruct( &a.ptr[i] );
}
a.ptr = copy_ptr;
a.set_num_elements_reserved( copy_num_elements_reserved );
}
auto copy_num_elements_used = num_elements_used;
num_elements_used = a.num_elements_used;
a.num_elements_used = copy_num_elements_used;
}
//
// reserves storage for num_elements_to_reserve elements; if the request fits into the inline stack buffer, no heap allocation is made
//
void allocate( const size_t num_elements_to_reserve )
{
deallocate();
// check if enough memory on stack space is left
if( num_elements_to_reserve <= N )
{
return;
}
ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( num_elements_to_reserve * sizeof( T ) ) );
num_elements_used = 0;
set_num_elements_reserved( num_elements_to_reserve );
}
//template<class S> typename std::enable_if< !std::is_trivially_copyable<S>::value >::type
void _re_allocate( const size_t num_elements_to_reserve )
{
auto *new_ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( num_elements_to_reserve * sizeof( T ) ) );
for( size_t i = 0; i < num_elements_used; ++i )
{
construct( &new_ptr[i], std::move( ptr[i] ) );
destruct( &ptr[i] );
}
if( ptr != get_default_ptr() )
{
mvk_memory_allocator::free( ptr );
}
ptr = new_ptr;
set_num_elements_reserved( num_elements_to_reserve );
}
//template<class S> typename std::enable_if< std::is_trivially_copyable<S>::value >::type
// _re_allocate( const size_t num_elements_to_reserve )
//{
// const bool data_is_on_stack = get_data_on_stack();
//
// auto *new_ptr = reinterpret_cast< S* >( mvk_memory_allocator::tm_memrealloc( data_is_on_stack ? nullptr : ptr, num_elements_to_reserve * sizeof( S ) ) );
// if( data_is_on_stack )
// {
// for( int i = 0; i < N; ++i )
// {
// new_ptr[i] = ptr[i];
// }
// }
//
// ptr = new_ptr;
// set_num_elements_reserved( num_elements_to_reserve );
//}
void re_allocate( const size_t num_elements_to_reserve )
{
//TM_ASSERT( num_elements_to_reserve > get_capacity() );
if( num_elements_to_reserve > N )
{
_re_allocate( num_elements_to_reserve );
}
}
void shrink_to_fit()
{
// nothing to do if data is on stack already
if( get_data_on_stack() )
return;
// move elements to stack space
if( num_elements_used <= N )
{
const auto num_elements_reserved = get_capacity();
auto *stack_ptr = get_default_ptr();
for( size_t i = 0; i < num_elements_used; ++i )
{
construct( &stack_ptr[i], std::move( ptr[i] ) );
destruct( &ptr[i] );
}
mvk_memory_allocator::free( ptr );
ptr = stack_ptr;
}
else
{
auto *new_ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( num_elements_used * sizeof( T ) ) );
for( size_t i = 0; i < num_elements_used; ++i )
{
construct( &new_ptr[i], std::move( ptr[i] ) );
destruct( &ptr[i] );
}
mvk_memory_allocator::free( ptr );
ptr = new_ptr;
set_num_elements_reserved( num_elements_used );
}
}
void deallocate()
{
destruct_all<T>();
if( !get_data_on_stack() )
{
mvk_memory_allocator::free( ptr );
}
ptr = get_default_ptr();
num_elements_used = 0;
}
};
#endif // MVK_VECTOR_ALLOCATOR_H
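Allocator sketch (illustration only, not part of the commit). mvk_vector_allocator_with_stack keeps small element counts in its inline buffer and, once the data has moved to the heap, reuses those same stack bytes to hold the reserved capacity; mvk_vector_allocator_default always allocates from the heap:

#include "MVKVector.h"

// hypothetical demonstration of the two allocator choices
static void mvkAllocatorSketch() {
  MVKVector<int> small;                        // default: mvk_vector_allocator_with_stack<int, 8>
  small.push_back(1);                          // no heap allocation yet, capacity() == 8
  small.reserve(100);                          // spills to the heap; the reserved count (100)
                                               // is stored in the now-unused stack buffer

  MVKVector<int, mvk_vector_allocator_default<int>> heapOnly;
  heapOnly.resize(4, 0);                       // always heap-allocated, fills four ints with 0
}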