Merge branch 'main' of https://github.com/KhronosGroup/MoltenVK into visionOS

This commit is contained in:
Filip Lundgren 2023-06-23 10:29:57 -04:00
commit d8b5a7df55
78 changed files with 3755 additions and 2908 deletions

View File

@ -15,9 +15,9 @@ jobs:
build:
strategy:
matrix:
xcode: [ "14.2" ]
platform: [ "macos", "maccat", "ios", "tvos" ]
os: [ "macos-latest" ]
xcode: [ "14.3" ]
platform: [ "all", "macos", "ios" ]
os: [ "macos-13" ]
upload_artifacts: [ true ]
# additional specific configurations
include:
@ -41,7 +41,7 @@ jobs:
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Select Xcode version
run: sudo xcode-select -switch "${XCODE_DEV_PATH}"
@ -54,12 +54,12 @@ jobs:
echo "${XCODE_VERSION}"
XCODE_VERSION="$(echo "${XCODE_VERSION}" | tr '\t\r\n ' '_')"
echo "${XCODE_VERSION}"
echo "::set-output name=XCODE_VERSION::${XCODE_VERSION}"
echo "XCODE_VERSION=${XCODE_VERSION}" >> $GITHUB_OUTPUT
- name: Cache Dependencies
id: cache-dependencies
if: success() && !(github.event_name == 'push' && contains(github.ref, 'refs/tags/')) # never cache dependencies for pushed tags
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: |
External/build
@ -94,11 +94,46 @@ jobs:
- name: Tar Artifacts
if: success() && matrix.upload_artifacts == true
# See: https://github.com/actions/upload-artifact#maintaining-file-permissions-and-case-sensitive-files
run: tar -cvf "${{ matrix.platform }}.tar" Package/Release/
# To reduce artifact size, don't include any stand-alone shader converter binaries.
run: |
rm -rf Package/Release/MoltenVKShaderConverter
tar -C Package -s/Release/MoltenVK/ -cvf "MoltenVK-${{ matrix.platform }}.tar" Release/
- name: Upload Artifacts
if: success() && matrix.upload_artifacts == true
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.platform }}
path: "${{ matrix.platform }}.tar"
name: "MoltenVK-${{ matrix.platform }}"
path: "MoltenVK-${{ matrix.platform }}.tar"
release:
name: 'Release'
needs: [build]
runs-on: ubuntu-latest
if: ${{ startsWith(github.ref, 'refs/tags/') }}
permissions:
contents: write
steps:
- name: Download Artifacts
uses: actions/download-artifact@v3
- name: Create Release
uses: ncipollo/release-action@v1
with:
# Allow updating existing releases if the workflow is triggered by release creation or re-run.
allowUpdates: true
# When the release is updated, delete the existing artifacts for replacement.
removeArtifacts: true
# If a release is being replaced, omit updating the name and body.
# Allows for creating a release and filling these in before the workflow runs.
# Then, the workflow will populate the release with the artifacts.
omitNameDuringUpdate: true
omitBodyDuringUpdate: true
# Upload all MoltenVK CI artifacts as release assets.
artifacts: "MoltenVK*/*"
artifactErrorsFailBuild: true

View File

@ -97,18 +97,22 @@ extern "C" {
# define MVK_MACOS_APPLE_SILICON (MVK_MACOS && MVK_APPLE_SILICON)
#endif
/** Building with Xcode versions. */
/** Building with Xcode versions. iOS version also covers tvOS. */
#ifndef MVK_XCODE_14_3
# define MVK_XCODE_14_3 ((__MAC_OS_X_VERSION_MAX_ALLOWED >= 130300) || \
(__IPHONE_OS_VERSION_MAX_ALLOWED >= 160400))
#endif
#ifndef MVK_XCODE_14
# define MVK_XCODE_14 ((__MAC_OS_X_VERSION_MAX_ALLOWED >= 130000) || \
(__IPHONE_OS_VERSION_MAX_ALLOWED >= 160000)) // Also covers tvOS
(__IPHONE_OS_VERSION_MAX_ALLOWED >= 160000))
#endif
#ifndef MVK_XCODE_13
# define MVK_XCODE_13 ((__MAC_OS_X_VERSION_MAX_ALLOWED >= 120000) || \
(__IPHONE_OS_VERSION_MAX_ALLOWED >= 150000)) // Also covers tvOS
(__IPHONE_OS_VERSION_MAX_ALLOWED >= 150000))
#endif
#ifndef MVK_XCODE_12
# define MVK_XCODE_12 ((__MAC_OS_X_VERSION_MAX_ALLOWED >= 101600) || \
(__IPHONE_OS_VERSION_MAX_ALLOWED >= 140000)) // Also covers tvOS
# define MVK_XCODE_12 ((__MAC_OS_X_VERSION_MAX_ALLOWED >= 110000) || \
(__IPHONE_OS_VERSION_MAX_ALLOWED >= 140000))
#endif
/** Directive to identify public symbols. */

View File

@ -167,3 +167,9 @@ uint64_t mvkGetUsedMemorySize();
/** Returns the size of a page of host memory on this platform. */
uint64_t mvkGetHostMemoryPageSize();
#pragma mark -
#pragma mark Threading
/** Returns the number of available CPU cores. */
uint32_t mvkGetAvaliableCPUCores();

View File

@ -138,3 +138,10 @@ uint64_t mvkGetUsedMemorySize() {
uint64_t mvkGetHostMemoryPageSize() { return sysconf(_SC_PAGESIZE); }
#pragma mark -
#pragma mark Threading
/** Returns the number of available CPU cores. (The function name retains its legacy spelling for API compatibility.) */
uint32_t mvkGetAvaliableCPUCores() {
	NSProcessInfo* procInfo = NSProcessInfo.processInfo;
	return (uint32_t)procInfo.activeProcessorCount;
}

View File

@ -21,6 +21,8 @@
#include <string>
#include <streambuf>
#include <vector>
#include <cxxabi.h>
namespace mvk {
@ -59,6 +61,26 @@ namespace mvk {
return varName;
}
/**
 * Returns a string containing the ordinal suffix ("st", "nd", "rd", or "th")
 * for a numeric value (e.g. 1 -> "st", 12 -> "th", 23 -> "rd", 111 -> "th").
 * Negative values use the suffix of their absolute value.
 */
inline const char* getOrdinalSuffix(int64_t val) {
	static const char* suffixes[] = {"th", "st", "nd", "rd"};
	auto ord = val % 100;
	if (ord < 0) { ord = -ord; }	// C++ % yields a negative remainder for negative val; guard against indexing suffixes[] out of bounds.
	if (ord > 10 && ord < 20) { return suffixes[0]; }	// All teens end in th.
	ord = ord % 10;
	if (ord > 3) { return suffixes[0]; }	// 0 and 4-9 end in th.
	return suffixes[ord];
}
/**
 * Returns the demangled name of the dynamic C++ type of the object pointed to by pObj.
 * If demangling fails, the raw mangled name is returned instead.
 * pObj must not be null (typeid(*pObj) is evaluated on it).
 */
template<typename T>
inline std::string getTypeName(const T* pObj) {
	int status = 0;
	const char* mangledName = typeid(*pObj).name();
	char* demangledName = abi::__cxa_demangle(mangledName, nullptr, nullptr, &status);
	// __cxa_demangle returns null (with non-zero status) on failure; constructing
	// a std::string from a null pointer is undefined behavior, so fall back to the mangled name.
	std::string tName = (status == 0 && demangledName) ? demangledName : mangledName;
	free(demangledName);	// free(nullptr) is a safe no-op; the demangled buffer is malloc'd by the ABI.
	return tName;
}
#pragma mark -
#pragma mark Streams

View File

@ -43,13 +43,29 @@
self.view.wantsLayer = YES; // Back the view with a layer created by the makeBackingLayer method.
const char* argv[] = { "cube" };
// Enabling this will sync the rendering loop with the natural display link (60 fps).
// Disabling this will allow the rendering loop to run flat out, limited only by the rendering speed.
bool useDisplayLink = true;
VkPresentModeKHR vkPresentMode = useDisplayLink ? VK_PRESENT_MODE_FIFO_KHR : VK_PRESENT_MODE_IMMEDIATE_KHR;
char vkPresentModeStr[64];
sprintf(vkPresentModeStr, "%d", vkPresentMode);
const char* argv[] = { "cube", "--present_mode", vkPresentModeStr };
int argc = sizeof(argv)/sizeof(char*);
demo_main(&demo, self.view.layer, argc, argv);
CVDisplayLinkCreateWithActiveCGDisplays(&_displayLink);
CVDisplayLinkSetOutputCallback(_displayLink, &DisplayLinkCallback, &demo);
CVDisplayLinkStart(_displayLink);
if (useDisplayLink) {
CVDisplayLinkCreateWithActiveCGDisplays(&_displayLink);
CVDisplayLinkSetOutputCallback(_displayLink, &DisplayLinkCallback, &demo);
CVDisplayLinkStart(_displayLink);
} else {
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
while(true) {
demo_draw(&demo);
}
});
}
}

View File

@ -25,7 +25,7 @@ Table of Contents
- [Install *MoltenVK* replacing the Vulkan SDK `libMoltenVK.dylib`](#install_vksdk)
- [Build and Runtime Requirements](#requirements)
- [Interacting with the **MoltenVK** Runtime](#interaction)
- [MoltenVK `VK_MVK_moltenvk` Extension](#moltenvk_extension)
- [MoltenVK Header Files](#moltenvk_headers)
- [Configuring MoltenVK](#moltenvk_config)
- [*Metal Shading Language* Shaders](#shaders)
- [Troubleshooting Shader Conversion](#spv_vs_msl)
@ -330,6 +330,7 @@ In addition to core *Vulkan* functionality, **MoltenVK** also supports the foll
- `VK_KHR_maintenance1`
- `VK_KHR_maintenance2`
- `VK_KHR_maintenance3`
- `VK_KHR_map_memory2`
- `VK_KHR_multiview`
- `VK_KHR_portability_subset`
- `VK_KHR_push_descriptor`
@ -372,6 +373,7 @@ In addition to core *Vulkan* functionality, **MoltenVK** also supports the foll
- `VK_EXT_scalar_block_layout`
- `VK_EXT_separate_stencil_usage`
- `VK_EXT_shader_atomic_float` *(requires Metal 3.0)*
- `VK_EXT_shader_demote_to_helper_invocation` *(requires Metal Shading Language 2.3)*
- `VK_EXT_shader_stencil_export` *(requires Mac GPU family 2 or iOS GPU family 5)*
- `VK_EXT_shader_viewport_index_layer`
- `VK_EXT_subgroup_size_control` *(requires Metal 2.1 on Mac or Metal 2.2 and Apple family 4 on iOS)*
@ -383,7 +385,6 @@ In addition to core *Vulkan* functionality, **MoltenVK** also supports the foll
- `VK_EXT_texture_compression_astc_hdr` *(iOS and macOS, requires family 6 (A13) or better Apple GPU)*
- `VK_MVK_ios_surface` *(iOS) (Obsolete. Use `VK_EXT_metal_surface` instead.)*
- `VK_MVK_macos_surface` *(macOS) (Obsolete. Use `VK_EXT_metal_surface` instead.)*
- `VK_MVK_moltenvk`
- `VK_AMD_gpu_shader_half_float`
- `VK_AMD_negative_viewport_height`
- `VK_AMD_shader_image_load_store_lod` *(requires Apple GPU)*
@ -424,14 +425,12 @@ extension in the *Vulkan* specification for more information about the use of th
`VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR` flag.
<a name="moltenvk_headers"></a>
### MoltenVK Header Files
<a name="moltenvk_extension"></a>
### MoltenVK `VK_MVK_moltenvk` Extension
The `VK_MVK_moltenvk` *Vulkan* extension provides functionality beyond standard *Vulkan* functionality,
to support configuration options and behaviour that is specific to the **MoltenVK** implementation of *Vulkan*
functionality. You can access this functionality by including the `vk_mvk_moltenvk.h` header file in your code.
The `vk_mvk_moltenvk.h` file also includes the API documentation for this `VK_MVK_moltenvk` extension.
**MoltenVK** provides additional functionality beyond standard *Vulkan* functionality,
to support configuration options and query behaviour that is specific to the **MoltenVK**
implementation of *Vulkan* functionality.
The following API header files are included in the **MoltenVK** package, each of which
can be included in your application source code as follows:
@ -440,37 +439,33 @@ can be included in your application source code as follows:
where `HEADER_FILE` is one of the following:
- `vk_mvk_moltenvk.h` - Contains declarations and documentation for the functions, structures,
and enumerations that define the behaviour of the `VK_MVK_moltenvk` *Vulkan* extension.
- `mvk_vulkan.h` - This is a convenience header file that loads the `vulkan.h` header file
with the appropriate **MoltenVK** *Vulkan* platform surface extension automatically
enabled for *macOS*, *iOS*, or *tvOS*. Use this header file in place of the `vulkan.h`
header file, where access to a **MoltenVK** platform surface extension is required.
The `mvk_vulkan.h` header file automatically enables the `VK_USE_PLATFORM_METAL_EXT`
build setting and `VK_EXT_metal_surface` *Vulkan* extension.
- `mvk_vulkan.h` - This is a convenience header file that loads the `<vulkan/vulkan.h>` header file
with platform settings to enable the appropriate platform-surface and portability extensions.
- `mvk_datatypes.h` - Contains helpful functions for converting between *Vulkan* and *Metal* data types.
You do not need to use this functionality to use **MoltenVK**, as **MoltenVK** converts between
*Vulkan* and *Metal* datatypes automatically (using the functions declared in this header).
These functions are exposed in this header for your own purposes such as interacting with *Metal*
directly, or simply logging data values.
>***Note:*** Except for `vkGetMoltenVKConfigurationMVK()` and `vkSetMoltenVKConfigurationMVK()`,
the functions in `vk_mvk_moltenvk.h` are not supported by the *Vulkan SDK Loader and Layers*
framework. The opaque Vulkan objects used by the functions in `vk_mvk_moltenvk.h` (`VkPhysicalDevice`,
`VkShaderModule`, `VKImage`, ...), must have been retrieved directly from **MoltenVK**, and not through
the *Vulkan SDK Loader and Layers* framework. The *Vulkan SDK Loader and Layers* framework often changes
these opaque objects, and passing them from a higher layer directly to **MoltenVK** will result in
undefined behaviour.
- `mvk_config.h` - Contains public functions and structures to allow you to configure and
optimize **MoltenVK** for your particular application runtime requirements. For more
information, see the [Configuring MoltenVK](#moltenvk_config) section just below.
- `mvk_private_api.h` - Contains functions and structures to allow you to query **MoltenVK**
performance activity, and Metal capabilities on the platform. _**NOTE:**_ THESE
FUNCTIONS ARE NOT SUPPORTED BY THE *Vulkan Loader and Layers*, AND CAN ONLY BE USED
WHEN **MoltenVK** IS LINKED DIRECTLY TO YOUR APPLICATION.
- `mvk_datatypes.h` - Contains helpful functions for converting between *Vulkan* and *Metal*
data types. You do not need to use this functionality to use **MoltenVK**, as **MoltenVK**
converts between *Vulkan* and *Metal* datatypes automatically (using the functions declared
in this header). These functions are exposed in this header as a convenience for your own
purposes such as interacting with *Metal* directly, or simply logging data values.
<a name="moltenvk_config"></a>
### Configuring MoltenVK
The `VK_MVK_moltenvk` *Vulkan* extension provides the ability to configure and optimize
**MoltenVK** for your particular application runtime requirements.
The `mvk_config.h` header file provides the ability to configure and optimize **MoltenVK**
for your particular application runtime requirements. This can be helpful in situations
where *Metal* behavior is different than *Vulkan* behavior, and the results or performance
you receive can depend on how **MoltenVK** works around those differences, which, in turn, may
depend on how you are using *Vulkan*. Different apps might benefit differently in this handling.
There are three mechanisms for setting the values of the **MoltenVK** configuration parameters:
@ -487,9 +482,9 @@ by a corresponding environment variable, or if the environment variable is not s
by a corresponding build setting at the time **MoltenVK** is compiled. The environment
variable and build setting for each configuration parameter share the same name.
See the description of the `MVKConfiguration` structure parameters and corresponding environment
variables in the `vk_mvk_moltenvk.h` file for more info about configuring and optimizing
**MoltenVK** at runtime or build time.
See the description of the `MVKConfiguration` structure parameters and corresponding
environment variables in the `mvk_config.h` file for more info about configuring and
optimizing **MoltenVK** at runtime or build time.
<a name="shaders"></a>

View File

@ -13,6 +13,62 @@ Copyright (c) 2015-2023 [The Brenwill Workshop Ltd.](http://www.brenwill.com)
MoltenVK 1.2.5
--------------
Released TBD
- Add support for extensions:
- `VK_EXT_shader_demote_to_helper_invocation`
- Ensure non-dispatch compute commands don't interfere with compute encoding state used by dispatch commands.
- Support `VK_PRESENT_MODE_IMMEDIATE_KHR` if `VkPresentTimeGOOGLE::desiredPresentTime` is zero.
- Support maximizing the concurrent executing compilation tasks via `MVKConfiguration::shouldMaximizeConcurrentCompilation`
- Add support for `VK_PRESENT_MODE_IMMEDIATE_KHR` to macOS Cube demo.
- Log more info about SPIR-V to MSL conversion errors.
MoltenVK 1.2.4
--------------
Released 2023/05/23
- Add support for extensions:
- `VK_KHR_map_memory2`
- Deprecate the obsolete and non-standard `VK_MVK_moltenvk` extension.
- Add `mvk_config.h`, `mvk_private_api.h`, and `mvk_deprecated_api.h`, and deprecate `vk_mvk_moltenvk.h`.
- Support BC compression on iOS/tvOS where available (iOS/tvOS 16.4 and above and supported by the GPU).
- Support separate depth and stencil attachments during dynamic rendering.
- Fix memory leak when waiting on timeline semaphores.
- Fix race condition when updating values in `VkPastPresentationTimingGOOGLE`,
and ensure swapchain image presented time is always populated when requested.
- Report error, but do not fail on request for timestamp query pool that is too
large for `MTLCounterSampleBuffer`, and fall back to emulation via CPU timestamps.
- Ensure shaders that use `PhysicalStorageBufferAddresses` encode the use of the associated `MTLBuffer`.
- Disable pipeline cache compression prior to macOS 10.15 and iOS/tvOS 13.0.
- Accumulate render stages when a resource is used by multiple descriptor bindings.
- Respect the bind point supplied to `vkCmdBindDescriptorSets()` / `vkCmdPushDescriptorSets()`.
- Check if shader compiled before adding it to a pipeline, to avoid Metal validation error.
- Identify each unsupported device feature flag that the app attempts to enable.
- Populate `deviceUUID` from `MTLDevice` location and peer group info,
which should be unique, and constant across OS reboots.
- Populate `deviceLUID` from `MTLDevice.registryID`.
- Avoid Metal validation warning when depth component swizzled away.
- Fix depth clamp and texture swizzle feature discovery on simulator builds.
- Advertise `VK_KHR_depth_stencil_resolve` extension on all devices.
- For correctness, set `VkPhysicalDeviceLimits::lineWidthGranularity` to `1`.
- Improve GitHub CI production of binary artifacts on submission and release.
- Update dependency libraries to match _Vulkan SDK 1.3.250_.
- Update to latest SPIRV-Cross:
- MSL: Fix for argument buffer index compare when invalid.
- MSL: Fix dref lod workaround on combined texture/samplers.
- MSL: Do not override variable name with v_ identifier.
- MSL: Use name_id consistently in argument declaration.
- MSL: Don't hit array copy path for pointer to array.
- MSL: Use templated array type when emitting BDA to arrays.
MoltenVK 1.2.3
--------------

View File

@ -1 +1 @@
3550a54ae01b295c40ce972d951b420b388b9401
55750be7886a96008b964e75e1eb4a5a6c369a2a

View File

@ -1 +1 @@
65ad768d8603671fc1085fe115019e72a595ced8
9e61870ecbd32514113b467e0a0c46f60ed222c7

View File

@ -1 +1 @@
f196c8d3cafcaf7e628b7b76a799c940999ee984
695887a994ef9cc00a7aa3f9c00b31a56ea79534

View File

@ -1 +1 @@
14e5a04e70057972eef8a40df422e30a3b70e4b5
d1517d64cfca91f573af1bf7341dc3a5113349c0

View File

@ -105,7 +105,7 @@
2FEA0AA324902F9F00EEF3AD /* MVKRenderPass.mm in Sources */ = {isa = PBXBuildFile; fileRef = A94FB7941C7DFB4800632CA3 /* MVKRenderPass.mm */; };
2FEA0AA424902F9F00EEF3AD /* MVKCmdTransfer.mm in Sources */ = {isa = PBXBuildFile; fileRef = A94FB76D1C7DFB4800632CA3 /* MVKCmdTransfer.mm */; };
2FEA0AA524902F9F00EEF3AD /* MVKCmdQueries.mm in Sources */ = {isa = PBXBuildFile; fileRef = A94FB7711C7DFB4800632CA3 /* MVKCmdQueries.mm */; };
2FEA0AA624902F9F00EEF3AD /* vk_mvk_moltenvk.mm in Sources */ = {isa = PBXBuildFile; fileRef = A94FB7AC1C7DFB4800632CA3 /* vk_mvk_moltenvk.mm */; };
2FEA0AA624902F9F00EEF3AD /* mvk_api.mm in Sources */ = {isa = PBXBuildFile; fileRef = A94FB7AC1C7DFB4800632CA3 /* mvk_api.mm */; };
2FEA0AA724902F9F00EEF3AD /* MVKSwapchain.mm in Sources */ = {isa = PBXBuildFile; fileRef = A94FB79C1C7DFB4800632CA3 /* MVKSwapchain.mm */; };
2FEA0AA824902F9F00EEF3AD /* MVKCommandEncoderState.mm in Sources */ = {isa = PBXBuildFile; fileRef = A95B7D681D3EE486003183D3 /* MVKCommandEncoderState.mm */; };
2FEA0AA924902F9F00EEF3AD /* MVKGPUCapture.mm in Sources */ = {isa = PBXBuildFile; fileRef = A93E83342121F0C8001FEBD4 /* MVKGPUCapture.mm */; };
@ -261,8 +261,8 @@
A94FB81F1C7DFB4800632CA3 /* MVKLayers.mm in Sources */ = {isa = PBXBuildFile; fileRef = A94FB7A11C7DFB4800632CA3 /* MVKLayers.mm */; };
A94FB82A1C7DFB4800632CA3 /* mvk_datatypes.mm in Sources */ = {isa = PBXBuildFile; fileRef = A94FB7A91C7DFB4800632CA3 /* mvk_datatypes.mm */; };
A94FB82B1C7DFB4800632CA3 /* mvk_datatypes.mm in Sources */ = {isa = PBXBuildFile; fileRef = A94FB7A91C7DFB4800632CA3 /* mvk_datatypes.mm */; };
A94FB8301C7DFB4800632CA3 /* vk_mvk_moltenvk.mm in Sources */ = {isa = PBXBuildFile; fileRef = A94FB7AC1C7DFB4800632CA3 /* vk_mvk_moltenvk.mm */; };
A94FB8311C7DFB4800632CA3 /* vk_mvk_moltenvk.mm in Sources */ = {isa = PBXBuildFile; fileRef = A94FB7AC1C7DFB4800632CA3 /* vk_mvk_moltenvk.mm */; };
A94FB8301C7DFB4800632CA3 /* mvk_api.mm in Sources */ = {isa = PBXBuildFile; fileRef = A94FB7AC1C7DFB4800632CA3 /* mvk_api.mm */; };
A94FB8311C7DFB4800632CA3 /* mvk_api.mm in Sources */ = {isa = PBXBuildFile; fileRef = A94FB7AC1C7DFB4800632CA3 /* mvk_api.mm */; };
A94FB8321C7DFB4800632CA3 /* vulkan.mm in Sources */ = {isa = PBXBuildFile; fileRef = A94FB7AD1C7DFB4800632CA3 /* vulkan.mm */; };
A94FB8331C7DFB4800632CA3 /* vulkan.mm in Sources */ = {isa = PBXBuildFile; fileRef = A94FB7AD1C7DFB4800632CA3 /* vulkan.mm */; };
A95870F81C90D29F009EB096 /* MVKCommandResourceFactory.h in Headers */ = {isa = PBXBuildFile; fileRef = A95870F61C90D29F009EB096 /* MVKCommandResourceFactory.h */; };
@ -318,6 +318,15 @@
A9A5E9C725C0822700E9085E /* MVKEnvironment.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A9A5E9C525C0822700E9085E /* MVKEnvironment.cpp */; };
A9A5E9C825C0822700E9085E /* MVKEnvironment.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A9A5E9C525C0822700E9085E /* MVKEnvironment.cpp */; };
A9A5E9C925C0822700E9085E /* MVKEnvironment.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A9A5E9C525C0822700E9085E /* MVKEnvironment.cpp */; };
A9B3D73B29F9B3B100745CD4 /* mvk_deprecated_api.h in Headers */ = {isa = PBXBuildFile; fileRef = A9B3D73829F9B3B100745CD4 /* mvk_deprecated_api.h */; };
A9B3D73C29F9B3B100745CD4 /* mvk_deprecated_api.h in Headers */ = {isa = PBXBuildFile; fileRef = A9B3D73829F9B3B100745CD4 /* mvk_deprecated_api.h */; };
A9B3D73D29F9B3B100745CD4 /* mvk_deprecated_api.h in Headers */ = {isa = PBXBuildFile; fileRef = A9B3D73829F9B3B100745CD4 /* mvk_deprecated_api.h */; };
A9B3D73E29F9B3B100745CD4 /* mvk_config.h in Headers */ = {isa = PBXBuildFile; fileRef = A9B3D73A29F9B3B100745CD4 /* mvk_config.h */; };
A9B3D73F29F9B3B100745CD4 /* mvk_config.h in Headers */ = {isa = PBXBuildFile; fileRef = A9B3D73A29F9B3B100745CD4 /* mvk_config.h */; };
A9B3D74029F9B3B100745CD4 /* mvk_config.h in Headers */ = {isa = PBXBuildFile; fileRef = A9B3D73A29F9B3B100745CD4 /* mvk_config.h */; };
A9B3D74229F9BDEE00745CD4 /* mvk_private_api.h in Headers */ = {isa = PBXBuildFile; fileRef = A9B3D74129F9BDEE00745CD4 /* mvk_private_api.h */; };
A9B3D74329F9BDEE00745CD4 /* mvk_private_api.h in Headers */ = {isa = PBXBuildFile; fileRef = A9B3D74129F9BDEE00745CD4 /* mvk_private_api.h */; };
A9B3D74429F9BDEE00745CD4 /* mvk_private_api.h in Headers */ = {isa = PBXBuildFile; fileRef = A9B3D74129F9BDEE00745CD4 /* mvk_private_api.h */; };
A9B51BD7225E986A00AC74D2 /* MVKOSExtensions.mm in Sources */ = {isa = PBXBuildFile; fileRef = A9B51BD2225E986A00AC74D2 /* MVKOSExtensions.mm */; };
A9B51BD8225E986A00AC74D2 /* MVKOSExtensions.mm in Sources */ = {isa = PBXBuildFile; fileRef = A9B51BD2225E986A00AC74D2 /* MVKOSExtensions.mm */; };
A9B51BD9225E986A00AC74D2 /* MVKOSExtensions.h in Headers */ = {isa = PBXBuildFile; fileRef = A9B51BD6225E986A00AC74D2 /* MVKOSExtensions.h */; };
@ -627,7 +636,7 @@
A94FB7A01C7DFB4800632CA3 /* MVKLayers.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MVKLayers.h; sourceTree = "<group>"; };
A94FB7A11C7DFB4800632CA3 /* MVKLayers.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MVKLayers.mm; sourceTree = "<group>"; };
A94FB7A91C7DFB4800632CA3 /* mvk_datatypes.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = mvk_datatypes.mm; sourceTree = "<group>"; };
A94FB7AC1C7DFB4800632CA3 /* vk_mvk_moltenvk.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = vk_mvk_moltenvk.mm; sourceTree = "<group>"; };
A94FB7AC1C7DFB4800632CA3 /* mvk_api.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = mvk_api.mm; sourceTree = "<group>"; };
A94FB7AD1C7DFB4800632CA3 /* vulkan.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = vulkan.mm; sourceTree = "<group>"; };
A95870F61C90D29F009EB096 /* MVKCommandResourceFactory.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MVKCommandResourceFactory.h; sourceTree = "<group>"; };
A95870F71C90D29F009EB096 /* MVKCommandResourceFactory.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MVKCommandResourceFactory.mm; sourceTree = "<group>"; };
@ -655,6 +664,9 @@
A99C91012295FAC500A061DA /* MVKVulkanAPIObject.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MVKVulkanAPIObject.h; sourceTree = "<group>"; };
A9A5E9C525C0822700E9085E /* MVKEnvironment.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MVKEnvironment.cpp; sourceTree = "<group>"; };
A9AD67C72054DD6C00ED3C08 /* vulkan */ = {isa = PBXFileReference; lastKnownFileType = folder; path = vulkan; sourceTree = "<group>"; };
A9B3D73829F9B3B100745CD4 /* mvk_deprecated_api.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mvk_deprecated_api.h; sourceTree = "<group>"; };
A9B3D73A29F9B3B100745CD4 /* mvk_config.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mvk_config.h; sourceTree = "<group>"; };
A9B3D74129F9BDEE00745CD4 /* mvk_private_api.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mvk_private_api.h; sourceTree = "<group>"; };
A9B51BD2225E986A00AC74D2 /* MVKOSExtensions.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MVKOSExtensions.mm; sourceTree = "<group>"; };
A9B51BD6225E986A00AC74D2 /* MVKOSExtensions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MVKOSExtensions.h; sourceTree = "<group>"; };
A9B8EE0A1A98D796009C5A02 /* libMoltenVK.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libMoltenVK.a; sourceTree = BUILT_PRODUCTS_DIR; };
@ -704,7 +716,10 @@
A94FB7651C7DFB4800632CA3 /* API */ = {
isa = PBXGroup;
children = (
A9B3D73A29F9B3B100745CD4 /* mvk_config.h */,
A94FB7671C7DFB4800632CA3 /* mvk_datatypes.h */,
A9B3D73829F9B3B100745CD4 /* mvk_deprecated_api.h */,
A9B3D74129F9BDEE00745CD4 /* mvk_private_api.h */,
A948BB7E1E51642700DE59F2 /* mvk_vulkan.h */,
A94FB7691C7DFB4800632CA3 /* vk_mvk_moltenvk.h */,
);
@ -811,7 +826,7 @@
children = (
A9CEAAD1227378D400FAF779 /* mvk_datatypes.hpp */,
A94FB7A91C7DFB4800632CA3 /* mvk_datatypes.mm */,
A94FB7AC1C7DFB4800632CA3 /* vk_mvk_moltenvk.mm */,
A94FB7AC1C7DFB4800632CA3 /* mvk_api.mm */,
A94FB7AD1C7DFB4800632CA3 /* vulkan.mm */,
);
path = Vulkan;
@ -956,9 +971,11 @@
2FEA0A5624902F9F00EEF3AD /* MVKWatermarkShaderSource.h in Headers */,
2FEA0A5724902F9F00EEF3AD /* MTLSamplerDescriptor+MoltenVK.h in Headers */,
2FEA0A5824902F9F00EEF3AD /* MVKSync.h in Headers */,
A9B3D73F29F9B3B100745CD4 /* mvk_config.h in Headers */,
2FEA0A5924902F9F00EEF3AD /* MVKDevice.h in Headers */,
2FEA0A5A24902F9F00EEF3AD /* MVKSmallVector.h in Headers */,
2FEA0A5C24902F9F00EEF3AD /* MVKCommandPool.h in Headers */,
A9B3D74329F9BDEE00745CD4 /* mvk_private_api.h in Headers */,
2FEA0A5D24902F9F00EEF3AD /* MVKShaderModule.h in Headers */,
2FEA0A5E24902F9F00EEF3AD /* MVKVulkanAPIObject.h in Headers */,
2FEA0A5F24902F9F00EEF3AD /* MVKCmdQueries.h in Headers */,
@ -981,6 +998,7 @@
2FEA0A7024902F9F00EEF3AD /* MVKCmdTransfer.h in Headers */,
2FEA0A7124902F9F00EEF3AD /* MVKDescriptor.h in Headers */,
2FEA0A7224902F9F00EEF3AD /* MVKCmdDraw.h in Headers */,
A9B3D73C29F9B3B100745CD4 /* mvk_deprecated_api.h in Headers */,
2FEA0A7324902F9F00EEF3AD /* MVKCommandBuffer.h in Headers */,
2FEA0A7424902F9F00EEF3AD /* MTLRenderPassDescriptor+MoltenVK.h in Headers */,
2FEA0A7524902F9F00EEF3AD /* MVKCmdDebug.h in Headers */,
@ -1007,6 +1025,7 @@
A909F65F213B190700FCD6BE /* MVKExtensions.h in Headers */,
A94FB7B41C7DFB4800632CA3 /* vk_mvk_moltenvk.h in Headers */,
A987B669289AFB8A00F933C8 /* MVKDeviceFeatureStructs.def in Headers */,
A9B3D73E29F9B3B100745CD4 /* mvk_config.h in Headers */,
A94FB7B01C7DFB4800632CA3 /* mvk_datatypes.h in Headers */,
A948BB7F1E51642700DE59F2 /* mvk_vulkan.h in Headers */,
A98149511FB6A3F7005F00B4 /* MVKEnvironment.h in Headers */,
@ -1036,10 +1055,12 @@
A94FB80C1C7DFB4800632CA3 /* MVKShaderModule.h in Headers */,
A99C91042295FAC600A061DA /* MVKVulkanAPIObject.h in Headers */,
A94FB7C01C7DFB4800632CA3 /* MVKCmdQueries.h in Headers */,
A9B3D73B29F9B3B100745CD4 /* mvk_deprecated_api.h in Headers */,
A94FB7CC1C7DFB4800632CA3 /* MVKCommand.h in Headers */,
A981494F1FB6A3F7005F00B4 /* MVKBaseObject.h in Headers */,
A9C96DD01DDC20C20053187F /* MVKMTLBufferAllocation.h in Headers */,
A98149571FB6A3F7005F00B4 /* MVKObjectPool.h in Headers */,
A9B3D74229F9BDEE00745CD4 /* mvk_private_api.h in Headers */,
A94FB8141C7DFB4800632CA3 /* MVKSwapchain.h in Headers */,
A93E832F2121C5D4001FEBD4 /* MVKGPUCapture.h in Headers */,
A94FB7DC1C7DFB4800632CA3 /* MVKBuffer.h in Headers */,
@ -1081,6 +1102,7 @@
A909F660213B190700FCD6BE /* MVKExtensions.h in Headers */,
A94FB7B51C7DFB4800632CA3 /* vk_mvk_moltenvk.h in Headers */,
A987B66B289AFB8C00F933C8 /* MVKDeviceFeatureStructs.def in Headers */,
A9B3D74029F9B3B100745CD4 /* mvk_config.h in Headers */,
A94FB7B11C7DFB4800632CA3 /* mvk_datatypes.h in Headers */,
A948BB801E51642700DE59F2 /* mvk_vulkan.h in Headers */,
A98149521FB6A3F7005F00B4 /* MVKEnvironment.h in Headers */,
@ -1110,10 +1132,12 @@
A99C91052295FAC600A061DA /* MVKVulkanAPIObject.h in Headers */,
A94FB7C11C7DFB4800632CA3 /* MVKCmdQueries.h in Headers */,
A94FB7CD1C7DFB4800632CA3 /* MVKCommand.h in Headers */,
A9B3D73D29F9B3B100745CD4 /* mvk_deprecated_api.h in Headers */,
A98149501FB6A3F7005F00B4 /* MVKBaseObject.h in Headers */,
A9C96DD11DDC20C20053187F /* MVKMTLBufferAllocation.h in Headers */,
A98149581FB6A3F7005F00B4 /* MVKObjectPool.h in Headers */,
A94FB8151C7DFB4800632CA3 /* MVKSwapchain.h in Headers */,
A9B3D74429F9BDEE00745CD4 /* mvk_private_api.h in Headers */,
A93E83302121C5D4001FEBD4 /* MVKGPUCapture.h in Headers */,
A94FB7DD1C7DFB4800632CA3 /* MVKBuffer.h in Headers */,
A9F042A51FB4CF83009FCCB8 /* MVKCommonEnvironment.h in Headers */,
@ -1657,7 +1681,7 @@
2FEA0AA324902F9F00EEF3AD /* MVKRenderPass.mm in Sources */,
2FEA0AA424902F9F00EEF3AD /* MVKCmdTransfer.mm in Sources */,
2FEA0AA524902F9F00EEF3AD /* MVKCmdQueries.mm in Sources */,
2FEA0AA624902F9F00EEF3AD /* vk_mvk_moltenvk.mm in Sources */,
2FEA0AA624902F9F00EEF3AD /* mvk_api.mm in Sources */,
2FEA0AA724902F9F00EEF3AD /* MVKSwapchain.mm in Sources */,
2FEA0AA824902F9F00EEF3AD /* MVKCommandEncoderState.mm in Sources */,
2FEA0AA924902F9F00EEF3AD /* MVKGPUCapture.mm in Sources */,
@ -1716,7 +1740,7 @@
A94FB8061C7DFB4800632CA3 /* MVKRenderPass.mm in Sources */,
A94FB7BA1C7DFB4800632CA3 /* MVKCmdTransfer.mm in Sources */,
A94FB7C21C7DFB4800632CA3 /* MVKCmdQueries.mm in Sources */,
A94FB8301C7DFB4800632CA3 /* vk_mvk_moltenvk.mm in Sources */,
A94FB8301C7DFB4800632CA3 /* mvk_api.mm in Sources */,
A94FB8161C7DFB4800632CA3 /* MVKSwapchain.mm in Sources */,
A95B7D6B1D3EE486003183D3 /* MVKCommandEncoderState.mm in Sources */,
A93E83352121F0C8001FEBD4 /* MVKGPUCapture.mm in Sources */,
@ -1776,7 +1800,7 @@
A94FB8071C7DFB4800632CA3 /* MVKRenderPass.mm in Sources */,
A94FB7BB1C7DFB4800632CA3 /* MVKCmdTransfer.mm in Sources */,
A94FB7C31C7DFB4800632CA3 /* MVKCmdQueries.mm in Sources */,
A94FB8311C7DFB4800632CA3 /* vk_mvk_moltenvk.mm in Sources */,
A94FB8311C7DFB4800632CA3 /* mvk_api.mm in Sources */,
A94FB8171C7DFB4800632CA3 /* MVKSwapchain.mm in Sources */,
A95B7D6C1D3EE486003183D3 /* MVKCommandEncoderState.mm in Sources */,
A93E83362121F0C8001FEBD4 /* MVKGPUCapture.mm in Sources */,

File diff suppressed because it is too large Load Diff

View File

@ -16,14 +16,6 @@
* limitations under the License.
*/
/*
* This file contains functions for converting between Vulkan and Metal data types.
*
* The functions here are used internally by MoltenVK, and are exposed here
* as a convenience for use elsewhere within applications using MoltenVK.
*/
#ifndef __mvkDataTypes_h_
#define __mvkDataTypes_h_ 1
@ -37,6 +29,14 @@ extern "C" {
#import <CoreGraphics/CoreGraphics.h>
/*
* This file contains functions for converting between Vulkan and Metal data types.
*
* The functions here are used internally by MoltenVK, and are exposed here
* as a convenience for use elsewhere within applications using MoltenVK.
*/
#pragma mark -
#pragma mark Image properties

View File

@ -0,0 +1,233 @@
/*
* mvk_deprecated_api.h
*
* Copyright (c) 2015-2023 The Brenwill Workshop Ltd. (http://www.brenwill.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __mvk_deprecated_api_h_
#define __mvk_deprecated_api_h_ 1
#ifdef __cplusplus
extern "C" {
#endif // __cplusplus
#include <MoltenVK/mvk_config.h>
#include <IOSurface/IOSurfaceRef.h>
#define VK_MVK_MOLTENVK_SPEC_VERSION 37
#define VK_MVK_MOLTENVK_EXTENSION_NAME "VK_MVK_moltenvk"
/**
* This header contains obsolete and deprecated MoltenVK functions that were originally
* part of the obsolete and deprecated non-standard VK_MVK_moltenvk extension.
*
* NOTE: USE OF THE FUNCTIONS BELOW IS NOT RECOMMENDED. THE VK_MVK_moltenvk EXTENSION,
* AND THE FUNCTIONS BELOW ARE NOT SUPPORTED BY THE VULKAN LOADER AND LAYERS.
* THE VULKAN OBJECTS PASSED IN THESE FUNCTIONS MUST HAVE BEEN RETRIEVED DIRECTLY
* FROM MOLTENVK, WITHOUT LINKING THROUGH THE VULKAN LOADER AND LAYERS.
*
* To interact with the Metal objects underlying Vulkan objects in MoltenVK,
* use the standard Vulkan VK_EXT_metal_objects extension.
* The VK_EXT_metal_objects extension is supported by the Vulkan Loader and Layers.
*/
#pragma mark -
#pragma mark Function types
/** Function pointer type of vkGetVersionStringsMVK(). Deprecated. */
typedef void (VKAPI_PTR *PFN_vkGetVersionStringsMVK)(char* pMoltenVersionStringBuffer, uint32_t moltenVersionStringBufferLength, char* pVulkanVersionStringBuffer, uint32_t vulkanVersionStringBufferLength);
/** Function pointer type of vkSetWorkgroupSizeMVK(). Deprecated. */
typedef void (VKAPI_PTR *PFN_vkSetWorkgroupSizeMVK)(VkShaderModule shaderModule, uint32_t x, uint32_t y, uint32_t z);
/** Function pointer type of vkUseIOSurfaceMVK(). Deprecated; use the VK_EXT_metal_objects extension instead. */
typedef VkResult (VKAPI_PTR *PFN_vkUseIOSurfaceMVK)(VkImage image, IOSurfaceRef ioSurface);
/** Function pointer type of vkGetIOSurfaceMVK(). Deprecated; use the VK_EXT_metal_objects extension instead. */
typedef void (VKAPI_PTR *PFN_vkGetIOSurfaceMVK)(VkImage image, IOSurfaceRef* pIOSurface);
// The following types reference Metal objects (id<MTL...>), so are visible only when compiled as Objective-C.
#ifdef __OBJC__
/** Function pointer type of vkGetMTLDeviceMVK(). Deprecated; use the VK_EXT_metal_objects extension instead. */
typedef void (VKAPI_PTR *PFN_vkGetMTLDeviceMVK)(VkPhysicalDevice physicalDevice, id<MTLDevice>* pMTLDevice);
/** Function pointer type of vkSetMTLTextureMVK(). Deprecated; use the VK_EXT_metal_objects extension instead. */
typedef VkResult (VKAPI_PTR *PFN_vkSetMTLTextureMVK)(VkImage image, id<MTLTexture> mtlTexture);
/** Function pointer type of vkGetMTLTextureMVK(). Deprecated; use the VK_EXT_metal_objects extension instead. */
typedef void (VKAPI_PTR *PFN_vkGetMTLTextureMVK)(VkImage image, id<MTLTexture>* pMTLTexture);
/** Function pointer type of vkGetMTLBufferMVK(). Deprecated; use the VK_EXT_metal_objects extension instead. */
typedef void (VKAPI_PTR *PFN_vkGetMTLBufferMVK)(VkBuffer buffer, id<MTLBuffer>* pMTLBuffer);
/** Function pointer type of vkGetMTLCommandQueueMVK(). Deprecated; use the VK_EXT_metal_objects extension instead. */
typedef void (VKAPI_PTR *PFN_vkGetMTLCommandQueueMVK)(VkQueue queue, id<MTLCommandQueue>* pMTLCommandQueue);
#endif // __OBJC__
#pragma mark -
#pragma mark Function prototypes
#ifndef VK_NO_PROTOTYPES
#define MVK_DEPRECATED VKAPI_ATTR [[deprecated]]
#define MVK_DEPRECATED_USE_MTL_OBJS VKAPI_ATTR [[deprecated("Use the VK_EXT_metal_objects extension instead.")]]
/**
* DEPRECATED.
* Returns a human readable version of the MoltenVK and Vulkan versions.
*
* This function is provided as a convenience for reporting. Use the MVK_VERSION,
* VK_API_VERSION_1_0, and VK_HEADER_VERSION macros for programmatically accessing
* the corresponding version numbers.
*/
MVK_DEPRECATED
void VKAPI_CALL vkGetVersionStringsMVK(
char* pMoltenVersionStringBuffer,
uint32_t moltenVersionStringBufferLength,
char* pVulkanVersionStringBuffer,
uint32_t vulkanVersionStringBufferLength);
/**
* DEPRECATED.
* Sets the number of threads in a workgroup for a compute kernel.
*
* This needs to be called if you are creating compute shader modules from MSL source code
* or MSL compiled code. If you are using SPIR-V, workgroup size is determined automatically.
*
* This function is not supported by the Vulkan SDK Loader and Layers framework
* and is unavailable when using the Vulkan SDK Loader and Layers framework.
*/
MVK_DEPRECATED
void VKAPI_CALL vkSetWorkgroupSizeMVK(
VkShaderModule shaderModule,
uint32_t x,
uint32_t y,
uint32_t z);
#ifdef __OBJC__
/**
* DEPRECATED. Use the VK_EXT_metal_objects extension instead.
* Returns, in the pMTLDevice pointer, the MTLDevice used by the VkPhysicalDevice.
*
* This function is not supported by the Vulkan SDK Loader and Layers framework
* and is unavailable when using the Vulkan SDK Loader and Layers framework.
*/
MVK_DEPRECATED_USE_MTL_OBJS
void VKAPI_CALL vkGetMTLDeviceMVK(
VkPhysicalDevice physicalDevice,
id<MTLDevice>* pMTLDevice);
/**
* DEPRECATED. Use the VK_EXT_metal_objects extension instead.
* Sets the VkImage to use the specified MTLTexture.
*
* Any differences in the properties of mtlTexture and this image will modify the
* properties of this image.
*
* If a MTLTexture has already been created for this image, it will be destroyed.
*
* Returns VK_SUCCESS.
*
* This function is not supported by the Vulkan SDK Loader and Layers framework
* and is unavailable when using the Vulkan SDK Loader and Layers framework.
*/
MVK_DEPRECATED_USE_MTL_OBJS
VkResult VKAPI_CALL vkSetMTLTextureMVK(
VkImage image,
id<MTLTexture> mtlTexture);
/**
* DEPRECATED. Use the VK_EXT_metal_objects extension instead.
* Returns, in the pMTLTexture pointer, the MTLTexture currently underlaying the VkImage.
*
* This function is not supported by the Vulkan SDK Loader and Layers framework
* and is unavailable when using the Vulkan SDK Loader and Layers framework.
*/
MVK_DEPRECATED_USE_MTL_OBJS
void VKAPI_CALL vkGetMTLTextureMVK(
VkImage image,
id<MTLTexture>* pMTLTexture);
/**
* DEPRECATED. Use the VK_EXT_metal_objects extension instead.
* Returns, in the pMTLBuffer pointer, the MTLBuffer currently underlaying the VkBuffer.
*
* This function is not supported by the Vulkan SDK Loader and Layers framework
* and is unavailable when using the Vulkan SDK Loader and Layers framework.
*/
MVK_DEPRECATED_USE_MTL_OBJS
void VKAPI_CALL vkGetMTLBufferMVK(
VkBuffer buffer,
id<MTLBuffer>* pMTLBuffer);
/**
* DEPRECATED. Use the VK_EXT_metal_objects extension instead.
* Returns, in the pMTLCommandQueue pointer, the MTLCommandQueue currently underlaying the VkQueue.
*
* This function is not supported by the Vulkan SDK Loader and Layers framework
* and is unavailable when using the Vulkan SDK Loader and Layers framework.
*/
MVK_DEPRECATED_USE_MTL_OBJS
void VKAPI_CALL vkGetMTLCommandQueueMVK(
VkQueue queue,
id<MTLCommandQueue>* pMTLCommandQueue);
#endif // __OBJC__
/**
* DEPRECATED. Use the VK_EXT_metal_objects extension instead.
* Indicates that a VkImage should use an IOSurface to underlay the Metal texture.
*
* If ioSurface is not null, it will be used as the IOSurface, and any differences
* in the properties of that IOSurface will modify the properties of this image.
*
* If ioSurface is null, this image will create and use an IOSurface
* whose properties are compatible with the properties of this image.
*
* If a MTLTexture has already been created for this image, it will be destroyed.
*
* IOSurfaces are supported on the following platforms:
* - macOS 10.11 and above
* - iOS 11.0 and above
*
* To enable IOSurface support, ensure the Deployment Target build setting
* (MACOSX_DEPLOYMENT_TARGET or IPHONEOS_DEPLOYMENT_TARGET) is set to at least
* one of the values above when compiling MoltenVK, and any app that uses MoltenVK.
*
* Returns:
* - VK_SUCCESS.
* - VK_ERROR_FEATURE_NOT_PRESENT if IOSurfaces are not supported on the platform.
* - VK_ERROR_INITIALIZATION_FAILED if ioSurface is specified and is not compatible with this VkImage.
*
* This function is not supported by the Vulkan SDK Loader and Layers framework
* and is unavailable when using the Vulkan SDK Loader and Layers framework.
*/
MVK_DEPRECATED_USE_MTL_OBJS
VkResult VKAPI_CALL vkUseIOSurfaceMVK(
VkImage image,
IOSurfaceRef ioSurface);
/**
* DEPRECATED. Use the VK_EXT_metal_objects extension instead.
* Returns, in the pIOSurface pointer, the IOSurface currently underlaying the VkImage,
* as set by the vkUseIOSurfaceMVK() function, or returns null if the VkImage is not using
* an IOSurface, or if the platform does not support IOSurfaces.
*
* This function is not supported by the Vulkan SDK Loader and Layers framework
* and is unavailable when using the Vulkan SDK Loader and Layers framework.
*/
MVK_DEPRECATED_USE_MTL_OBJS
void VKAPI_CALL vkGetIOSurfaceMVK(
VkImage image,
IOSurfaceRef* pIOSurface);
#endif // VK_NO_PROTOTYPES
#ifdef __cplusplus
}
#endif // __cplusplus
#endif

View File

@ -0,0 +1,296 @@
/*
* mvk_private_api.h
*
* Copyright (c) 2015-2023 The Brenwill Workshop Ltd. (http://www.brenwill.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __mvk_private_api_h_
#define __mvk_private_api_h_ 1
#ifdef __cplusplus
extern "C" {
#endif // __cplusplus
#include <vulkan/vulkan.h>
#ifdef __OBJC__
#import <Metal/Metal.h>
#else
typedef unsigned long MTLLanguageVersion;
typedef unsigned long MTLArgumentBuffersTier;
#endif
/**
* This header contains functions to query MoltenVK about
* available Metal features, and runtime performance information.
*
* NOTE: THE FUNCTIONS BELOW SHOULD BE USED WITH CARE. THESE FUNCTIONS ARE
* NOT PART OF VULKAN, AND ARE NOT SUPPORTED BY THE VULKAN LOADER AND LAYERS.
* THE VULKAN OBJECTS PASSED IN THESE FUNCTIONS MUST HAVE BEEN RETRIEVED
* DIRECTLY FROM MOLTENVK, WITHOUT LINKING THROUGH THE VULKAN LOADER AND LAYERS.
*/
#define MVK_PRIVATE_API_VERSION 37
/** Identifies the type of rounding Metal uses for float to integer conversions in particular calculations. */
typedef enum MVKFloatRounding {
MVK_FLOAT_ROUNDING_NEAREST = 0, /**< Metal rounds to nearest. */
MVK_FLOAT_ROUNDING_UP = 1, /**< Metal rounds towards positive infinity. */
MVK_FLOAT_ROUNDING_DOWN = 2, /**< Metal rounds towards negative infinity. */
MVK_FLOAT_ROUNDING_UP_MAX_ENUM = 0x7FFFFFFF /**< Sentinel; forces a 32-bit underlying type. Not a valid rounding mode. */
} MVKFloatRounding;
/** Identifies the pipeline points where GPU counter sampling can occur. Maps to MTLCounterSamplingPoint. */
typedef enum MVKCounterSamplingBits {
MVK_COUNTER_SAMPLING_AT_DRAW = 0x00000001, /**< Sampling may occur at draw-call boundaries. */
MVK_COUNTER_SAMPLING_AT_DISPATCH = 0x00000002, /**< Sampling may occur at compute-dispatch boundaries. */
MVK_COUNTER_SAMPLING_AT_BLIT = 0x00000004, /**< Sampling may occur at blit-command boundaries. */
MVK_COUNTER_SAMPLING_AT_PIPELINE_STAGE = 0x00000008, /**< Sampling may occur at render-pipeline-stage boundaries. */
MVK_COUNTER_SAMPLING_MAX_ENUM = 0x7FFFFFFF /**< Sentinel; forces a 32-bit underlying type (lowercase 0x for consistency with MVKFloatRounding). Not a valid sampling point. */
} MVKCounterSamplingBits;
typedef VkFlags MVKCounterSamplingFlags;
/**
* Features provided by the current implementation of Metal on the current device. You can
* retrieve a copy of this structure using the vkGetPhysicalDeviceMetalFeaturesMVK() function.
*
* This structure may be extended as new features are added to MoltenVK. If you are linking to
* an implementation of MoltenVK that was compiled from a different MVK_PRIVATE_API_VERSION
* than your app was, the size of this structure in your app may be larger or smaller than the
* struct in MoltenVK. See the description of the vkGetPhysicalDeviceMetalFeaturesMVK() function
* for information about how to handle this.
*
* TO SUPPORT DYNAMIC LINKING TO THIS STRUCTURE AS DESCRIBED ABOVE, THIS STRUCTURE SHOULD NOT
* BE CHANGED EXCEPT TO ADD ADDITIONAL MEMBERS ON THE END. EXISTING MEMBERS, AND THEIR ORDER,
* SHOULD NOT BE CHANGED.
*/
typedef struct {
uint32_t mslVersion; /**< The version of the Metal Shading Language available on this device. The format of the integer is MMmmpp, with two decimal digits each for Major, minor, and patch version values (eg. MSL 1.2 would appear as 010200). */
VkBool32 indirectDrawing; /**< If true, draw calls support parameters held in a GPU buffer. */
VkBool32 baseVertexInstanceDrawing; /**< If true, draw calls support specifying the base vertex and instance. */
uint32_t dynamicMTLBufferSize; /**< If greater than zero, dynamic MTLBuffers for setting vertex, fragment, and compute bytes are supported, and their content must be below this value. */
VkBool32 shaderSpecialization; /**< If true, shader specialization (aka Metal function constants) is supported. */
VkBool32 ioSurfaces; /**< If true, VkImages can be underlaid by IOSurfaces via the vkUseIOSurfaceMVK() function, to support inter-process image transfers. */
VkBool32 texelBuffers; /**< If true, texel buffers are supported, allowing the contents of a buffer to be interpreted as an image via a VkBufferView. */
VkBool32 layeredRendering; /**< If true, layered rendering to multiple cube or texture array layers is supported. */
VkBool32 presentModeImmediate; /**< If true, immediate surface present mode (VK_PRESENT_MODE_IMMEDIATE_KHR), allowing a swapchain image to be presented immediately, without waiting for the vertical sync period of the display, is supported. */
VkBool32 stencilViews; /**< If true, stencil aspect views are supported through the MTLPixelFormatX24_Stencil8 and MTLPixelFormatX32_Stencil8 formats. */
VkBool32 multisampleArrayTextures; /**< If true, MTLTextureType2DMultisampleArray is supported. */
VkBool32 samplerClampToBorder; /**< If true, the border color set when creating a sampler will be respected. */
uint32_t maxTextureDimension; /**< The maximum size of each texture dimension (width, height, or depth). */
uint32_t maxPerStageBufferCount; /**< The total number of per-stage Metal buffers available for shader uniform content and attributes. */
uint32_t maxPerStageTextureCount; /**< The total number of per-stage Metal textures available for shader uniform content. */
uint32_t maxPerStageSamplerCount; /**< The total number of per-stage Metal samplers available for shader uniform content. */
VkDeviceSize maxMTLBufferSize; /**< The max size of a MTLBuffer (in bytes). */
VkDeviceSize mtlBufferAlignment; /**< The alignment used when allocating memory for MTLBuffers. Must be PoT. */
VkDeviceSize maxQueryBufferSize; /**< The maximum size of an occlusion query buffer (in bytes). */
VkDeviceSize mtlCopyBufferAlignment; /**< The alignment required during buffer copy operations (in bytes). */
VkSampleCountFlags supportedSampleCounts; /**< A bitmask identifying the sample counts supported by the device. */
uint32_t minSwapchainImageCount; /**< The minimum number of swapchain images that can be supported by a surface. */
uint32_t maxSwapchainImageCount; /**< The maximum number of swapchain images that can be supported by a surface. */
VkBool32 combinedStoreResolveAction; /**< If true, the device supports VK_ATTACHMENT_STORE_OP_STORE with a simultaneous resolve attachment. */
VkBool32 arrayOfTextures; /**< If true, arrays of textures are supported. */
VkBool32 arrayOfSamplers; /**< If true, arrays of texture samplers are supported. */
MTLLanguageVersion mslVersionEnum; /**< The version of the Metal Shading Language available on this device, as a Metal enumeration. */
VkBool32 depthSampleCompare; /**< If true, depth texture samplers support the comparison of the pixel value against a reference value. */
VkBool32 events; /**< If true, Metal synchronization events (MTLEvent) are supported. */
VkBool32 memoryBarriers; /**< If true, full memory barriers within Metal render passes are supported. */
VkBool32 multisampleLayeredRendering; /**< If true, layered rendering to multiple multi-sampled cube or texture array layers is supported. */
VkBool32 stencilFeedback; /**< If true, fragment shaders that write to [[stencil]] outputs are supported. */
VkBool32 textureBuffers; /**< If true, textures of type MTLTextureTypeBuffer are supported. */
VkBool32 postDepthCoverage; /**< If true, coverage masks in fragment shaders post-depth-test are supported. */
VkBool32 fences; /**< If true, Metal synchronization fences (MTLFence) are supported. */
VkBool32 rasterOrderGroups; /**< If true, Raster order groups in fragment shaders are supported. */
VkBool32 native3DCompressedTextures; /**< If true, 3D compressed images are supported natively, without manual decompression. */
VkBool32 nativeTextureSwizzle; /**< If true, component swizzle is supported natively, without manual swizzling in shaders. */
VkBool32 placementHeaps; /**< If true, MTLHeap objects support placement of resources. */
VkDeviceSize pushConstantSizeAlignment; /**< The alignment used internally when allocating memory for push constants. Must be PoT. */
uint32_t maxTextureLayers; /**< The maximum number of layers in an array texture. */
uint32_t maxSubgroupSize; /**< The maximum number of threads in a SIMD-group. */
VkDeviceSize vertexStrideAlignment; /**< The alignment used for the stride of vertex attribute bindings. */
VkBool32 indirectTessellationDrawing; /**< If true, tessellation draw calls support parameters held in a GPU buffer. */
VkBool32 nonUniformThreadgroups; /**< If true, the device supports arbitrary-sized grids in compute workloads. */
VkBool32 renderWithoutAttachments; /**< If true, we don't have to create a dummy attachment for a render pass if there isn't one. */
VkBool32 deferredStoreActions; /**< If true, render pass store actions can be specified after the render encoder is created. */
VkBool32 sharedLinearTextures; /**< If true, linear textures and texture buffers can be created from buffers in Shared storage. */
VkBool32 depthResolve; /**< If true, resolving depth textures with filters other than Sample0 is supported. */
VkBool32 stencilResolve; /**< If true, resolving stencil textures with filters other than Sample0 is supported. */
uint32_t maxPerStageDynamicMTLBufferCount; /**< The maximum number of inline buffers that can be set on a command buffer. */
uint32_t maxPerStageStorageTextureCount; /**< The total number of per-stage Metal textures with read-write access available for writing to from a shader. */
VkBool32 astcHDRTextures; /**< If true, ASTC HDR pixel formats are supported. */
VkBool32 renderLinearTextures; /**< If true, linear textures are renderable. */
VkBool32 pullModelInterpolation; /**< If true, explicit interpolation functions are supported. */
VkBool32 samplerMirrorClampToEdge; /**< If true, the mirrored clamp to edge address mode is supported in samplers. */
VkBool32 quadPermute; /**< If true, quadgroup permutation functions (vote, ballot, shuffle) are supported in shaders. */
VkBool32 simdPermute; /**< If true, SIMD-group permutation functions (vote, ballot, shuffle) are supported in shaders. */
VkBool32 simdReduction; /**< If true, SIMD-group reduction functions (arithmetic) are supported in shaders. */
uint32_t minSubgroupSize; /**< The minimum number of threads in a SIMD-group. */
VkBool32 textureBarriers; /**< If true, texture barriers are supported within Metal render passes. */
VkBool32 tileBasedDeferredRendering; /**< If true, this device uses tile-based deferred rendering. */
VkBool32 argumentBuffers; /**< If true, Metal argument buffers are supported. */
VkBool32 descriptorSetArgumentBuffers; /**< If true, a Metal argument buffer can be assigned to a descriptor set, and used on any pipeline and pipeline stage. If false, a different Metal argument buffer must be used for each pipeline-stage/descriptor-set combination. */
MVKFloatRounding clearColorFloatRounding; /**< Identifies the type of rounding Metal uses for MTLClearColor float to integer conversions. */
MVKCounterSamplingFlags counterSamplingPoints; /**< Identifies the points where pipeline GPU counter sampling may occur. */
VkBool32 programmableSamplePositions; /**< If true, programmable MSAA sample positions are supported. */
VkBool32 shaderBarycentricCoordinates; /**< If true, fragment shader barycentric coordinates are supported. */
MTLArgumentBuffersTier argumentBuffersTier; /**< The argument buffer tier available on this device, as a Metal enumeration. */
VkBool32 needsSampleDrefLodArrayWorkaround; /**< If true, sampling from arrayed depth images with explicit LoD is broken and needs a workaround. */
VkDeviceSize hostMemoryPageSize; /**< The size of a page of host memory on this platform. */
} MVKPhysicalDeviceMetalFeatures;
/** MoltenVK performance of a particular type of activity. Tracks call count and duration statistics, in milliseconds. */
typedef struct {
uint32_t count; /**< The number of activities of this type. */
double latestDuration; /**< The latest (most recent) duration of the activity, in milliseconds. */
double averageDuration; /**< The average duration of the activity, in milliseconds. */
double minimumDuration; /**< The minimum duration of the activity, in milliseconds. */
double maximumDuration; /**< The maximum duration of the activity, in milliseconds. */
} MVKPerformanceTracker;
/** MoltenVK performance of shader compilation activities. */
typedef struct {
MVKPerformanceTracker hashShaderCode; /**< Create a hash from the incoming shader code. */
MVKPerformanceTracker spirvToMSL; /**< Convert SPIR-V to MSL source code. */
MVKPerformanceTracker mslCompile; /**< Compile MSL source code into a MTLLibrary. */
MVKPerformanceTracker mslLoad; /**< Load pre-compiled MSL code into a MTLLibrary. */
MVKPerformanceTracker mslCompress; /**< Compress MSL source code after compiling a MTLLibrary, to hold it in a pipeline cache. */
MVKPerformanceTracker mslDecompress; /**< Decompress MSL source code to write the MSL when serializing a pipeline cache. */
MVKPerformanceTracker shaderLibraryFromCache; /**< Retrieve a shader library from the cache, lazily creating it if needed. */
MVKPerformanceTracker functionRetrieval; /**< Retrieve a MTLFunction from a MTLLibrary. */
MVKPerformanceTracker functionSpecialization; /**< Specialize a retrieved MTLFunction. */
MVKPerformanceTracker pipelineCompile; /**< Compile MTLFunctions into a pipeline. */
MVKPerformanceTracker glslToSPRIV; /**< Convert GLSL to SPIR-V code. NOTE(review): 'SPRIV' spelling appears historical; renaming the member would break source compatibility. */
} MVKShaderCompilationPerformance;
/** MoltenVK performance of pipeline cache activities. */
typedef struct {
MVKPerformanceTracker sizePipelineCache; /**< Calculate the size of cache data required to write MSL to pipeline cache data stream. */
MVKPerformanceTracker writePipelineCache; /**< Write MSL to pipeline cache data stream. */
MVKPerformanceTracker readPipelineCache; /**< Read MSL from pipeline cache data stream. */
} MVKPipelineCachePerformance;
/** MoltenVK performance of queue activities. */
typedef struct {
MVKPerformanceTracker mtlQueueAccess; /**< Create an MTLCommandQueue or access an existing cached instance. */
MVKPerformanceTracker mtlCommandBufferCompletion; /**< Completion of a MTLCommandBuffer on the GPU, from commit to completion callback. */
MVKPerformanceTracker nextCAMetalDrawable; /**< Retrieve next CAMetalDrawable from CAMetalLayer during presentation. */
MVKPerformanceTracker frameInterval; /**< Frame presentation interval (1000/FPS). */
} MVKQueuePerformance;
/**
* MoltenVK performance. You can retrieve a copy of this structure using the vkGetPerformanceStatisticsMVK() function.
*
* This structure may be extended as new features are added to MoltenVK. If you are linking to
* an implementation of MoltenVK that was compiled from a different MVK_PRIVATE_API_VERSION
* than your app was, the size of this structure in your app may be larger or smaller than the
* struct in MoltenVK. See the description of the vkGetPerformanceStatisticsMVK() function for
* information about how to handle this.
*
* TO SUPPORT DYNAMIC LINKING TO THIS STRUCTURE AS DESCRIBED ABOVE, THIS STRUCTURE SHOULD NOT
* BE CHANGED EXCEPT TO ADD ADDITIONAL MEMBERS ON THE END. EXISTING MEMBERS, AND THEIR ORDER,
* SHOULD NOT BE CHANGED.
*/
typedef struct {
MVKShaderCompilationPerformance shaderCompilation; /**< Shader compilation activities. */
MVKPipelineCachePerformance pipelineCache; /**< Pipeline cache activities. */
MVKQueuePerformance queue; /**< Queue activities. */
} MVKPerformanceStatistics;
#pragma mark -
#pragma mark Function types
/** Function pointer type of vkGetPhysicalDeviceMetalFeaturesMVK(). See that function's documentation for usage. */
typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceMetalFeaturesMVK)(VkPhysicalDevice physicalDevice, MVKPhysicalDeviceMetalFeatures* pMetalFeatures, size_t* pMetalFeaturesSize);
/** Function pointer type of vkGetPerformanceStatisticsMVK(). See that function's documentation for usage. */
typedef VkResult (VKAPI_PTR *PFN_vkGetPerformanceStatisticsMVK)(VkDevice device, MVKPerformanceStatistics* pPerf, size_t* pPerfSize);
#pragma mark -
#pragma mark Function prototypes
#ifndef VK_NO_PROTOTYPES
/**
* Populates the pMetalFeatures structure with the Metal-specific features
* supported by the specified physical device.
*
* If you are linking to an implementation of MoltenVK that was compiled from a different
* MVK_PRIVATE_API_VERSION than your app was, the size of the MVKPhysicalDeviceMetalFeatures
* structure in your app may be larger or smaller than the same struct as expected by MoltenVK.
*
* When calling this function, set the value of *pMetalFeaturesSize to sizeof(MVKPhysicalDeviceMetalFeatures),
* to tell MoltenVK the limit of the size of your MVKPhysicalDeviceMetalFeatures structure. Upon return from
* this function, the value of *pMetalFeaturesSize will hold the actual number of bytes copied into your
* passed MVKPhysicalDeviceMetalFeatures structure, which will be the smaller of what your app thinks is the
* size of MVKPhysicalDeviceMetalFeatures, and what MoltenVK thinks it is. This represents the safe access
* area within the structure for both MoltenVK and your app.
*
* If the size that MoltenVK expects for MVKPhysicalDeviceMetalFeatures is different than the value passed in
* *pMetalFeaturesSize, this function will return VK_INCOMPLETE, otherwise it will return VK_SUCCESS.
*
* Although it is not necessary, you can use this function to determine in advance the value that MoltenVK
* expects the size of MVKPhysicalDeviceMetalFeatures to be by setting the value of pMetalFeatures to NULL.
* In that case, this function will set *pMetalFeaturesSize to the size that MoltenVK expects
* MVKPhysicalDeviceMetalFeatures to be.
*
* This function is not supported by the Vulkan SDK Loader and Layers framework
* and is unavailable when using the Vulkan SDK Loader and Layers framework.
*/
VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceMetalFeaturesMVK(
VkPhysicalDevice physicalDevice,
MVKPhysicalDeviceMetalFeatures* pMetalFeatures,
size_t* pMetalFeaturesSize);
/**
* Populates the pPerf structure with the current performance statistics for the device.
*
* If you are linking to an implementation of MoltenVK that was compiled from a different
* MVK_PRIVATE_API_VERSION than your app was, the size of the MVKPerformanceStatistics
* structure in your app may be larger or smaller than the same struct as expected by MoltenVK.
*
* When calling this function, set the value of *pPerfSize to sizeof(MVKPerformanceStatistics),
* to tell MoltenVK the limit of the size of your MVKPerformanceStatistics structure. Upon return
* from this function, the value of *pPerfSize will hold the actual number of bytes copied into
* your passed MVKPerformanceStatistics structure, which will be the smaller of what your app
* thinks is the size of MVKPerformanceStatistics, and what MoltenVK thinks it is. This
* represents the safe access area within the structure for both MoltenVK and your app.
*
* If the size that MoltenVK expects for MVKPerformanceStatistics is different than the value passed
* in *pPerfSize, this function will return VK_INCOMPLETE, otherwise it will return VK_SUCCESS.
*
* Although it is not necessary, you can use this function to determine in advance the value
* that MoltenVK expects the size of MVKPerformanceStatistics to be by setting the value of
* pPerf to NULL. In that case, this function will set *pPerfSize to the size that MoltenVK
* expects MVKPerformanceStatistics to be.
*
* This function is not supported by the Vulkan SDK Loader and Layers framework
* and is unavailable when using the Vulkan SDK Loader and Layers framework.
*/
VKAPI_ATTR VkResult VKAPI_CALL vkGetPerformanceStatisticsMVK(
VkDevice device,
MVKPerformanceStatistics* pPerf,
size_t* pPerfSize);
#endif // VK_NO_PROTOTYPES
#ifdef __cplusplus
}
#endif // __cplusplus
#endif

File diff suppressed because it is too large Load Diff

View File

@ -23,7 +23,6 @@
#include "MVKBuffer.h"
#include "MVKPipeline.h"
#include "MVKFoundation.h"
#include "MVKEnvironment.h"
#include "mvk_datatypes.hpp"
@ -394,7 +393,7 @@ VkResult MVKCmdPushDescriptorSet::setContent(MVKCommandBuffer* cmdBuff,
}
void MVKCmdPushDescriptorSet::encode(MVKCommandEncoder* cmdEncoder) {
_pipelineLayout->pushDescriptorSet(cmdEncoder, _descriptorWrites.contents(), _set);
_pipelineLayout->pushDescriptorSet(cmdEncoder, _pipelineBindPoint, _descriptorWrites.contents(), _set);
}
MVKCmdPushDescriptorSet::~MVKCmdPushDescriptorSet() {

View File

@ -262,7 +262,7 @@ protected:
// Concrete template class implementations.
typedef MVKCmdSetViewport<1> MVKCmdSetViewport1;
typedef MVKCmdSetViewport<kMVKCachedViewportScissorCount> MVKCmdSetViewportMulti;
typedef MVKCmdSetViewport<kMVKMaxViewportScissorCount> MVKCmdSetViewportMulti;
#pragma mark -
@ -292,7 +292,7 @@ protected:
// Concrete template class implementations.
typedef MVKCmdSetScissor<1> MVKCmdSetScissor1;
typedef MVKCmdSetScissor<kMVKCachedViewportScissorCount> MVKCmdSetScissorMulti;
typedef MVKCmdSetScissor<kMVKMaxViewportScissorCount> MVKCmdSetScissorMulti;
#pragma mark -

View File

@ -282,7 +282,7 @@ void MVKCmdSetViewport<N>::encode(MVKCommandEncoder* cmdEncoder) {
}
template class MVKCmdSetViewport<1>;
template class MVKCmdSetViewport<kMVKCachedViewportScissorCount>;
template class MVKCmdSetViewport<kMVKMaxViewportScissorCount>;
#pragma mark -
@ -309,7 +309,7 @@ void MVKCmdSetScissor<N>::encode(MVKCommandEncoder* cmdEncoder) {
}
template class MVKCmdSetScissor<1>;
template class MVKCmdSetScissor<kMVKCachedViewportScissorCount>;
template class MVKCmdSetScissor<kMVKMaxViewportScissorCount>;
#pragma mark -

View File

@ -284,8 +284,6 @@ protected:
float _mtlDepthVal;
uint32_t _mtlStencilValue;
MVKCommandUse _commandUse;
bool _isClearingDepth;
bool _isClearingStencil;
};
@ -326,7 +324,7 @@ protected:
VkClearValue& getClearValue(uint32_t attIdx) override { return _vkClearValues[attIdx]; }
void setClearValue(uint32_t attIdx, const VkClearValue& clearValue) override { _vkClearValues[attIdx] = clearValue; }
VkClearValue _vkClearValues[kMVKCachedColorAttachmentCount];
VkClearValue _vkClearValues[kMVKMaxColorAttachmentCount];
};
typedef MVKCmdClearMultiAttachments<1> MVKCmdClearMultiAttachments1;

View File

@ -25,7 +25,6 @@
#include "MVKFramebuffer.h"
#include "MVKRenderPass.h"
#include "MTLRenderPassDescriptor+MoltenVK.h"
#include "MVKEnvironment.h"
#include "mvk_datatypes.hpp"
#include <algorithm>
#include <sys/mman.h>
@ -956,7 +955,7 @@ void MVKCmdCopyBuffer<N>::encode(MVKCommandEncoder* cmdEncoder) {
copyInfo.dstOffset = (uint32_t)cpyRgn.dstOffset;
copyInfo.size = (uint32_t)cpyRgn.size;
id<MTLComputeCommandEncoder> mtlComputeEnc = cmdEncoder->getMTLComputeEncoder(kMVKCommandUseCopyBuffer);
id<MTLComputeCommandEncoder> mtlComputeEnc = cmdEncoder->getMTLComputeEncoder(kMVKCommandUseCopyBuffer, true);
[mtlComputeEnc pushDebugGroup: @"vkCmdCopyBuffer"];
[mtlComputeEnc setComputePipelineState: cmdEncoder->getCommandEncodingPool()->getCmdCopyBufferBytesMTLComputePipelineState()];
[mtlComputeEnc setBuffer:srcMTLBuff offset: srcMTLBuffOffset atIndex: 0];
@ -1142,7 +1141,7 @@ void MVKCmdBufferImageCopy<N>::encode(MVKCommandEncoder* cmdEncoder) {
info.offset = cpyRgn.imageOffset;
info.extent = cpyRgn.imageExtent;
bool needsTempBuff = mipLevel != 0;
id<MTLComputeCommandEncoder> mtlComputeEnc = cmdEncoder->getMTLComputeEncoder(cmdUse);
id<MTLComputeCommandEncoder> mtlComputeEnc = cmdEncoder->getMTLComputeEncoder(cmdUse, false); // Compute state will be marked dirty on next compute encoder after Blit encoder below.
id<MTLComputePipelineState> mtlComputeState = cmdEncoder->getCommandEncodingPool()->getCmdCopyBufferToImage3DDecompressMTLComputePipelineState(needsTempBuff);
[mtlComputeEnc pushDebugGroup: @"vkCmdCopyBufferToImage"];
[mtlComputeEnc setComputePipelineState: mtlComputeState];
@ -1260,8 +1259,6 @@ VkResult MVKCmdClearAttachments<N>::setContent(MVKCommandBuffer* cmdBuff,
_commandUse = cmdUse;
_mtlDepthVal = 0.0;
_mtlStencilValue = 0;
_isClearingDepth = false;
_isClearingStencil = false;
MVKPixelFormats* pixFmts = cmdBuff->getPixelFormats();
// For each attachment to be cleared, mark it so in the render pipeline state
@ -1279,14 +1276,12 @@ VkResult MVKCmdClearAttachments<N>::setContent(MVKCommandBuffer* cmdBuff,
}
if (mvkIsAnyFlagEnabled(clrAtt.aspectMask, VK_IMAGE_ASPECT_DEPTH_BIT)) {
_isClearingDepth = true;
_rpsKey.enableAttachment(kMVKClearAttachmentDepthStencilIndex);
_rpsKey.enableAttachment(kMVKClearAttachmentDepthIndex);
_mtlDepthVal = pixFmts->getMTLClearDepthValue(clrAtt.clearValue);
}
if (mvkIsAnyFlagEnabled(clrAtt.aspectMask, VK_IMAGE_ASPECT_STENCIL_BIT)) {
_isClearingStencil = true;
_rpsKey.enableAttachment(kMVKClearAttachmentDepthStencilIndex);
_rpsKey.enableAttachment(kMVKClearAttachmentStencilIndex);
_mtlStencilValue = pixFmts->getMTLClearStencilValue(clrAtt.clearValue);
}
}
@ -1443,31 +1438,24 @@ void MVKCmdClearAttachments<N>::encode(MVKCommandEncoder* cmdEncoder) {
clearColors[caIdx] = { (float)mtlCC.red, (float)mtlCC.green, (float)mtlCC.blue, (float)mtlCC.alpha};
}
// The depth value (including vertex position Z value) is held in the last index.
clearColors[kMVKClearAttachmentDepthStencilIndex] = { _mtlDepthVal, _mtlDepthVal, _mtlDepthVal, _mtlDepthVal };
// The depth value is the vertex position Z value.
clearColors[kMVKClearAttachmentDepthIndex] = { _mtlDepthVal, _mtlDepthVal, _mtlDepthVal, _mtlDepthVal };
VkFormat vkAttFmt = subpass->getDepthStencilFormat();
MTLPixelFormat mtlAttFmt = pixFmts->getMTLPixelFormat(vkAttFmt);
_rpsKey.attachmentMTLPixelFormats[kMVKClearAttachmentDepthStencilIndex] = mtlAttFmt;
_rpsKey.attachmentMTLPixelFormats[kMVKClearAttachmentDepthIndex] = pixFmts->getMTLPixelFormat(subpass->getDepthFormat());
if ( !subpass->isDepthAttachmentUsed() ) { _rpsKey.disableAttachment(kMVKClearAttachmentDepthIndex); }
bool isClearingDepth = _isClearingDepth && pixFmts->isDepthFormat(mtlAttFmt);
bool isClearingStencil = _isClearingStencil && pixFmts->isStencilFormat(mtlAttFmt);
if (!isClearingDepth && !isClearingStencil) {
// If the subpass attachment isn't actually used, don't try to clear it.
_rpsKey.disableAttachment(kMVKClearAttachmentDepthStencilIndex);
}
_rpsKey.attachmentMTLPixelFormats[kMVKClearAttachmentStencilIndex] = pixFmts->getMTLPixelFormat(subpass->getStencilFormat());
if ( !subpass->isStencilAttachmentUsed() ) { _rpsKey.disableAttachment(kMVKClearAttachmentStencilIndex); }
if (!_rpsKey.isAnyAttachmentEnabled()) {
// Nothing to do.
return;
}
if ( !_rpsKey.isAnyAttachmentEnabled() ) { return; }
// Render the clear colors to the attachments
MVKCommandEncodingPool* cmdEncPool = cmdEncoder->getCommandEncodingPool();
id<MTLRenderCommandEncoder> mtlRendEnc = cmdEncoder->_mtlRenderEncoder;
[mtlRendEnc pushDebugGroup: getMTLDebugGroupLabel()];
[mtlRendEnc setRenderPipelineState: cmdEncPool->getCmdClearMTLRenderPipelineState(_rpsKey)];
[mtlRendEnc setDepthStencilState: cmdEncPool->getMTLDepthStencilState(isClearingDepth, isClearingStencil)];
[mtlRendEnc setDepthStencilState: cmdEncPool->getMTLDepthStencilState(_rpsKey.isAttachmentUsed(kMVKClearAttachmentDepthIndex),
_rpsKey.isAttachmentUsed(kMVKClearAttachmentStencilIndex))];
[mtlRendEnc setStencilReferenceValue: _mtlStencilValue];
[mtlRendEnc setCullMode: MTLCullModeNone];
[mtlRendEnc setTriangleFillMode: MTLTriangleFillModeFill];
@ -1592,7 +1580,7 @@ void MVKCmdClearImage<N>::encode(MVKCommandEncoder* cmdEncoder) {
// Luckily for us, linear images only have one mip and one array layer under Metal.
assert( !isDS );
id<MTLComputePipelineState> mtlClearState = cmdEncoder->getCommandEncodingPool()->getCmdClearColorImageMTLComputePipelineState(pixFmts->getFormatType(_image->getVkFormat()));
id<MTLComputeCommandEncoder> mtlComputeEnc = cmdEncoder->getMTLComputeEncoder(kMVKCommandUseClearColorImage);
id<MTLComputeCommandEncoder> mtlComputeEnc = cmdEncoder->getMTLComputeEncoder(kMVKCommandUseClearColorImage, true);
[mtlComputeEnc pushDebugGroup: @"vkCmdClearColorImage"];
[mtlComputeEnc setComputePipelineState: mtlClearState];
[mtlComputeEnc setTexture: imgMTLTex atIndex: 0];
@ -1759,7 +1747,7 @@ void MVKCmdFillBuffer::encode(MVKCommandEncoder* cmdEncoder) {
NSUInteger tgWidth = std::min(cps.maxTotalThreadsPerThreadgroup, cmdEncoder->getMTLDevice().maxThreadsPerThreadgroup.width);
NSUInteger tgCount = _wordCount / tgWidth;
id<MTLComputeCommandEncoder> mtlComputeEnc = cmdEncoder->getMTLComputeEncoder(kMVKCommandUseFillBuffer);
id<MTLComputeCommandEncoder> mtlComputeEnc = cmdEncoder->getMTLComputeEncoder(kMVKCommandUseFillBuffer, true);
[mtlComputeEnc pushDebugGroup: @"vkCmdFillBuffer"];
[mtlComputeEnc setComputePipelineState: cps];
[mtlComputeEnc setBytes: &_dataValue length: sizeof(_dataValue) atIndex: 1];

View File

@ -337,10 +337,12 @@ public:
* Returns the current Metal compute encoder for the specified use,
* which determines the label assigned to the returned encoder.
*
* If the current encoder is not a compute encoder, this function ends current before
* beginning compute encoding.
* If the current encoder is a compute encoder, the compute state being tracked can
* optionally be marked dirty. Otherwise, if the current encoder is not a compute
* encoder, this function ends the current encoder before beginning compute encoding.
*/
id<MTLComputeCommandEncoder> getMTLComputeEncoder(MVKCommandUse cmdUse);
id<MTLComputeCommandEncoder> getMTLComputeEncoder(MVKCommandUse cmdUse,
bool markCurrentComputeStateDirty = false);
/**
* Returns the current Metal BLIT encoder for the specified use,

View File

@ -401,10 +401,17 @@ void MVKCommandEncoder::beginRendering(MVKCommand* rendCmd, const VkRenderingInf
? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
: VK_SUBPASS_CONTENTS_INLINE);
uint32_t maxAttCnt = (pRenderingInfo->colorAttachmentCount + 1) * 2;
MVKImageView* attachments[maxAttCnt];
uint32_t maxAttCnt = (pRenderingInfo->colorAttachmentCount + 2) * 2;
MVKImageView* imageViews[maxAttCnt];
VkClearValue clearValues[maxAttCnt];
uint32_t attCnt = mvkGetAttachments(pRenderingInfo, attachments, clearValues);
uint32_t attCnt = 0;
MVKRenderingAttachmentIterator attIter(pRenderingInfo);
attIter.iterate([&](const VkRenderingAttachmentInfo* pAttInfo, VkImageAspectFlagBits aspect, bool isResolveAttachment)->void {
imageViews[attCnt] = (MVKImageView*)(isResolveAttachment ? pAttInfo->resolveImageView : pAttInfo->imageView);
clearValues[attCnt] = pAttInfo->clearValue;
attCnt++;
});
// If we're resuming a suspended renderpass, continue to use the existing renderpass
// (with updated rendering flags) and framebuffer. Otherwise, create new transient
@ -419,13 +426,14 @@ void MVKCommandEncoder::beginRendering(MVKCommand* rendCmd, const VkRenderingInf
mvkRP->setRenderingFlags(pRenderingInfo->flags);
mvkFB = _pEncodingContext->getFramebuffer();
} else {
mvkRP = mvkCreateRenderPass(getDevice(), pRenderingInfo);
mvkFB = mvkCreateFramebuffer(getDevice(), pRenderingInfo, mvkRP);
auto* mvkDev = getDevice();
mvkRP = mvkDev->createRenderPass(pRenderingInfo, nullptr);
mvkFB = mvkDev->createFramebuffer(pRenderingInfo, nullptr);
}
beginRenderpass(rendCmd, contents, mvkRP, mvkFB,
pRenderingInfo->renderArea,
MVKArrayRef(clearValues, attCnt),
MVKArrayRef(attachments, attCnt),
MVKArrayRef(imageViews, attCnt),
MVKArrayRef<MVKArrayRef<MTLSamplePosition>>(),
kMVKCommandUseBeginRendering);
@ -831,10 +839,6 @@ void MVKCommandEncoder::endMetalRenderEncoding() {
void MVKCommandEncoder::endCurrentMetalEncoding() {
endMetalRenderEncoding();
_computePipelineState.markDirty();
_computeResourcesState.markDirty();
_computePushConstants.markDirty();
if (_mtlComputeEncoder && _cmdBuffer->_hasStageCounterTimestampCommand) { [_mtlComputeEncoder updateFence: getStageCountersMTLFence()]; }
endMetalEncoding(_mtlComputeEncoder);
_mtlComputeEncoderUse = kMVKCommandUseNone;
@ -846,12 +850,18 @@ void MVKCommandEncoder::endCurrentMetalEncoding() {
encodeTimestampStageCounterSamples();
}
id<MTLComputeCommandEncoder> MVKCommandEncoder::getMTLComputeEncoder(MVKCommandUse cmdUse) {
id<MTLComputeCommandEncoder> MVKCommandEncoder::getMTLComputeEncoder(MVKCommandUse cmdUse, bool markCurrentComputeStateDirty) {
if ( !_mtlComputeEncoder ) {
endCurrentMetalEncoding();
_mtlComputeEncoder = [_mtlCmdBuffer computeCommandEncoder];
retainIfImmediatelyEncoding(_mtlComputeEncoder);
beginMetalComputeEncoding(cmdUse);
markCurrentComputeStateDirty = true; // Always mark current compute state dirty for new encoder
}
if(markCurrentComputeStateDirty) {
_computePipelineState.markDirty();
_computePushConstants.markDirty();
_computeResourcesState.markDirty();
}
if (_mtlComputeEncoderUse != cmdUse) {
_mtlComputeEncoderUse = cmdUse;

View File

@ -152,7 +152,7 @@ public:
protected:
void encodeImpl(uint32_t stage) override;
MVKSmallVector<VkViewport, kMVKCachedViewportScissorCount> _viewports, _dynamicViewports;
MVKSmallVector<VkViewport, kMVKMaxViewportScissorCount> _viewports, _dynamicViewports;
};
@ -180,7 +180,7 @@ public:
protected:
void encodeImpl(uint32_t stage) override;
MVKSmallVector<VkRect2D, kMVKCachedViewportScissorCount> _scissors, _dynamicScissors;
MVKSmallVector<VkRect2D, kMVKMaxViewportScissorCount> _scissors, _dynamicScissors;
};
@ -355,11 +355,11 @@ public:
MVKArrayRef<uint32_t> dynamicOffsets,
uint32_t& dynamicOffsetIndex);
/** Encodes the Metal resource to the Metal command encoder. */
virtual void encodeArgumentBufferResourceUsage(MVKShaderStage stage,
id<MTLResource> mtlResource,
MTLResourceUsage mtlUsage,
MTLRenderStages mtlStages) = 0;
/** Encodes the indirect use of the Metal resource to the Metal command encoder. */
virtual void encodeResourceUsage(MVKShaderStage stage,
id<MTLResource> mtlResource,
MTLResourceUsage mtlUsage,
MTLRenderStages mtlStages) = 0;
void markDirty() override;
@ -548,10 +548,10 @@ public:
std::function<void(MVKCommandEncoder*, MVKMTLTextureBinding&)> bindTexture,
std::function<void(MVKCommandEncoder*, MVKMTLSamplerStateBinding&)> bindSampler);
void encodeArgumentBufferResourceUsage(MVKShaderStage stage,
id<MTLResource> mtlResource,
MTLResourceUsage mtlUsage,
MTLRenderStages mtlStages) override;
void encodeResourceUsage(MVKShaderStage stage,
id<MTLResource> mtlResource,
MTLResourceUsage mtlUsage,
MTLRenderStages mtlStages) override;
/** Offset all buffers for vertex attribute bindings with zero divisors by the given number of strides. */
void offsetZeroDivisorVertexBuffers(MVKGraphicsStage stage, MVKGraphicsPipeline* pipeline, uint32_t firstInstance);
@ -565,6 +565,8 @@ public:
/** Marks any overridden buffer indexes as dirty. */
void markOverriddenBufferIndexesDirty();
void endMetalRenderPass() override;
void markDirty() override;
#pragma mark Construction
@ -577,6 +579,7 @@ protected:
void bindMetalArgumentBuffer(MVKShaderStage stage, MVKMTLBufferBinding& buffBind) override;
ResourceBindings<8> _shaderStageResourceBindings[kMVKShaderStageFragment + 1];
std::unordered_map<id<MTLResource>, MTLRenderStages> _renderUsageStages;
};
@ -609,10 +612,10 @@ public:
/** Sets the current dynamic offset buffer state. */
void bindDynamicOffsetBuffer(const MVKShaderImplicitRezBinding& binding, bool needDynamicOffsetBuffer);
void encodeArgumentBufferResourceUsage(MVKShaderStage stage,
id<MTLResource> mtlResource,
MTLResourceUsage mtlUsage,
MTLRenderStages mtlStages) override;
void encodeResourceUsage(MVKShaderStage stage,
id<MTLResource> mtlResource,
MTLResourceUsage mtlUsage,
MTLRenderStages mtlStages) override;
/**
* Marks the buffer binding using the index as having been overridden,

View File

@ -328,15 +328,12 @@ void MVKDepthStencilCommandEncoderState::beginMetalRenderPass() {
MVKCommandEncoderState::beginMetalRenderPass();
MVKRenderSubpass* mvkSubpass = _cmdEncoder->getSubpass();
MVKPixelFormats* pixFmts = _cmdEncoder->getPixelFormats();
MTLPixelFormat mtlDSFormat = pixFmts->getMTLPixelFormat(mvkSubpass->getDepthStencilFormat());
bool prevHasDepthAttachment = _hasDepthAttachment;
_hasDepthAttachment = pixFmts->isDepthFormat(mtlDSFormat);
_hasDepthAttachment = mvkSubpass->isDepthAttachmentUsed();
if (_hasDepthAttachment != prevHasDepthAttachment) { markDirty(); }
bool prevHasStencilAttachment = _hasStencilAttachment;
_hasStencilAttachment = pixFmts->isStencilFormat(mtlDSFormat);
_hasStencilAttachment = mvkSubpass->isStencilAttachmentUsed();
if (_hasStencilAttachment != prevHasStencilAttachment) { markDirty(); }
}
@ -693,6 +690,11 @@ void MVKGraphicsResourcesCommandEncoderState::encodeBindings(MVKShaderStage stag
encodeMetalArgumentBuffer(stage);
MVKPipeline* pipeline = getPipeline();
if (pipeline && pipeline->usesPhysicalStorageBufferAddressesCapability(stage)) {
getDevice()->encodeGPUAddressableBuffers(this, stage);
}
auto& shaderStage = _shaderStageResourceBindings[stage];
if (shaderStage.swizzleBufferBinding.isDirty) {
@ -755,6 +757,11 @@ void MVKGraphicsResourcesCommandEncoderState::offsetZeroDivisorVertexBuffers(MVK
}
}
void MVKGraphicsResourcesCommandEncoderState::endMetalRenderPass() {
MVKResourcesCommandEncoderState::endMetalRenderPass();
_renderUsageStages.clear();
}
// Mark everything as dirty
void MVKGraphicsResourcesCommandEncoderState::markDirty() {
MVKResourcesCommandEncoderState::markDirty();
@ -963,10 +970,10 @@ void MVKGraphicsResourcesCommandEncoderState::bindMetalArgumentBuffer(MVKShaderS
bindBuffer(stage, buffBind);
}
void MVKGraphicsResourcesCommandEncoderState::encodeArgumentBufferResourceUsage(MVKShaderStage stage,
id<MTLResource> mtlResource,
MTLResourceUsage mtlUsage,
MTLRenderStages mtlStages) {
void MVKGraphicsResourcesCommandEncoderState::encodeResourceUsage(MVKShaderStage stage,
id<MTLResource> mtlResource,
MTLResourceUsage mtlUsage,
MTLRenderStages mtlStages) {
if (mtlResource && mtlStages) {
if (stage == kMVKShaderStageTessCtl) {
auto* mtlCompEnc = _cmdEncoder->getMTLComputeEncoder(kMVKCommandUseTessellationVertexTessCtl);
@ -974,7 +981,12 @@ void MVKGraphicsResourcesCommandEncoderState::encodeArgumentBufferResourceUsage(
} else {
auto* mtlRendEnc = _cmdEncoder->_mtlRenderEncoder;
if ([mtlRendEnc respondsToSelector: @selector(useResource:usage:stages:)]) {
[mtlRendEnc useResource: mtlResource usage: mtlUsage stages: mtlStages];
// Within a renderpass, a resource may be used by multiple descriptor bindings,
// each of which may assign a different usage stage. Dynamically accumulate
// usage stages across all descriptor bindings using the resource.
auto& accumStages = _renderUsageStages[mtlResource];
accumStages |= mtlStages;
[mtlRendEnc useResource: mtlResource usage: mtlUsage stages: accumStages];
} else {
[mtlRendEnc useResource: mtlResource usage: mtlUsage];
}
@ -1039,8 +1051,10 @@ void MVKComputeResourcesCommandEncoderState::encodeImpl(uint32_t) {
encodeMetalArgumentBuffer(kMVKShaderStageCompute);
MVKPipeline* pipeline = getPipeline();
bool fullImageViewSwizzle = pipeline ? pipeline->fullImageViewSwizzle() : false;
MVKPipeline* pipeline = getPipeline();
if (pipeline && pipeline->usesPhysicalStorageBufferAddressesCapability(kMVKShaderStageCompute)) {
getDevice()->encodeGPUAddressableBuffers(this, kMVKShaderStageCompute);
}
if (_resourceBindings.swizzleBufferBinding.isDirty) {
for (auto& b : _resourceBindings.textureBindings) {
@ -1053,6 +1067,7 @@ void MVKComputeResourcesCommandEncoderState::encodeImpl(uint32_t) {
_resourceBindings.swizzleBufferBinding.index);
} else {
bool fullImageViewSwizzle = pipeline ? pipeline->fullImageViewSwizzle() : false;
assertMissingSwizzles(_resourceBindings.needsSwizzle && !fullImageViewSwizzle, "compute", _resourceBindings.textureBindings.contents());
}
@ -1116,10 +1131,10 @@ void MVKComputeResourcesCommandEncoderState::bindMetalArgumentBuffer(MVKShaderSt
bindBuffer(buffBind);
}
void MVKComputeResourcesCommandEncoderState::encodeArgumentBufferResourceUsage(MVKShaderStage stage,
id<MTLResource> mtlResource,
MTLResourceUsage mtlUsage,
MTLRenderStages mtlStages) {
void MVKComputeResourcesCommandEncoderState::encodeResourceUsage(MVKShaderStage stage,
id<MTLResource> mtlResource,
MTLResourceUsage mtlUsage,
MTLRenderStages mtlStages) {
if (mtlResource) {
auto* mtlCompEnc = _cmdEncoder->getMTLComputeEncoder(kMVKCommandUseDispatch);
[mtlCompEnc useResource: mtlResource usage: mtlUsage];
@ -1145,7 +1160,7 @@ void MVKOcclusionQueryCommandEncoderState::endMetalRenderPass() {
if ( !_hasRasterized || !vizBuff || _mtlRenderPassQueries.empty() ) { return; } // Nothing to do.
id<MTLComputePipelineState> mtlAccumState = _cmdEncoder->getCommandEncodingPool()->getAccumulateOcclusionQueryResultsMTLComputePipelineState();
id<MTLComputeCommandEncoder> mtlAccumEncoder = _cmdEncoder->getMTLComputeEncoder(kMVKCommandUseAccumOcclusionQuery);
id<MTLComputeCommandEncoder> mtlAccumEncoder = _cmdEncoder->getMTLComputeEncoder(kMVKCommandUseAccumOcclusionQuery, true);
[mtlAccumEncoder setComputePipelineState: mtlAccumState];
for (auto& qryLoc : _mtlRenderPassQueries) {
// Accumulate the current results to the query pool's buffer.
@ -1158,7 +1173,6 @@ void MVKOcclusionQueryCommandEncoderState::endMetalRenderPass() {
[mtlAccumEncoder dispatchThreadgroups: MTLSizeMake(1, 1, 1)
threadsPerThreadgroup: MTLSizeMake(1, 1, 1)];
}
_cmdEncoder->endCurrentMetalEncoding();
_mtlRenderPassQueries.clear();
_hasRasterized = false;
}

View File

@ -107,9 +107,11 @@ namespace std {
#pragma mark -
#pragma mark MVKRPSKeyClearAtt
#define kMVKClearAttachmentCount (kMVKCachedColorAttachmentCount + 1)
#define kMVKClearAttachmentDepthStencilIndex (kMVKClearAttachmentCount - 1)
#define kMVKClearAttachmentLayeredRenderingBitIndex kMVKClearAttachmentCount
const static uint32_t kMVKClearColorAttachmentCount = kMVKMaxColorAttachmentCount;
const static uint32_t kMVKClearAttachmentDepthIndex = kMVKClearColorAttachmentCount;
const static uint32_t kMVKClearAttachmentStencilIndex = kMVKClearAttachmentDepthIndex + 1;
const static uint32_t kMVKClearAttachmentCount = kMVKClearAttachmentStencilIndex + 1;
const static uint32_t kMVKClearAttachmentLayeredRenderingBitIndex = kMVKClearAttachmentStencilIndex + 1;
/**
* Key to use for looking up cached MTLRenderPipelineState instances.

View File

@ -113,6 +113,8 @@ id<MTLSamplerState> MVKCommandResourceFactory::newCmdBlitImageMTLSamplerState(MT
id<MTLRenderPipelineState> MVKCommandResourceFactory::newCmdClearMTLRenderPipelineState(MVKRPSKeyClearAtt& attKey,
MVKVulkanAPIDeviceObject* owner) {
MVKPixelFormats* pixFmts = getPixelFormats();
id<MTLFunction> vtxFunc = newClearVertFunction(attKey); // temp retain
id<MTLFunction> fragFunc = newClearFragFunction(attKey); // temp retain
MTLRenderPipelineDescriptor* plDesc = [MTLRenderPipelineDescriptor new]; // temp retain
@ -122,15 +124,17 @@ id<MTLRenderPipelineState> MVKCommandResourceFactory::newCmdClearMTLRenderPipeli
plDesc.sampleCount = attKey.mtlSampleCount;
plDesc.inputPrimitiveTopologyMVK = MTLPrimitiveTopologyClassTriangle;
for (uint32_t caIdx = 0; caIdx < kMVKClearAttachmentDepthStencilIndex; caIdx++) {
for (uint32_t caIdx = 0; caIdx < kMVKClearColorAttachmentCount; caIdx++) {
MTLRenderPipelineColorAttachmentDescriptor* colorDesc = plDesc.colorAttachments[caIdx];
colorDesc.pixelFormat = (MTLPixelFormat)attKey.attachmentMTLPixelFormats[caIdx];
colorDesc.writeMask = attKey.isAttachmentEnabled(caIdx) ? MTLColorWriteMaskAll : MTLColorWriteMaskNone;
}
MVKPixelFormats* pixFmts = getPixelFormats();
MTLPixelFormat mtlDSFormat = (MTLPixelFormat)attKey.attachmentMTLPixelFormats[kMVKClearAttachmentDepthStencilIndex];
if (pixFmts->isDepthFormat(mtlDSFormat)) { plDesc.depthAttachmentPixelFormat = mtlDSFormat; }
if (pixFmts->isStencilFormat(mtlDSFormat)) { plDesc.stencilAttachmentPixelFormat = mtlDSFormat; }
MTLPixelFormat mtlDepthFormat = (MTLPixelFormat)attKey.attachmentMTLPixelFormats[kMVKClearAttachmentDepthIndex];
if (pixFmts->isDepthFormat(mtlDepthFormat)) { plDesc.depthAttachmentPixelFormat = mtlDepthFormat; }
MTLPixelFormat mtlStencilFormat = (MTLPixelFormat)attKey.attachmentMTLPixelFormats[kMVKClearAttachmentStencilIndex];
if (pixFmts->isStencilFormat(mtlStencilFormat)) { plDesc.stencilAttachmentPixelFormat = mtlStencilFormat; }
MTLVertexDescriptor* vtxDesc = plDesc.vertexDescriptor;
@ -273,7 +277,7 @@ id<MTLFunction> MVKCommandResourceFactory::newBlitFragFunction(MVKRPSKeyBlitImg&
[msl appendLineMVK: @" return out;"];
[msl appendLineMVK: @"}"];
// MVKLogDebug("\n%s", msl.UTF8String);
// MVKLogInfo("\n%s", msl.UTF8String);
return newMTLFunction(msl, funcName);
}
@ -300,15 +304,16 @@ id<MTLFunction> MVKCommandResourceFactory::newClearVertFunction(MVKRPSKeyClearAt
[msl appendLineMVK];
NSString* funcName = @"vertClear";
[msl appendFormat: @"vertex VaryingsPos %@(AttributesPos attributes [[stage_in]], constant ClearColorsIn& ccIn [[buffer(0)]]) {", funcName];
[msl appendFormat: @"vertex VaryingsPos %@(AttributesPos attributes [[stage_in]], constant ClearColorsIn& ccIn [[buffer(0)]]) {", funcName];
[msl appendLineMVK];
[msl appendLineMVK: @" VaryingsPos varyings;"];
[msl appendLineMVK: @" varyings.v_position = float4(attributes.a_position.x, -attributes.a_position.y, ccIn.colors[8].r, 1.0);"];
[msl appendFormat: @" varyings.v_position = float4(attributes.a_position.x, -attributes.a_position.y, ccIn.colors[%d].r, 1.0);", kMVKClearAttachmentDepthIndex];
[msl appendLineMVK];
[msl appendLineMVK: @" varyings.layer = uint(attributes.a_position.w);"];
[msl appendLineMVK: @" return varyings;"];
[msl appendLineMVK: @"}"];
// MVKLogDebug("\n%s", msl.UTF8String);
// MVKLogInfo("\n%s", msl.UTF8String);
return newMTLFunction(msl, funcName);
}
@ -329,7 +334,7 @@ id<MTLFunction> MVKCommandResourceFactory::newClearFragFunction(MVKRPSKeyClearAt
[msl appendLineMVK: @"} ClearColorsIn;"];
[msl appendLineMVK];
[msl appendLineMVK: @"typedef struct {"];
for (uint32_t caIdx = 0; caIdx < kMVKClearAttachmentDepthStencilIndex; caIdx++) {
for (uint32_t caIdx = 0; caIdx < kMVKClearColorAttachmentCount; caIdx++) {
if (attKey.isAttachmentUsed(caIdx)) {
NSString* typeStr = getMTLFormatTypeString((MTLPixelFormat)attKey.attachmentMTLPixelFormats[caIdx]);
[msl appendFormat: @" %@4 color%u [[color(%u)]];", typeStr, caIdx, caIdx];
@ -343,7 +348,7 @@ id<MTLFunction> MVKCommandResourceFactory::newClearFragFunction(MVKRPSKeyClearAt
[msl appendFormat: @"fragment ClearColorsOut %@(VaryingsPos varyings [[stage_in]], constant ClearColorsIn& ccIn [[buffer(0)]]) {", funcName];
[msl appendLineMVK];
[msl appendLineMVK: @" ClearColorsOut ccOut;"];
for (uint32_t caIdx = 0; caIdx < kMVKClearAttachmentDepthStencilIndex; caIdx++) {
for (uint32_t caIdx = 0; caIdx < kMVKClearColorAttachmentCount; caIdx++) {
if (attKey.isAttachmentUsed(caIdx)) {
NSString* typeStr = getMTLFormatTypeString((MTLPixelFormat)attKey.attachmentMTLPixelFormats[caIdx]);
[msl appendFormat: @" ccOut.color%u = %@4(ccIn.colors[%u]);", caIdx, typeStr, caIdx];
@ -353,7 +358,7 @@ id<MTLFunction> MVKCommandResourceFactory::newClearFragFunction(MVKRPSKeyClearAt
[msl appendLineMVK: @" return ccOut;"];
[msl appendLineMVK: @"}"];
// MVKLogDebug("\n%s", msl.UTF8String);
// MVKLogInfo("\n%s", msl.UTF8String);
return newMTLFunction(msl, funcName);
}

View File

@ -19,7 +19,6 @@
#include "MVKBuffer.h"
#include "MVKCommandBuffer.h"
#include "MVKFoundation.h"
#include "MVKEnvironment.h"
#include "mvk_datatypes.hpp"
using namespace std;

View File

@ -120,6 +120,7 @@ public:
/** Encodes the descriptors in the descriptor set that are specified by this layout, */
void bind(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKDescriptorSet* descSet,
MVKShaderResourceBinding& dslMTLRezIdxOffsets,
MVKArrayRef<uint32_t> dynamicOffsets,
@ -127,6 +128,7 @@ public:
/** Encodes this binding layout and the specified descriptor on the specified command encoder immediately. */
void push(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
uint32_t& dstArrayElement,
uint32_t& descriptorCount,
uint32_t& descriptorsPushed,
@ -207,6 +209,7 @@ public:
/** Encodes this descriptor (based on its layout binding index) on the the command encoder. */
virtual void bind(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKDescriptorSetLayoutBinding* mvkDSLBind,
uint32_t elementIndex,
bool stages[],
@ -273,6 +276,7 @@ class MVKBufferDescriptor : public MVKDescriptor {
public:
void bind(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKDescriptorSetLayoutBinding* mvkDSLBind,
uint32_t elementIndex,
bool stages[],
@ -362,6 +366,7 @@ public:
VkDescriptorType getDescriptorType() override { return VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT; }
void bind(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKDescriptorSetLayoutBinding* mvkDSLBind,
uint32_t elementIndex,
bool stages[],
@ -411,6 +416,7 @@ class MVKImageDescriptor : public MVKDescriptor {
public:
void bind(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKDescriptorSetLayoutBinding* mvkDSLBind,
uint32_t elementIndex,
bool stages[],
@ -491,6 +497,7 @@ class MVKSamplerDescriptorMixin {
protected:
void bind(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKDescriptorSetLayoutBinding* mvkDSLBind,
uint32_t elementIndex,
bool stages[],
@ -538,6 +545,7 @@ public:
VkDescriptorType getDescriptorType() override { return VK_DESCRIPTOR_TYPE_SAMPLER; }
void bind(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKDescriptorSetLayoutBinding* mvkDSLBind,
uint32_t elementIndex,
bool stages[],
@ -585,6 +593,7 @@ public:
VkDescriptorType getDescriptorType() override { return VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; }
void bind(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKDescriptorSetLayoutBinding* mvkDSLBind,
uint32_t elementIndex,
bool stages[],
@ -630,6 +639,7 @@ class MVKTexelBufferDescriptor : public MVKDescriptor {
public:
void bind(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKDescriptorSetLayoutBinding* mvkDSLBind,
uint32_t elementIndex,
bool stages[],

View File

@ -20,6 +20,16 @@
#include "MVKDescriptorSet.h"
#include "MVKBuffer.h"
#define BIND_GRAPHICS_OR_COMPUTE(cmdEncoder, bind, pipelineBindPoint, stage, ...) \
do { \
if ((stage) == kMVKShaderStageCompute) { \
if ((cmdEncoder) && (pipelineBindPoint) == VK_PIPELINE_BIND_POINT_COMPUTE) \
(cmdEncoder)->_computeResourcesState.bind(__VA_ARGS__); \
} else { \
if ((cmdEncoder) && (pipelineBindPoint) == VK_PIPELINE_BIND_POINT_GRAPHICS) \
(cmdEncoder)->_graphicsResourcesState.bind(static_cast<MVKShaderStage>(stage), __VA_ARGS__); \
} \
} while (0)
#pragma mark MVKShaderStageResourceBinding
@ -195,6 +205,7 @@ uint32_t MVKDescriptorSetLayoutBinding::getDescriptorCount(MVKDescriptorSet* des
// A null cmdEncoder can be passed to perform a validation pass
void MVKDescriptorSetLayoutBinding::bind(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKDescriptorSet* descSet,
MVKShaderResourceBinding& dslMTLRezIdxOffsets,
MVKArrayRef<uint32_t> dynamicOffsets,
@ -208,7 +219,7 @@ void MVKDescriptorSetLayoutBinding::bind(MVKCommandEncoder* cmdEncoder,
for (uint32_t descIdx = 0; descIdx < descCnt; descIdx++) {
MVKDescriptor* mvkDesc = descSet->getDescriptor(getBinding(), descIdx);
if (mvkDesc->getDescriptorType() == descType) {
mvkDesc->bind(cmdEncoder, this, descIdx, _applyToStage, mtlIdxs, dynamicOffsets, dynamicOffsetIndex);
mvkDesc->bind(cmdEncoder, pipelineBindPoint, this, descIdx, _applyToStage, mtlIdxs, dynamicOffsets, dynamicOffsetIndex);
}
}
}
@ -220,6 +231,7 @@ static const T& get(const void* pData, size_t stride, uint32_t index) {
// A null cmdEncoder can be passed to perform a validation pass
void MVKDescriptorSetLayoutBinding::push(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
uint32_t& dstArrayElement,
uint32_t& descriptorCount,
uint32_t& descriptorsPushed,
@ -271,11 +283,7 @@ void MVKDescriptorSetLayoutBinding::push(MVKCommandEncoder* cmdEncoder,
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
if (_applyToStage[i]) {
bb.index = mtlIdxs.stages[i].bufferIndex + rezIdx;
if (i == kMVKShaderStageCompute) {
if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindBuffer(bb); }
} else {
if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindBuffer(MVKShaderStage(i), bb); }
}
BIND_GRAPHICS_OR_COMPUTE(cmdEncoder, bindBuffer, pipelineBindPoint, i, bb);
}
}
break;
@ -289,11 +297,7 @@ void MVKDescriptorSetLayoutBinding::push(MVKCommandEncoder* cmdEncoder,
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
if (_applyToStage[i]) {
bb.index = mtlIdxs.stages[i].bufferIndex;
if (i == kMVKShaderStageCompute) {
if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindBuffer(bb); }
} else {
if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindBuffer(MVKShaderStage(i), bb); }
}
BIND_GRAPHICS_OR_COMPUTE(cmdEncoder, bindBuffer, pipelineBindPoint, i, bb);
}
}
break;
@ -318,18 +322,10 @@ void MVKDescriptorSetLayoutBinding::push(MVKCommandEncoder* cmdEncoder,
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
if (_applyToStage[i]) {
tb.index = mtlIdxs.stages[i].textureIndex + rezIdx + planeIndex;
if (i == kMVKShaderStageCompute) {
if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindTexture(tb); }
} else {
if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindTexture(MVKShaderStage(i), tb); }
}
BIND_GRAPHICS_OR_COMPUTE(cmdEncoder, bindTexture, pipelineBindPoint, i, tb);
if (_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
bb.index = mtlIdxs.stages[i].bufferIndex + rezIdx;
if (i == kMVKShaderStageCompute) {
if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindBuffer(bb); }
} else {
if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindBuffer(MVKShaderStage(i), bb); }
}
BIND_GRAPHICS_OR_COMPUTE(cmdEncoder, bindBuffer, pipelineBindPoint, i, bb);
}
}
}
@ -351,18 +347,10 @@ void MVKDescriptorSetLayoutBinding::push(MVKCommandEncoder* cmdEncoder,
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
if (_applyToStage[i]) {
tb.index = mtlIdxs.stages[i].textureIndex + rezIdx;
if (i == kMVKShaderStageCompute) {
if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindTexture(tb); }
} else {
if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindTexture(MVKShaderStage(i), tb); }
}
BIND_GRAPHICS_OR_COMPUTE(cmdEncoder, bindTexture, pipelineBindPoint, i, tb);
if (_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
bb.index = mtlIdxs.stages[i].bufferIndex + rezIdx;
if (i == kMVKShaderStageCompute) {
if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindBuffer(bb); }
} else {
if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindBuffer(MVKShaderStage(i), bb); }
}
BIND_GRAPHICS_OR_COMPUTE(cmdEncoder, bindBuffer, pipelineBindPoint, i, bb);
}
}
}
@ -381,11 +369,7 @@ void MVKDescriptorSetLayoutBinding::push(MVKCommandEncoder* cmdEncoder,
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
if (_applyToStage[i]) {
sb.index = mtlIdxs.stages[i].samplerIndex + rezIdx;
if (i == kMVKShaderStageCompute) {
if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindSamplerState(sb); }
} else {
if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindSamplerState(MVKShaderStage(i), sb); }
}
BIND_GRAPHICS_OR_COMPUTE(cmdEncoder, bindSamplerState, pipelineBindPoint, i, sb);
}
}
break;
@ -410,13 +394,8 @@ void MVKDescriptorSetLayoutBinding::push(MVKCommandEncoder* cmdEncoder,
if (_applyToStage[i]) {
tb.index = mtlIdxs.stages[i].textureIndex + rezIdx + planeIndex;
sb.index = mtlIdxs.stages[i].samplerIndex + rezIdx;
if (i == kMVKShaderStageCompute) {
if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindTexture(tb); }
if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindSamplerState(sb); }
} else {
if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindTexture(MVKShaderStage(i), tb); }
if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindSamplerState(MVKShaderStage(i), sb); }
}
BIND_GRAPHICS_OR_COMPUTE(cmdEncoder, bindTexture, pipelineBindPoint, i, tb);
BIND_GRAPHICS_OR_COMPUTE(cmdEncoder, bindSamplerState, pipelineBindPoint, i, sb);
}
}
}
@ -742,6 +721,7 @@ MTLResourceUsage MVKDescriptor::getMTLResourceUsage() {
// A null cmdEncoder can be passed to perform a validation pass
void MVKBufferDescriptor::bind(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKDescriptorSetLayoutBinding* mvkDSLBind,
uint32_t elementIndex,
bool stages[],
@ -762,11 +742,7 @@ void MVKBufferDescriptor::bind(MVKCommandEncoder* cmdEncoder,
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
if (stages[i]) {
bb.index = mtlIndexes.stages[i].bufferIndex + elementIndex;
if (i == kMVKShaderStageCompute) {
if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindBuffer(bb); }
} else {
if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindBuffer(MVKShaderStage(i), bb); }
}
BIND_GRAPHICS_OR_COMPUTE(cmdEncoder, bindBuffer, pipelineBindPoint, i, bb);
}
}
}
@ -786,10 +762,8 @@ void MVKBufferDescriptor::encodeToMetalArgumentBuffer(MVKResourcesCommandEncoder
atIndex: argIdx];
}
if (encodeUsage) {
rezEncState->encodeArgumentBufferResourceUsage(stage,
_mvkBuffer ? _mvkBuffer->getMTLBuffer() : nil,
getMTLResourceUsage(),
mvkDSLBind->getMTLRenderStages());
id<MTLBuffer> mtlBuffer = _mvkBuffer ? _mvkBuffer->getMTLBuffer() : nil;
rezEncState->encodeResourceUsage(stage, mtlBuffer, getMTLResourceUsage(), mvkDSLBind->getMTLRenderStages());
}
}
@ -836,6 +810,7 @@ void MVKBufferDescriptor::reset() {
// A null cmdEncoder can be passed to perform a validation pass
void MVKInlineUniformBlockDescriptor::bind(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKDescriptorSetLayoutBinding* mvkDSLBind,
uint32_t elementIndex,
bool stages[],
@ -852,11 +827,7 @@ void MVKInlineUniformBlockDescriptor::bind(MVKCommandEncoder* cmdEncoder,
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
if (stages[i]) {
bb.index = mtlIndexes.stages[i].bufferIndex;
if (i == kMVKShaderStageCompute) {
if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindBuffer(bb); }
} else {
if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindBuffer(MVKShaderStage(i), bb); }
}
BIND_GRAPHICS_OR_COMPUTE(cmdEncoder, bindBuffer, pipelineBindPoint, i, bb);
}
}
}
@ -876,10 +847,8 @@ void MVKInlineUniformBlockDescriptor::encodeToMetalArgumentBuffer(MVKResourcesCo
atIndex: argIdx];
}
if (encodeUsage) {
rezEncState->encodeArgumentBufferResourceUsage(stage,
_mvkMTLBufferAllocation ? _mvkMTLBufferAllocation->_mtlBuffer : nil,
getMTLResourceUsage(),
mvkDSLBind->getMTLRenderStages());
id<MTLBuffer> mtlBuffer = _mvkMTLBufferAllocation ? _mvkMTLBufferAllocation->_mtlBuffer : nil;
rezEncState->encodeResourceUsage(stage, mtlBuffer, getMTLResourceUsage(), mvkDSLBind->getMTLRenderStages());
}
}
@ -927,6 +896,7 @@ void MVKInlineUniformBlockDescriptor::reset() {
// A null cmdEncoder can be passed to perform a validation pass
void MVKImageDescriptor::bind(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKDescriptorSetLayoutBinding* mvkDSLBind,
uint32_t elementIndex,
bool stages[],
@ -956,18 +926,10 @@ void MVKImageDescriptor::bind(MVKCommandEncoder* cmdEncoder,
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
if (stages[i]) {
tb.index = mtlIndexes.stages[i].textureIndex + elementIndex + planeIndex;
if (i == kMVKShaderStageCompute) {
if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindTexture(tb); }
} else {
if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindTexture(MVKShaderStage(i), tb); }
}
BIND_GRAPHICS_OR_COMPUTE(cmdEncoder, bindTexture, pipelineBindPoint, i, tb);
if (descType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
bb.index = mtlIndexes.stages[i].bufferIndex + elementIndex + planeIndex;
if (i == kMVKShaderStageCompute) {
if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindBuffer(bb); }
} else {
if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindBuffer(MVKShaderStage(i), bb); }
}
BIND_GRAPHICS_OR_COMPUTE(cmdEncoder, bindBuffer, pipelineBindPoint, i, bb);
}
}
}
@ -994,7 +956,7 @@ void MVKImageDescriptor::encodeToMetalArgumentBuffer(MVKResourcesCommandEncoderS
[mtlArgEncoder setTexture: mtlTexture atIndex: argIdx];
}
if (encodeUsage) {
rezEncState->encodeArgumentBufferResourceUsage(stage, mtlTexture, getMTLResourceUsage(), mvkDSLBind->getMTLRenderStages());
rezEncState->encodeResourceUsage(stage, mtlTexture, getMTLResourceUsage(), mvkDSLBind->getMTLRenderStages());
}
if (descType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
id<MTLTexture> mtlTex = mtlTexture.parentTexture ? mtlTexture.parentTexture : mtlTexture;
@ -1005,7 +967,7 @@ void MVKImageDescriptor::encodeToMetalArgumentBuffer(MVKResourcesCommandEncoderS
[mtlArgEncoder setBuffer: mtlBuff offset: mtlTex.bufferOffset atIndex: argIdx];
}
if (encodeUsage) {
rezEncState->encodeArgumentBufferResourceUsage(stage, mtlBuff, getMTLResourceUsage(), mvkDSLBind->getMTLRenderStages());
rezEncState->encodeResourceUsage(stage, mtlBuff, getMTLResourceUsage(), mvkDSLBind->getMTLRenderStages());
}
}
}
@ -1052,6 +1014,7 @@ void MVKImageDescriptor::reset() {
// Metal validation requires each sampler in an array of samplers to be populated,
// even if not used, so populate a default if one hasn't been set.
void MVKSamplerDescriptorMixin::bind(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKDescriptorSetLayoutBinding* mvkDSLBind,
uint32_t elementIndex,
bool stages[],
@ -1069,11 +1032,7 @@ void MVKSamplerDescriptorMixin::bind(MVKCommandEncoder* cmdEncoder,
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
if (stages[i]) {
sb.index = mtlIndexes.stages[i].samplerIndex + elementIndex;
if (i == kMVKShaderStageCompute) {
if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindSamplerState(sb); }
} else {
if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindSamplerState(MVKShaderStage(i), sb); }
}
BIND_GRAPHICS_OR_COMPUTE(cmdEncoder, bindSamplerState, pipelineBindPoint, i, sb);
}
}
}
@ -1140,13 +1099,14 @@ void MVKSamplerDescriptorMixin::reset() {
// A null cmdEncoder can be passed to perform a validation pass
void MVKSamplerDescriptor::bind(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKDescriptorSetLayoutBinding* mvkDSLBind,
uint32_t elementIndex,
bool stages[],
MVKShaderResourceBinding& mtlIndexes,
MVKArrayRef<uint32_t> dynamicOffsets,
uint32_t& dynamicOffsetIndex) {
MVKSamplerDescriptorMixin::bind(cmdEncoder, mvkDSLBind, elementIndex, stages, mtlIndexes, dynamicOffsets, dynamicOffsetIndex);
MVKSamplerDescriptorMixin::bind(cmdEncoder, pipelineBindPoint, mvkDSLBind, elementIndex, stages, mtlIndexes, dynamicOffsets, dynamicOffsetIndex);
}
void MVKSamplerDescriptor::encodeToMetalArgumentBuffer(MVKResourcesCommandEncoderState* rezEncState,
@ -1189,14 +1149,15 @@ void MVKSamplerDescriptor::reset() {
// A null cmdEncoder can be passed to perform a validation pass
void MVKCombinedImageSamplerDescriptor::bind(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKDescriptorSetLayoutBinding* mvkDSLBind,
uint32_t elementIndex,
bool stages[],
MVKShaderResourceBinding& mtlIndexes,
MVKArrayRef<uint32_t> dynamicOffsets,
uint32_t& dynamicOffsetIndex) {
MVKImageDescriptor::bind(cmdEncoder, mvkDSLBind, elementIndex, stages, mtlIndexes, dynamicOffsets, dynamicOffsetIndex);
MVKSamplerDescriptorMixin::bind(cmdEncoder, mvkDSLBind, elementIndex, stages, mtlIndexes, dynamicOffsets, dynamicOffsetIndex);
MVKImageDescriptor::bind(cmdEncoder, pipelineBindPoint, mvkDSLBind, elementIndex, stages, mtlIndexes, dynamicOffsets, dynamicOffsetIndex);
MVKSamplerDescriptorMixin::bind(cmdEncoder, pipelineBindPoint, mvkDSLBind, elementIndex, stages, mtlIndexes, dynamicOffsets, dynamicOffsetIndex);
}
void MVKCombinedImageSamplerDescriptor::encodeToMetalArgumentBuffer(MVKResourcesCommandEncoderState* rezEncState,
@ -1242,6 +1203,7 @@ void MVKCombinedImageSamplerDescriptor::reset() {
// A null cmdEncoder can be passed to perform a validation pass
void MVKTexelBufferDescriptor::bind(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKDescriptorSetLayoutBinding* mvkDSLBind,
uint32_t elementIndex,
bool stages[],
@ -1263,18 +1225,10 @@ void MVKTexelBufferDescriptor::bind(MVKCommandEncoder* cmdEncoder,
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
if (stages[i]) {
tb.index = mtlIndexes.stages[i].textureIndex + elementIndex;
if (i == kMVKShaderStageCompute) {
if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindTexture(tb); }
} else {
if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindTexture(MVKShaderStage(i), tb); }
}
BIND_GRAPHICS_OR_COMPUTE(cmdEncoder, bindTexture, pipelineBindPoint, i, tb);
if (descType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
bb.index = mtlIndexes.stages[i].bufferIndex + elementIndex;
if (i == kMVKShaderStageCompute) {
if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindBuffer(bb); }
} else {
if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindBuffer(MVKShaderStage(i), bb); }
}
BIND_GRAPHICS_OR_COMPUTE(cmdEncoder, bindBuffer, pipelineBindPoint, i, bb);
}
}
}
@ -1294,7 +1248,7 @@ void MVKTexelBufferDescriptor::encodeToMetalArgumentBuffer(MVKResourcesCommandEn
[mtlArgEncoder setTexture: mtlTexture atIndex: argIdx];
}
if (encodeUsage) {
rezEncState->encodeArgumentBufferResourceUsage(stage, mtlTexture, getMTLResourceUsage(), mvkDSLBind->getMTLRenderStages());
rezEncState->encodeResourceUsage(stage, mtlTexture, getMTLResourceUsage(), mvkDSLBind->getMTLRenderStages());
}
if (descType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
@ -1305,7 +1259,7 @@ void MVKTexelBufferDescriptor::encodeToMetalArgumentBuffer(MVKResourcesCommandEn
[mtlArgEncoder setBuffer: mtlBuff offset: mtlTexture.bufferOffset atIndex: argIdx];
}
if (encodeUsage) {
rezEncState->encodeArgumentBufferResourceUsage(stage, mtlBuff, getMTLResourceUsage(), mvkDSLBind->getMTLRenderStages());
rezEncState->encodeResourceUsage(stage, mtlBuff, getMTLResourceUsage(), mvkDSLBind->getMTLRenderStages());
}
}
}

View File

@ -81,6 +81,7 @@ public:
/** Encodes this descriptor set layout and the specified descriptor updates on the specified command encoder immediately. */
void pushDescriptorSet(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKArrayRef<VkWriteDescriptorSet> descriptorWrites,
MVKShaderResourceBinding& dslMTLRezIdxOffsets);
@ -338,6 +339,9 @@ public:
/** Get the type of this template. */
VkDescriptorUpdateTemplateType getType() const;
/** Get the bind point of this template */
VkPipelineBindPoint getBindPoint() const { return _pipelineBindPoint; }
/** Constructs an instance for the specified device. */
MVKDescriptorUpdateTemplate(MVKDevice* device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo);
@ -347,6 +351,7 @@ public:
protected:
void propagateDebugName() override {}
VkPipelineBindPoint _pipelineBindPoint;
VkDescriptorUpdateTemplateType _type;
MVKSmallVector<VkDescriptorUpdateTemplateEntry, 1> _entries;
};

View File

@ -43,7 +43,7 @@ void MVKDescriptorSetLayout::bindDescriptorSet(MVKCommandEncoder* cmdEncoder,
dynamicOffsets, dynamicOffsetIndex); }
if ( !isUsingMetalArgumentBuffers() ) {
for (auto& dslBind : _bindings) {
dslBind.bind(cmdEncoder, descSet, dslMTLRezIdxOffsets, dynamicOffsets, dynamicOffsetIndex);
dslBind.bind(cmdEncoder, pipelineBindPoint, descSet, dslMTLRezIdxOffsets, dynamicOffsets, dynamicOffsetIndex);
}
}
}
@ -91,6 +91,7 @@ static const void* getWriteParameters(VkDescriptorType type, const VkDescriptorI
// A null cmdEncoder can be passed to perform a validation pass
void MVKDescriptorSetLayout::pushDescriptorSet(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKArrayRef<VkWriteDescriptorSet> descriptorWrites,
MVKShaderResourceBinding& dslMTLRezIdxOffsets) {
@ -127,7 +128,7 @@ void MVKDescriptorSetLayout::pushDescriptorSet(MVKCommandEncoder* cmdEncoder,
pBufferInfo, pTexelBufferView, pInlineUniformBlock, stride);
uint32_t descriptorsPushed = 0;
uint32_t bindIdx = _bindingToIndex[dstBinding];
_bindings[bindIdx].push(cmdEncoder, dstArrayElement, descriptorCount,
_bindings[bindIdx].push(cmdEncoder, pipelineBindPoint, dstArrayElement, descriptorCount,
descriptorsPushed, descWrite.descriptorType,
stride, pData, dslMTLRezIdxOffsets);
pBufferInfo += descriptorsPushed;
@ -148,6 +149,7 @@ void MVKDescriptorSetLayout::pushDescriptorSet(MVKCommandEncoder* cmdEncoder,
return;
if (!cmdEncoder) { clearConfigurationResult(); }
VkPipelineBindPoint bindPoint = descUpdateTemplate->getBindPoint();
for (uint32_t i = 0; i < descUpdateTemplate->getNumberOfEntries(); i++) {
const VkDescriptorUpdateTemplateEntry* pEntry = descUpdateTemplate->getEntry(i);
uint32_t dstBinding = pEntry->dstBinding;
@ -161,7 +163,7 @@ void MVKDescriptorSetLayout::pushDescriptorSet(MVKCommandEncoder* cmdEncoder,
if (!_bindingToIndex.count(dstBinding)) continue;
uint32_t descriptorsPushed = 0;
uint32_t bindIdx = _bindingToIndex[dstBinding];
_bindings[bindIdx].push(cmdEncoder, dstArrayElement, descriptorCount,
_bindings[bindIdx].push(cmdEncoder, bindPoint, dstArrayElement, descriptorCount,
descriptorsPushed, pEntry->descriptorType,
pEntry->stride, pCurData, dslMTLRezIdxOffsets);
pCurData = (const char*)pCurData + pEntry->stride * descriptorsPushed;
@ -876,7 +878,7 @@ VkDescriptorUpdateTemplateType MVKDescriptorUpdateTemplate::getType() const {
MVKDescriptorUpdateTemplate::MVKDescriptorUpdateTemplate(MVKDevice* device,
const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo) :
MVKVulkanAPIDeviceObject(device), _type(pCreateInfo->templateType) {
MVKVulkanAPIDeviceObject(device), _pipelineBindPoint(pCreateInfo->pipelineBindPoint), _type(pCreateInfo->templateType) {
for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; i++)
_entries.push_back(pCreateInfo->pDescriptorUpdateEntries[i]);

View File

@ -18,7 +18,6 @@
#pragma once
#include "MVKEnvironment.h"
#include "MVKFoundation.h"
#include "MVKVulkanAPIObject.h"
#include "MVKMTLResourceBindings.h"
@ -27,6 +26,7 @@
#include "MVKSmallVector.h"
#include "MVKPixelFormats.h"
#include "MVKOSExtensions.h"
#include "mvk_private_api.h"
#include "mvk_datatypes.hpp"
#include <string>
#include <mutex>
@ -51,6 +51,7 @@ class MVKDeviceMemory;
class MVKFence;
class MVKSemaphore;
class MVKTimelineSemaphore;
class MVKDeferredOperation;
class MVKEvent;
class MVKSemaphoreImpl;
class MVKQueryPool;
@ -63,6 +64,7 @@ class MVKSamplerYcbcrConversion;
class MVKDescriptorSetLayout;
class MVKDescriptorPool;
class MVKDescriptorUpdateTemplate;
class MVKResourcesCommandEncoderState;
class MVKFramebuffer;
class MVKRenderPass;
class MVKCommandPool;
@ -79,8 +81,8 @@ const static uint32_t kMVKQueueFamilyCount = 4;
const static uint32_t kMVKQueueCountPerQueueFamily = 1; // Must be 1. See comments in MVKPhysicalDevice::getQueueFamilies()
const static uint32_t kMVKMinSwapchainImageCount = 2;
const static uint32_t kMVKMaxSwapchainImageCount = 3;
const static uint32_t kMVKCachedViewportScissorCount = 16;
const static uint32_t kMVKCachedColorAttachmentCount = 8;
const static uint32_t kMVKMaxColorAttachmentCount = 8;
const static uint32_t kMVKMaxViewportScissorCount = 16;
const static uint32_t kMVKMaxDescriptorSetCount = SPIRV_CROSS_NAMESPACE::kMaxArgumentBuffers;
#if !MVK_XCODE_12
@ -137,7 +139,7 @@ public:
void getProperties(VkPhysicalDeviceProperties2* properties);
/** Returns the name of this device. */
inline const char* getName() { return _properties.deviceName; }
const char* getName() { return _properties.deviceName; }
/** Populates the specified structure with the format properties of this device. */
void getFormatProperties(VkFormat format, VkFormatProperties* pFormatProperties);
@ -284,7 +286,7 @@ public:
#pragma mark Memory models
/** Returns a pointer to the memory characteristics of this device. */
inline const VkPhysicalDeviceMemoryProperties* getMemoryProperties() { return &_memoryProperties; }
const VkPhysicalDeviceMemoryProperties* getMemoryProperties() { return &_memoryProperties; }
/** Populates the specified memory properties with the memory characteristics of this device. */
VkResult getMemoryProperties(VkPhysicalDeviceMemoryProperties* pMemoryProperties);
@ -296,31 +298,31 @@ public:
* Returns a bit mask of all memory type indices.
* Each bit [0..31] in the returned bit mask indicates a distinct memory type.
*/
inline uint32_t getAllMemoryTypes() { return _allMemoryTypes; }
uint32_t getAllMemoryTypes() { return _allMemoryTypes; }
/**
* Returns a bit mask of all memory type indices that allow host visibility to the memory.
* Each bit [0..31] in the returned bit mask indicates a distinct memory type.
*/
inline uint32_t getHostVisibleMemoryTypes() { return _hostVisibleMemoryTypes; }
uint32_t getHostVisibleMemoryTypes() { return _hostVisibleMemoryTypes; }
/**
* Returns a bit mask of all memory type indices that are coherent between host and device.
* Each bit [0..31] in the returned bit mask indicates a distinct memory type.
*/
inline uint32_t getHostCoherentMemoryTypes() { return _hostCoherentMemoryTypes; }
uint32_t getHostCoherentMemoryTypes() { return _hostCoherentMemoryTypes; }
/**
* Returns a bit mask of all memory type indices that do NOT allow host visibility to the memory.
* Each bit [0..31] in the returned bit mask indicates a distinct memory type.
*/
inline uint32_t getPrivateMemoryTypes() { return _privateMemoryTypes; }
uint32_t getPrivateMemoryTypes() { return _privateMemoryTypes; }
/**
* Returns a bit mask of all memory type indices that are lazily allocated.
* Each bit [0..31] in the returned bit mask indicates a distinct memory type.
*/
inline uint32_t getLazilyAllocatedMemoryTypes() { return _lazilyAllocatedMemoryTypes; }
uint32_t getLazilyAllocatedMemoryTypes() { return _lazilyAllocatedMemoryTypes; }
/** Returns whether this is a unified memory device. */
bool getHasUnifiedMemory();
@ -335,21 +337,13 @@ public:
#pragma mark Metal
/** Populates the specified structure with the Metal-specific features of this device. */
inline const MVKPhysicalDeviceMetalFeatures* getMetalFeatures() { return &_metalFeatures; }
const MVKPhysicalDeviceMetalFeatures* getMetalFeatures() { return &_metalFeatures; }
/** Returns whether or not vertex instancing can be used to implement multiview. */
inline bool canUseInstancingForMultiview() { return _metalFeatures.layeredRendering && _metalFeatures.deferredStoreActions; }
bool canUseInstancingForMultiview() { return _metalFeatures.layeredRendering && _metalFeatures.deferredStoreActions; }
/** Returns the underlying Metal device. */
inline id<MTLDevice> getMTLDevice() { return _mtlDevice; }
/*** Replaces the underlying Metal device .*/
inline void replaceMTLDevice(id<MTLDevice> mtlDevice) {
if (mtlDevice != _mtlDevice) {
[_mtlDevice release];
_mtlDevice = [mtlDevice retain];
}
}
id<MTLDevice> getMTLDevice() { return _mtlDevice; }
/** Returns whether the MSL version is supported on this device. */
bool mslVersionIsAtLeast(MTLLanguageVersion minVer) { return _metalFeatures.mslVersionEnum >= minVer; }
@ -386,7 +380,7 @@ public:
* Returns a reference to this object suitable for use as a Vulkan API handle.
* This is the compliment of the getMVKPhysicalDevice() method.
*/
inline VkPhysicalDevice getVkPhysicalDevice() { return (VkPhysicalDevice)getVkHandle(); }
VkPhysicalDevice getVkPhysicalDevice() { return (VkPhysicalDevice)getVkHandle(); }
/**
* Retrieves the MVKPhysicalDevice instance referenced by the VkPhysicalDevice handle.
@ -403,6 +397,7 @@ protected:
MTLFeatureSet getMaximalMTLFeatureSet();
void initMetalFeatures();
void initFeatures();
void initMTLDevice();
void initProperties();
void initLimits();
void initGPUInfoProperties();
@ -473,16 +468,16 @@ public:
MVKInstance* getInstance() override { return _physicalDevice->getInstance(); }
/** Returns the physical device underlying this logical device. */
inline MVKPhysicalDevice* getPhysicalDevice() { return _physicalDevice; }
MVKPhysicalDevice* getPhysicalDevice() { return _physicalDevice; }
/** Returns info about the pixel format supported by the physical device. */
inline MVKPixelFormats* getPixelFormats() { return &_physicalDevice->_pixelFormats; }
MVKPixelFormats* getPixelFormats() { return &_physicalDevice->_pixelFormats; }
/** Returns the name of this device. */
inline const char* getName() { return _pProperties->deviceName; }
const char* getName() { return _pProperties->deviceName; }
/** Returns the common resource factory for creating command resources. */
inline MVKCommandResourceFactory* getCommandResourceFactory() { return _commandResourceFactory; }
MVKCommandResourceFactory* getCommandResourceFactory() { return _commandResourceFactory; }
/** Returns the function pointer corresponding to the specified named entry point. */
PFN_vkVoidFunction getProcAddr(const char* pName);
@ -563,6 +558,10 @@ public:
const VkAllocationCallbacks* pAllocator);
void destroySemaphore(MVKSemaphore* mvkSem4,
const VkAllocationCallbacks* pAllocator);
MVKDeferredOperation* createDeferredOperation(const VkAllocationCallbacks* pAllocator);
void destroyDeferredOperation(MVKDeferredOperation* mvkDeferredOperation,
const VkAllocationCallbacks* pAllocator);
MVKEvent* createEvent(const VkEventCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator);
@ -630,6 +629,8 @@ public:
MVKFramebuffer* createFramebuffer(const VkFramebufferCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator);
MVKFramebuffer* createFramebuffer(const VkRenderingInfo* pRenderingInfo,
const VkAllocationCallbacks* pAllocator);
void destroyFramebuffer(MVKFramebuffer* mvkFB,
const VkAllocationCallbacks* pAllocator);
@ -637,6 +638,8 @@ public:
const VkAllocationCallbacks* pAllocator);
MVKRenderPass* createRenderPass(const VkRenderPassCreateInfo2* pCreateInfo,
const VkAllocationCallbacks* pAllocator);
MVKRenderPass* createRenderPass(const VkRenderingInfo* pRenderingInfo,
const VkAllocationCallbacks* pAllocator);
void destroyRenderPass(MVKRenderPass* mvkRP,
const VkAllocationCallbacks* pAllocator);
@ -660,6 +663,22 @@ public:
#pragma mark Operations
/** Tell the GPU to be ready to use any of the GPU-addressable buffers. */
void encodeGPUAddressableBuffers(MVKResourcesCommandEncoderState* rezEncState,
MVKShaderStage stage);
/** Adds the specified host semaphore to be woken upon device loss. */
void addSemaphore(MVKSemaphoreImpl* sem4);
/** Removes the specified host semaphore. */
void removeSemaphore(MVKSemaphoreImpl* sem4);
/** Adds the specified timeline semaphore to be woken at the specified value upon device loss. */
void addTimelineSemaphore(MVKTimelineSemaphore* sem4, uint64_t value);
/** Removes the specified timeline semaphore. */
void removeTimelineSemaphore(MVKTimelineSemaphore* sem4, uint64_t value);
/** Applies the specified global memory barrier to all resource issued by this device. */
void applyMemoryBarrier(VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask,
@ -677,7 +696,7 @@ public:
* number of nanoseconds between the two calls. The convenience function mvkGetElapsedMilliseconds()
* can be used to perform this calculation.
*/
inline uint64_t getPerformanceTimestamp() { return _isPerformanceTracking ? mvkGetTimestamp() : 0; }
uint64_t getPerformanceTimestamp() { return _isPerformanceTracking ? mvkGetTimestamp() : 0; }
/**
* If performance is being tracked, adds the performance for an activity with a duration
@ -685,8 +704,8 @@ public:
*
* If endTime is zero or not supplied, the current time is used.
*/
inline void addActivityPerformance(MVKPerformanceTracker& activityTracker,
uint64_t startTime, uint64_t endTime = 0) {
void addActivityPerformance(MVKPerformanceTracker& activityTracker,
uint64_t startTime, uint64_t endTime = 0) {
if (_isPerformanceTracking) {
updateActivityPerformance(activityTracker, startTime, endTime);
@ -720,7 +739,7 @@ public:
#pragma mark Metal
/** Returns the underlying Metal device. */
inline id<MTLDevice> getMTLDevice() { return _physicalDevice->getMTLDevice(); }
id<MTLDevice> getMTLDevice() { return _physicalDevice->getMTLDevice(); }
/** Returns whether this device is using Metal argument buffers. */
bool isUsingMetalArgumentBuffers() { return _isUsingMetalArgumentBuffers; };
@ -796,7 +815,7 @@ public:
void stopAutoGPUCapture(MVKConfigAutoGPUCaptureScope autoGPUCaptureScope);
/** Returns whether this instance is currently automatically capturing a GPU trace. */
inline bool isCurrentlyAutoGPUCapturing() { return _isCurrentlyAutoGPUCapturing; }
bool isCurrentlyAutoGPUCapturing() { return _isCurrentlyAutoGPUCapturing; }
/** Returns the Metal objects underpinning the Vulkan objects indicated in the pNext chain of pMetalObjectsInfo. */
void getMetalObjects(VkExportMetalObjectsInfoEXT* pMetalObjectsInfo);
@ -844,7 +863,7 @@ public:
* Returns a reference to this object suitable for use as a Vulkan API handle.
* This is the compliment of the getMVKDevice() method.
*/
inline VkDevice getVkDevice() { return (VkDevice)getVkHandle(); }
VkDevice getVkDevice() { return (VkDevice)getVkHandle(); }
/**
* Retrieves the MVKDevice instance referenced by the VkDevice handle.
@ -855,26 +874,18 @@ public:
}
protected:
friend class MVKSemaphoreEmulated;
friend class MVKTimelineSemaphoreMTLEvent;
friend class MVKTimelineSemaphoreEmulated;
friend class MVKFence;
friend class MVKEventEmulated;
void propagateDebugName() override {}
MVKResource* addResource(MVKResource* rez);
MVKResource* removeResource(MVKResource* rez);
void addSemaphore(MVKSemaphoreImpl* sem4);
void removeSemaphore(MVKSemaphoreImpl* sem4);
void addTimelineSemaphore(MVKTimelineSemaphore* sem4, uint64_t value);
void removeTimelineSemaphore(MVKTimelineSemaphore* sem4, uint64_t value);
MVKBuffer* addBuffer(MVKBuffer* mvkBuff);
MVKBuffer* removeBuffer(MVKBuffer* mvkBuff);
MVKImage* addImage(MVKImage* mvkImg);
MVKImage* removeImage(MVKImage* mvkImg);
void initPerformanceTracking();
void initPhysicalDevice(MVKPhysicalDevice* physicalDevice, const VkDeviceCreateInfo* pCreateInfo);
void initQueues(const VkDeviceCreateInfo* pCreateInfo);
void reservePrivateData(const VkDeviceCreateInfo* pCreateInfo);
void enableFeatures(const VkDeviceCreateInfo* pCreateInfo);
void enableFeatures(VkBaseInStructure* pEnabled, const VkBaseInStructure* pRequested, const VkBaseInStructure* pAvailable, uint32_t count);
void enableFeatures(VkBool32* pEnabledBools, const VkBool32* pRequestedBools, const VkBool32* pAvailableBools, uint32_t count);
template<typename S> void enableFeatures(S* pEnabled, const S* pRequested, const S* pAvailable, uint32_t count);
template<typename S> void enableFeatures(S* pRequested, VkBool32* pEnabledBools, const VkBool32* pRequestedBools, const VkBool32* pAvailableBools, uint32_t count);
void enableExtensions(const VkDeviceCreateInfo* pCreateInfo);
const char* getActivityPerformanceDescription(MVKPerformanceTracker& activity, MVKPerformanceStatistics& perfStats);
void logActivityPerformance(MVKPerformanceTracker& activity, MVKPerformanceStatistics& perfStats, bool isInline = false);
@ -887,6 +898,7 @@ protected:
MVKCommandResourceFactory* _commandResourceFactory = nullptr;
MVKSmallVector<MVKSmallVector<MVKQueue*, kMVKQueueCountPerQueueFamily>, kMVKQueueFamilyCount> _queuesByQueueFamilyIndex;
MVKSmallVector<MVKResource*, 256> _resources;
MVKSmallVector<MVKBuffer*, 8> _gpuAddressableBuffers;
MVKSmallVector<MVKPrivateDataSlot*> _privateDataSlots;
MVKSmallVector<bool> _privateDataSlotsAvailability;
MVKSmallVector<MVKSemaphoreImpl*> _awaitingSemaphores;
@ -1050,6 +1062,14 @@ protected:
/** Returns the registry ID of the specified device, or zero if the device does not have a registry ID. */
uint64_t mvkGetRegistryID(id<MTLDevice> mtlDevice);
/**
* Returns a value identifying the physical location of the specified device.
* The returned value is a hash of the location, locationNumber, peerGroupID,
* and peerIndex properties of the device. On devices with only one built-in GPU,
* the returned value will be zero.
*/
uint64_t mvkGetLocationID(id<MTLDevice> mtlDevice);
/** Returns whether the MTLDevice supports BC texture compression. */
bool mvkSupportsBCTextureCompression(id<MTLDevice> mtlDevice);

View File

@ -32,7 +32,7 @@
#include "MVKCommandPool.h"
#include "MVKFoundation.h"
#include "MVKCodec.h"
#include "MVKEnvironment.h"
#include "MVKStrings.h"
#include <MoltenVKShaderConverter/SPIRVToMSLConverter.h>
#import "CAMetalLayer+MoltenVK.h"
@ -407,6 +407,11 @@ void MVKPhysicalDevice::getFeatures(VkPhysicalDeviceFeatures2* features) {
atomicFloatFeatures->sparseImageFloat32AtomicAdd = false;
break;
}
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT: {
auto* demoteFeatures = (VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT*)next;
demoteFeatures->shaderDemoteToHelperInvocation = mvkOSVersionIsAtLeast(11.0, 14.0);
break;
}
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SWAPCHAIN_MAINTENANCE_1_FEATURES_EXT: {
auto* swapchainMaintenance1Features = (VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT*)next;
swapchainMaintenance1Features->swapchainMaintenance1 = true;
@ -727,15 +732,21 @@ void MVKPhysicalDevice::getProperties(VkPhysicalDeviceProperties2* properties) {
}
}
// Since these are uint8_t arrays, use Big-Endian byte ordering,
// so a hex dump of the array is human readable in its parts.
void MVKPhysicalDevice::populateDeviceIDProperties(VkPhysicalDeviceVulkan11Properties* pVk11Props) {
uint8_t* uuid;
size_t uuidComponentOffset;
// ---- Device ID ----------------------------------------------
// ---- Device UUID ----------------------------------------------
uuid = pVk11Props->deviceUUID;
uuidComponentOffset = 0;
mvkClear(uuid, VK_UUID_SIZE);
// From Vulkan spec: deviceUUID must be universally unique for the device,
// AND must be immutable for a given device across instances, processes,
// driver APIs, driver versions, and system reboots.
// First 4 bytes contains GPU vendor ID
uint32_t vendorID = _properties.vendorID;
*(uint32_t*)&uuid[uuidComponentOffset] = NSSwapHostIntToBig(vendorID);
@ -746,10 +757,10 @@ void MVKPhysicalDevice::populateDeviceIDProperties(VkPhysicalDeviceVulkan11Prope
*(uint32_t*)&uuid[uuidComponentOffset] = NSSwapHostIntToBig(deviceID);
uuidComponentOffset += sizeof(deviceID);
// Last 8 bytes contain the GPU registry ID
uint64_t regID = mvkGetRegistryID(_mtlDevice);
*(uint64_t*)&uuid[uuidComponentOffset] = NSSwapHostLongLongToBig(regID);
uuidComponentOffset += sizeof(regID);
// Last 8 bytes contain the GPU location identifier
uint64_t locID = mvkGetLocationID(_mtlDevice);
*(uint64_t*)&uuid[uuidComponentOffset] = NSSwapHostLongLongToBig(locID);
uuidComponentOffset += sizeof(locID);
// ---- Driver ID ----------------------------------------------
uuid = pVk11Props->driverUUID;
@ -772,10 +783,10 @@ void MVKPhysicalDevice::populateDeviceIDProperties(VkPhysicalDeviceVulkan11Prope
*(uint32_t*)&uuid[uuidComponentOffset] = NSSwapHostIntToBig(gpuCap);
uuidComponentOffset += sizeof(gpuCap);
// ---- LUID ignored for Metal devices ------------------------
mvkClear(pVk11Props->deviceLUID, VK_LUID_SIZE);
pVk11Props->deviceNodeMask = 0;
pVk11Props->deviceLUIDValid = VK_FALSE;
// ---- Device LUID ------------------------
*(uint64_t*)pVk11Props->deviceLUID = NSSwapHostLongLongToBig(mvkGetRegistryID(_mtlDevice));
pVk11Props->deviceNodeMask = 1; // Per Vulkan spec
pVk11Props->deviceLUIDValid = VK_TRUE;
}
void MVKPhysicalDevice::populateSubgroupProperties(VkPhysicalDeviceVulkan11Properties* pVk11Props) {
@ -1585,6 +1596,7 @@ MVKPhysicalDevice::MVKPhysicalDevice(MVKInstance* mvkInstance, id<MTLDevice> mtl
_supportedExtensions(this, true),
_pixelFormats(this) { // Set after _mtlDevice
initMTLDevice();
initProperties(); // Call first.
initMetalFeatures(); // Call second.
initFeatures(); // Call third.
@ -1597,6 +1609,15 @@ MVKPhysicalDevice::MVKPhysicalDevice(MVKInstance* mvkInstance, id<MTLDevice> mtl
logGPUInfo();
}
void MVKPhysicalDevice::initMTLDevice() {
#if MVK_XCODE_14_3 && MVK_MACOS && !MVK_MACCAT
if ([_mtlDevice respondsToSelector: @selector(setShouldMaximizeConcurrentCompilation:)]) {
[_mtlDevice setShouldMaximizeConcurrentCompilation: mvkConfig().shouldMaximizeConcurrentCompilation];
MVKLogInfoIf(mvkConfig().debugMode, "maximumConcurrentCompilationTaskCount %lu", _mtlDevice.maximumConcurrentCompilationTaskCount);
}
#endif
}
// Initializes the physical device properties (except limits).
void MVKPhysicalDevice::initProperties() {
mvkClear(&_properties); // Start with everything cleared
@ -1803,7 +1824,11 @@ void MVKPhysicalDevice::initMetalFeatures() {
if ( mvkOSVersionIsAtLeast(13.0) ) {
_metalFeatures.mslVersionEnum = MTLLanguageVersion2_2;
_metalFeatures.placementHeaps = mvkConfig().useMTLHeap;
#if MVK_OS_SIMULATOR
_metalFeatures.nativeTextureSwizzle = false;
#else
_metalFeatures.nativeTextureSwizzle = true;
#endif
if (supportsMTLGPUFamily(Apple3)) {
_metalFeatures.native3DCompressedTextures = true;
}
@ -2093,7 +2118,7 @@ void MVKPhysicalDevice::initMetalFeatures() {
// and a wider combination of GPU's on older macOS versions is under way.
#if MVK_MACOS
_metalFeatures.descriptorSetArgumentBuffers = (_metalFeatures.argumentBuffers &&
(mvkOSVersionIsAtLeast(10.16) ||
(mvkOSVersionIsAtLeast(11.0) ||
_properties.vendorID == kIntelVendorId));
#endif
// Currently, if we don't support descriptor set argument buffers, we can't support argument buffers.
@ -2197,9 +2222,13 @@ void MVKPhysicalDevice::initFeatures() {
_features.dualSrcBlend = true;
}
#if MVK_OS_SIMULATOR
_features.depthClamp = false;
#else
if (supportsMTLFeatureSet(iOS_GPUFamily2_v4)) {
_features.depthClamp = true;
}
#endif
if (supportsMTLFeatureSet(iOS_GPUFamily3_v2)) {
_features.tessellationShader = true;
@ -2269,17 +2298,17 @@ void MVKPhysicalDevice::initFeatures() {
void MVKPhysicalDevice::initLimits() {
#if MVK_TVOS
_properties.limits.maxColorAttachments = kMVKCachedColorAttachmentCount;
_properties.limits.maxColorAttachments = kMVKMaxColorAttachmentCount;
#endif
#if MVK_IOS
if (supportsMTLFeatureSet(iOS_GPUFamily2_v1)) {
_properties.limits.maxColorAttachments = kMVKCachedColorAttachmentCount;
_properties.limits.maxColorAttachments = kMVKMaxColorAttachmentCount;
} else {
_properties.limits.maxColorAttachments = 4; // < kMVKCachedColorAttachmentCount
_properties.limits.maxColorAttachments = 4; // < kMVKMaxColorAttachmentCount
}
#endif
#if MVK_MACOS
_properties.limits.maxColorAttachments = kMVKCachedColorAttachmentCount;
_properties.limits.maxColorAttachments = kMVKMaxColorAttachmentCount;
#endif
_properties.limits.maxFragmentOutputAttachments = _properties.limits.maxColorAttachments;
@ -2309,7 +2338,7 @@ void MVKPhysicalDevice::initLimits() {
float maxVPDim = max(_properties.limits.maxViewportDimensions[0], _properties.limits.maxViewportDimensions[1]);
_properties.limits.viewportBoundsRange[0] = (-2.0 * maxVPDim);
_properties.limits.viewportBoundsRange[1] = (2.0 * maxVPDim) - 1;
_properties.limits.maxViewports = _features.multiViewport ? kMVKCachedViewportScissorCount : 1;
_properties.limits.maxViewports = _features.multiViewport ? kMVKMaxViewportScissorCount : 1;
_properties.limits.maxImageDimension3D = _metalFeatures.maxTextureLayers;
_properties.limits.maxImageArrayLayers = _metalFeatures.maxTextureLayers;
@ -2547,7 +2576,7 @@ void MVKPhysicalDevice::initLimits() {
_properties.limits.pointSizeGranularity = 1;
_properties.limits.lineWidthRange[0] = 1;
_properties.limits.lineWidthRange[1] = 1;
_properties.limits.lineWidthGranularity = 0;
_properties.limits.lineWidthGranularity = 1;
_properties.limits.standardSampleLocations = VK_TRUE;
_properties.limits.strictLines = _properties.vendorID == kIntelVendorId || _properties.vendorID == kNVVendorId;
@ -2724,6 +2753,8 @@ void MVKPhysicalDevice::initGPUInfoProperties() {
}
#endif //MVK_IOS_OR_TVOS
// Since this is a uint8_t array, use Big-Endian byte ordering,
// so a hex dump of the array is human readable in its parts.
void MVKPhysicalDevice::initPipelineCacheUUID() {
// Clear the UUID
@ -3034,11 +3065,6 @@ void MVKPhysicalDevice::initExtensions() {
MVKExtensionList* pWritableExtns = (MVKExtensionList*)&_supportedExtensions;
pWritableExtns->disableAllButEnabledDeviceExtensions();
#if MVK_IOS_OR_TVOS
if (!_metalFeatures.depthResolve) {
pWritableExtns->vk_KHR_depth_stencil_resolve.enabled = false;
}
#endif
if (!_metalFeatures.samplerMirrorClampToEdge) {
pWritableExtns->vk_KHR_sampler_mirror_clamp_to_edge.enabled = false;
}
@ -3545,15 +3571,14 @@ uint32_t MVKDevice::getVulkanMemoryTypeIndex(MTLStorageMode mtlStorageMode) {
MVKBuffer* MVKDevice::createBuffer(const VkBufferCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator) {
return (MVKBuffer*)addResource(new MVKBuffer(this, pCreateInfo));
return addBuffer(new MVKBuffer(this, pCreateInfo));
}
void MVKDevice::destroyBuffer(MVKBuffer* mvkBuff,
const VkAllocationCallbacks* pAllocator) {
if (mvkBuff) {
removeResource(mvkBuff);
mvkBuff->destroy();
}
if ( !mvkBuff ) { return; }
removeBuffer(mvkBuff);
mvkBuff->destroy();
}
MVKBufferView* MVKDevice::createBufferView(const VkBufferViewCreateInfo* pCreateInfo,
@ -3582,20 +3607,14 @@ MVKImage* MVKDevice::createImage(const VkImageCreateInfo* pCreateInfo,
MVKImage* mvkImg = (swapchainInfo)
? new MVKPeerSwapchainImage(this, pCreateInfo, (MVKSwapchain*)swapchainInfo->swapchain, uint32_t(-1))
: new MVKImage(this, pCreateInfo);
for (auto& memoryBinding : mvkImg->_memoryBindings) {
addResource(memoryBinding);
}
return mvkImg;
return addImage(mvkImg);
}
void MVKDevice::destroyImage(MVKImage* mvkImg,
const VkAllocationCallbacks* pAllocator) {
if (mvkImg) {
for (auto& memoryBinding : mvkImg->_memoryBindings) {
removeResource(memoryBinding);
}
mvkImg->destroy();
}
if ( !mvkImg ) { return; }
removeImage(mvkImg);
mvkImg->destroy();
}
MVKImageView* MVKDevice::createImageView(const VkImageViewCreateInfo* pCreateInfo,
@ -3610,20 +3629,6 @@ void MVKDevice::destroyImageView(MVKImageView* mvkImgView,
MVKSwapchain* MVKDevice::createSwapchain(const VkSwapchainCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator) {
#if MVK_MACOS
// If we have selected a high-power GPU and want to force the window system
// to use it, force the window system to use a high-power GPU by calling the
// MTLCreateSystemDefaultDevice function, and if that GPU is the same as the
// selected GPU, update the MTLDevice instance used by the MVKPhysicalDevice.
id<MTLDevice> mtlDevice = _physicalDevice->getMTLDevice();
if (mvkConfig().switchSystemGPU && !(mtlDevice.isLowPower || mtlDevice.isHeadless) ) {
id<MTLDevice> sysMTLDevice = MTLCreateSystemDefaultDevice();
if (mvkGetRegistryID(sysMTLDevice) == mvkGetRegistryID(mtlDevice)) {
_physicalDevice->replaceMTLDevice(sysMTLDevice);
}
}
#endif
return new MVKSwapchain(this, pCreateInfo);
}
@ -3636,22 +3641,16 @@ MVKPresentableSwapchainImage* MVKDevice::createPresentableSwapchainImage(const V
MVKSwapchain* swapchain,
uint32_t swapchainIndex,
const VkAllocationCallbacks* pAllocator) {
MVKPresentableSwapchainImage* mvkImg = new MVKPresentableSwapchainImage(this, pCreateInfo,
swapchain, swapchainIndex);
for (auto& memoryBinding : mvkImg->_memoryBindings) {
addResource(memoryBinding);
}
return mvkImg;
auto* pImg = new MVKPresentableSwapchainImage(this, pCreateInfo, swapchain, swapchainIndex);
addImage(pImg);
return pImg;
}
void MVKDevice::destroyPresentableSwapchainImage(MVKPresentableSwapchainImage* mvkImg,
const VkAllocationCallbacks* pAllocator) {
if (mvkImg) {
for (auto& memoryBinding : mvkImg->_memoryBindings) {
removeResource(memoryBinding);
}
mvkImg->destroy();
}
if ( !mvkImg ) { return; }
removeImage(mvkImg);
mvkImg->destroy();
}
MVKFence* MVKDevice::createFence(const VkFenceCreateInfo* pCreateInfo,
@ -3705,6 +3704,15 @@ void MVKDevice::destroySemaphore(MVKSemaphore* mvkSem4,
if (mvkSem4) { mvkSem4->destroy(); }
}
MVKDeferredOperation* MVKDevice::createDeferredOperation(const VkAllocationCallbacks* pAllocator) {
return new MVKDeferredOperation(this);
}
void MVKDevice::destroyDeferredOperation(MVKDeferredOperation* mvkDeferredOperation,
const VkAllocationCallbacks* pAllocator) {
if(mvkDeferredOperation) { mvkDeferredOperation->destroy(); }
}
MVKEvent* MVKDevice::createEvent(const VkEventCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator) {
const VkExportMetalObjectCreateInfoEXT* pExportInfo = nullptr;
@ -3906,6 +3914,11 @@ MVKFramebuffer* MVKDevice::createFramebuffer(const VkFramebufferCreateInfo* pCre
return new MVKFramebuffer(this, pCreateInfo);
}
MVKFramebuffer* MVKDevice::createFramebuffer(const VkRenderingInfo* pRenderingInfo,
const VkAllocationCallbacks* pAllocator) {
return new MVKFramebuffer(this, pRenderingInfo);
}
void MVKDevice::destroyFramebuffer(MVKFramebuffer* mvkFB,
const VkAllocationCallbacks* pAllocator) {
if (mvkFB) { mvkFB->destroy(); }
@ -3921,6 +3934,11 @@ MVKRenderPass* MVKDevice::createRenderPass(const VkRenderPassCreateInfo2* pCreat
return new MVKRenderPass(this, pCreateInfo);
}
MVKRenderPass* MVKDevice::createRenderPass(const VkRenderingInfo* pRenderingInfo,
const VkAllocationCallbacks* pAllocator) {
return new MVKRenderPass(this, pRenderingInfo);
}
void MVKDevice::destroyRenderPass(MVKRenderPass* mvkRP,
const VkAllocationCallbacks* pAllocator) {
if (mvkRP) { mvkRP->destroy(); }
@ -3987,42 +4005,79 @@ void MVKDevice::destroyPrivateDataSlot(VkPrivateDataSlotEXT privateDataSlot,
mvkPDS->destroy();
}
#pragma mark Operations
// Adds the specified resource for tracking, and returns the added resource.
MVKResource* MVKDevice::addResource(MVKResource* rez) {
// If the underlying MTLBuffer is referenced in a shader only via its gpuAddress,
// the GPU might not be aware that the MTLBuffer needs to be made resident.
// Track the buffer as needing to be made resident if a shader is bound that uses
// PhysicalStorageBufferAddresses to access the contents of the underlying MTLBuffer.
MVKBuffer* MVKDevice::addBuffer(MVKBuffer* mvkBuff) {
if ( !mvkBuff ) { return mvkBuff; }
lock_guard<mutex> lock(_rezLock);
_resources.push_back(rez);
return rez;
_resources.push_back(mvkBuff);
if (mvkIsAnyFlagEnabled(mvkBuff->getUsage(), VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT)) {
_gpuAddressableBuffers.push_back(mvkBuff);
}
return mvkBuff;
}
// Removes the specified resource for tracking and returns the removed resource.
MVKResource* MVKDevice::removeResource(MVKResource* rez) {
MVKBuffer* MVKDevice::removeBuffer(MVKBuffer* mvkBuff) {
if ( !mvkBuff ) { return mvkBuff; }
lock_guard<mutex> lock(_rezLock);
mvkRemoveFirstOccurance(_resources, rez);
return rez;
mvkRemoveFirstOccurance(_resources, mvkBuff);
if (mvkIsAnyFlagEnabled(mvkBuff->getUsage(), VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT)) {
mvkRemoveFirstOccurance(_gpuAddressableBuffers, mvkBuff);
}
return mvkBuff;
}
void MVKDevice::encodeGPUAddressableBuffers(MVKResourcesCommandEncoderState* rezEncState, MVKShaderStage stage) {
MTLResourceUsage mtlUsage = MTLResourceUsageRead | MTLResourceUsageWrite;
MTLRenderStages mtlRendStage = (stage == kMVKShaderStageFragment) ? MTLRenderStageFragment : MTLRenderStageVertex;
lock_guard<mutex> lock(_rezLock);
for (auto& buff : _gpuAddressableBuffers) {
rezEncState->encodeResourceUsage(stage, buff->getMTLBuffer(), mtlUsage, mtlRendStage);
}
}
MVKImage* MVKDevice::addImage(MVKImage* mvkImg) {
if ( !mvkImg ) { return mvkImg; }
lock_guard<mutex> lock(_rezLock);
for (auto& mb : mvkImg->_memoryBindings) {
_resources.push_back(mb);
}
return mvkImg;
}
MVKImage* MVKDevice::removeImage(MVKImage* mvkImg) {
if ( !mvkImg ) { return mvkImg; }
lock_guard<mutex> lock(_rezLock);
for (auto& mb : mvkImg->_memoryBindings) {
mvkRemoveFirstOccurance(_resources, mb);
}
return mvkImg;
}
// Adds the specified host semaphore to be woken upon device loss.
void MVKDevice::addSemaphore(MVKSemaphoreImpl* sem4) {
lock_guard<mutex> lock(_sem4Lock);
_awaitingSemaphores.push_back(sem4);
}
// Removes the specified host semaphore.
void MVKDevice::removeSemaphore(MVKSemaphoreImpl* sem4) {
lock_guard<mutex> lock(_sem4Lock);
mvkRemoveFirstOccurance(_awaitingSemaphores, sem4);
}
// Adds the specified timeline semaphore to be woken at the specified value upon device loss.
void MVKDevice::addTimelineSemaphore(MVKTimelineSemaphore* sem4, uint64_t value) {
lock_guard<mutex> lock(_sem4Lock);
_awaitingTimelineSem4s.emplace_back(sem4, value);
}
// Removes the specified timeline semaphore.
void MVKDevice::removeTimelineSemaphore(MVKTimelineSemaphore* sem4, uint64_t value) {
lock_guard<mutex> lock(_sem4Lock);
mvkRemoveFirstOccurance(_awaitingTimelineSem4s, make_pair(sem4, value));
@ -4069,7 +4124,6 @@ void MVKDevice::logActivityPerformance(MVKPerformanceTracker& activity, MVKPerfo
}
void MVKDevice::logPerformanceSummary() {
if (_activityPerformanceLoggingStyle == MVK_CONFIG_ACTIVITY_PERFORMANCE_LOGGING_STYLE_IMMEDIATE) { return; }
// Get a copy to minimize time under lock
MVKPerformanceStatistics perfStats;
@ -4429,6 +4483,18 @@ MVKDevice::MVKDevice(MVKPhysicalDevice* physicalDevice, const VkDeviceCreateInfo
initQueues(pCreateInfo);
reservePrivateData(pCreateInfo);
#if MVK_MACOS
// After enableExtensions
// If the VK_KHR_swapchain extension is enabled, we expect to render to the screen.
// In a multi-GPU system, if we are using the high-power GPU and want the window system
// to also use that GPU to avoid copying content between GPUs, force the window system
// to use the high-power GPU by calling the MTLCreateSystemDefaultDevice() function.
if (_enabledExtensions.vk_KHR_swapchain.enabled && mvkConfig().switchSystemGPU &&
!(_physicalDevice->_mtlDevice.isLowPower || _physicalDevice->_mtlDevice.isHeadless) ) {
MTLCreateSystemDefaultDevice();
}
#endif
// After enableExtensions && enableFeatures
// Use Metal arg buffs if available, and either config wants them always,
// or config wants them with descriptor indexing and descriptor indexing has been enabled.
@ -4549,7 +4615,8 @@ void MVKDevice::enableFeatures(const VkDeviceCreateInfo* pCreateInfo) {
//Enable device features based on requested and available features,
// including extended features that are requested in the pNext chain.
if (pCreateInfo->pEnabledFeatures) {
enableFeatures(&_enabledFeatures.robustBufferAccess,
enableFeatures(pCreateInfo->pEnabledFeatures,
&_enabledFeatures.robustBufferAccess,
&pCreateInfo->pEnabledFeatures->robustBufferAccess,
&pdFeats2.features.robustBufferAccess, 55);
}
@ -4558,29 +4625,36 @@ void MVKDevice::enableFeatures(const VkDeviceCreateInfo* pCreateInfo) {
switch ((uint32_t)next->sType) {
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: {
auto* requestedFeatures = (VkPhysicalDeviceFeatures2*)next;
enableFeatures(&_enabledFeatures.robustBufferAccess,
enableFeatures(requestedFeatures,
&_enabledFeatures.robustBufferAccess,
&requestedFeatures->features.robustBufferAccess,
&pdFeats2.features.robustBufferAccess, 55);
break;
}
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES: {
auto* requestedFeatures = (VkPhysicalDeviceVulkan11Features*)next;
enableFeatures(&_enabled16BitStorageFeatures.storageBuffer16BitAccess,
enableFeatures(requestedFeatures,
&_enabled16BitStorageFeatures.storageBuffer16BitAccess,
&requestedFeatures->storageBuffer16BitAccess,
&pd16BitStorageFeatures.storageBuffer16BitAccess, 4);
enableFeatures(&_enabledMultiviewFeatures.multiview,
enableFeatures(requestedFeatures,
&_enabledMultiviewFeatures.multiview,
&requestedFeatures->multiview,
&pdMultiviewFeatures.multiview, 3);
enableFeatures(&_enabledVariablePointerFeatures.variablePointersStorageBuffer,
enableFeatures(requestedFeatures,
&_enabledVariablePointerFeatures.variablePointersStorageBuffer,
&requestedFeatures->variablePointersStorageBuffer,
&pdVariablePointerFeatures.variablePointersStorageBuffer, 2);
enableFeatures(&_enabledProtectedMemoryFeatures.protectedMemory,
enableFeatures(requestedFeatures,
&_enabledProtectedMemoryFeatures.protectedMemory,
&requestedFeatures->protectedMemory,
&pdProtectedMemoryFeatures.protectedMemory, 1);
enableFeatures(&_enabledSamplerYcbcrConversionFeatures.samplerYcbcrConversion,
enableFeatures(requestedFeatures,
&_enabledSamplerYcbcrConversionFeatures.samplerYcbcrConversion,
&requestedFeatures->samplerYcbcrConversion,
&pdSamplerYcbcrConversionFeatures.samplerYcbcrConversion, 1);
enableFeatures(&_enabledShaderDrawParametersFeatures.shaderDrawParameters,
enableFeatures(requestedFeatures,
&_enabledShaderDrawParametersFeatures.shaderDrawParameters,
&requestedFeatures->shaderDrawParameters,
&pdShaderDrawParametersFeatures.shaderDrawParameters, 1);
break;
@ -4588,55 +4662,72 @@ void MVKDevice::enableFeatures(const VkDeviceCreateInfo* pCreateInfo) {
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES: {
auto& pdvulkan12FeaturesNoExt = _physicalDevice->_vulkan12FeaturesNoExt;
auto* requestedFeatures = (VkPhysicalDeviceVulkan12Features*)next;
enableFeatures(&_enabledVulkan12FeaturesNoExt.samplerMirrorClampToEdge,
enableFeatures(requestedFeatures,
&_enabledVulkan12FeaturesNoExt.samplerMirrorClampToEdge,
&requestedFeatures->samplerMirrorClampToEdge,
&pdvulkan12FeaturesNoExt.samplerMirrorClampToEdge, 2);
enableFeatures(&_enabled8BitStorageFeatures.storageBuffer8BitAccess,
enableFeatures(requestedFeatures,
&_enabled8BitStorageFeatures.storageBuffer8BitAccess,
&requestedFeatures->storageBuffer8BitAccess,
&pd8BitStorageFeatures.storageBuffer8BitAccess, 3);
enableFeatures(&_enabledShaderAtomicInt64Features.shaderBufferInt64Atomics,
enableFeatures(requestedFeatures,
&_enabledShaderAtomicInt64Features.shaderBufferInt64Atomics,
&requestedFeatures->shaderBufferInt64Atomics,
&pdShaderAtomicInt64Features.shaderBufferInt64Atomics, 2);
enableFeatures(&_enabledShaderFloat16Int8Features.shaderFloat16,
enableFeatures(requestedFeatures,
&_enabledShaderFloat16Int8Features.shaderFloat16,
&requestedFeatures->shaderFloat16,
&pdShaderFloat16Int8Features.shaderFloat16, 2);
enableFeatures(&_enabledVulkan12FeaturesNoExt.descriptorIndexing,
enableFeatures(requestedFeatures,
&_enabledVulkan12FeaturesNoExt.descriptorIndexing,
&requestedFeatures->descriptorIndexing,
&pdvulkan12FeaturesNoExt.descriptorIndexing, 1);
enableFeatures(&_enabledDescriptorIndexingFeatures.shaderInputAttachmentArrayDynamicIndexing,
enableFeatures(requestedFeatures,
&_enabledDescriptorIndexingFeatures.shaderInputAttachmentArrayDynamicIndexing,
&requestedFeatures->shaderInputAttachmentArrayDynamicIndexing,
&pdDescriptorIndexingFeatures.shaderInputAttachmentArrayDynamicIndexing, 20);
enableFeatures(&_enabledVulkan12FeaturesNoExt.samplerFilterMinmax,
enableFeatures(requestedFeatures,
&_enabledVulkan12FeaturesNoExt.samplerFilterMinmax,
&requestedFeatures->samplerFilterMinmax,
&pdvulkan12FeaturesNoExt.samplerFilterMinmax, 1);
enableFeatures(&_enabledScalarBlockLayoutFeatures.scalarBlockLayout,
enableFeatures(requestedFeatures,
&_enabledScalarBlockLayoutFeatures.scalarBlockLayout,
&requestedFeatures->scalarBlockLayout,
&pdScalarBlockLayoutFeatures.scalarBlockLayout, 1);
enableFeatures(&_enabledImagelessFramebufferFeatures.imagelessFramebuffer,
enableFeatures(requestedFeatures,
&_enabledImagelessFramebufferFeatures.imagelessFramebuffer,
&requestedFeatures->imagelessFramebuffer,
&pdImagelessFramebufferFeatures.imagelessFramebuffer, 1);
enableFeatures(&_enabledUniformBufferStandardLayoutFeatures.uniformBufferStandardLayout,
enableFeatures(requestedFeatures,
&_enabledUniformBufferStandardLayoutFeatures.uniformBufferStandardLayout,
&requestedFeatures->uniformBufferStandardLayout,
&pdUniformBufferStandardLayoutFeatures.uniformBufferStandardLayout, 1);
enableFeatures(&_enabledShaderSubgroupExtendedTypesFeatures.shaderSubgroupExtendedTypes,
enableFeatures(requestedFeatures,
&_enabledShaderSubgroupExtendedTypesFeatures.shaderSubgroupExtendedTypes,
&requestedFeatures->shaderSubgroupExtendedTypes,
&pdShaderSubgroupExtendedTypesFeatures.shaderSubgroupExtendedTypes, 1);
enableFeatures(&_enabledSeparateDepthStencilLayoutsFeatures.separateDepthStencilLayouts,
enableFeatures(requestedFeatures,
&_enabledSeparateDepthStencilLayoutsFeatures.separateDepthStencilLayouts,
&requestedFeatures->separateDepthStencilLayouts,
&pdSeparateDepthStencilLayoutsFeatures.separateDepthStencilLayouts, 1);
enableFeatures(&_enabledHostQueryResetFeatures.hostQueryReset,
enableFeatures(requestedFeatures,
&_enabledHostQueryResetFeatures.hostQueryReset,
&requestedFeatures->hostQueryReset,
&pdHostQueryResetFeatures.hostQueryReset, 1);
enableFeatures(&_enabledTimelineSemaphoreFeatures.timelineSemaphore,
enableFeatures(requestedFeatures,
&_enabledTimelineSemaphoreFeatures.timelineSemaphore,
&requestedFeatures->timelineSemaphore,
&pdTimelineSemaphoreFeatures.timelineSemaphore, 1);
enableFeatures(&_enabledBufferDeviceAddressFeatures.bufferDeviceAddress,
enableFeatures(requestedFeatures,
&_enabledBufferDeviceAddressFeatures.bufferDeviceAddress,
&requestedFeatures->bufferDeviceAddress,
&pdBufferDeviceAddressFeatures.bufferDeviceAddress, 3);
enableFeatures(&_enabledVulkanMemoryModelFeatures.vulkanMemoryModel,
enableFeatures(requestedFeatures,
&_enabledVulkanMemoryModelFeatures.vulkanMemoryModel,
&requestedFeatures->vulkanMemoryModel,
&pdVulkanMemoryModelFeatures.vulkanMemoryModel, 3);
enableFeatures(&_enabledVulkan12FeaturesNoExt.shaderOutputViewportIndex,
enableFeatures(requestedFeatures,
&_enabledVulkan12FeaturesNoExt.shaderOutputViewportIndex,
&requestedFeatures->shaderOutputViewportIndex,
&pdvulkan12FeaturesNoExt.shaderOutputViewportIndex, 3);
break;
@ -4644,17 +4735,17 @@ void MVKDevice::enableFeatures(const VkDeviceCreateInfo* pCreateInfo) {
#define MVK_DEVICE_FEATURE(structName, enumName, flagCount) \
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_##enumName##_FEATURES: { \
enableFeatures((VkBaseInStructure*)&_enabled##structName##Features, \
next, \
(VkBaseInStructure*)&pd##structName##Features, \
enableFeatures(&_enabled##structName##Features, \
(VkPhysicalDevice##structName##Features*)next, \
&pd##structName##Features, \
flagCount); \
break; \
}
#define MVK_DEVICE_FEATURE_EXTN(structName, enumName, extnSfx, flagCount) \
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_##enumName##_FEATURES_##extnSfx: { \
enableFeatures((VkBaseInStructure*)&_enabled##structName##Features, \
next, \
(VkBaseInStructure*)&pd##structName##Features, \
enableFeatures(&_enabled##structName##Features, \
(VkPhysicalDevice##structName##Features##extnSfx*)next, \
&pd##structName##Features, \
flagCount); \
break; \
}
@ -4666,18 +4757,23 @@ void MVKDevice::enableFeatures(const VkDeviceCreateInfo* pCreateInfo) {
}
}
void MVKDevice::enableFeatures(VkBaseInStructure* pEnabled, const VkBaseInStructure* pRequested, const VkBaseInStructure* pAvailable, uint32_t count) {
enableFeatures((VkBool32*)(&(pEnabled->pNext) + 1),
(VkBool32*)(&(pRequested->pNext) + 1),
(VkBool32*)(&(pAvailable->pNext) + 1),
template<typename S>
void MVKDevice::enableFeatures(S* pEnabled, const S* pRequested, const S* pAvailable, uint32_t count) {
enableFeatures(pRequested,
(VkBool32*)mvkGetAddressOfFirstMember(pEnabled),
(VkBool32*)mvkGetAddressOfFirstMember(pRequested),
(VkBool32*)mvkGetAddressOfFirstMember(pAvailable),
count);
}
void MVKDevice::enableFeatures(VkBool32* pEnabledBools, const VkBool32* pRequestedBools, const VkBool32* pAvailableBools, uint32_t count) {
template<typename S>
void MVKDevice::enableFeatures(S* pRequested, VkBool32* pEnabledBools, const VkBool32* pRequestedBools, const VkBool32* pAvailableBools, uint32_t count) {
for (uint32_t i = 0; i < count; i++) {
pEnabledBools[i] = pRequestedBools[i] && pAvailableBools[i];
if (pRequestedBools[i] && !pAvailableBools[i]) {
setConfigurationResult(reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateDevice(): Requested feature is not available on this device."));
uintptr_t mbrOffset = (uintptr_t)&pRequestedBools[i] - (uintptr_t)mvkGetAddressOfFirstMember(pRequested);
size_t mbrIdxOrd = (mbrOffset / sizeof(VkBool32)) + 1;
setConfigurationResult(reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateDevice(): Requested physical device feature specified by the %zu%s flag in %s is not available on this device.", mbrIdxOrd, mvk::getOrdinalSuffix(mbrIdxOrd), mvk::getTypeName(pRequested).c_str()));
}
}
}
@ -4763,20 +4859,39 @@ uint64_t mvkGetRegistryID(id<MTLDevice> mtlDevice) {
return [mtlDevice respondsToSelector: @selector(registryID)] ? mtlDevice.registryID : 0;
}
// Since MacCatalyst does not support supportsBCTextureCompression, it is not possible
// for Apple Silicon to indicate a lack of support for BCn when running MacCatalyst.
// Therefore, assume for now that this means MacCatalyst does not actually support BCn.
// Further evidence may change this approach.
bool mvkSupportsBCTextureCompression(id<MTLDevice> mtlDevice) {
#if MVK_IOS || MVK_TVOS || MVK_MACCAT
return false;
#endif
uint64_t mvkGetLocationID(id<MTLDevice> mtlDevice) {
uint64_t hash = 0;
#if MVK_MACOS && !MVK_MACCAT
#if MVK_XCODE_12
// All of these device properties were added at the same time,
// so only need to check for the presence of one of them.
if ([mtlDevice respondsToSelector: @selector(location)]) {
uint64_t val;
val = mtlDevice.location;
hash = mvkHash(&val, 1, hash);
val = mtlDevice.locationNumber;
hash = mvkHash(&val, 1, hash);
val = mtlDevice.peerGroupID;
hash = mvkHash(&val, 1, hash);
val = mtlDevice.peerIndex;
hash = mvkHash(&val, 1, hash);
}
#endif
return hash;
}
// If the supportsBCTextureCompression query is available, use it.
// Otherwise only macOS supports BC compression.
bool mvkSupportsBCTextureCompression(id<MTLDevice> mtlDevice) {
#if MVK_XCODE_14_3 || (MVK_XCODE_12 && MVK_MACOS && !MVK_MACCAT)
if ([mtlDevice respondsToSelector: @selector(supportsBCTextureCompression)]) {
return mtlDevice.supportsBCTextureCompression;
}
#endif
return true;
#endif
return MVK_MACOS && !MVK_MACCAT;
}

View File

@ -78,10 +78,10 @@ public:
* Maps the memory address at the specified offset from the start of this memory allocation,
* and returns the address in the specified data reference.
*/
VkResult map(VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData);
VkResult map(const VkMemoryMapInfoKHR* mapInfo, void** ppData);
/** Unmaps a previously mapped memory range. */
void unmap();
VkResult unmap(const VkMemoryUnmapInfoKHR* unmapInfo);
/**
* If this device memory is currently mapped to host memory, returns the range within

View File

@ -20,7 +20,6 @@
#include "MVKBuffer.h"
#include "MVKImage.h"
#include "MVKQueue.h"
#include "MVKEnvironment.h"
#include "mvk_datatypes.hpp"
#include "MVKFoundation.h"
#include <cstdlib>
@ -36,8 +35,7 @@ void MVKDeviceMemory::propagateDebugName() {
setLabelIfNotNil(_mtlBuffer, _debugName);
}
VkResult MVKDeviceMemory::map(VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData) {
VkResult MVKDeviceMemory::map(const VkMemoryMapInfoKHR* pMemoryMapInfo, void** ppData) {
if ( !isMemoryHostAccessible() ) {
return reportError(VK_ERROR_MEMORY_MAP_FAILED, "Private GPU-only memory cannot be mapped to host memory.");
}
@ -50,25 +48,23 @@ VkResult MVKDeviceMemory::map(VkDeviceSize offset, VkDeviceSize size, VkMemoryMa
return reportError(VK_ERROR_OUT_OF_HOST_MEMORY, "Could not allocate %llu bytes of host-accessible device memory.", _allocationSize);
}
_mappedRange.offset = offset;
_mappedRange.size = adjustMemorySize(size, offset);
_mappedRange.offset = pMemoryMapInfo->offset;
_mappedRange.size = adjustMemorySize(pMemoryMapInfo->size, pMemoryMapInfo->offset);
*ppData = (void*)((uintptr_t)_pMemory + offset);
*ppData = (void*)((uintptr_t)_pMemory + pMemoryMapInfo->offset);
// Coherent memory does not require flushing by app, so we must flush now
// to support Metal textures that actually reside in non-coherent memory.
if (mvkIsAnyFlagEnabled(_vkMemPropFlags, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) {
pullFromDevice(offset, size);
pullFromDevice(pMemoryMapInfo->offset, pMemoryMapInfo->size);
}
return VK_SUCCESS;
}
void MVKDeviceMemory::unmap() {
VkResult MVKDeviceMemory::unmap(const VkMemoryUnmapInfoKHR* pUnmapMemoryInfo) {
if ( !isMapped() ) {
reportError(VK_ERROR_MEMORY_MAP_FAILED, "Memory is not mapped. Call vkMapMemory() first.");
return;
return reportError(VK_ERROR_MEMORY_MAP_FAILED, "Memory is not mapped. Call vkMapMemory() first.");
}
// Coherent memory does not require flushing by app, so we must flush now
@ -79,6 +75,8 @@ void MVKDeviceMemory::unmap() {
_mappedRange.offset = 0;
_mappedRange.size = 0;
return VK_SUCCESS;
}
VkResult MVKDeviceMemory::flushToDevice(VkDeviceSize offset, VkDeviceSize size) {
@ -161,7 +159,7 @@ VkResult MVKDeviceMemory::addImageMemoryBinding(MVKImageMemoryBinding* mvkImg) {
// If a dedicated alloc, ensure this image is the one and only image
// I am dedicated to. If my image is aliasable, though, allow other aliasable
// images to bind to me.
if (_isDedicated && (_imageMemoryBindings.empty() || !(contains(_imageMemoryBindings, mvkImg) || (_imageMemoryBindings[0]->_image->getIsAliasable() && mvkImg->_image->getIsAliasable()))) ) {
if (_isDedicated && (_imageMemoryBindings.empty() || !(mvkContains(_imageMemoryBindings, mvkImg) || (_imageMemoryBindings[0]->_image->getIsAliasable() && mvkImg->_image->getIsAliasable()))) ) {
return reportError(VK_ERROR_OUT_OF_DEVICE_MEMORY, "Could not bind VkImage %p to a VkDeviceMemory dedicated to resource %p. A dedicated allocation may only be used with the resource it was dedicated to.", mvkImg, getDedicatedResource() );
}
@ -181,7 +179,7 @@ bool MVKDeviceMemory::ensureMTLHeap() {
if (_mtlHeap) { return true; }
// Can't create a MTLHeap on a imported memory
// Can't create a MTLHeap on imported memory
if (_isHostMemImported) { return true; }
// Don't bother if we don't have placement heaps.
@ -285,6 +283,7 @@ MVKDeviceMemory::MVKDeviceMemory(MVKDevice* device,
const VkMemoryAllocateInfo* pAllocateInfo,
const VkAllocationCallbacks* pAllocator) : MVKVulkanAPIDeviceObject(device) {
// Set Metal memory parameters
_vkMemAllocFlags = 0;
_vkMemPropFlags = _device->_pMemoryProperties->memoryTypes[pAllocateInfo->memoryTypeIndex].propertyFlags;
_mtlStorageMode = mvkMTLStorageModeFromVkMemoryPropertyFlags(_vkMemPropFlags);
_mtlCPUCacheMode = mvkMTLCPUCacheModeFromVkMemoryPropertyFlags(_vkMemPropFlags);

View File

@ -58,6 +58,8 @@ public:
MVKFramebuffer(MVKDevice* device, const VkFramebufferCreateInfo* pCreateInfo);
MVKFramebuffer(MVKDevice* device, const VkRenderingInfo* pRenderingInfo);
~MVKFramebuffer() override;
protected:
@ -69,12 +71,3 @@ protected:
VkExtent2D _extent;
uint32_t _layerCount;
};
#pragma mark -
#pragma mark Support functions
/** Returns an image-less MVKFramebuffer object created from the rendering info. */
MVKFramebuffer* mvkCreateFramebuffer(MVKDevice* device,
const VkRenderingInfo* pRenderingInfo,
MVKRenderPass* mvkRenderPass);

View File

@ -82,8 +82,8 @@ id<MTLTexture> MVKFramebuffer::getDummyAttachmentMTLTexture(MVKRenderSubpass* su
MVKFramebuffer::MVKFramebuffer(MVKDevice* device,
const VkFramebufferCreateInfo* pCreateInfo) : MVKVulkanAPIDeviceObject(device) {
_extent = { .width = pCreateInfo->width, .height = pCreateInfo->height };
_layerCount = pCreateInfo->layers;
_extent = { .width = pCreateInfo->width, .height = pCreateInfo->height };
// If this is not an image-less framebuffer, add the attachments
if ( !mvkIsAnyFlagEnabled(pCreateInfo->flags, VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) ) {
@ -94,51 +94,25 @@ MVKFramebuffer::MVKFramebuffer(MVKDevice* device,
}
}
MVKFramebuffer::~MVKFramebuffer() {
[_mtlDummyTex release];
}
MVKFramebuffer::MVKFramebuffer(MVKDevice* device,
const VkRenderingInfo* pRenderingInfo) : MVKVulkanAPIDeviceObject(device) {
_layerCount = pRenderingInfo->layerCount;
#pragma mark -
#pragma mark Support functions
MVKFramebuffer* mvkCreateFramebuffer(MVKDevice* device,
const VkRenderingInfo* pRenderingInfo,
MVKRenderPass* mvkRenderPass) {
uint32_t attCnt = 0;
VkExtent3D fbExtent = {};
_extent = {};
for (uint32_t caIdx = 0; caIdx < pRenderingInfo->colorAttachmentCount; caIdx++) {
auto& clrAtt = pRenderingInfo->pColorAttachments[caIdx];
if (clrAtt.imageView) {
fbExtent = ((MVKImageView*)clrAtt.imageView)->getExtent3D();
attCnt++;
if (clrAtt.resolveImageView && clrAtt.resolveMode != VK_RESOLVE_MODE_NONE) {
attCnt++;
}
_extent = mvkVkExtent2DFromVkExtent3D(((MVKImageView*)clrAtt.imageView)->getExtent3D());
}
}
auto* pDSAtt = pRenderingInfo->pDepthAttachment ? pRenderingInfo->pDepthAttachment : pRenderingInfo->pStencilAttachment;
if (pDSAtt) {
if (pDSAtt->imageView) {
fbExtent = ((MVKImageView*)pDSAtt->imageView)->getExtent3D();
attCnt++;
}
if (pDSAtt->resolveImageView && pDSAtt->resolveMode != VK_RESOLVE_MODE_NONE) {
attCnt++;
}
if (pRenderingInfo->pDepthAttachment && pRenderingInfo->pDepthAttachment->imageView) {
_extent = mvkVkExtent2DFromVkExtent3D(((MVKImageView*)pRenderingInfo->pDepthAttachment->imageView)->getExtent3D());
}
if (pRenderingInfo->pStencilAttachment && pRenderingInfo->pStencilAttachment->imageView) {
_extent = mvkVkExtent2DFromVkExtent3D(((MVKImageView*)pRenderingInfo->pStencilAttachment->imageView)->getExtent3D());
}
VkFramebufferCreateInfo fbCreateInfo;
fbCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
fbCreateInfo.pNext = nullptr;
fbCreateInfo.flags = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT;
fbCreateInfo.renderPass = (VkRenderPass)mvkRenderPass;
fbCreateInfo.attachmentCount = attCnt;
fbCreateInfo.pAttachments = nullptr;
fbCreateInfo.width = fbExtent.width;
fbCreateInfo.height = fbExtent.height;
fbCreateInfo.layers = pRenderingInfo->layerCount;
return device->createFramebuffer(&fbCreateInfo, nullptr);
}
MVKFramebuffer::~MVKFramebuffer() {
[_mtlDummyTex release];
}

View File

@ -449,7 +449,7 @@ public:
#pragma mark Metal
/** Presents the contained drawable to the OS. */
void presentCAMetalDrawable(id<MTLCommandBuffer> mtlCmdBuff, MVKImagePresentInfo& presentInfo);
void presentCAMetalDrawable(id<MTLCommandBuffer> mtlCmdBuff, MVKImagePresentInfo presentInfo);
#pragma mark Construction
@ -463,7 +463,7 @@ protected:
friend MVKSwapchain;
id<CAMetalDrawable> getCAMetalDrawable() override;
void addPresentedHandler(id<CAMetalDrawable> mtlDrawable, MVKImagePresentInfo& presentInfo);
void addPresentedHandler(id<CAMetalDrawable> mtlDrawable, MVKImagePresentInfo presentInfo);
void releaseMetalDrawable();
MVKSwapchainImageAvailability getAvailability();
void makeAvailable(const MVKSwapchainSignaler& signaler);
@ -560,6 +560,9 @@ public:
/** Returns the 3D extent of this image at the specified mipmap level. */
VkExtent3D getExtent3D(uint8_t planeIndex = 0, uint32_t mipLevel = 0) { return _image->getExtent3D(planeIndex, mipLevel); }
/** Return the underlying image. */
MVKImage* getImage() { return _image; }
#pragma mark Metal
/** Returns the Metal texture underlying this image view. */

View File

@ -21,7 +21,6 @@
#include "MVKSwapchain.h"
#include "MVKCommandBuffer.h"
#include "MVKCmdDebug.h"
#include "MVKEnvironment.h"
#include "MVKFoundation.h"
#include "MVKOSExtensions.h"
#include "MVKCodec.h"
@ -63,7 +62,7 @@ id<MTLTexture> MVKImagePlane::getMTLTexture() {
offset: memoryBinding->getDeviceMemoryOffset() + _subresources[0].layout.offset];
if (_image->_isAliasable) { [_mtlTexture makeAliasable]; }
} else if (_image->_isAliasable && dvcMem && dvcMem->isDedicatedAllocation() &&
!contains(dvcMem->_imageMemoryBindings, memoryBinding)) {
!mvkContains(dvcMem->_imageMemoryBindings, memoryBinding)) {
// This is a dedicated allocation, but it belongs to another aliasable image.
// In this case, use the MTLTexture from the memory's dedicated image.
// We know the other image must be aliasable, or I couldn't have been bound
@ -1295,8 +1294,9 @@ id<CAMetalDrawable> MVKPresentableSwapchainImage::getCAMetalDrawable() {
}
// Present the drawable and make myself available only once the command buffer has completed.
// Pass MVKImagePresentInfo by value because it may not exist when the callback runs.
void MVKPresentableSwapchainImage::presentCAMetalDrawable(id<MTLCommandBuffer> mtlCmdBuff,
MVKImagePresentInfo& presentInfo) {
MVKImagePresentInfo presentInfo) {
lock_guard<mutex> lock(_availabilityLock);
_swapchain->willPresentSurface(getMTLTexture(0), mtlCmdBuff);
@ -1313,8 +1313,9 @@ void MVKPresentableSwapchainImage::presentCAMetalDrawable(id<MTLCommandBuffer> m
mtlDrwbl.layer.displaySyncEnabledMVK = (presentInfo.presentMode != VK_PRESENT_MODE_IMMEDIATE_KHR);
}
if (presentInfo.hasPresentTime) {
// Convert from nsecs to seconds for Metal
addPresentedHandler(mtlDrwbl, presentInfo);
}
if (presentInfo.desiredPresentTime) {
[mtlDrwbl presentAtTime: (double)presentInfo.desiredPresentTime * 1.0e-9];
} else {
[mtlDrwbl present];
@ -1358,8 +1359,9 @@ void MVKPresentableSwapchainImage::presentCAMetalDrawable(id<MTLCommandBuffer> m
signalPresentationSemaphore(signaler, mtlCmdBuff);
}
// Pass MVKImagePresentInfo by value because it may not exist when the callback runs.
void MVKPresentableSwapchainImage::addPresentedHandler(id<CAMetalDrawable> mtlDrawable,
MVKImagePresentInfo& presentInfo) {
MVKImagePresentInfo presentInfo) {
#if !MVK_OS_SIMULATOR
if ([mtlDrawable respondsToSelector: @selector(addPresentedHandler:)]) {
retain(); // Ensure this image is not destroyed while awaiting presentation

View File

@ -18,7 +18,6 @@
#pragma once
#include "MVKEnvironment.h"
#include "MVKLayers.h"
#include "MVKVulkanAPIObject.h"
#include "MVKSmallVector.h"

View File

@ -22,6 +22,7 @@
#include "MVKFoundation.h"
#include "MVKSurface.h"
#include "MVKOSExtensions.h"
#include "mvk_deprecated_api.h"
using namespace std;
@ -340,11 +341,6 @@ MVKInstance::MVKInstance(const VkInstanceCreateInfo* pCreateInfo) : _enabledExte
initProcAddrs(); // Init function pointers
setConfigurationResult(verifyLayers(pCreateInfo->enabledLayerCount, pCreateInfo->ppEnabledLayerNames));
MVKExtensionList* pWritableExtns = (MVKExtensionList*)&_enabledExtensions;
setConfigurationResult(pWritableExtns->enable(pCreateInfo->enabledExtensionCount,
pCreateInfo->ppEnabledExtensionNames,
getDriverLayer()->getSupportedInstanceExtensions()));
logVersions(); // Log the MoltenVK and Vulkan versions
// Populate the array of physical GPU devices.
@ -366,6 +362,13 @@ MVKInstance::MVKInstance(const VkInstanceCreateInfo* pCreateInfo) : _enabledExte
setConfigurationResult(reportError(VK_ERROR_INCOMPATIBLE_DRIVER, "To support Mac Catalyst, MoltenVK requires macOS 11.0 or above."));
}
// Enable extensions after logging the system and GPU info, for any logging done during extension enablement.
setConfigurationResult(verifyLayers(pCreateInfo->enabledLayerCount, pCreateInfo->ppEnabledLayerNames));
MVKExtensionList* pWritableExtns = (MVKExtensionList*)&_enabledExtensions;
setConfigurationResult(pWritableExtns->enable(pCreateInfo->enabledExtensionCount,
pCreateInfo->ppEnabledExtensionNames,
getDriverLayer()->getSupportedInstanceExtensions()));
MVKLogInfo("Created VkInstance for Vulkan version %s, as requested by app, with the following %d Vulkan extensions enabled:%s",
mvkGetVulkanVersionString(_appInfo.apiVersion).c_str(),
_enabledExtensions.getEnabledCount(),
@ -437,10 +440,14 @@ void MVKInstance::initDebugCallbacks(const VkInstanceCreateInfo* pCreateInfo) {
#define ADD_INST_EXT2_ENTRY_POINT(func, EXT1, EXT2) ADD_ENTRY_POINT(func, 0, VK_##EXT1##_EXTENSION_NAME, VK_##EXT2##_EXTENSION_NAME, false)
#define ADD_DVC_EXT2_ENTRY_POINT(func, EXT1, EXT2) ADD_ENTRY_POINT(func, 0, VK_##EXT1##_EXTENSION_NAME, VK_##EXT2##_EXTENSION_NAME, true)
// Add an open function, not tied to core or an extension.
#define ADD_INST_OPEN_ENTRY_POINT(func) ADD_ENTRY_POINT(func, 0, nullptr, nullptr, false)
#define ADD_DVC_OPEN_ENTRY_POINT(func) ADD_ENTRY_POINT(func, 0, nullptr, nullptr, true)
// Initializes the function pointer map.
void MVKInstance::initProcAddrs() {
// Instance functions
// Instance functions.
ADD_INST_ENTRY_POINT(vkDestroyInstance);
ADD_INST_ENTRY_POINT(vkEnumeratePhysicalDevices);
ADD_INST_ENTRY_POINT(vkGetPhysicalDeviceFeatures);
@ -468,7 +475,57 @@ void MVKInstance::initProcAddrs() {
ADD_INST_1_3_PROMOTED_ENTRY_POINT(vkGetPhysicalDeviceToolProperties, EXT_TOOLING_INFO);
// Device functions:
// Instance extension functions.
ADD_INST_EXT_ENTRY_POINT(vkDestroySurfaceKHR, KHR_SURFACE);
ADD_INST_EXT_ENTRY_POINT(vkGetPhysicalDeviceSurfaceSupportKHR, KHR_SURFACE);
ADD_INST_EXT_ENTRY_POINT(vkGetPhysicalDeviceSurfaceCapabilitiesKHR, KHR_SURFACE);
ADD_INST_EXT_ENTRY_POINT(vkGetPhysicalDeviceSurfaceFormatsKHR, KHR_SURFACE);
ADD_INST_EXT_ENTRY_POINT(vkGetPhysicalDeviceSurfacePresentModesKHR, KHR_SURFACE);
ADD_INST_EXT_ENTRY_POINT(vkGetPhysicalDeviceSurfaceCapabilities2KHR, KHR_GET_SURFACE_CAPABILITIES_2);
ADD_INST_EXT_ENTRY_POINT(vkGetPhysicalDeviceSurfaceFormats2KHR, KHR_GET_SURFACE_CAPABILITIES_2);
ADD_INST_EXT_ENTRY_POINT(vkCreateDebugReportCallbackEXT, EXT_DEBUG_REPORT);
ADD_INST_EXT_ENTRY_POINT(vkDestroyDebugReportCallbackEXT, EXT_DEBUG_REPORT);
ADD_INST_EXT_ENTRY_POINT(vkDebugReportMessageEXT, EXT_DEBUG_REPORT);
ADD_INST_EXT_ENTRY_POINT(vkSetDebugUtilsObjectNameEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkSetDebugUtilsObjectTagEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkQueueBeginDebugUtilsLabelEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkQueueEndDebugUtilsLabelEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkQueueInsertDebugUtilsLabelEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkCmdBeginDebugUtilsLabelEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkCmdEndDebugUtilsLabelEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkCmdInsertDebugUtilsLabelEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkCreateDebugUtilsMessengerEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkDestroyDebugUtilsMessengerEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkSubmitDebugUtilsMessageEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkCreateMetalSurfaceEXT, EXT_METAL_SURFACE);
#ifdef VK_USE_PLATFORM_IOS_MVK
ADD_INST_EXT_ENTRY_POINT(vkCreateIOSSurfaceMVK, MVK_IOS_SURFACE);
#endif
#ifdef VK_USE_PLATFORM_MACOS_MVK
ADD_INST_EXT_ENTRY_POINT(vkCreateMacOSSurfaceMVK, MVK_MACOS_SURFACE);
#endif
// MoltenVK-specific instannce functions, not tied to a Vulkan API version or an extension.
ADD_INST_OPEN_ENTRY_POINT(vkGetMoltenVKConfigurationMVK);
ADD_INST_OPEN_ENTRY_POINT(vkSetMoltenVKConfigurationMVK);
ADD_INST_OPEN_ENTRY_POINT(vkGetPhysicalDeviceMetalFeaturesMVK);
ADD_INST_OPEN_ENTRY_POINT(vkGetPerformanceStatisticsMVK);
// For deprecated MoltenVK-specific functions, suppress compiler deprecation warning.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
ADD_INST_EXT_ENTRY_POINT(vkGetVersionStringsMVK, MVK_MOLTENVK);
ADD_INST_EXT_ENTRY_POINT(vkGetMTLDeviceMVK, MVK_MOLTENVK);
ADD_INST_EXT_ENTRY_POINT(vkSetMTLTextureMVK, MVK_MOLTENVK);
ADD_INST_EXT_ENTRY_POINT(vkGetMTLTextureMVK, MVK_MOLTENVK);
ADD_INST_EXT_ENTRY_POINT(vkGetMTLBufferMVK, MVK_MOLTENVK);
ADD_INST_EXT_ENTRY_POINT(vkUseIOSurfaceMVK, MVK_MOLTENVK);
ADD_INST_EXT_ENTRY_POINT(vkGetIOSurfaceMVK, MVK_MOLTENVK);
ADD_INST_EXT_ENTRY_POINT(vkGetMTLCommandQueueMVK, MVK_MOLTENVK);
#pragma clang diagnostic pop
// Device functions.
ADD_DVC_ENTRY_POINT(vkGetDeviceProcAddr);
ADD_DVC_ENTRY_POINT(vkDestroyDevice);
ADD_DVC_ENTRY_POINT(vkGetDeviceQueue);
@ -660,51 +717,14 @@ void MVKInstance::initProcAddrs() {
ADD_DVC_1_3_PROMOTED_ENTRY_POINT(vkQueueSubmit2, KHR, KHR_SYNCHRONIZATION_2);
ADD_DVC_1_3_PROMOTED_ENTRY_POINT(vkSetPrivateData, EXT, EXT_PRIVATE_DATA);
// Instance extension functions:
ADD_INST_EXT_ENTRY_POINT(vkDestroySurfaceKHR, KHR_SURFACE);
ADD_INST_EXT_ENTRY_POINT(vkGetPhysicalDeviceSurfaceSupportKHR, KHR_SURFACE);
ADD_INST_EXT_ENTRY_POINT(vkGetPhysicalDeviceSurfaceCapabilitiesKHR, KHR_SURFACE);
ADD_INST_EXT_ENTRY_POINT(vkGetPhysicalDeviceSurfaceFormatsKHR, KHR_SURFACE);
ADD_INST_EXT_ENTRY_POINT(vkGetPhysicalDeviceSurfacePresentModesKHR, KHR_SURFACE);
ADD_INST_EXT_ENTRY_POINT(vkGetPhysicalDeviceSurfaceCapabilities2KHR, KHR_GET_SURFACE_CAPABILITIES_2);
ADD_INST_EXT_ENTRY_POINT(vkGetPhysicalDeviceSurfaceFormats2KHR, KHR_GET_SURFACE_CAPABILITIES_2);
ADD_INST_EXT_ENTRY_POINT(vkCreateDebugReportCallbackEXT, EXT_DEBUG_REPORT);
ADD_INST_EXT_ENTRY_POINT(vkDestroyDebugReportCallbackEXT, EXT_DEBUG_REPORT);
ADD_INST_EXT_ENTRY_POINT(vkDebugReportMessageEXT, EXT_DEBUG_REPORT);
ADD_INST_EXT_ENTRY_POINT(vkSetDebugUtilsObjectNameEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkSetDebugUtilsObjectTagEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkQueueBeginDebugUtilsLabelEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkQueueEndDebugUtilsLabelEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkQueueInsertDebugUtilsLabelEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkCmdBeginDebugUtilsLabelEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkCmdEndDebugUtilsLabelEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkCmdInsertDebugUtilsLabelEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkCreateDebugUtilsMessengerEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkDestroyDebugUtilsMessengerEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkSubmitDebugUtilsMessageEXT, EXT_DEBUG_UTILS);
ADD_INST_EXT_ENTRY_POINT(vkCreateMetalSurfaceEXT, EXT_METAL_SURFACE);
#ifdef VK_USE_PLATFORM_IOS_MVK
ADD_INST_EXT_ENTRY_POINT(vkCreateIOSSurfaceMVK, MVK_IOS_SURFACE);
#endif
#ifdef VK_USE_PLATFORM_MACOS_MVK
ADD_INST_EXT_ENTRY_POINT(vkCreateMacOSSurfaceMVK, MVK_MACOS_SURFACE);
#endif
ADD_INST_EXT_ENTRY_POINT(vkGetMoltenVKConfigurationMVK, MVK_MOLTENVK);
ADD_INST_EXT_ENTRY_POINT(vkSetMoltenVKConfigurationMVK, MVK_MOLTENVK);
ADD_INST_EXT_ENTRY_POINT(vkGetPhysicalDeviceMetalFeaturesMVK, MVK_MOLTENVK);
ADD_INST_EXT_ENTRY_POINT(vkGetPerformanceStatisticsMVK, MVK_MOLTENVK);
ADD_INST_EXT_ENTRY_POINT(vkGetVersionStringsMVK, MVK_MOLTENVK);
ADD_INST_EXT_ENTRY_POINT(vkGetMTLDeviceMVK, MVK_MOLTENVK);
ADD_INST_EXT_ENTRY_POINT(vkSetMTLTextureMVK, MVK_MOLTENVK);
ADD_INST_EXT_ENTRY_POINT(vkGetMTLTextureMVK, MVK_MOLTENVK);
ADD_INST_EXT_ENTRY_POINT(vkGetMTLBufferMVK, MVK_MOLTENVK);
ADD_INST_EXT_ENTRY_POINT(vkUseIOSurfaceMVK, MVK_MOLTENVK);
ADD_INST_EXT_ENTRY_POINT(vkGetIOSurfaceMVK, MVK_MOLTENVK);
ADD_INST_EXT_ENTRY_POINT(vkGetMTLCommandQueueMVK, MVK_MOLTENVK);
// Device extension functions:
// Device extension functions.
ADD_DVC_EXT_ENTRY_POINT(vkCreateDeferredOperationKHR, KHR_DEFERRED_HOST_OPERATIONS);
ADD_DVC_EXT_ENTRY_POINT(vkDeferredOperationJoinKHR, KHR_DEFERRED_HOST_OPERATIONS);
ADD_DVC_EXT_ENTRY_POINT(vkDestroyDeferredOperationKHR, KHR_DEFERRED_HOST_OPERATIONS);
ADD_DVC_EXT_ENTRY_POINT(vkGetDeferredOperationMaxConcurrencyKHR, KHR_DEFERRED_HOST_OPERATIONS);
ADD_DVC_EXT_ENTRY_POINT(vkGetDeferredOperationResultKHR, KHR_DEFERRED_HOST_OPERATIONS);
ADD_DVC_EXT_ENTRY_POINT(vkMapMemory2KHR, KHR_MAP_MEMORY_2);
ADD_DVC_EXT_ENTRY_POINT(vkUnmapMemory2KHR, KHR_MAP_MEMORY_2);
ADD_DVC_EXT_ENTRY_POINT(vkCmdPushDescriptorSetKHR, KHR_PUSH_DESCRIPTOR);
ADD_DVC_EXT2_ENTRY_POINT(vkCmdPushDescriptorSetWithTemplateKHR, KHR_PUSH_DESCRIPTOR, KHR_DESCRIPTOR_UPDATE_TEMPLATE);
ADD_DVC_EXT_ENTRY_POINT(vkCreateSwapchainKHR, KHR_SWAPCHAIN);

View File

@ -63,6 +63,7 @@ public:
/** Updates a descriptor set in a command encoder. */
void pushDescriptorSet(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKArrayRef<VkWriteDescriptorSet> descriptorWrites,
uint32_t set);
@ -164,6 +165,9 @@ public:
mvkIsAnyFlagEnabled(_flags, VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT));
}
/** Returns whether the shader for the stage uses physical storage buffer addresses. */
virtual bool usesPhysicalStorageBufferAddressesCapability(MVKShaderStage stage) = 0;
/** Constructs an instance for the device. layout, and parent (which may be NULL). */
MVKPipeline(MVKDevice* device, MVKPipelineCache* pipelineCache, MVKPipelineLayout* layout,
VkPipelineCreateFlags flags, MVKPipeline* parent);
@ -270,6 +274,8 @@ public:
/** Returns whether this pipeline has custom sample positions enabled. */
bool isUsingCustomSamplePositions() { return _isUsingCustomSamplePositions; }
bool usesPhysicalStorageBufferAddressesCapability(MVKShaderStage stage) override;
/**
* Returns whether the MTLBuffer vertex shader buffer index is valid for a stage of this pipeline.
* It is if it is a descriptor binding within the descriptor binding range,
@ -338,25 +344,28 @@ protected:
MVKMTLFunction getMTLFunction(SPIRVToMSLConversionConfiguration& shaderConfig,
const VkPipelineShaderStageCreateInfo* pShaderStage,
const char* pStageName);
const VkPipelineShaderStageCreateInfo* _pVertexSS = nullptr;
const VkPipelineShaderStageCreateInfo* _pTessCtlSS = nullptr;
const VkPipelineShaderStageCreateInfo* _pTessEvalSS = nullptr;
const VkPipelineShaderStageCreateInfo* _pFragmentSS = nullptr;
void markIfUsingPhysicalStorageBufferAddressesCapability(SPIRVToMSLConversionResultInfo& resultsInfo,
MVKShaderStage stage);
VkPipelineTessellationStateCreateInfo _tessInfo;
VkPipelineRasterizationStateCreateInfo _rasterInfo;
VkPipelineDepthStencilStateCreateInfo _depthStencilInfo;
MVKSmallVector<VkViewport, kMVKCachedViewportScissorCount> _viewports;
MVKSmallVector<VkRect2D, kMVKCachedViewportScissorCount> _scissors;
MVKSmallVector<VkViewport, kMVKMaxViewportScissorCount> _viewports;
MVKSmallVector<VkRect2D, kMVKMaxViewportScissorCount> _scissors;
MVKSmallVector<VkDynamicState> _dynamicState;
MVKSmallVector<MTLSamplePosition> _customSamplePositions;
MVKSmallVector<MVKTranslatedVertexBinding> _translatedVertexBindings;
MVKSmallVector<MVKZeroDivisorVertexBinding> _zeroDivisorVertexBindings;
MVKSmallVector<MVKStagedMTLArgumentEncoders> _mtlArgumentEncoders;
MVKSmallVector<MVKStagedDescriptorBindingUse> _descriptorBindingUse;
MVKSmallVector<MVKShaderStage> _stagesUsingPhysicalStorageBufferAddressesCapability;
std::unordered_map<uint32_t, id<MTLRenderPipelineState>> _multiviewMTLPipelineStates;
const VkPipelineShaderStageCreateInfo* _pVertexSS = nullptr;
const VkPipelineShaderStageCreateInfo* _pTessCtlSS = nullptr;
const VkPipelineShaderStageCreateInfo* _pTessEvalSS = nullptr;
const VkPipelineShaderStageCreateInfo* _pFragmentSS = nullptr;
MTLComputePipelineDescriptor* _mtlTessVertexStageDesc = nil;
id<MTLFunction> _mtlTessVertexFunctions[3] = {nil, nil, nil};
@ -365,18 +374,17 @@ protected:
id<MTLComputePipelineState> _mtlTessVertexStageIndex32State = nil;
id<MTLComputePipelineState> _mtlTessControlStageState = nil;
id<MTLRenderPipelineState> _mtlPipelineState = nil;
std::unordered_map<uint32_t, id<MTLRenderPipelineState>> _multiviewMTLPipelineStates;
float _blendConstants[4] = { 0.0, 0.0, 0.0, 1.0 };
MTLCullMode _mtlCullMode;
MTLWinding _mtlFrontWinding;
MTLTriangleFillMode _mtlFillMode;
MTLDepthClipMode _mtlDepthClipMode;
MTLPrimitiveType _mtlPrimitiveType;
float _blendConstants[4] = { 0.0, 0.0, 0.0, 1.0 };
uint32_t _outputControlPointCount;
MVKShaderImplicitRezBinding _reservedVertexAttributeBufferCount;
MVKShaderImplicitRezBinding _viewRangeBufferIndex;
MVKShaderImplicitRezBinding _outputBufferIndex;
uint32_t _outputControlPointCount;
uint32_t _tessCtlPatchOutputBufferIndex = 0;
uint32_t _tessCtlLevelBufferIndex = 0;
@ -400,7 +408,6 @@ protected:
bool _needsFragmentViewRangeBuffer = false;
bool _isRasterizing = false;
bool _isRasterizingColor = false;
bool _isRasterizingDepthStencil = false;
bool _isUsingCustomSamplePositions = false;
};
@ -425,6 +432,8 @@ public:
/** Returns the array of descriptor binding use for the descriptor set. */
MVKBitArray& getDescriptorBindingUse(uint32_t descSetIndex, MVKShaderStage stage) override { return _descriptorBindingUse[descSetIndex]; }
bool usesPhysicalStorageBufferAddressesCapability(MVKShaderStage stage) override;
/** Constructs an instance for the device and parent (which may be NULL). */
MVKComputePipeline(MVKDevice* device,
MVKPipelineCache* pipelineCache,
@ -446,6 +455,7 @@ protected:
bool _needsDynamicOffsetBuffer = false;
bool _needsDispatchBaseBuffer = false;
bool _allowsDispatchBase = false;
bool _usesPhysicalStorageBufferAddressesCapability = false;
};

View File

@ -64,11 +64,12 @@ void MVKPipelineLayout::bindDescriptorSets(MVKCommandEncoder* cmdEncoder,
// A null cmdEncoder can be passed to perform a validation pass
void MVKPipelineLayout::pushDescriptorSet(MVKCommandEncoder* cmdEncoder,
VkPipelineBindPoint pipelineBindPoint,
MVKArrayRef<VkWriteDescriptorSet> descriptorWrites,
uint32_t set) {
if (!cmdEncoder) { clearConfigurationResult(); }
MVKDescriptorSetLayout* dsl = _descriptorSetLayouts[set];
dsl->pushDescriptorSet(cmdEncoder, descriptorWrites, _dslMTLResourceIndexOffsets[set]);
dsl->pushDescriptorSet(cmdEncoder, pipelineBindPoint, descriptorWrites, _dslMTLResourceIndexOffsets[set]);
if (!cmdEncoder) { setConfigurationResult(dsl->getConfigurationResult()); }
}
@ -377,7 +378,10 @@ id<MTLComputePipelineState> MVKGraphicsPipeline::getTessVertexStageIndex32State(
#pragma mark Construction
// Extracts and returns a VkPipelineRenderingCreateInfo from the renderPass or pNext chain of pCreateInfo, or returns null if not found
// Extracts and returns a VkPipelineRenderingCreateInfo from the renderPass or pNext
// chain of pCreateInfo, or returns an empty struct if neither of those are found.
// Although the Vulkan spec is vague and unclear, there are CTS that set both renderPass
// and VkPipelineRenderingCreateInfo to null in VkGraphicsPipelineCreateInfo.
static const VkPipelineRenderingCreateInfo* getRenderingCreateInfo(const VkGraphicsPipelineCreateInfo* pCreateInfo) {
if (pCreateInfo->renderPass) {
return ((MVKRenderPass*)pCreateInfo->renderPass)->getSubpass(pCreateInfo->subpass)->getPipelineRenderingCreateInfo();
@ -388,7 +392,8 @@ static const VkPipelineRenderingCreateInfo* getRenderingCreateInfo(const VkGraph
default: break;
}
}
return nullptr;
static VkPipelineRenderingCreateInfo emptyRendInfo = { .sType = VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO };
return &emptyRendInfo;
}
MVKGraphicsPipeline::MVKGraphicsPipeline(MVKDevice* device,
@ -401,7 +406,6 @@ MVKGraphicsPipeline::MVKGraphicsPipeline(MVKDevice* device,
const VkPipelineRenderingCreateInfo* pRendInfo = getRenderingCreateInfo(pCreateInfo);
_isRasterizing = !isRasterizationDisabled(pCreateInfo);
_isRasterizingColor = _isRasterizing && mvkHasColorAttachments(pRendInfo);
_isRasterizingDepthStencil = _isRasterizing && mvkGetDepthStencilFormat(pRendInfo) != VK_FORMAT_UNDEFINED;
// Get the tessellation shaders, if present. Do this now, because we need to extract
// reflection data from them that informs everything else.
@ -488,8 +492,9 @@ MVKGraphicsPipeline::MVKGraphicsPipeline(MVKDevice* device,
initCustomSamplePositions(pCreateInfo);
// Depth stencil content - clearing will disable depth and stencil testing
// Must ignore allowed bad pDepthStencilState pointer if rasterization disabled or no depth attachment
mvkSetOrClear(&_depthStencilInfo, _isRasterizingDepthStencil ? pCreateInfo->pDepthStencilState : nullptr);
// Must ignore allowed bad pDepthStencilState pointer if rasterization disabled or no depth or stencil attachment format
bool isRasterizingDepthStencil = _isRasterizing && (pRendInfo->depthAttachmentFormat || pRendInfo->stencilAttachmentFormat);
mvkSetOrClear(&_depthStencilInfo, isRasterizingDepthStencil ? pCreateInfo->pDepthStencilState : nullptr);
// Viewports and scissors - must ignore allowed bad pViewportState pointer if rasterization is disabled
auto pVPState = _isRasterizing ? pCreateInfo->pViewportState : nullptr;
@ -929,6 +934,7 @@ bool MVKGraphicsPipeline::addVertexShaderToPipeline(MTLRenderPipelineDescriptor*
_needsVertexDynamicOffsetBuffer = funcRslts.needsDynamicOffsetBuffer;
_needsVertexViewRangeBuffer = funcRslts.needsViewRangeBuffer;
_needsVertexOutputBuffer = funcRslts.needsOutputBuffer;
markIfUsingPhysicalStorageBufferAddressesCapability(funcRslts, kMVKShaderStageVertex);
addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageVertex);
@ -998,6 +1004,7 @@ bool MVKGraphicsPipeline::addVertexShaderToPipeline(MTLComputePipelineDescriptor
_needsVertexBufferSizeBuffer = funcRslts.needsBufferSizeBuffer;
_needsVertexDynamicOffsetBuffer = funcRslts.needsDynamicOffsetBuffer;
_needsVertexOutputBuffer = funcRslts.needsOutputBuffer;
markIfUsingPhysicalStorageBufferAddressesCapability(funcRslts, kMVKShaderStageVertex);
}
addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageVertex);
@ -1047,8 +1054,8 @@ bool MVKGraphicsPipeline::addTessCtlShaderToPipeline(MTLComputePipelineDescripto
MVKMTLFunction func = getMTLFunction(shaderConfig, _pTessCtlSS, "Tessellation control");
id<MTLFunction> mtlFunc = func.getMTLFunction();
plDesc.computeFunction = mtlFunc;
if ( !mtlFunc ) { return false; }
plDesc.computeFunction = mtlFunc;
auto& funcRslts = func.shaderConversionResults;
_needsTessCtlSwizzleBuffer = funcRslts.needsSwizzleBuffer;
@ -1057,6 +1064,7 @@ bool MVKGraphicsPipeline::addTessCtlShaderToPipeline(MTLComputePipelineDescripto
_needsTessCtlOutputBuffer = funcRslts.needsOutputBuffer;
_needsTessCtlPatchOutputBuffer = funcRslts.needsPatchOutputBuffer;
_needsTessCtlInputBuffer = funcRslts.needsInputThreadgroupMem;
markIfUsingPhysicalStorageBufferAddressesCapability(funcRslts, kMVKShaderStageTessCtl);
addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageTessCtl);
@ -1113,6 +1121,7 @@ bool MVKGraphicsPipeline::addTessEvalShaderToPipeline(MTLRenderPipelineDescripto
_needsTessEvalSwizzleBuffer = funcRslts.needsSwizzleBuffer;
_needsTessEvalBufferSizeBuffer = funcRslts.needsBufferSizeBuffer;
_needsTessEvalDynamicOffsetBuffer = funcRslts.needsDynamicOffsetBuffer;
markIfUsingPhysicalStorageBufferAddressesCapability(funcRslts, kMVKShaderStageTessEval);
addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageTessEval);
@ -1170,6 +1179,7 @@ bool MVKGraphicsPipeline::addFragmentShaderToPipeline(MTLRenderPipelineDescripto
_needsFragmentBufferSizeBuffer = funcRslts.needsBufferSizeBuffer;
_needsFragmentDynamicOffsetBuffer = funcRslts.needsDynamicOffsetBuffer;
_needsFragmentViewRangeBuffer = funcRslts.needsViewRangeBuffer;
markIfUsingPhysicalStorageBufferAddressesCapability(funcRslts, kMVKShaderStageFragment);
addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageFragment);
@ -1464,16 +1474,19 @@ void MVKGraphicsPipeline::addFragmentOutputToPipeline(MTLRenderPipelineDescripto
}
}
// Depth & stencil attachments
// Depth & stencil attachment formats
MVKPixelFormats* pixFmts = getPixelFormats();
MTLPixelFormat mtlDSFormat = pixFmts->getMTLPixelFormat(mvkGetDepthStencilFormat(pRendInfo));
if (pixFmts->isDepthFormat(mtlDSFormat)) { plDesc.depthAttachmentPixelFormat = mtlDSFormat; }
if (pixFmts->isStencilFormat(mtlDSFormat)) { plDesc.stencilAttachmentPixelFormat = mtlDSFormat; }
// In Vulkan, it's perfectly valid to render with no attachments. In Metal we need to check for
// support for it. If we have no attachments, then we may have to add a dummy attachment.
if (!caCnt && !pixFmts->isDepthFormat(mtlDSFormat) && !pixFmts->isStencilFormat(mtlDSFormat) &&
!getDevice()->_pMetalFeatures->renderWithoutAttachments) {
MTLPixelFormat mtlDepthPixFmt = pixFmts->getMTLPixelFormat(pRendInfo->depthAttachmentFormat);
if (pixFmts->isDepthFormat(mtlDepthPixFmt)) { plDesc.depthAttachmentPixelFormat = mtlDepthPixFmt; }
MTLPixelFormat mtlStencilPixFmt = pixFmts->getMTLPixelFormat(pRendInfo->stencilAttachmentFormat);
if (pixFmts->isStencilFormat(mtlStencilPixFmt)) { plDesc.stencilAttachmentPixelFormat = mtlStencilPixFmt; }
// In Vulkan, it's perfectly valid to render without any attachments. In Metal, if that
// isn't supported, and we have no attachments, then we have to add a dummy attachment.
if (!getDevice()->_pMetalFeatures->renderWithoutAttachments &&
!caCnt && !pRendInfo->depthAttachmentFormat && !pRendInfo->stencilAttachmentFormat) {
MTLRenderPipelineColorAttachmentDescriptor* colorDesc = plDesc.colorAttachments[0];
colorDesc.pixelFormat = MTLPixelFormatR8Unorm;
@ -1549,7 +1562,6 @@ void MVKGraphicsPipeline::initShaderConversionConfig(SPIRVToMSLConversionConfigu
const VkPipelineRenderingCreateInfo* pRendInfo = getRenderingCreateInfo(pCreateInfo);
MVKPixelFormats* pixFmts = getPixelFormats();
MTLPixelFormat mtlDSFormat = pixFmts->getMTLPixelFormat(mvkGetDepthStencilFormat(pRendInfo));
// Disable any unused color attachments, because Metal validation can complain if the
// fragment shader outputs a color value without a corresponding color attachment.
@ -1569,8 +1581,8 @@ void MVKGraphicsPipeline::initShaderConversionConfig(SPIRVToMSLConversionConfigu
shaderConfig.options.mslOptions.ios_support_base_vertex_instance = getDevice()->_pMetalFeatures->baseVertexInstanceDrawing;
shaderConfig.options.mslOptions.texture_1D_as_2D = mvkConfig().texture1DAs2D;
shaderConfig.options.mslOptions.enable_point_size_builtin = isRenderingPoints(pCreateInfo) || reflectData.pointMode;
shaderConfig.options.mslOptions.enable_frag_depth_builtin = pixFmts->isDepthFormat(mtlDSFormat);
shaderConfig.options.mslOptions.enable_frag_stencil_ref_builtin = pixFmts->isStencilFormat(mtlDSFormat);
shaderConfig.options.mslOptions.enable_frag_depth_builtin = pixFmts->isDepthFormat(pixFmts->getMTLPixelFormat(pRendInfo->depthAttachmentFormat));
shaderConfig.options.mslOptions.enable_frag_stencil_ref_builtin = pixFmts->isStencilFormat(pixFmts->getMTLPixelFormat(pRendInfo->stencilAttachmentFormat));
shaderConfig.options.shouldFlipVertexY = mvkConfig().shaderConversionFlipVertexY;
shaderConfig.options.mslOptions.swizzle_texture_samples = _fullImageViewSwizzle && !getDevice()->_pMetalFeatures->nativeTextureSwizzle;
shaderConfig.options.mslOptions.tess_domain_origin_lower_left = pTessDomainOriginState && pTessDomainOriginState->domainOrigin == VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT;
@ -1804,6 +1816,17 @@ MVKMTLFunction MVKGraphicsPipeline::getMTLFunction(SPIRVToMSLConversionConfigura
return func;
}
void MVKGraphicsPipeline::markIfUsingPhysicalStorageBufferAddressesCapability(SPIRVToMSLConversionResultInfo& resultsInfo,
MVKShaderStage stage) {
if (resultsInfo.usesPhysicalStorageBufferAddressesCapability) {
_stagesUsingPhysicalStorageBufferAddressesCapability.push_back(stage);
}
}
bool MVKGraphicsPipeline::usesPhysicalStorageBufferAddressesCapability(MVKShaderStage stage) {
return mvkContains(_stagesUsingPhysicalStorageBufferAddressesCapability, stage);
}
MVKGraphicsPipeline::~MVKGraphicsPipeline() {
@synchronized (getMTLDevice()) {
[_mtlTessVertexStageDesc release];
@ -1952,6 +1975,7 @@ MVKMTLFunction MVKComputePipeline::getMTLFunction(const VkComputePipelineCreateI
_needsBufferSizeBuffer = funcRslts.needsBufferSizeBuffer;
_needsDynamicOffsetBuffer = funcRslts.needsDynamicOffsetBuffer;
_needsDispatchBaseBuffer = funcRslts.needsDispatchBaseBuffer;
_usesPhysicalStorageBufferAddressesCapability = funcRslts.usesPhysicalStorageBufferAddressesCapability;
addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageCompute);
@ -1962,6 +1986,10 @@ uint32_t MVKComputePipeline::getImplicitBufferIndex(uint32_t bufferIndexOffset)
return _device->_pMetalFeatures->maxPerStageBufferCount - (bufferIndexOffset + 1);
}
bool MVKComputePipeline::usesPhysicalStorageBufferAddressesCapability(MVKShaderStage stage) {
return _usesPhysicalStorageBufferAddressesCapability;
}
MVKComputePipeline::~MVKComputePipeline() {
@synchronized (getMTLDevice()) {
[_mtlPipelineState release];
@ -2428,7 +2456,8 @@ namespace mvk {
scr.needsDynamicOffsetBuffer,
scr.needsInputThreadgroupMem,
scr.needsDispatchBaseBuffer,
scr.needsViewRangeBuffer);
scr.needsViewRangeBuffer,
scr.usesPhysicalStorageBufferAddressesCapability);
}
}

View File

@ -18,10 +18,9 @@
#pragma once
#include "mvk_datatypes.h"
#include "MVKEnvironment.h"
#include "MVKOSExtensions.h"
#include "MVKBaseObject.h"
#include "MVKOSExtensions.h"
#include "mvk_datatypes.h"
#include <spirv_msl.hpp>
#include <unordered_map>

View File

@ -109,24 +109,26 @@ using namespace std;
#endif
#if MVK_IOS_OR_TVOS
# define MTLPixelFormatDepth24Unorm_Stencil8 MTLPixelFormatInvalid
# define MTLPixelFormatX24_Stencil8 MTLPixelFormatInvalid
# define MTLPixelFormatBC1_RGBA MTLPixelFormatInvalid
# define MTLPixelFormatBC1_RGBA_sRGB MTLPixelFormatInvalid
# define MTLPixelFormatBC2_RGBA MTLPixelFormatInvalid
# define MTLPixelFormatBC2_RGBA_sRGB MTLPixelFormatInvalid
# define MTLPixelFormatBC3_RGBA MTLPixelFormatInvalid
# define MTLPixelFormatBC3_RGBA_sRGB MTLPixelFormatInvalid
# define MTLPixelFormatBC4_RUnorm MTLPixelFormatInvalid
# define MTLPixelFormatBC4_RSnorm MTLPixelFormatInvalid
# define MTLPixelFormatBC5_RGUnorm MTLPixelFormatInvalid
# define MTLPixelFormatBC5_RGSnorm MTLPixelFormatInvalid
# define MTLPixelFormatBC6H_RGBUfloat MTLPixelFormatInvalid
# define MTLPixelFormatBC6H_RGBFloat MTLPixelFormatInvalid
# define MTLPixelFormatBC7_RGBAUnorm MTLPixelFormatInvalid
# define MTLPixelFormatBC7_RGBAUnorm_sRGB MTLPixelFormatInvalid
# if !MVK_XCODE_14_3 // iOS/tvOS 16.4
# define MTLPixelFormatBC1_RGBA MTLPixelFormatInvalid
# define MTLPixelFormatBC1_RGBA_sRGB MTLPixelFormatInvalid
# define MTLPixelFormatBC2_RGBA MTLPixelFormatInvalid
# define MTLPixelFormatBC2_RGBA_sRGB MTLPixelFormatInvalid
# define MTLPixelFormatBC3_RGBA MTLPixelFormatInvalid
# define MTLPixelFormatBC3_RGBA_sRGB MTLPixelFormatInvalid
# define MTLPixelFormatBC4_RUnorm MTLPixelFormatInvalid
# define MTLPixelFormatBC4_RSnorm MTLPixelFormatInvalid
# define MTLPixelFormatBC5_RGUnorm MTLPixelFormatInvalid
# define MTLPixelFormatBC5_RGSnorm MTLPixelFormatInvalid
# define MTLPixelFormatBC6H_RGBUfloat MTLPixelFormatInvalid
# define MTLPixelFormatBC6H_RGBFloat MTLPixelFormatInvalid
# define MTLPixelFormatBC7_RGBAUnorm MTLPixelFormatInvalid
# define MTLPixelFormatBC7_RGBAUnorm_sRGB MTLPixelFormatInvalid
# endif
# define MTLPixelFormatDepth16Unorm_Stencil8 MTLPixelFormatDepth32Float_Stencil8
# define MTLPixelFormatDepth24Unorm_Stencil8 MTLPixelFormatInvalid
# define MTLPixelFormatX24_Stencil8 MTLPixelFormatInvalid
#endif
#if MVK_TVOS
@ -1217,20 +1219,20 @@ void MVKPixelFormats::initMTLPixelFormatCapabilities() {
addMTLPixelFormatDescSRGB( ASTC_12x12_sRGB, ASTC_12x12, None, None, ASTC_12x12_LDR );
addMTLPixelFormatDesc ( ASTC_12x12_HDR, ASTC_12x12, None, None );
addMTLPixelFormatDesc ( BC1_RGBA, BC1_RGBA, None, RF );
addMTLPixelFormatDescSRGB( BC1_RGBA_sRGB, BC1_RGBA, None, RF, BC1_RGBA );
addMTLPixelFormatDesc ( BC2_RGBA, BC2_RGBA, None, RF );
addMTLPixelFormatDescSRGB( BC2_RGBA_sRGB, BC2_RGBA, None, RF, BC2_RGBA );
addMTLPixelFormatDesc ( BC3_RGBA, BC3_RGBA, None, RF );
addMTLPixelFormatDescSRGB( BC3_RGBA_sRGB, BC3_RGBA, None, RF, BC3_RGBA );
addMTLPixelFormatDesc ( BC4_RUnorm, BC4_R, None, RF );
addMTLPixelFormatDesc ( BC4_RSnorm, BC4_R, None, RF );
addMTLPixelFormatDesc ( BC5_RGUnorm, BC5_RG, None, RF );
addMTLPixelFormatDesc ( BC5_RGSnorm, BC5_RG, None, RF );
addMTLPixelFormatDesc ( BC6H_RGBUfloat, BC6H_RGB, None, RF );
addMTLPixelFormatDesc ( BC6H_RGBFloat, BC6H_RGB, None, RF );
addMTLPixelFormatDesc ( BC7_RGBAUnorm, BC7_RGBA, None, RF );
addMTLPixelFormatDescSRGB( BC7_RGBAUnorm_sRGB, BC7_RGBA, None, RF, BC7_RGBAUnorm );
addMTLPixelFormatDesc ( BC1_RGBA, BC1_RGBA, RF, RF );
addMTLPixelFormatDescSRGB( BC1_RGBA_sRGB, BC1_RGBA, RF, RF, BC1_RGBA );
addMTLPixelFormatDesc ( BC2_RGBA, BC2_RGBA, RF, RF );
addMTLPixelFormatDescSRGB( BC2_RGBA_sRGB, BC2_RGBA, RF, RF, BC2_RGBA );
addMTLPixelFormatDesc ( BC3_RGBA, BC3_RGBA, RF, RF );
addMTLPixelFormatDescSRGB( BC3_RGBA_sRGB, BC3_RGBA, RF, RF, BC3_RGBA );
addMTLPixelFormatDesc ( BC4_RUnorm, BC4_R, RF, RF );
addMTLPixelFormatDesc ( BC4_RSnorm, BC4_R, RF, RF );
addMTLPixelFormatDesc ( BC5_RGUnorm, BC5_RG, RF, RF );
addMTLPixelFormatDesc ( BC5_RGSnorm, BC5_RG, RF, RF );
addMTLPixelFormatDesc ( BC6H_RGBUfloat, BC6H_RGB, RF, RF );
addMTLPixelFormatDesc ( BC6H_RGBFloat, BC6H_RGB, RF, RF );
addMTLPixelFormatDesc ( BC7_RGBAUnorm, BC7_RGBA, RF, RF );
addMTLPixelFormatDescSRGB( BC7_RGBAUnorm_sRGB, BC7_RGBA, RF, RF, BC7_RGBAUnorm );
// YUV pixel formats
addMTLPixelFormatDesc ( GBGR422, None, RF, RF );
@ -1450,10 +1452,10 @@ void MVKPixelFormats::modifyMTLFormatCapabilities() {
// Mac Catalyst does not support feature sets, so we redefine them to GPU families in MVKDevice.h.
#if MVK_MACCAT
#define addFeatSetMTLPixFmtCaps(FEAT_SET, MTL_FMT, CAPS) \
addMTLPixelFormatCapabilities(mtlDevice, MTLFeatureSet_ ##FEAT_SET, 10.16, MTLPixelFormat ##MTL_FMT, kMVKMTLFmtCaps ##CAPS)
addMTLPixelFormatCapabilities(mtlDevice, MTLFeatureSet_ ##FEAT_SET, 11.0, MTLPixelFormat ##MTL_FMT, kMVKMTLFmtCaps ##CAPS)
#define addFeatSetMTLVtxFmtCaps(FEAT_SET, MTL_FMT, CAPS) \
addMTLVertexFormatCapabilities(mtlDevice, MTLFeatureSet_ ##FEAT_SET, 10.16, MTLVertexFormat ##MTL_FMT, kMVKMTLFmtCaps ##CAPS)
addMTLVertexFormatCapabilities(mtlDevice, MTLFeatureSet_ ##FEAT_SET, 11.0, MTLVertexFormat ##MTL_FMT, kMVKMTLFmtCaps ##CAPS)
#else
#define addFeatSetMTLPixFmtCaps(FEAT_SET, MTL_FMT, CAPS) \
@ -1477,20 +1479,23 @@ void MVKPixelFormats::modifyMTLFormatCapabilities() {
addMTLVertexFormatCapabilities(mtlDevice, MTLGPUFamily ##GPU_FAM, OS_VER, MTLVertexFormat ##MTL_FMT, kMVKMTLFmtCaps ##CAPS)
// Modifies the format capability tables based on the capabilities of the specific MTLDevice
#if MVK_MACOS
void MVKPixelFormats::modifyMTLFormatCapabilities(id<MTLDevice> mtlDevice) {
addFeatSetMTLPixFmtCaps( macOS_GPUFamily1_v1, R32Uint, Atomic );
addFeatSetMTLPixFmtCaps( macOS_GPUFamily1_v1, R32Sint, Atomic );
if (mtlDevice.isDepth24Stencil8PixelFormatSupported) {
addFeatSetMTLPixFmtCaps( macOS_GPUFamily1_v1, Depth24Unorm_Stencil8, DRFMR );
if ( !mvkSupportsBCTextureCompression(mtlDevice) ) {
disableAllMTLPixFmtCaps( BC1_RGBA );
disableAllMTLPixFmtCaps( BC1_RGBA_sRGB );
disableAllMTLPixFmtCaps( BC2_RGBA );
disableAllMTLPixFmtCaps( BC2_RGBA_sRGB );
disableAllMTLPixFmtCaps( BC3_RGBA );
disableAllMTLPixFmtCaps( BC3_RGBA_sRGB );
disableAllMTLPixFmtCaps( BC4_RUnorm );
disableAllMTLPixFmtCaps( BC4_RSnorm );
disableAllMTLPixFmtCaps( BC5_RGUnorm );
disableAllMTLPixFmtCaps( BC5_RGSnorm );
disableAllMTLPixFmtCaps( BC6H_RGBUfloat );
disableAllMTLPixFmtCaps( BC6H_RGBFloat );
disableAllMTLPixFmtCaps( BC7_RGBAUnorm );
disableAllMTLPixFmtCaps( BC7_RGBAUnorm_sRGB );
}
addFeatSetMTLPixFmtCaps( macOS_GPUFamily1_v2, Depth16Unorm, DRFMR );
addFeatSetMTLPixFmtCaps( macOS_GPUFamily1_v3, BGR10A2Unorm, RFCMRB );
#if MVK_XCODE_12
if ([mtlDevice respondsToSelector: @selector(supports32BitMSAA)] &&
!mtlDevice.supports32BitMSAA) {
@ -1522,104 +1527,101 @@ void MVKPixelFormats::modifyMTLFormatCapabilities(id<MTLDevice> mtlDevice) {
disableMTLPixFmtCaps( RG32Float, Filter );
disableMTLPixFmtCaps( RGBA32Float, Filter );
}
#endif
if ( !mvkSupportsBCTextureCompression(mtlDevice) ) {
disableAllMTLPixFmtCaps( BC1_RGBA );
disableAllMTLPixFmtCaps( BC1_RGBA_sRGB );
disableAllMTLPixFmtCaps( BC2_RGBA );
disableAllMTLPixFmtCaps( BC2_RGBA_sRGB );
disableAllMTLPixFmtCaps( BC3_RGBA );
disableAllMTLPixFmtCaps( BC3_RGBA_sRGB );
disableAllMTLPixFmtCaps( BC4_RUnorm );
disableAllMTLPixFmtCaps( BC4_RSnorm );
disableAllMTLPixFmtCaps( BC5_RGUnorm );
disableAllMTLPixFmtCaps( BC5_RGSnorm );
disableAllMTLPixFmtCaps( BC6H_RGBUfloat );
disableAllMTLPixFmtCaps( BC6H_RGBFloat );
disableAllMTLPixFmtCaps( BC7_RGBAUnorm );
disableAllMTLPixFmtCaps( BC7_RGBAUnorm_sRGB );
#if MVK_MACOS
addFeatSetMTLPixFmtCaps( macOS_GPUFamily1_v1, R32Uint, Atomic );
addFeatSetMTLPixFmtCaps( macOS_GPUFamily1_v1, R32Sint, Atomic );
if (mtlDevice.isDepth24Stencil8PixelFormatSupported) {
addFeatSetMTLPixFmtCaps( macOS_GPUFamily1_v1, Depth24Unorm_Stencil8, DRFMR );
}
addGPUOSMTLPixFmtCaps( Apple5, 10.16, R8Unorm_sRGB, All );
addFeatSetMTLPixFmtCaps( macOS_GPUFamily1_v2, Depth16Unorm, DRFMR );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, RG8Unorm_sRGB, All );
addFeatSetMTLPixFmtCaps( macOS_GPUFamily1_v3, BGR10A2Unorm, RFCMRB );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, B5G6R5Unorm, RFCMRB );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, A1BGR5Unorm, RFCMRB );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ABGR4Unorm, RFCMRB );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, BGR5A1Unorm, RFCMRB );
#if MVK_XCODE_12
addGPUOSMTLPixFmtCaps( Apple5, 11.0, R8Unorm_sRGB, All );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, RGBA8Unorm_sRGB, All );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, BGRA8Unorm_sRGB, All );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, RG8Unorm_sRGB, All );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, B5G6R5Unorm, RFCMRB );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, A1BGR5Unorm, RFCMRB );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ABGR4Unorm, RFCMRB );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, BGR5A1Unorm, RFCMRB );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, RGBA8Unorm_sRGB, All );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, BGRA8Unorm_sRGB, All );
// Blending is actually supported for this format, but format channels cannot be individually write-enabled during blending.
// Disabling blending is the least-intrusive way to handle this in a Vulkan-friendly way.
addGPUOSMTLPixFmtCaps( Apple5, 10.16, RGB9E5Float, All );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, RGB9E5Float, All );
disableMTLPixFmtCaps ( RGB9E5Float, Blend);
addGPUOSMTLPixFmtCaps( Apple5, 10.16, PVRTC_RGBA_2BPP, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, PVRTC_RGBA_2BPP_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, PVRTC_RGBA_4BPP, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, PVRTC_RGBA_4BPP_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, PVRTC_RGBA_2BPP, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, PVRTC_RGBA_2BPP_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, PVRTC_RGBA_4BPP, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, PVRTC_RGBA_4BPP_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ETC2_RGB8, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ETC2_RGB8_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ETC2_RGB8A1, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ETC2_RGB8A1_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, EAC_RGBA8, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, EAC_RGBA8_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, EAC_R11Unorm, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, EAC_R11Snorm, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, EAC_RG11Unorm, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, EAC_RG11Snorm, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ETC2_RGB8, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ETC2_RGB8_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ETC2_RGB8A1, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ETC2_RGB8A1_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, EAC_RGBA8, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, EAC_RGBA8_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, EAC_R11Unorm, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, EAC_R11Snorm, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, EAC_RG11Unorm, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, EAC_RG11Snorm, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_4x4_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_4x4_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 10.16, ASTC_4x4_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_5x4_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_5x4_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 10.16, ASTC_5x4_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_5x5_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_5x5_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 10.16, ASTC_5x5_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_6x5_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_6x5_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 10.16, ASTC_6x5_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_6x6_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_6x6_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 10.16, ASTC_6x6_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_8x5_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_8x5_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 10.16, ASTC_8x5_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_8x6_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_8x6_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 10.16, ASTC_8x6_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_8x8_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_8x8_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 10.16, ASTC_8x8_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_10x5_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_10x5_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 10.16, ASTC_10x5_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_10x6_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_10x6_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 10.16, ASTC_10x6_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_10x8_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_10x8_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 10.16, ASTC_10x8_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_10x10_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_10x10_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 10.16, ASTC_10x10_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_12x10_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_12x10_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 10.16, ASTC_12x10_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_12x12_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, ASTC_12x12_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 10.16, ASTC_12x12_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_4x4_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_4x4_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 11.0, ASTC_4x4_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_5x4_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_5x4_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 11.0, ASTC_5x4_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_5x5_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_5x5_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 11.0, ASTC_5x5_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_6x5_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_6x5_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 11.0, ASTC_6x5_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_6x6_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_6x6_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 11.0, ASTC_6x6_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_8x5_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_8x5_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 11.0, ASTC_8x5_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_8x6_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_8x6_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 11.0, ASTC_8x6_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_8x8_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_8x8_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 11.0, ASTC_8x8_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_10x5_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_10x5_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 11.0, ASTC_10x5_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_10x6_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_10x6_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 11.0, ASTC_10x6_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_10x8_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_10x8_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 11.0, ASTC_10x8_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_10x10_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_10x10_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 11.0, ASTC_10x10_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_12x10_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_12x10_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 11.0, ASTC_12x10_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_12x12_LDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, ASTC_12x12_sRGB, RF );
addGPUOSMTLPixFmtCaps( Apple6, 11.0, ASTC_12x12_HDR, RF );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, BGRA10_XR, All );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, BGRA10_XR_sRGB, All );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, BGR10_XR, All );
addGPUOSMTLPixFmtCaps( Apple5, 10.16, BGR10_XR_sRGB, All );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, BGRA10_XR, All );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, BGRA10_XR_sRGB, All );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, BGR10_XR, All );
addGPUOSMTLPixFmtCaps( Apple5, 11.0, BGR10_XR_sRGB, All );
#endif
addFeatSetMTLVtxFmtCaps( macOS_GPUFamily1_v3, UCharNormalized, Vertex );
@ -1632,11 +1634,9 @@ void MVKPixelFormats::modifyMTLFormatCapabilities(id<MTLDevice> mtlDevice) {
addFeatSetMTLVtxFmtCaps( macOS_GPUFamily1_v3, Short, Vertex );
addFeatSetMTLVtxFmtCaps( macOS_GPUFamily1_v3, Half, Vertex );
addFeatSetMTLVtxFmtCaps( macOS_GPUFamily1_v3, UChar4Normalized_BGRA, Vertex );
}
#endif
#if MVK_TVOS
void MVKPixelFormats::modifyMTLFormatCapabilities(id<MTLDevice> mtlDevice) {
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v2, R8Unorm_sRGB, All );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily2_v1, R8Unorm_sRGB, All );
@ -1675,43 +1675,43 @@ void MVKPixelFormats::modifyMTLFormatCapabilities(id<MTLDevice> mtlDevice) {
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, RGBA32Sint, RWC );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, RGBA32Float, RWC );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_4x4_LDR, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_4x4_sRGB, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_5x4_LDR, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_5x4_sRGB, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_5x5_LDR, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_5x5_sRGB, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_6x5_LDR, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_6x5_sRGB, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_6x6_LDR, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_6x6_sRGB, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_8x5_LDR, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_8x5_sRGB, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_8x6_LDR, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_8x6_sRGB, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_8x8_LDR, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_8x8_sRGB, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_10x5_LDR, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_10x5_sRGB, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_10x6_LDR, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_10x6_sRGB, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_10x8_LDR, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_10x8_sRGB, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_10x10_LDR, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_10x10_sRGB, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_12x10_LDR, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_12x10_sRGB, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_12x12_LDR, RF );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily1_v1, ASTC_12x12_sRGB, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_4x4_LDR, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_4x4_sRGB, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_5x4_LDR, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_5x4_sRGB, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_5x5_LDR, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_5x5_sRGB, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_6x5_LDR, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_6x5_sRGB, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_6x6_LDR, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_6x6_sRGB, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_8x5_LDR, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_8x5_sRGB, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_8x6_LDR, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_8x6_sRGB, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_8x8_LDR, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_8x8_sRGB, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_10x5_LDR, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_10x5_sRGB, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_10x6_LDR, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_10x6_sRGB, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_10x8_LDR, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_10x8_sRGB, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_10x10_LDR, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_10x10_sRGB, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_12x10_LDR, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_12x10_sRGB, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_12x12_LDR, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily1_v1, ASTC_12x12_sRGB, RF );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily2_v1, Depth32Float, DRMR );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily2_v1, Depth32Float_Stencil8, DRMR );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily2_v1, Stencil8, DRMR );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily2_v1, BGRA10_XR, All );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily2_v1, BGRA10_XR_sRGB, All );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily2_v1, BGR10_XR, All );
addFeatSetMTLPixFmtCaps(tvOS_GPUFamily2_v1, BGR10_XR_sRGB, All );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily2_v1, BGRA10_XR, All );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily2_v1, BGRA10_XR_sRGB, All );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily2_v1, BGR10_XR, All );
addFeatSetMTLPixFmtCaps( tvOS_GPUFamily2_v1, BGR10_XR_sRGB, All );
addGPUOSMTLPixFmtCaps( Apple1, 13.0, Depth16Unorm, DRFM );
addGPUOSMTLPixFmtCaps( Apple3, 13.0, Depth16Unorm, DRFMR );
@ -1732,53 +1732,51 @@ void MVKPixelFormats::modifyMTLFormatCapabilities(id<MTLDevice> mtlDevice) {
#if MVK_OS_SIMULATOR
if (!([mtlDevice respondsToSelector: @selector(supportsFamily:)] &&
[mtlDevice supportsFamily: MTLGPUFamilyApple5])) {
disableAllMTLPixFmtCaps(R8Unorm_sRGB);
disableAllMTLPixFmtCaps(RG8Unorm_sRGB);
disableAllMTLPixFmtCaps(B5G6R5Unorm);
disableAllMTLPixFmtCaps(A1BGR5Unorm);
disableAllMTLPixFmtCaps(ABGR4Unorm);
disableAllMTLPixFmtCaps(BGR5A1Unorm);
disableAllMTLPixFmtCaps( R8Unorm_sRGB );
disableAllMTLPixFmtCaps( RG8Unorm_sRGB );
disableAllMTLPixFmtCaps( B5G6R5Unorm );
disableAllMTLPixFmtCaps( A1BGR5Unorm );
disableAllMTLPixFmtCaps( ABGR4Unorm );
disableAllMTLPixFmtCaps( BGR5A1Unorm );
disableAllMTLPixFmtCaps(BGRA10_XR);
disableAllMTLPixFmtCaps(BGRA10_XR_sRGB);
disableAllMTLPixFmtCaps(BGR10_XR);
disableAllMTLPixFmtCaps(BGR10_XR_sRGB);
disableAllMTLPixFmtCaps( BGRA10_XR );
disableAllMTLPixFmtCaps( BGRA10_XR_sRGB );
disableAllMTLPixFmtCaps( BGR10_XR );
disableAllMTLPixFmtCaps( BGR10_XR_sRGB );
disableAllMTLPixFmtCaps(GBGR422);
disableAllMTLPixFmtCaps(BGRG422);
disableAllMTLPixFmtCaps( GBGR422 );
disableAllMTLPixFmtCaps( BGRG422 );
disableMTLPixFmtCaps(RGB9E5Float, ColorAtt);
disableMTLPixFmtCaps( RGB9E5Float, ColorAtt );
disableMTLPixFmtCaps(R8Unorm_sRGB, Write);
disableMTLPixFmtCaps(RG8Unorm_sRGB, Write);
disableMTLPixFmtCaps(RGBA8Unorm_sRGB, Write);
disableMTLPixFmtCaps(BGRA8Unorm_sRGB, Write);
disableMTLPixFmtCaps(PVRTC_RGBA_2BPP_sRGB, Write);
disableMTLPixFmtCaps(PVRTC_RGBA_4BPP_sRGB, Write);
disableMTLPixFmtCaps(ETC2_RGB8_sRGB, Write);
disableMTLPixFmtCaps(ETC2_RGB8A1_sRGB, Write);
disableMTLPixFmtCaps(EAC_RGBA8_sRGB, Write);
disableMTLPixFmtCaps(ASTC_4x4_sRGB, Write);
disableMTLPixFmtCaps(ASTC_5x4_sRGB, Write);
disableMTLPixFmtCaps(ASTC_5x5_sRGB, Write);
disableMTLPixFmtCaps(ASTC_6x5_sRGB, Write);
disableMTLPixFmtCaps(ASTC_6x6_sRGB, Write);
disableMTLPixFmtCaps(ASTC_8x5_sRGB, Write);
disableMTLPixFmtCaps(ASTC_8x6_sRGB, Write);
disableMTLPixFmtCaps(ASTC_8x8_sRGB, Write);
disableMTLPixFmtCaps(ASTC_10x5_sRGB, Write);
disableMTLPixFmtCaps(ASTC_10x6_sRGB, Write);
disableMTLPixFmtCaps(ASTC_10x8_sRGB, Write);
disableMTLPixFmtCaps(ASTC_10x10_sRGB, Write);
disableMTLPixFmtCaps(ASTC_12x10_sRGB, Write);
disableMTLPixFmtCaps(ASTC_12x12_sRGB, Write);
disableMTLPixFmtCaps( R8Unorm_sRGB, Write );
disableMTLPixFmtCaps( RG8Unorm_sRGB, Write );
disableMTLPixFmtCaps( RGBA8Unorm_sRGB, Write );
disableMTLPixFmtCaps( BGRA8Unorm_sRGB, Write );
disableMTLPixFmtCaps( PVRTC_RGBA_2BPP_sRGB, Write );
disableMTLPixFmtCaps( PVRTC_RGBA_4BPP_sRGB, Write );
disableMTLPixFmtCaps( ETC2_RGB8_sRGB, Write );
disableMTLPixFmtCaps( ETC2_RGB8A1_sRGB, Write );
disableMTLPixFmtCaps( EAC_RGBA8_sRGB, Write );
disableMTLPixFmtCaps( ASTC_4x4_sRGB, Write );
disableMTLPixFmtCaps( ASTC_5x4_sRGB, Write );
disableMTLPixFmtCaps( ASTC_5x5_sRGB, Write );
disableMTLPixFmtCaps( ASTC_6x5_sRGB, Write );
disableMTLPixFmtCaps( ASTC_6x6_sRGB, Write );
disableMTLPixFmtCaps( ASTC_8x5_sRGB, Write );
disableMTLPixFmtCaps( ASTC_8x6_sRGB, Write );
disableMTLPixFmtCaps( ASTC_8x8_sRGB, Write );
disableMTLPixFmtCaps( ASTC_10x5_sRGB, Write );
disableMTLPixFmtCaps( ASTC_10x6_sRGB, Write );
disableMTLPixFmtCaps( ASTC_10x8_sRGB, Write );
disableMTLPixFmtCaps( ASTC_10x10_sRGB, Write );
disableMTLPixFmtCaps( ASTC_12x10_sRGB, Write );
disableMTLPixFmtCaps( ASTC_12x12_sRGB, Write );
}
#endif
}
#endif
#if MVK_IOS
void MVKPixelFormats::modifyMTLFormatCapabilities(id<MTLDevice> mtlDevice) {
addFeatSetMTLPixFmtCaps( iOS_GPUFamily2_v3, R8Unorm_sRGB, All );
addFeatSetMTLPixFmtCaps( iOS_GPUFamily3_v1, R8Unorm_sRGB, All );
@ -1891,50 +1889,50 @@ void MVKPixelFormats::modifyMTLFormatCapabilities(id<MTLDevice> mtlDevice) {
#if MVK_OS_SIMULATOR
if (!([mtlDevice respondsToSelector: @selector(supportsFamily:)] &&
[mtlDevice supportsFamily: MTLGPUFamilyApple5])) {
disableAllMTLPixFmtCaps(R8Unorm_sRGB);
disableAllMTLPixFmtCaps(RG8Unorm_sRGB);
disableAllMTLPixFmtCaps(B5G6R5Unorm);
disableAllMTLPixFmtCaps(A1BGR5Unorm);
disableAllMTLPixFmtCaps(ABGR4Unorm);
disableAllMTLPixFmtCaps(BGR5A1Unorm);
disableAllMTLPixFmtCaps( R8Unorm_sRGB );
disableAllMTLPixFmtCaps( RG8Unorm_sRGB );
disableAllMTLPixFmtCaps( B5G6R5Unorm );
disableAllMTLPixFmtCaps( A1BGR5Unorm );
disableAllMTLPixFmtCaps( ABGR4Unorm );
disableAllMTLPixFmtCaps( BGR5A1Unorm );
disableAllMTLPixFmtCaps(BGRA10_XR);
disableAllMTLPixFmtCaps(BGRA10_XR_sRGB);
disableAllMTLPixFmtCaps(BGR10_XR);
disableAllMTLPixFmtCaps(BGR10_XR_sRGB);
disableAllMTLPixFmtCaps( BGRA10_XR );
disableAllMTLPixFmtCaps( BGRA10_XR_sRGB );
disableAllMTLPixFmtCaps( BGR10_XR );
disableAllMTLPixFmtCaps( BGR10_XR_sRGB );
disableAllMTLPixFmtCaps(GBGR422);
disableAllMTLPixFmtCaps(BGRG422);
disableAllMTLPixFmtCaps( GBGR422 );
disableAllMTLPixFmtCaps( BGRG422 );
disableMTLPixFmtCaps(RGB9E5Float, ColorAtt);
disableMTLPixFmtCaps( RGB9E5Float, ColorAtt );
disableMTLPixFmtCaps(R8Unorm_sRGB, Write);
disableMTLPixFmtCaps(RG8Unorm_sRGB, Write);
disableMTLPixFmtCaps(RGBA8Unorm_sRGB, Write);
disableMTLPixFmtCaps(BGRA8Unorm_sRGB, Write);
disableMTLPixFmtCaps(PVRTC_RGBA_2BPP_sRGB, Write);
disableMTLPixFmtCaps(PVRTC_RGBA_4BPP_sRGB, Write);
disableMTLPixFmtCaps(ETC2_RGB8_sRGB, Write);
disableMTLPixFmtCaps(ETC2_RGB8A1_sRGB, Write);
disableMTLPixFmtCaps(EAC_RGBA8_sRGB, Write);
disableMTLPixFmtCaps(ASTC_4x4_sRGB, Write);
disableMTLPixFmtCaps(ASTC_5x4_sRGB, Write);
disableMTLPixFmtCaps(ASTC_5x5_sRGB, Write);
disableMTLPixFmtCaps(ASTC_6x5_sRGB, Write);
disableMTLPixFmtCaps(ASTC_6x6_sRGB, Write);
disableMTLPixFmtCaps(ASTC_8x5_sRGB, Write);
disableMTLPixFmtCaps(ASTC_8x6_sRGB, Write);
disableMTLPixFmtCaps(ASTC_8x8_sRGB, Write);
disableMTLPixFmtCaps(ASTC_10x5_sRGB, Write);
disableMTLPixFmtCaps(ASTC_10x6_sRGB, Write);
disableMTLPixFmtCaps(ASTC_10x8_sRGB, Write);
disableMTLPixFmtCaps(ASTC_10x10_sRGB, Write);
disableMTLPixFmtCaps(ASTC_12x10_sRGB, Write);
disableMTLPixFmtCaps(ASTC_12x12_sRGB, Write);
disableMTLPixFmtCaps( R8Unorm_sRGB, Write );
disableMTLPixFmtCaps( RG8Unorm_sRGB, Write );
disableMTLPixFmtCaps( RGBA8Unorm_sRGB, Write );
disableMTLPixFmtCaps( BGRA8Unorm_sRGB, Write );
disableMTLPixFmtCaps( PVRTC_RGBA_2BPP_sRGB, Write );
disableMTLPixFmtCaps( PVRTC_RGBA_4BPP_sRGB, Write );
disableMTLPixFmtCaps( ETC2_RGB8_sRGB, Write );
disableMTLPixFmtCaps( ETC2_RGB8A1_sRGB, Write );
disableMTLPixFmtCaps( EAC_RGBA8_sRGB, Write );
disableMTLPixFmtCaps( ASTC_4x4_sRGB, Write );
disableMTLPixFmtCaps( ASTC_5x4_sRGB, Write );
disableMTLPixFmtCaps( ASTC_5x5_sRGB, Write );
disableMTLPixFmtCaps( ASTC_6x5_sRGB, Write );
disableMTLPixFmtCaps( ASTC_6x6_sRGB, Write );
disableMTLPixFmtCaps( ASTC_8x5_sRGB, Write );
disableMTLPixFmtCaps( ASTC_8x6_sRGB, Write );
disableMTLPixFmtCaps( ASTC_8x8_sRGB, Write );
disableMTLPixFmtCaps( ASTC_10x5_sRGB, Write );
disableMTLPixFmtCaps( ASTC_10x6_sRGB, Write );
disableMTLPixFmtCaps( ASTC_10x8_sRGB, Write );
disableMTLPixFmtCaps( ASTC_10x10_sRGB, Write );
disableMTLPixFmtCaps( ASTC_12x10_sRGB, Write );
disableMTLPixFmtCaps( ASTC_12x12_sRGB, Write );
}
#endif
}
#endif
}
#undef addFeatSetMTLPixFmtCaps
#undef addGPUOSMTLPixFmtCaps

View File

@ -284,7 +284,7 @@ id<MTLBuffer> MVKOcclusionQueryPool::getResultBuffer(MVKCommandEncoder*, uint32_
}
id<MTLComputeCommandEncoder> MVKOcclusionQueryPool::encodeComputeCopyResults(MVKCommandEncoder* cmdEncoder, uint32_t firstQuery, uint32_t, uint32_t index) {
id<MTLComputeCommandEncoder> mtlCmdEnc = cmdEncoder->getMTLComputeEncoder(kMVKCommandUseCopyQueryPoolResults);
id<MTLComputeCommandEncoder> mtlCmdEnc = cmdEncoder->getMTLComputeEncoder(kMVKCommandUseCopyQueryPoolResults, true);
[mtlCmdEnc setBuffer: getVisibilityResultMTLBuffer() offset: getVisibilityResultOffset(firstQuery) atIndex: index];
return mtlCmdEnc;
}
@ -316,7 +316,9 @@ MVKOcclusionQueryPool::MVKOcclusionQueryPool(MVKDevice* device,
VkDeviceSize newBuffLen = min(reqBuffLen, maxBuffLen);
if (reqBuffLen > maxBuffLen) {
reportError(VK_ERROR_OUT_OF_DEVICE_MEMORY, "vkCreateQueryPool(): Each query pool can support a maximum of %d queries.", uint32_t(newBuffLen / kMVKQuerySlotSizeInBytes));
reportError(VK_ERROR_OUT_OF_DEVICE_MEMORY,
"vkCreateQueryPool(): Each occlusion query pool can support a maximum of %d queries.",
uint32_t(newBuffLen / kMVKQuerySlotSizeInBytes));
}
NSUInteger mtlBuffLen = mvkAlignByteCount(newBuffLen, _device->_pMetalFeatures->mtlBufferAlignment);
@ -356,9 +358,9 @@ void MVKGPUCounterQueryPool::initMTLCounterSampleBuffer(const VkQueryPoolCreateI
NSError* err = nil;
_mtlCounterBuffer = [getMTLDevice() newCounterSampleBufferWithDescriptor: tsDesc error: &err];
if (err) {
setConfigurationResult(reportError(VK_ERROR_INITIALIZATION_FAILED,
"Could not create MTLCounterSampleBuffer for query pool of type %s. Reverting to emulated behavior. (Error code %li): %s",
queryTypeName, (long)err.code, err.localizedDescription.UTF8String));
reportError(VK_ERROR_OUT_OF_DEVICE_MEMORY,
"Could not create MTLCounterSampleBuffer of size %llu, for %d queries, in query pool of type %s. Reverting to emulated behavior. (Error code %li): %s",
(VkDeviceSize)pCreateInfo->queryCount * kMVKQuerySlotSizeInBytes, pCreateInfo->queryCount, queryTypeName, (long)err.code, err.localizedDescription.UTF8String);
}
}
};
@ -432,12 +434,12 @@ id<MTLComputeCommandEncoder> MVKTimestampQueryPool::encodeComputeCopyResults(MVK
destinationBuffer: tempBuff->_mtlBuffer
destinationOffset: tempBuff->_offset];
id<MTLComputeCommandEncoder> mtlCmdEnc = cmdEncoder->getMTLComputeEncoder(kMVKCommandUseCopyQueryPoolResults);
id<MTLComputeCommandEncoder> mtlCmdEnc = cmdEncoder->getMTLComputeEncoder(kMVKCommandUseCopyQueryPoolResults, true);
[mtlCmdEnc setBuffer: tempBuff->_mtlBuffer offset: tempBuff->_offset atIndex: index];
return mtlCmdEnc;
} else {
// We can set the timestamp bytes into the compute encoder.
id<MTLComputeCommandEncoder> mtlCmdEnc = cmdEncoder->getMTLComputeEncoder(kMVKCommandUseCopyQueryPoolResults);
id<MTLComputeCommandEncoder> mtlCmdEnc = cmdEncoder->getMTLComputeEncoder(kMVKCommandUseCopyQueryPoolResults, true);
cmdEncoder->setComputeBytes(mtlCmdEnc, &_timestamps[firstQuery], queryCount * _queryElementCount * sizeof(uint64_t), index);
return mtlCmdEnc;
}

View File

@ -621,7 +621,7 @@ MVKQueuePresentSurfaceSubmission::MVKQueuePresentSurfaceSubmission(MVKQueue* que
_presentInfo.reserve(scCnt);
for (uint32_t scIdx = 0; scIdx < scCnt; scIdx++) {
MVKSwapchain* mvkSC = (MVKSwapchain*)pPresentInfo->pSwapchains[scIdx];
MVKImagePresentInfo presentInfo = {};
MVKImagePresentInfo presentInfo = {}; // Start with everything zeroed
presentInfo.presentableImage = mvkSC->getPresentableImage(pPresentInfo->pImageIndices[scIdx]);
presentInfo.presentMode = pPresentModes ? pPresentModes[scIdx] : VK_PRESENT_MODE_MAX_ENUM_KHR;
presentInfo.fence = pFences ? (MVKFence*)pFences[scIdx] : nullptr;
@ -629,8 +629,6 @@ MVKQueuePresentSurfaceSubmission::MVKQueuePresentSurfaceSubmission(MVKQueue* que
presentInfo.hasPresentTime = true;
presentInfo.presentID = pPresentTimes[scIdx].presentID;
presentInfo.desiredPresentTime = pPresentTimes[scIdx].desiredPresentTime;
} else {
presentInfo.hasPresentTime = false;
}
_presentInfo.push_back(presentInfo);
VkResult scRslt = mvkSC->getSurfaceStatus();

View File

@ -54,9 +54,6 @@ public:
/** Returns whether this subpass has any color attachments. */
bool hasColorAttachments();
/** Returns whether this subpass has a depth/stencil attachment. */
bool hasDepthStencilAttachment() { return _depthStencilAttachment.attachment != VK_ATTACHMENT_UNUSED; }
/** Returns the number of color attachments, which may be zero for depth-only rendering. */
uint32_t getColorAttachmentCount() { return uint32_t(_colorAttachments.size()); }
@ -69,8 +66,17 @@ public:
/** Returns whether or not the color attachment is used as both a color attachment and an input attachment. */
bool isColorAttachmentAlsoInputAttachment(uint32_t colorAttIdx);
/** Returns the format of the depth/stencil attachment. */
VkFormat getDepthStencilFormat();
/** Returns whether or not the depth attachment is being used. */
bool isDepthAttachmentUsed() { return _depthAttachment.attachment != VK_ATTACHMENT_UNUSED; }
/** Returns whether or not the stencil attachment is being used. */
bool isStencilAttachmentUsed() { return _stencilAttachment.attachment != VK_ATTACHMENT_UNUSED; }
/** Return the depth attachment format. */
VkFormat getDepthFormat();
/** Return the stencil attachment format. */
VkFormat getStencilFormat();
/** Returns the Vulkan sample count of the attachments used in this subpass. */
VkSampleCountFlagBits getSampleCount();
@ -146,10 +152,11 @@ public:
MVKRenderSubpass(MVKRenderPass* renderPass, const VkSubpassDescription2* pCreateInfo);
private:
MVKRenderSubpass(MVKRenderPass* renderPass, const VkRenderingInfo* pRenderingInfo);
protected:
friend class MVKRenderPass;
friend class MVKRenderPassAttachment;
friend class MVKAttachmentDescription;
uint32_t getViewMaskGroupForMetalPass(uint32_t passIdx);
MVKMTLFmtCaps getRequiredFormatCapabilitiesForAttachmentAt(uint32_t rpAttIdx);
@ -162,8 +169,10 @@ private:
MVKSmallVector<uint32_t, kMVKDefaultAttachmentCount> _preserveAttachments;
MVKSmallVector<VkFormat, kMVKDefaultAttachmentCount> _colorAttachmentFormats;
VkPipelineRenderingCreateInfo _pipelineRenderingCreateInfo;
VkAttachmentReference2 _depthStencilAttachment;
VkAttachmentReference2 _depthStencilResolveAttachment;
VkAttachmentReference2 _depthAttachment;
VkAttachmentReference2 _stencilAttachment;
VkAttachmentReference2 _depthResolveAttachment;
VkAttachmentReference2 _stencilResolveAttachment;
VkResolveModeFlagBits _depthResolveMode = VK_RESOLVE_MODE_NONE;
VkResolveModeFlagBits _stencilResolveMode = VK_RESOLVE_MODE_NONE;
VkSampleCountFlagBits _defaultSampleCount = VK_SAMPLE_COUNT_1_BIT;
@ -172,10 +181,10 @@ private:
#pragma mark -
#pragma mark MVKRenderPassAttachment
#pragma mark MVKAttachmentDescription
/** Represents an attachment within a Vulkan render pass. */
class MVKRenderPassAttachment : public MVKBaseObject {
class MVKAttachmentDescription : public MVKBaseObject {
public:
@ -218,13 +227,20 @@ public:
/** Returns whether this attachment should be cleared in the subpass. */
bool shouldClearAttachment(MVKRenderSubpass* subpass, bool isStencil);
MVKRenderPassAttachment(MVKRenderPass* renderPass,
MVKAttachmentDescription(MVKRenderPass* renderPass,
const VkAttachmentDescription* pCreateInfo);
MVKRenderPassAttachment(MVKRenderPass* renderPass,
MVKAttachmentDescription(MVKRenderPass* renderPass,
const VkAttachmentDescription2* pCreateInfo);
MVKAttachmentDescription(MVKRenderPass* renderPass,
const VkRenderingAttachmentInfo* pAttInfo,
bool isResolveAttachment);
protected:
friend class MVKRenderPass;
friend class MVKRenderSubpass;
bool isFirstUseOfAttachment(MVKRenderSubpass* subpass);
bool isLastUseOfAttachment(MVKRenderSubpass* subpass);
MTLStoreAction getMTLStoreAction(MVKRenderSubpass* subpass,
@ -234,7 +250,7 @@ protected:
bool canResolveFormat,
bool isStencil,
bool storeOverride);
void validateFormat();
void linkToSubpasses();
VkAttachmentDescription2 _info;
MVKRenderPass* _renderPass;
@ -282,13 +298,15 @@ public:
MVKRenderPass(MVKDevice* device, const VkRenderPassCreateInfo2* pCreateInfo);
MVKRenderPass(MVKDevice* device, const VkRenderingInfo* pRenderingInfo);
protected:
friend class MVKRenderSubpass;
friend class MVKRenderPassAttachment;
friend class MVKAttachmentDescription;
void propagateDebugName() override {}
MVKSmallVector<MVKRenderPassAttachment> _attachments;
MVKSmallVector<MVKAttachmentDescription> _attachments;
MVKSmallVector<MVKRenderSubpass> _subpasses;
MVKSmallVector<VkSubpassDependency2> _subpassDependencies;
VkRenderingFlags _renderingFlags = 0;
@ -297,21 +315,47 @@ protected:
#pragma mark -
#pragma mark Support functions
#pragma mark MVKRenderingAttachmentIterator
/** Returns a MVKRenderPass object created from the rendering info. */
MVKRenderPass* mvkCreateRenderPass(MVKDevice* device, const VkRenderingInfo* pRenderingInfo);
typedef std::function<void(const VkRenderingAttachmentInfo* pAttInfo,
VkImageAspectFlagBits aspect,
bool isResolveAttachment)> MVKRenderingAttachmentInfoOperation;
/**
* Extracts the usable attachments and their clear values from the rendering info,
* and sets them in the corresponding arrays, which must be large enough to hold
* all of the extracted values, and returns the number of attachments extracted.
* For consistency, the clear value of any resolve attachments are populated,
* even though they are ignored.
* Iterates the attachments in a VkRenderingInfo, and processes an operation
* on each attachment, once for the imageView, and once for the resolveImageView.
*
* Attachments are sequentially processed in this order:
* [color, color-resolve], ...,
* depth, depth-resolve,
* stencil, stencil-resolve
* skipping any attachments that do not have a VkImageView
*/
uint32_t mvkGetAttachments(const VkRenderingInfo* pRenderingInfo,
MVKImageView* attachments[],
VkClearValue clearValues[]);
class MVKRenderingAttachmentIterator : public MVKBaseObject {
public:
MVKVulkanAPIObject* getVulkanAPIObject() override { return nullptr; }
/** Iterates the attachments with the specified lambda function. */
void iterate(MVKRenderingAttachmentInfoOperation attOperation);
MVKRenderingAttachmentIterator(const VkRenderingInfo* pRenderingInfo);
protected:
void handleAttachment(const VkRenderingAttachmentInfo* pAttInfo,
VkImageAspectFlagBits aspect,
MVKRenderingAttachmentInfoOperation attOperation);
const VkRenderingAttachmentInfo* getAttachmentInfo(const VkRenderingAttachmentInfo* pAtt,
const VkRenderingAttachmentInfo* pAltAtt,
bool isStencil);
VkRenderingInfo _renderingInfo;
};
#pragma mark -
#pragma mark Support functions
/** Returns whether the view mask uses multiview. */
static constexpr bool mvkIsMultiview(uint32_t viewMask) { return viewMask != 0; }
@ -322,9 +366,6 @@ bool mvkIsColorAttachmentUsed(const VkPipelineRenderingCreateInfo* pRendInfo, ui
/** Returns whether any attachment is being used. */
bool mvkHasColorAttachments(const VkPipelineRenderingCreateInfo* pRendInfo);
/** Extracts and returns the combined depth/stencil format . */
VkFormat mvkGetDepthStencilFormat(const VkPipelineRenderingCreateInfo* pRendInfo);
/**
* Extracts the first view, number of views, and the portion of the mask
* to be rendered from the lowest clump of set bits in a view mask.

File diff suppressed because it is too large Load Diff

View File

@ -18,7 +18,6 @@
#include "MVKResource.h"
#include "MVKCommandBuffer.h"
#include "MVKEnvironment.h"
#pragma mark MVKResource

View File

@ -111,8 +111,6 @@ MVKMTLFunction MVKShaderLibrary::getMTLFunction(const VkSpecializationInfo* pSpe
mtlFunc = [fs.newMTLFunction(_mtlLibrary, mtlFuncName, mtlFCVals) autorelease];
}
}
} else {
reportError(VK_ERROR_INVALID_SHADER_NV, "Shader module does not contain an entry point named '%s'.", mtlFuncName.UTF8String);
}
// Set the debug name. First try name of shader module, otherwise try name of owner.

View File

@ -19,7 +19,6 @@
#pragma once
#include "MVKVulkanAPIObject.h"
#include "MVKEnvironment.h"
#include <mutex>
#import <Metal/Metal.h>

View File

@ -121,7 +121,7 @@ protected:
void willPresentSurface(id<MTLTexture> mtlTexture, id<MTLCommandBuffer> mtlCmdBuff);
void renderWatermark(id<MTLTexture> mtlTexture, id<MTLCommandBuffer> mtlCmdBuff);
void markFrameInterval();
void recordPresentTime(MVKImagePresentInfo& presentInfo, uint64_t actualPresentTime = 0);
void recordPresentTime(const MVKImagePresentInfo& presentInfo, uint64_t actualPresentTime = 0);
CAMetalLayer* _mtlLayer = nil;
MVKWatermark* _licenseWatermark = nullptr;

View File

@ -175,7 +175,9 @@ void MVKSwapchain::markFrameInterval() {
perfLogCntLimit,
(1000.0 / _device->_performanceStatistics.queue.frameInterval.averageDuration),
mvkGetElapsedMilliseconds() / 1000.0);
_device->logPerformanceSummary();
if (mvkConfig().activityPerformanceLoggingStyle == MVK_CONFIG_ACTIVITY_PERFORMANCE_LOGGING_STYLE_FRAME_COUNT) {
_device->logPerformanceSummary();
}
}
}
@ -559,7 +561,7 @@ VkResult MVKSwapchain::getPastPresentationTiming(uint32_t *pCount, VkPastPresent
return res;
}
void MVKSwapchain::recordPresentTime(MVKImagePresentInfo& presentInfo, uint64_t actualPresentTime) {
void MVKSwapchain::recordPresentTime(const MVKImagePresentInfo& presentInfo, uint64_t actualPresentTime) {
std::lock_guard<std::mutex> lock(_presentHistoryLock);
if (_presentHistoryCount < kMaxPresentationHistory) {
_presentHistoryCount++;
@ -567,8 +569,11 @@ void MVKSwapchain::recordPresentTime(MVKImagePresentInfo& presentInfo, uint64_t
_presentHistoryHeadIndex = (_presentHistoryHeadIndex + 1) % kMaxPresentationHistory;
}
// If actual time not supplied, use desired time instead
// If actual present time is not available, use desired time instead, and if that
// hasn't been set, use the current time, which should be reasonably accurate (sub-ms),
// since we are here as part of the addPresentedHandler: callback.
if (actualPresentTime == 0) { actualPresentTime = presentInfo.desiredPresentTime; }
if (actualPresentTime == 0) { actualPresentTime = CACurrentMediaTime() * 1.0e9; }
_presentTimingHistory[_presentHistoryIndex].presentID = presentInfo.presentID;
_presentTimingHistory[_presentHistoryIndex].desiredPresentTime = presentInfo.desiredPresentTime;

View File

@ -459,6 +459,8 @@ public:
MVKFenceSitter(bool waitAll) : _blocker(waitAll, 0) {}
~MVKFenceSitter() override { [_listener release]; }
private:
friend class MVKFence;
friend class MVKTimelineSemaphoreMTLEvent;
@ -634,3 +636,71 @@ protected:
std::string _compilerType = "Unknown";
MVKPerformanceTracker* _pPerformanceTracker = nullptr;
};
#pragma mark -
#pragma mark MVKDeferredOperation
/** Defines the function pointer for each deferrable function. */
union MVKDeferredOperationFunctionPointer
{
    // Empty until deferred functions from other extensions have been defined.
    // Planned to hold std::function members, one per deferrable entry point.
};
/** Indicates which kind of function is being deferred. */
enum MVKDeferredOperationFunctionType
{
    // Empty until deferred functions from other extensions have been defined.
};
/** Represents a Vulkan deferred host operation (VK_KHR_deferred_host_operations). */
class MVKDeferredOperation : public MVKVulkanAPIDeviceObject {
public:
/** Returns the Vulkan type of this object. */
VkObjectType getVkObjectType() override { return VK_OBJECT_TYPE_DEFERRED_OPERATION_KHR; }
/** Returns the debug report object type of this object. */
VkDebugReportObjectTypeEXT getVkDebugReportObjectType() override { return VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT; }
/** Begins executing the deferred operation on the current thread. */
VkResult join();
/** Returns the maximum number of threads that can execute the deferred operation concurrently. */
uint32_t getMaxConcurrency() {
std::lock_guard<std::mutex> lock(_maxConcurrencyLock);
return _maxConcurrency;
}
/** Returns the result of the execution of the deferred operation. */
VkResult getResult() {
std::lock_guard<std::mutex> lock(_resultLock);
return _operationResult;
}
// Maximum number of parameters that can be captured for a deferred function call.
static const int kMVKMaxDeferredFunctionParameters = 3;
/** Sets all the state needed for a deferred operation. This should never be called directly, but only from other functions that accept deferred operations. */
void deferOperation(MVKDeferredOperationFunctionPointer pointer, MVKDeferredOperationFunctionType type, void* parameters[kMVKMaxDeferredFunctionParameters]);
#pragma mark Construction
MVKDeferredOperation(MVKDevice* device) : MVKVulkanAPIDeviceObject(device) {}
protected:
/** Stores the result of the operation. */
VkResult _operationResult = VK_SUCCESS;
/** The mutex protecting the operation result, to ensure thread safety. */
std::mutex _resultLock;
/** Stores a pointer to the deferred function. */
MVKDeferredOperationFunctionPointer _functionPointer;
/** Indicates which function is being deferred. */
MVKDeferredOperationFunctionType _functionType;
/** The parameters of the operation being deferred. */
void* _functionParameters[kMVKMaxDeferredFunctionParameters] = {};
/** Stores the maximum number of threads that should be used. */
uint32_t _maxConcurrency = 0;
/** The mutex protecting the max concurrency value, to ensure thread safety. */
std::mutex _maxConcurrencyLock;
void propagateDebugName() override {}
};

View File

@ -620,5 +620,38 @@ MVKMetalCompiler::~MVKMetalCompiler() {
[_compileError release];
}
#pragma mark -
#pragma mark MVKDeferredOperation
// Executes the deferred operation on the current thread, per vkDeferredOperationJoinKHR().
// Currently no deferrable functions are defined, so this always reports VK_THREAD_DONE_KHR.
// When a function type is added, its case must assign opResult before falling through to
// the result/concurrency bookkeeping below.
VkResult MVKDeferredOperation::join() {
    VkResult opResult = VK_SUCCESS;     // Initialized defensively; set by the dispatched operation.
    switch(_functionType)
    {
        // Set operation result here by calling operation
        default: return VK_THREAD_DONE_KHR;
    }
    {
        // Scoped lock: publish the operation result thread-safely.
        std::lock_guard<std::mutex> lock(_resultLock);
        _operationResult = opResult;
    }
    {
        // Scoped lock: the operation is complete, so no further threads can contribute.
        std::lock_guard<std::mutex> lock(_maxConcurrencyLock);
        _maxConcurrency = 0;
    }
    return VK_SUCCESS;
}
// Captures the function, its type, and its parameters for later execution via join(),
// and records the number of CPU cores available for concurrent execution.
// Should only be called from other functions that accept deferred operations.
void MVKDeferredOperation::deferOperation(MVKDeferredOperationFunctionPointer pointer, MVKDeferredOperationFunctionType type, void* parameters[kMVKMaxDeferredFunctionParameters])
{
    _functionPointer = pointer;
    _functionType = type;
    for(int i = 0; i < kMVKMaxDeferredFunctionParameters; i++) {
        _functionParameters[i] = parameters[i];
    }
    // Query the core count outside the lock; hold the lock only for the assignment,
    // and use a scoped guard for exception safety (consistent with getMaxConcurrency()).
    uint32_t availableCores = mvkGetAvaliableCPUCores();
    {
        std::lock_guard<std::mutex> lock(_maxConcurrencyLock);
        _maxConcurrency = availableCores;
    }
}

View File

@ -41,101 +41,104 @@
#define MVK_EXTENSION_LAST(var, EXT, type, macos, ios, xros) MVK_EXTENSION(var, EXT, type, macos, ios, xros)
#endif
MVK_EXTENSION(KHR_16bit_storage, KHR_16BIT_STORAGE, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_8bit_storage, KHR_8BIT_STORAGE, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_bind_memory2, KHR_BIND_MEMORY_2, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_buffer_device_address, KHR_BUFFER_DEVICE_ADDRESS, DEVICE, 13.0, 16.0, 1.0)
MVK_EXTENSION(KHR_copy_commands2, KHR_COPY_COMMANDS_2, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_create_renderpass2, KHR_CREATE_RENDERPASS_2, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_dedicated_allocation, KHR_DEDICATED_ALLOCATION, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_depth_stencil_resolve, KHR_DEPTH_STENCIL_RESOLVE, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_descriptor_update_template, KHR_DESCRIPTOR_UPDATE_TEMPLATE, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_device_group, KHR_DEVICE_GROUP, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_device_group_creation, KHR_DEVICE_GROUP_CREATION, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_draw_indirect_count, KHR_DRAW_INDIRECT_COUNT, DEVICE, MVK_NA, MVK_NA, MVK_NA)
MVK_EXTENSION(KHR_driver_properties, KHR_DRIVER_PROPERTIES, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_dynamic_rendering, KHR_DYNAMIC_RENDERING, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_external_fence, KHR_EXTERNAL_FENCE, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_external_fence_capabilities, KHR_EXTERNAL_FENCE_CAPABILITIES, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_external_memory, KHR_EXTERNAL_MEMORY, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_external_memory_capabilities, KHR_EXTERNAL_MEMORY_CAPABILITIES, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_external_semaphore, KHR_EXTERNAL_SEMAPHORE, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_external_semaphore_capabilities, KHR_EXTERNAL_SEMAPHORE_CAPABILITIES, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_fragment_shader_barycentric, KHR_FRAGMENT_SHADER_BARYCENTRIC, DEVICE, 10.15, 14.0, 1.0)
MVK_EXTENSION(KHR_get_memory_requirements2, KHR_GET_MEMORY_REQUIREMENTS_2, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_get_physical_device_properties2, KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_get_surface_capabilities2, KHR_GET_SURFACE_CAPABILITIES_2, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_imageless_framebuffer, KHR_IMAGELESS_FRAMEBUFFER, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_image_format_list, KHR_IMAGE_FORMAT_LIST, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_maintenance1, KHR_MAINTENANCE1, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_maintenance2, KHR_MAINTENANCE2, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_maintenance3, KHR_MAINTENANCE3, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_multiview, KHR_MULTIVIEW, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_portability_subset, KHR_PORTABILITY_SUBSET, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_push_descriptor, KHR_PUSH_DESCRIPTOR, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_relaxed_block_layout, KHR_RELAXED_BLOCK_LAYOUT, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_sampler_mirror_clamp_to_edge, KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE, DEVICE, 10.11, 14.0, 1.0)
MVK_EXTENSION(KHR_sampler_ycbcr_conversion, KHR_SAMPLER_YCBCR_CONVERSION, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_separate_depth_stencil_layouts, KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_shader_atomic_int64, KHR_SHADER_ATOMIC_INT64, DEVICE, MVK_NA, MVK_NA, MVK_NA)
MVK_EXTENSION(KHR_shader_draw_parameters, KHR_SHADER_DRAW_PARAMETERS, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_shader_float_controls, KHR_SHADER_FLOAT_CONTROLS, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_shader_float16_int8, KHR_SHADER_FLOAT16_INT8, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_shader_subgroup_extended_types, KHR_SHADER_SUBGROUP_EXTENDED_TYPES, DEVICE, 10.14, 13.0, 1.0)
MVK_EXTENSION(KHR_spirv_1_4, KHR_SPIRV_1_4, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_storage_buffer_storage_class, KHR_STORAGE_BUFFER_STORAGE_CLASS, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_surface, KHR_SURFACE, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_swapchain, KHR_SWAPCHAIN, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_swapchain_mutable_format, KHR_SWAPCHAIN_MUTABLE_FORMAT, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_timeline_semaphore, KHR_TIMELINE_SEMAPHORE, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_uniform_buffer_standard_layout, KHR_UNIFORM_BUFFER_STANDARD_LAYOUT, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_variable_pointers, KHR_VARIABLE_POINTERS, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_vulkan_memory_model, KHR_VULKAN_MEMORY_MODEL, DEVICE, MVK_NA, MVK_NA, MVK_NA)
MVK_EXTENSION(EXT_buffer_device_address, EXT_BUFFER_DEVICE_ADDRESS, DEVICE, 13.0, 16.0, 1.0)
MVK_EXTENSION(EXT_debug_marker, EXT_DEBUG_MARKER, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_debug_report, EXT_DEBUG_REPORT, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_debug_utils, EXT_DEBUG_UTILS, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_descriptor_indexing, EXT_DESCRIPTOR_INDEXING, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_external_memory_host, EXT_EXTERNAL_MEMORY_HOST, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_fragment_shader_interlock, EXT_FRAGMENT_SHADER_INTERLOCK, DEVICE, 10.13, 11.0, 1.0)
MVK_EXTENSION(EXT_hdr_metadata, EXT_HDR_METADATA, DEVICE, 10.15, MVK_NA, MVK_NA)
MVK_EXTENSION(EXT_host_query_reset, EXT_HOST_QUERY_RESET, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_image_robustness, EXT_IMAGE_ROBUSTNESS, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_inline_uniform_block, EXT_INLINE_UNIFORM_BLOCK, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_memory_budget, EXT_MEMORY_BUDGET, DEVICE, 10.13, 11.0, 1.0)
MVK_EXTENSION(EXT_metal_objects, EXT_METAL_OBJECTS, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_metal_surface, EXT_METAL_SURFACE, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_pipeline_creation_cache_control, EXT_PIPELINE_CREATION_CACHE_CONTROL, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_post_depth_coverage, EXT_POST_DEPTH_COVERAGE, DEVICE, 11.0, 11.0, 1.0)
MVK_EXTENSION(EXT_private_data, EXT_PRIVATE_DATA, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_robustness2, EXT_ROBUSTNESS_2, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_sample_locations, EXT_SAMPLE_LOCATIONS, DEVICE, 10.13, 11.0, 1.0)
MVK_EXTENSION(EXT_sampler_filter_minmax, EXT_SAMPLER_FILTER_MINMAX, DEVICE, MVK_NA, MVK_NA, MVK_NA)
MVK_EXTENSION(EXT_scalar_block_layout, EXT_SCALAR_BLOCK_LAYOUT, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_separate_stencil_usage, EXT_SEPARATE_STENCIL_USAGE, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_shader_atomic_float, EXT_SHADER_ATOMIC_FLOAT, DEVICE, 13.0, 16.0, 1.0)
MVK_EXTENSION(EXT_shader_stencil_export, EXT_SHADER_STENCIL_EXPORT, DEVICE, 10.14, 12.0, 1.0)
MVK_EXTENSION(EXT_shader_viewport_index_layer, EXT_SHADER_VIEWPORT_INDEX_LAYER, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_subgroup_size_control, EXT_SUBGROUP_SIZE_CONTROL, DEVICE, 10.14, 13.0, 1.0)
MVK_EXTENSION(EXT_surface_maintenance1, EXT_SURFACE_MAINTENANCE_1, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_swapchain_colorspace, EXT_SWAPCHAIN_COLOR_SPACE, INSTANCE, 10.11, 9.0, 1.0)
MVK_EXTENSION(EXT_swapchain_maintenance1, EXT_SWAPCHAIN_MAINTENANCE_1, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_texel_buffer_alignment, EXT_TEXEL_BUFFER_ALIGNMENT, DEVICE, 10.13, 11.0, 1.0)
MVK_EXTENSION(EXT_texture_compression_astc_hdr, EXT_TEXTURE_COMPRESSION_ASTC_HDR, DEVICE, 11.0, 13.0, 1.0)
MVK_EXTENSION(EXT_vertex_attribute_divisor, EXT_VERTEX_ATTRIBUTE_DIVISOR, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(AMD_draw_indirect_count, AMD_DRAW_INDIRECT_COUNT, DEVICE, MVK_NA, MVK_NA, MVK_NA)
MVK_EXTENSION(AMD_gpu_shader_half_float, AMD_GPU_SHADER_HALF_FLOAT, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(AMD_negative_viewport_height, AMD_NEGATIVE_VIEWPORT_HEIGHT, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(AMD_shader_image_load_store_lod, AMD_SHADER_IMAGE_LOAD_STORE_LOD, DEVICE, 11.0, 8.0, 1.0)
MVK_EXTENSION(AMD_shader_trinary_minmax, AMD_SHADER_TRINARY_MINMAX, DEVICE, 10.14, 12.0, 1.0)
MVK_EXTENSION(IMG_format_pvrtc, IMG_FORMAT_PVRTC, DEVICE, 11.0, 8.0, 1.0)
MVK_EXTENSION(INTEL_shader_integer_functions2, INTEL_SHADER_INTEGER_FUNCTIONS_2, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(GOOGLE_display_timing, GOOGLE_DISPLAY_TIMING, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(MVK_ios_surface, MVK_IOS_SURFACE, INSTANCE, MVK_NA, 8.0, MVK_NA)
MVK_EXTENSION(MVK_macos_surface, MVK_MACOS_SURFACE, INSTANCE, 10.11, MVK_NA, MVK_NA)
MVK_EXTENSION(MVK_moltenvk, MVK_MOLTENVK, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(NV_fragment_shader_barycentric, NV_FRAGMENT_SHADER_BARYCENTRIC, DEVICE, 10.15, 14.0, 1.0)
MVK_EXTENSION_LAST(NV_glsl_shader, NV_GLSL_SHADER, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_16bit_storage, KHR_16BIT_STORAGE, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_8bit_storage, KHR_8BIT_STORAGE, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_bind_memory2, KHR_BIND_MEMORY_2, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_buffer_device_address, KHR_BUFFER_DEVICE_ADDRESS, DEVICE, 13.0, 16.0, 1.0)
MVK_EXTENSION(KHR_copy_commands2, KHR_COPY_COMMANDS_2, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_create_renderpass2, KHR_CREATE_RENDERPASS_2, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_dedicated_allocation, KHR_DEDICATED_ALLOCATION, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_deferred_host_operations, KHR_DEFERRED_HOST_OPERATIONS, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_depth_stencil_resolve, KHR_DEPTH_STENCIL_RESOLVE, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_descriptor_update_template, KHR_DESCRIPTOR_UPDATE_TEMPLATE, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_device_group, KHR_DEVICE_GROUP, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_device_group_creation, KHR_DEVICE_GROUP_CREATION, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_draw_indirect_count, KHR_DRAW_INDIRECT_COUNT, DEVICE, MVK_NA, MVK_NA, MVK_NA)
MVK_EXTENSION(KHR_driver_properties, KHR_DRIVER_PROPERTIES, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_dynamic_rendering, KHR_DYNAMIC_RENDERING, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_external_fence, KHR_EXTERNAL_FENCE, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_external_fence_capabilities, KHR_EXTERNAL_FENCE_CAPABILITIES, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_external_memory, KHR_EXTERNAL_MEMORY, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_external_memory_capabilities, KHR_EXTERNAL_MEMORY_CAPABILITIES, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_external_semaphore, KHR_EXTERNAL_SEMAPHORE, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_external_semaphore_capabilities, KHR_EXTERNAL_SEMAPHORE_CAPABILITIES, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_fragment_shader_barycentric, KHR_FRAGMENT_SHADER_BARYCENTRIC, DEVICE, 10.15, 14.0, 1.0)
MVK_EXTENSION(KHR_get_memory_requirements2, KHR_GET_MEMORY_REQUIREMENTS_2, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_get_physical_device_properties2, KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_get_surface_capabilities2, KHR_GET_SURFACE_CAPABILITIES_2, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_imageless_framebuffer, KHR_IMAGELESS_FRAMEBUFFER, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_image_format_list, KHR_IMAGE_FORMAT_LIST, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_maintenance1, KHR_MAINTENANCE1, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_maintenance2, KHR_MAINTENANCE2, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_maintenance3, KHR_MAINTENANCE3, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_map_memory2, KHR_MAP_MEMORY_2, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_multiview, KHR_MULTIVIEW, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_portability_subset, KHR_PORTABILITY_SUBSET, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_push_descriptor, KHR_PUSH_DESCRIPTOR, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_relaxed_block_layout, KHR_RELAXED_BLOCK_LAYOUT, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_sampler_mirror_clamp_to_edge, KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE, DEVICE, 10.11, 14.0, 1.0)
MVK_EXTENSION(KHR_sampler_ycbcr_conversion, KHR_SAMPLER_YCBCR_CONVERSION, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_separate_depth_stencil_layouts, KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_shader_atomic_int64, KHR_SHADER_ATOMIC_INT64, DEVICE, MVK_NA, MVK_NA, MVK_NA)
MVK_EXTENSION(KHR_shader_draw_parameters, KHR_SHADER_DRAW_PARAMETERS, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_shader_float_controls, KHR_SHADER_FLOAT_CONTROLS, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_shader_float16_int8, KHR_SHADER_FLOAT16_INT8, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_shader_subgroup_extended_types, KHR_SHADER_SUBGROUP_EXTENDED_TYPES, DEVICE, 10.14, 13.0, 1.0)
MVK_EXTENSION(KHR_spirv_1_4, KHR_SPIRV_1_4, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_storage_buffer_storage_class, KHR_STORAGE_BUFFER_STORAGE_CLASS, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_surface, KHR_SURFACE, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_swapchain, KHR_SWAPCHAIN, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_swapchain_mutable_format, KHR_SWAPCHAIN_MUTABLE_FORMAT, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_timeline_semaphore, KHR_TIMELINE_SEMAPHORE, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_uniform_buffer_standard_layout, KHR_UNIFORM_BUFFER_STANDARD_LAYOUT, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_variable_pointers, KHR_VARIABLE_POINTERS, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(KHR_vulkan_memory_model, KHR_VULKAN_MEMORY_MODEL, DEVICE, MVK_NA, MVK_NA, MVK_NA)
MVK_EXTENSION(EXT_buffer_device_address, EXT_BUFFER_DEVICE_ADDRESS, DEVICE, 13.0, 16.0, 1.0)
MVK_EXTENSION(EXT_debug_marker, EXT_DEBUG_MARKER, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_debug_report, EXT_DEBUG_REPORT, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_debug_utils, EXT_DEBUG_UTILS, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_descriptor_indexing, EXT_DESCRIPTOR_INDEXING, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_external_memory_host, EXT_EXTERNAL_MEMORY_HOST, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_fragment_shader_interlock, EXT_FRAGMENT_SHADER_INTERLOCK, DEVICE, 10.13, 11.0, 1.0)
MVK_EXTENSION(EXT_hdr_metadata, EXT_HDR_METADATA, DEVICE, 10.15, MVK_NA, MVK_NA)
MVK_EXTENSION(EXT_host_query_reset, EXT_HOST_QUERY_RESET, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_image_robustness, EXT_IMAGE_ROBUSTNESS, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_inline_uniform_block, EXT_INLINE_UNIFORM_BLOCK, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_memory_budget, EXT_MEMORY_BUDGET, DEVICE, 10.13, 11.0, 1.0)
MVK_EXTENSION(EXT_metal_objects, EXT_METAL_OBJECTS, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_metal_surface, EXT_METAL_SURFACE, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_pipeline_creation_cache_control, EXT_PIPELINE_CREATION_CACHE_CONTROL, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_post_depth_coverage, EXT_POST_DEPTH_COVERAGE, DEVICE, 11.0, 11.0, 1.0)
MVK_EXTENSION(EXT_private_data, EXT_PRIVATE_DATA, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_robustness2, EXT_ROBUSTNESS_2, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_sample_locations, EXT_SAMPLE_LOCATIONS, DEVICE, 10.13, 11.0, 1.0)
MVK_EXTENSION(EXT_sampler_filter_minmax, EXT_SAMPLER_FILTER_MINMAX, DEVICE, MVK_NA, MVK_NA, MVK_NA)
MVK_EXTENSION(EXT_scalar_block_layout, EXT_SCALAR_BLOCK_LAYOUT, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_separate_stencil_usage, EXT_SEPARATE_STENCIL_USAGE, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_shader_atomic_float, EXT_SHADER_ATOMIC_FLOAT, DEVICE, 13.0, 16.0, 1.0)
MVK_EXTENSION(EXT_shader_demote_to_helper_invocation, EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION, DEVICE, 11.0, 14.0, 1.0)
MVK_EXTENSION(EXT_shader_stencil_export, EXT_SHADER_STENCIL_EXPORT, DEVICE, 10.14, 12.0, 1.0)
MVK_EXTENSION(EXT_shader_viewport_index_layer, EXT_SHADER_VIEWPORT_INDEX_LAYER, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_subgroup_size_control, EXT_SUBGROUP_SIZE_CONTROL, DEVICE, 10.14, 13.0, 1.0)
MVK_EXTENSION(EXT_surface_maintenance1, EXT_SURFACE_MAINTENANCE_1, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_swapchain_colorspace, EXT_SWAPCHAIN_COLOR_SPACE, INSTANCE, 10.11, 9.0, 1.0)
MVK_EXTENSION(EXT_swapchain_maintenance1, EXT_SWAPCHAIN_MAINTENANCE_1, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(EXT_texel_buffer_alignment, EXT_TEXEL_BUFFER_ALIGNMENT, DEVICE, 10.13, 11.0, 1.0)
MVK_EXTENSION(EXT_texture_compression_astc_hdr, EXT_TEXTURE_COMPRESSION_ASTC_HDR, DEVICE, 11.0, 13.0, 1.0)
MVK_EXTENSION(EXT_vertex_attribute_divisor, EXT_VERTEX_ATTRIBUTE_DIVISOR, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(AMD_draw_indirect_count, AMD_DRAW_INDIRECT_COUNT, DEVICE, MVK_NA, MVK_NA, MVK_NA)
MVK_EXTENSION(AMD_gpu_shader_half_float, AMD_GPU_SHADER_HALF_FLOAT, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(AMD_negative_viewport_height, AMD_NEGATIVE_VIEWPORT_HEIGHT, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(AMD_shader_image_load_store_lod, AMD_SHADER_IMAGE_LOAD_STORE_LOD, DEVICE, 11.0, 8.0, 1.0)
MVK_EXTENSION(AMD_shader_trinary_minmax, AMD_SHADER_TRINARY_MINMAX, DEVICE, 10.14, 12.0, 1.0)
MVK_EXTENSION(IMG_format_pvrtc, IMG_FORMAT_PVRTC, DEVICE, 11.0, 8.0, 1.0)
MVK_EXTENSION(INTEL_shader_integer_functions2, INTEL_SHADER_INTEGER_FUNCTIONS_2, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(GOOGLE_display_timing, GOOGLE_DISPLAY_TIMING, DEVICE, 10.11, 8.0, 1.0)
MVK_EXTENSION(MVK_ios_surface, MVK_IOS_SURFACE, INSTANCE, MVK_NA, 8.0, MVK_NA)
MVK_EXTENSION(MVK_macos_surface, MVK_MACOS_SURFACE, INSTANCE, 10.11, MVK_NA, MVK_NA)
MVK_EXTENSION(MVK_moltenvk, MVK_MOLTENVK, INSTANCE, 10.11, 8.0, 1.0)
MVK_EXTENSION(NV_fragment_shader_barycentric, NV_FRAGMENT_SHADER_BARYCENTRIC, DEVICE, 10.15, 14.0, 1.0)
MVK_EXTENSION_LAST(NV_glsl_shader, NV_GLSL_SHADER, DEVICE, 10.11, 8.0, 1.0)
#undef MVK_EXTENSION
#undef MVK_EXTENSION_LAST

View File

@ -19,7 +19,7 @@
#include "MVKExtensions.h"
#include "MVKFoundation.h"
#include "MVKOSExtensions.h"
#include "MVKEnvironment.h"
#include "mvk_deprecated_api.h"
#include <vulkan/vulkan_ios.h>
#include <vulkan/vulkan_macos.h>
@ -53,9 +53,6 @@ static bool mvkIsSupportedOnPlatform(VkExtensionProperties* pProperties) {
auto advExtns = mvkConfig().advertiseExtensions;
if ( !mvkIsAnyFlagEnabled(advExtns, MVK_CONFIG_ADVERTISE_EXTENSIONS_ALL) ) {
#define MVK_NA kMVKOSVersionUnsupported
if (mvkIsAnyFlagEnabled(advExtns, MVK_CONFIG_ADVERTISE_EXTENSIONS_MOLTENVK)) {
MVK_EXTENSION_MIN_OS(MVK_MOLTENVK, 10.11, 8.0, 1.0)
}
if (mvkIsAnyFlagEnabled(advExtns, MVK_CONFIG_ADVERTISE_EXTENSIONS_WSI)) {
MVK_EXTENSION_MIN_OS(EXT_METAL_SURFACE, 10.11, 8.0, 1.0)
MVK_EXTENSION_MIN_OS(MVK_IOS_SURFACE, MVK_NA, 8.0, 1.0)
@ -168,6 +165,11 @@ VkResult MVKExtensionList::enable(uint32_t count, const char* const* names, cons
result = reportError(VK_ERROR_EXTENSION_NOT_PRESENT, "Vulkan extension %s is not supported.", extnName);
} else {
enable(extnName);
if (mvkStringsAreEqual(extnName, VK_MVK_MOLTENVK_EXTENSION_NAME)) {
reportMessage(MVK_CONFIG_LOG_LEVEL_WARNING, "Extension %s is deprecated. For access to Metal objects, use extension %s. "
"For MoltenVK configuration, use the global vkGetMoltenVKConfigurationMVK() and vkSetMoltenVKConfigurationMVK() functions.",
VK_MVK_MOLTENVK_EXTENSION_NAME, VK_EXT_METAL_OBJECTS_EXTENSION_NAME);
}
}
}
return result;

View File

@ -17,7 +17,6 @@
*/
#include "MVKLayers.h"
#include "MVKEnvironment.h"
#include "MVKFoundation.h"
#include <mutex>

View File

@ -17,9 +17,7 @@
*/
#include "MVKGPUCapture.h"
#include "MVKQueue.h"
#include "MVKOSExtensions.h"
#include "MVKEnvironment.h"
#pragma mark -

View File

@ -18,7 +18,7 @@
#pragma once
#include "vk_mvk_moltenvk.h"
#include "MVKEnvironment.h"
#include <string>
#include <atomic>

View File

@ -21,7 +21,7 @@
#include "MVKInstance.h"
#include "MVKFoundation.h"
#include "MVKOSExtensions.h"
#include <cxxabi.h>
#include "MVKStrings.h"
using namespace std;
@ -44,13 +44,7 @@ static const char* getReportingLevelString(MVKConfigLogLevel logLevel) {
#pragma mark -
#pragma mark MVKBaseObject
string MVKBaseObject::getClassName() {
int status;
char* demangled = abi::__cxa_demangle(typeid(*this).name(), 0, 0, &status);
string clzName = demangled;
free(demangled);
return clzName;
}
string MVKBaseObject::getClassName() { return mvk::getTypeName(this); }
void MVKBaseObject::reportMessage(MVKConfigLogLevel logLevel, const char* format, ...) {
va_list args;

View File

@ -20,7 +20,6 @@
#pragma once
#include "MVKEnvironment.h"
#include <vector>
#include <string>

View File

@ -24,6 +24,8 @@
#include <algorithm>
#include <simd/simd.h>
#import <Foundation/Foundation.h>
using namespace std;
using simd::float3;
@ -148,7 +150,9 @@ static size_t mvkCompressDecompress(const uint8_t* srcBytes, size_t srcSize,
MVKConfigCompressionAlgorithm compAlgo,
bool isCompressing) {
size_t dstByteCount = 0;
if (compAlgo != MVK_CONFIG_COMPRESSION_ALGORITHM_NONE) {
bool compressionSupported = ([NSData instancesRespondToSelector: @selector(compressedDataUsingAlgorithm:error:)] &&
[NSData instancesRespondToSelector: @selector(decompressedDataUsingAlgorithm:error:)]);
if (compressionSupported && compAlgo != MVK_CONFIG_COMPRESSION_ALGORITHM_NONE) {
@autoreleasepool {
NSDataCompressionAlgorithm sysCompAlgo = getSystemCompressionAlgo(compAlgo);
NSData* srcData = [NSData dataWithBytesNoCopy: (void*)srcBytes length: srcSize freeWhenDone: NO];

View File

@ -63,6 +63,7 @@ static void mvkInitConfigFromEnvVars() {
MVK_SET_FROM_ENV_OR_BUILD_BOOL (evCfg.resumeLostDevice, MVK_CONFIG_RESUME_LOST_DEVICE);
MVK_SET_FROM_ENV_OR_BUILD_INT32 (evCfg.useMetalArgumentBuffers, MVK_CONFIG_USE_METAL_ARGUMENT_BUFFERS);
MVK_SET_FROM_ENV_OR_BUILD_INT32 (evCfg.shaderSourceCompressionAlgorithm, MVK_CONFIG_SHADER_COMPRESSION_ALGORITHM);
MVK_SET_FROM_ENV_OR_BUILD_BOOL (evCfg.shouldMaximizeConcurrentCompilation, MVK_CONFIG_SHOULD_MAXIMIZE_CONCURRENT_COMPILATION);
// Deprecated legacy VkSemaphore MVK_ALLOW_METAL_FENCES and MVK_ALLOW_METAL_EVENTS config.
// Legacy MVK_ALLOW_METAL_EVENTS is covered by MVK_CONFIG_VK_SEMAPHORE_SUPPORT_STYLE,

View File

@ -20,8 +20,9 @@
#pragma once
#include "MVKCommonEnvironment.h"
#include "mvk_vulkan.h"
#include "mvk_config.h"
#include "MVKLogging.h"
#include "vk_mvk_moltenvk.h"
// Expose MoltenVK Apple surface extension functionality
@ -294,3 +295,11 @@ void mvkSetConfig(const MVKConfiguration& mvkConfig);
#ifndef MVK_CONFIG_SHADER_COMPRESSION_ALGORITHM
# define MVK_CONFIG_SHADER_COMPRESSION_ALGORITHM MVK_CONFIG_COMPRESSION_ALGORITHM_NONE
#endif
/**
 * Maximize the number of concurrently executing compilation tasks.
 * This functionality requires macOS 13.3 or later. Disabled by default.
*/
#ifndef MVK_CONFIG_SHOULD_MAXIMIZE_CONCURRENT_COMPILATION
# define MVK_CONFIG_SHOULD_MAXIMIZE_CONCURRENT_COMPILATION 0
#endif

View File

@ -20,8 +20,7 @@
#pragma once
#include "MVKCommonEnvironment.h"
#include "mvk_vulkan.h"
#include "MVKEnvironment.h"
#include <algorithm>
#include <cassert>
#include <limits>
@ -218,6 +217,44 @@ static constexpr uint64_t mvkAlignByteCount(uint64_t byteCount, uint64_t byteAli
return mvkAlignByteRef(byteCount, byteAlignment, alignDown);
}
/**
* Compile time indication if the struct contains a specific member.
*
* If S::mbr is well-formed because the struct contains that member, the decltype() and
* comma operator together trigger a true_type, otherwise it falls back to a false_type.
*
* Credit to: https://fekir.info/post/detect-member-variables/
*/
#define mvk_define_has_member(mbr) \
template <typename T, typename = void> struct mvk_has_##mbr : std::false_type {}; \
template <typename T> struct mvk_has_##mbr<T, decltype((void)T::mbr, void())> : std::true_type {};
mvk_define_has_member(pNext); // Defines the mvk_has_pNext<T> type trait (true_type when T has a pNext member).
/** Returns the address of the first member of a structure, which is just the address of the structure. */
template <typename S>
void* mvkGetAddressOfFirstMember(const S* pStruct, std::false_type){
return (void*)pStruct;
}
/**
 * Returns the address of the first member of a Vulkan structure containing a pNext member.
 * The first member is the one after the pNext member.
 */
template <class S>
void* mvkGetAddressOfFirstMember(const S* pStruct, std::true_type){
// Steps past pNext by pointer arithmetic; assumes no padding between pNext
// and the following member — NOTE(review): holds for standard Vulkan structs, confirm for any custom S.
return (void*)(&(pStruct->pNext) + 1);
}
/**
 * Returns the address of the first member of a structure. If the structure is a Vulkan
 * structure containing a pNext member, the first member is the one after the pNext member.
 * Dispatches at compile time to one of the two tag overloads above via mvk_has_pNext<S>.
 */
template <class S>
void* mvkGetAddressOfFirstMember(const S* pStruct){
return mvkGetAddressOfFirstMember(pStruct, mvk_has_pNext<S>{});
}
/**
* Reverses the order of the rows in the specified data block.
* The transformation is performed in-place.
@ -484,7 +521,7 @@ void mvkReleaseContainerContents(C& container) {
/** Returns whether the container contains an item equal to the value. */
template<class C, class T>
bool contains(C& container, const T& val) {
bool mvkContains(C& container, const T& val) {
for (const T& cVal : container) { if (cVal == val) { return true; } }
return false;
}
@ -553,12 +590,13 @@ bool mvkAreEqual(const T* pV1, const T* pV2, size_t count = 1) {
}
/**
* If both pV1 and pV2 are not null, returns whether the contents of the two strings are equal,
* otherwise returns false. This functionality is different than the char version of mvkAreEqual(),
* Returns whether the contents of the two strings are equal, otherwise returns false.
* This functionality is different than the char version of mvkAreEqual(),
* which works on individual chars or char arrays, not strings.
* Returns false if either string is null.
*/
static constexpr bool mvkStringsAreEqual(const char* pV1, const char* pV2, size_t count = 1) {
return (pV1 && pV2) ? (strcmp(pV1, pV2) == 0) : false;
return pV1 && pV2 && (pV1 == pV2 || strcmp(pV1, pV2) == 0);
}
/**

View File

@ -17,10 +17,10 @@
*/
#include "MVKEnvironment.h"
#include "MVKWatermark.h"
#include "MVKOSExtensions.h"
#include "MTLTextureDescriptor+MoltenVK.h"
#include "MVKEnvironment.h"
/** The structure to hold shader uniforms. */

View File

@ -1,5 +1,5 @@
/*
* vk_mvk_moltenvk.mm
* mvk_api.mm
*
* Copyright (c) 2015-2023 The Brenwill Workshop Ltd. (http://www.brenwill.com)
*
@ -17,9 +17,10 @@
*/
#include "MVKInstance.h"
#include "vk_mvk_moltenvk.h"
#include "MVKEnvironment.h"
#include "mvk_private_api.h"
#include "mvk_deprecated_api.h"
#include "MVKInstance.h"
#include "MVKSwapchain.h"
#include "MVKImage.h"
#include "MVKBuffer.h"
@ -30,6 +31,7 @@
using namespace std;
// Copies the contents of a struct that might grow larger across MoltenVK versions.
// If pSrc and pDst are not null, copies at most *pCopySize bytes from the contents of the
// source struct to the destination struct, and sets *pCopySize to the number of bytes copied,
// which is the smaller of the original value of *pCopySize and the actual size of the struct.
@ -37,7 +39,7 @@ using namespace std;
// the struct, or VK_INCOMPLETE otherwise. If either pSrc or pDst are null, sets the value
// of *pCopySize to the size of the struct and returns VK_SUCCESS.
template<typename S>
VkResult mvkCopy(S* pDst, const S* pSrc, size_t* pCopySize) {
VkResult mvkCopyGrowingStruct(S* pDst, const S* pSrc, size_t* pCopySize) {
if (pSrc && pDst) {
size_t origSize = *pCopySize;
*pCopySize = std::min(origSize, sizeof(S));
@ -49,12 +51,16 @@ VkResult mvkCopy(S* pDst, const S* pSrc, size_t* pCopySize) {
}
}
#pragma mark -
#pragma mark mvk_config.h
MVK_PUBLIC_VULKAN_SYMBOL VkResult vkGetMoltenVKConfigurationMVK(
VkInstance ignored,
MVKConfiguration* pConfiguration,
size_t* pConfigurationSize) {
return mvkCopy(pConfiguration, &mvkConfig(), pConfigurationSize);
return mvkCopyGrowingStruct(pConfiguration, &mvkConfig(), pConfigurationSize);
}
MVK_PUBLIC_VULKAN_SYMBOL VkResult vkSetMoltenVKConfigurationMVK(
@ -64,18 +70,22 @@ MVK_PUBLIC_VULKAN_SYMBOL VkResult vkSetMoltenVKConfigurationMVK(
// Start with copy of current config, in case incoming is not fully copied
MVKConfiguration mvkCfg = mvkConfig();
VkResult rslt = mvkCopy(&mvkCfg, pConfiguration, pConfigurationSize);
VkResult rslt = mvkCopyGrowingStruct(&mvkCfg, pConfiguration, pConfigurationSize);
mvkSetConfig(mvkCfg);
return rslt;
}
#pragma mark -
#pragma mark mvk_private_api.h
MVK_PUBLIC_VULKAN_SYMBOL VkResult vkGetPhysicalDeviceMetalFeaturesMVK(
VkPhysicalDevice physicalDevice,
MVKPhysicalDeviceMetalFeatures* pMetalFeatures,
size_t* pMetalFeaturesSize) {
MVKPhysicalDevice* mvkPD = MVKPhysicalDevice::getMVKPhysicalDevice(physicalDevice);
return mvkCopy(pMetalFeatures, mvkPD->getMetalFeatures(), pMetalFeaturesSize);
return mvkCopyGrowingStruct(pMetalFeatures, mvkPD->getMetalFeatures(), pMetalFeaturesSize);
}
MVK_PUBLIC_VULKAN_SYMBOL VkResult vkGetPerformanceStatisticsMVK(
@ -85,9 +95,13 @@ MVK_PUBLIC_VULKAN_SYMBOL VkResult vkGetPerformanceStatisticsMVK(
MVKPerformanceStatistics mvkPerf;
MVKDevice::getMVKDevice(device)->getPerformanceStatistics(&mvkPerf);
return mvkCopy(pPerf, &mvkPerf, pPerfSize);
return mvkCopyGrowingStruct(pPerf, &mvkPerf, pPerfSize);
}
#pragma mark -
#pragma mark mvk_deprecated_api.h
MVK_PUBLIC_VULKAN_SYMBOL void vkGetVersionStringsMVK(
char* pMoltenVersionStringBuffer,
uint32_t moltenVersionStringBufferLength,

View File

@ -492,8 +492,16 @@ MVK_PUBLIC_VULKAN_SYMBOL VkResult vkMapMemory(
void** ppData) {
MVKTraceVulkanCallStart();
VkMemoryMapInfoKHR mapInfo = {};
mapInfo.sType = VK_STRUCTURE_TYPE_MEMORY_MAP_INFO_KHR;
mapInfo.pNext = nullptr;
mapInfo.flags = flags;
mapInfo.memory = mem;
mapInfo.offset = offset;
mapInfo.size = size;
MVKDeviceMemory* mvkMem = (MVKDeviceMemory*)mem;
VkResult rslt = mvkMem->map(offset, size, flags, ppData);
VkResult rslt = mvkMem->map(&mapInfo, ppData);
MVKTraceVulkanCallEnd();
return rslt;
}
@ -503,8 +511,13 @@ MVK_PUBLIC_VULKAN_SYMBOL void vkUnmapMemory(
VkDeviceMemory mem) {
MVKTraceVulkanCallStart();
VkMemoryUnmapInfoKHR unmapInfo = {};
unmapInfo.sType = VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO_KHR;
unmapInfo.pNext = nullptr;
unmapInfo.flags = 0;
unmapInfo.memory = mem;
MVKDeviceMemory* mvkMem = (MVKDeviceMemory*)mem;
mvkMem->unmap();
mvkMem->unmap(&unmapInfo);
MVKTraceVulkanCallEnd();
}
@ -2640,13 +2653,67 @@ MVK_PUBLIC_VULKAN_CORE_ALIAS(vkCmdBeginRenderPass2, KHR);
MVK_PUBLIC_VULKAN_CORE_ALIAS(vkCmdNextSubpass2, KHR);
MVK_PUBLIC_VULKAN_CORE_ALIAS(vkCmdEndRenderPass2, KHR);
#pragma mark -
#pragma mark VK_KHR_dynamic_rendering extension
#pragma mark VK_KHR_deferred_host_operations extension
MVK_PUBLIC_VULKAN_CORE_ALIAS(vkCmdBeginRendering, KHR);
MVK_PUBLIC_VULKAN_CORE_ALIAS(vkCmdEndRendering, KHR);
MVK_PUBLIC_VULKAN_SYMBOL VkResult vkCreateDeferredOperationKHR(
	VkDevice device,
	const VkAllocationCallbacks* pAllocator,
	VkDeferredOperationKHR* pDeferredOperation) {

	MVKTraceVulkanCallStart();
	// Create the deferred operation on the owning device, and hand the opaque handle back to the caller.
	MVKDevice* mvkDevice = MVKDevice::getMVKDevice(device);
	MVKDeferredOperation* mvkDefOp = mvkDevice->createDeferredOperation(pAllocator);
	*pDeferredOperation = (VkDeferredOperationKHR)mvkDefOp;
	// Surface any error that occurred while configuring the new object.
	VkResult result = mvkDefOp->getConfigurationResult();
	MVKTraceVulkanCallEnd();
	return result;
}
MVK_PUBLIC_VULKAN_SYMBOL VkResult vkGetDeferredOperationResultKHR(
	VkDevice device,
	VkDeferredOperationKHR operation) {

	MVKTraceVulkanCallStart();
	// The Vulkan handle is the MoltenVK object; query its current result directly.
	VkResult result = ((MVKDeferredOperation*)operation)->getResult();
	MVKTraceVulkanCallEnd();
	return result;
}
MVK_PUBLIC_VULKAN_SYMBOL VkResult vkDeferredOperationJoinKHR(
	VkDevice device,
	VkDeferredOperationKHR operation) {

	MVKTraceVulkanCallStart();
	// Join the calling thread to the deferred operation's work and report its outcome.
	VkResult result = ((MVKDeferredOperation*)operation)->join();
	MVKTraceVulkanCallEnd();
	return result;
}
MVK_PUBLIC_VULKAN_SYMBOL uint32_t vkGetDeferredOperationMaxConcurrencyKHR(
	VkDevice device,
	VkDeferredOperationKHR operation) {

	MVKTraceVulkanCallStart();
	// Ask the deferred operation how many threads can usefully join it.
	uint32_t maxConcurrency = ((MVKDeferredOperation*)operation)->getMaxConcurrency();
	MVKTraceVulkanCallEnd();
	return maxConcurrency;
}
MVK_PUBLIC_VULKAN_SYMBOL void vkDestroyDeferredOperationKHR(
	VkDevice device,
	VkDeferredOperationKHR operation,
	const VkAllocationCallbacks* pAllocator) {

	MVKTraceVulkanCallStart();
	// Destruction is routed through the owning device so the allocator callbacks are honored.
	MVKDevice* mvkDevice = MVKDevice::getMVKDevice(device);
	mvkDevice->destroyDeferredOperation((MVKDeferredOperation*)operation, pAllocator);
	MVKTraceVulkanCallEnd();
}
#pragma mark -
#pragma mark VK_KHR_descriptor_update_template extension
@ -2676,6 +2743,11 @@ MVK_PUBLIC_VULKAN_CORE_ALIAS(vkEnumeratePhysicalDeviceGroups, KHR);
MVK_PUBLIC_VULKAN_CORE_ALIAS(vkCmdDrawIndexedIndirectCount, KHR);
MVK_PUBLIC_VULKAN_CORE_ALIAS(vkCmdDrawIndirectCount, KHR);
#pragma mark -
#pragma mark VK_KHR_dynamic_rendering extension
MVK_PUBLIC_VULKAN_CORE_ALIAS(vkCmdBeginRendering, KHR);
MVK_PUBLIC_VULKAN_CORE_ALIAS(vkCmdEndRendering, KHR);
#pragma mark -
#pragma mark VK_KHR_external_fence_capabilities extension
@ -2727,6 +2799,33 @@ MVK_PUBLIC_VULKAN_CORE_ALIAS(vkTrimCommandPool, KHR);
MVK_PUBLIC_VULKAN_CORE_ALIAS(vkGetDescriptorSetLayoutSupport, KHR);
#pragma mark -
#pragma mark VK_KHR_map_memory2 extension
MVK_PUBLIC_VULKAN_SYMBOL VkResult vkMapMemory2KHR(
	VkDevice device,
	const VkMemoryMapInfoKHR* pMemoryMapInfo,
	void** ppData) {

	MVKTraceVulkanCallStart();
	// Delegate the mapping to the memory object identified in the map info.
	MVKDeviceMemory* mvkDevMem = (MVKDeviceMemory*)pMemoryMapInfo->memory;
	VkResult result = mvkDevMem->map(pMemoryMapInfo, ppData);
	MVKTraceVulkanCallEnd();
	return result;
}
MVK_PUBLIC_VULKAN_SYMBOL VkResult vkUnmapMemory2KHR(
	VkDevice device,
	const VkMemoryUnmapInfoKHR* pMemoryUnmapInfo) {

	MVKTraceVulkanCallStart();
	// Delegate the unmapping to the memory object identified in the unmap info.
	MVKDeviceMemory* mvkDevMem = (MVKDeviceMemory*)pMemoryUnmapInfo->memory;
	VkResult result = mvkDevMem->unmap(pMemoryUnmapInfo);
	MVKTraceVulkanCallEnd();
	return result;
}
#pragma mark -
#pragma mark VK_KHR_push_descriptor extension

View File

@ -344,7 +344,7 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverter::convert(SPIRVToMSLConversionConfigur
#ifndef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS
} catch (CompilerError& ex) {
string errMsg("MSL conversion error: ");
string errMsg("SPIR-V to MSL conversion error: ");
errMsg += ex.what();
logError(conversionResult.resultLog, errMsg.data());
if (shouldLogMSL && pMSLCompiler) {
@ -366,6 +366,7 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverter::convert(SPIRVToMSLConversionConfigur
conversionResult.resultInfo.needsInputThreadgroupMem = pMSLCompiler && pMSLCompiler->needs_input_threadgroup_mem();
conversionResult.resultInfo.needsDispatchBaseBuffer = pMSLCompiler && pMSLCompiler->needs_dispatch_base_buffer();
conversionResult.resultInfo.needsViewRangeBuffer = pMSLCompiler && pMSLCompiler->needs_view_mask_buffer();
conversionResult.resultInfo.usesPhysicalStorageBufferAddressesCapability = usesPhysicalStorageBufferAddressesCapability(pMSLCompiler);
// When using Metal argument buffers, if the shader is provided with dynamic buffer offsets,
// then it needs a buffer to hold these dynamic offsets.
@ -445,6 +446,7 @@ void SPIRVToMSLConverter::logMsg(string& log, const char* logMsg) {
// Appends the error text to the result log, and returns false to indicate an error.
bool SPIRVToMSLConverter::logError(string& log, const char* errMsg) {
logMsg(log, errMsg);
fprintf(stderr, "[mvk-error] %s\n", errMsg);
return false;
}
@ -533,3 +535,15 @@ void SPIRVToMSLConverter::populateEntryPoint(Compiler* pCompiler,
populateWorkgroupDimension(wgSize.height, spvEP.workgroup_size.y, heightSC);
populateWorkgroupDimension(wgSize.depth, spvEP.workgroup_size.z, depthSC);
}
bool SPIRVToMSLConverter::usesPhysicalStorageBufferAddressesCapability(Compiler* pCompiler) {
	// No compiler means there are no declared capabilities to inspect.
	if ( !pCompiler ) { return false; }

	// Scan the capabilities declared by the shader module for PhysicalStorageBufferAddresses.
	for (const auto& cap : pCompiler->get_declared_capabilities()) {
		if (cap == CapabilityPhysicalStorageBufferAddresses) { return true; }
	}
	return false;
}

View File

@ -244,6 +244,7 @@ namespace mvk {
bool needsInputThreadgroupMem = false;
bool needsDispatchBaseBuffer = false;
bool needsViewRangeBuffer = false;
bool usesPhysicalStorageBufferAddressesCapability = false;
} SPIRVToMSLConversionResultInfo;
@ -300,6 +301,7 @@ namespace mvk {
void writeSPIRVToFile(std::string spvFilepath, std::string& log);
void populateWorkgroupDimension(SPIRVWorkgroupSizeDimension& wgDim, uint32_t size, SPIRV_CROSS_NAMESPACE::SpecializationConstant& spvSpecConst);
void populateEntryPoint(SPIRV_CROSS_NAMESPACE::Compiler* pCompiler, SPIRVToMSLConversionOptions& options, SPIRVEntryPoint& entryPoint);
bool usesPhysicalStorageBufferAddressesCapability(SPIRV_CROSS_NAMESPACE::Compiler* pCompiler);
std::vector<uint32_t> _spirv;
};

View File

@ -18,8 +18,10 @@ Copyright (c) 2015-2023 [The Brenwill Workshop Ltd.](http://www.brenwill.com)
Table of Contents
-----------------
- [Developing Vulkan Applications on *macOS, iOS, and tvOS*](#developing_vulkan)
- [Introduction to **MoltenVK**](#intro)
- [Developing Vulkan Applications on *macOS, iOS, and tvOS*](#developing_vulkan)
- [Using the *Vulkan SDK*](#sdk)
- [Using MoltenVK Directly](#download)
- [Fetching **MoltenVK** Source Code](#install)
- [Building **MoltenVK**](#building)
- [Running **MoltenVK** Demo Applications](#demos)
@ -30,41 +32,6 @@ Table of Contents
<a name="developing_vulkan"></a>
Developing Vulkan Applications for *macOS, iOS, and tvOS*
---------------------------------------------------------
The recommended method for developing a *Vulkan* application for *macOS* is to use the
[*Vulkan SDK*](https://vulkan.lunarg.com/sdk/home).
The *Vulkan SDK* includes a **MoltenVK** runtime library for *macOS*. *Vulkan* is a layered
architecture that allows applications to add additional functionality without modifying the
application itself. The *Validation Layers* included in the *Vulkan SDK* are an essential debugging
tool for application developers because they identify inappropriate use of the *Vulkan API*.
If you are developing a *Vulkan* application for *macOS*, it is highly recommended that you use the
[*Vulkan SDK*](https://vulkan.lunarg.com/sdk/home) and the **MoltenVK** library included in it.
Refer to the *Vulkan SDK [Getting Started](https://vulkan.lunarg.com/doc/sdk/latest/mac/getting_started.html)*
document for more info.
Because **MoltenVK** supports the `VK_KHR_portability_subset` extension, when using the
*Vulkan Loader* from the *Vulkan SDK* to run **MoltenVK** on *macOS*, the *Vulkan Loader*
will only include **MoltenVK** `VkPhysicalDevices` in the list returned by
`vkEnumeratePhysicalDevices()` if the `VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR`
flag is enabled in `vkCreateInstance()`. See the description of the `VK_KHR_portability_enumeration`
extension in the *Vulkan* specification for more information about the use of the
`VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR` flag.
If you are developing a *Vulkan* application for *iOS* or *tvOS*, or are developing a *Vulkan*
application for *macOS* and want to use a different version of the **MoltenVK** runtime library
provided in the *macOS Vulkan SDK*, you can use this document to learn how to build a **MoltenVK**
runtime library from source code.
To learn how to integrate the **MoltenVK** runtime library into a game or application,
see the [`MoltenVK_Runtime_UserGuide.md`](Docs/MoltenVK_Runtime_UserGuide.md)
document in the `Docs` directory.
<a name="intro"></a>
Introduction to MoltenVK
------------------------
@ -99,6 +66,52 @@ The **MoltenVK** runtime package contains two products:
<a name="developing_vulkan"></a>
Developing *Vulkan* Applications for *macOS, iOS, and tvOS*
---------------------------------------------------------
<a name="sdk"></a>
### Using the *Vulkan SDK*
The recommended method for developing a *Vulkan* application for *macOS* is to use the
[*Vulkan SDK*](https://vulkan.lunarg.com/sdk/home).
The *Vulkan SDK* includes a **MoltenVK** runtime library for *macOS*. *Vulkan* is a layered
architecture that allows applications to add additional functionality without modifying the
application itself. The *Validation Layers* included in the *Vulkan SDK* are an essential debugging
tool for application developers because they identify inappropriate use of the *Vulkan API*.
If you are developing a *Vulkan* application for *macOS*, it is highly recommended that you use the
[*Vulkan SDK*](https://vulkan.lunarg.com/sdk/home) and the **MoltenVK** library included in it.
Refer to the *Vulkan SDK [Getting Started](https://vulkan.lunarg.com/doc/sdk/latest/mac/getting_started.html)*
document for more info.
Because **MoltenVK** supports the `VK_KHR_portability_subset` extension, when using the
*Vulkan Loader* from the *Vulkan SDK* to run **MoltenVK** on *macOS*, the *Vulkan Loader*
will only include **MoltenVK** `VkPhysicalDevices` in the list returned by
`vkEnumeratePhysicalDevices()` if the `VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR`
flag is enabled in `vkCreateInstance()`. See the description of the `VK_KHR_portability_enumeration`
extension in the *Vulkan* specification for more information about the use of the
`VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR` flag.
<a name="download"></a>
### Using MoltenVK Directly
If you are developing a *Vulkan* application for *iOS* or *tvOS*, or are developing a
*Vulkan* application for *macOS* and want to use a different version or build of the
**MoltenVK** runtime library than provided in the *macOS Vulkan SDK*, you can access
a pre-built MoltenVK binary library from the **MoltenVK** repository, by
[selecting a repository commit from the list](https://github.com/KhronosGroup/MoltenVK/actions),
and downloading the associated **MoltenVK** runtime library artifact.
Finally, if you want a customized build of **MoltenVK**, you can follow the [instructions below](#install)
to create a **MoltenVK** runtime library by fetching and building the **MoltenVK** source code.
To learn how to integrate the **MoltenVK** runtime library into a game or application,
see the [`MoltenVK_Runtime_UserGuide.md`](Docs/MoltenVK_Runtime_UserGuide.md)
document in the `Docs` directory.
<a name="install"></a>
Fetching **MoltenVK** Source Code
---------------------------------
@ -268,14 +281,14 @@ The `make` targets all require that *Xcode* is installed on your system.
Building from the command line creates the same `Package` folder structure described above when
building from within *Xcode*.
When building from the command line, you can set any of the build settings documented in
the `vk_mvk_moltenvk.h` file for `MVKConfiguration`, by passing them in the command line,
When building from the command line, you can set any of the build settings documented
in the `mvk_config.h` file for `MVKConfiguration`, by passing them in the command line,
as in the following examples:
make MVK_CONFIG_LOG_LEVEL=0
or
make macos MVK_CONFIG_PREFILL_METAL_COMMAND_BUFFERS=1
make macos MVK_CONFIG_USE_METAL_ARGUMENT_BUFFERS=2
...etc.
@ -293,7 +306,7 @@ or it can be included in any of the `make` build commands. For example:
or
make macos MVK_HIDE_VULKAN_SYMBOLS=1
...etc.

View File

@ -39,7 +39,6 @@
# only Vulkan 1.0, and only the following extensions:
# VK_KHR_get_physical_device_properties2
# VK_KHR_portability_subset
# VK_MVK_moltenvk
#
cts_vk_dir="../../VK-GL-CTS/build/external/vulkancts/modules/vulkan/Debug"
@ -85,7 +84,7 @@ fi
# -------------- MoltenVK configuration --------------------
# As documented above, the portability option restricts to Vulkan 1.0 and a very limited set of extensions.
# The values used here are documented in vk_mvk_moltenvk.h.
# The values used here are documented in mvk_config.h.
# - MVK_CONFIG_API_VERSION_TO_ADVERTISE = VK_API_VERSION_1_0 (4194304)
# - MVK_CONFIG_ADVERTISE_EXTENSIONS selects support for a very limited set of extensions,
# using a bit-or of values in MVKConfigAdvertiseExtensions (extension list documented above).

Binary file not shown.