diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index abeb5859b5..e7fe675cbf 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -190,6 +190,9 @@ add_library(core STATIC
     hle/kernel/k_code_memory.h
     hle/kernel/k_condition_variable.cpp
     hle/kernel/k_condition_variable.h
+    hle/kernel/k_dynamic_page_manager.h
+    hle/kernel/k_dynamic_resource_manager.h
+    hle/kernel/k_dynamic_slab_heap.h
     hle/kernel/k_event.cpp
     hle/kernel/k_event.h
     hle/kernel/k_handle_table.cpp
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp
index 953d964399..29ba562dce 100644
--- a/src/core/arm/arm_interface.cpp
+++ b/src/core/arm/arm_interface.cpp
@@ -134,6 +134,14 @@ void ARM_Interface::Run() {
         }
         system.ExitDynarmicProfile();
 
+        // If the thread is scheduled for termination, exit the thread.
+        if (current_thread->HasDpc()) {
+            if (current_thread->IsTerminationRequested()) {
+                current_thread->Exit();
+                UNREACHABLE();
+            }
+        }
+
         // Notify the debugger and go to sleep if a breakpoint was hit,
         // or if the thread is unable to continue for any reason.
         if (Has(hr, breakpoint) || Has(hr, no_execute)) {
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 1deeee1545..7fb8bc0195 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -133,6 +133,50 @@ struct System::Impl {
         : kernel{system}, fs_controller{system}, memory{system}, hid_core{}, room_network{},
           cpu_manager{system}, reporter{system}, applet_manager{system}, time_manager{system} {}
 
+    void Initialize(System& system) {
+        device_memory = std::make_unique<Core::DeviceMemory>();
+
+        is_multicore = Settings::values.use_multi_core.GetValue();
+
+        core_timing.SetMulticore(is_multicore);
+        core_timing.Initialize([&system]() { system.RegisterHostThread(); });
+
+        const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
+        const auto current_time =
+            std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
+        Settings::values.custom_rtc_differential =
+            Settings::values.custom_rtc.value_or(current_time) - current_time;
+
+        // Create a default fs if one doesn't already exist.
+        if (virtual_filesystem == nullptr) {
+            virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
+        }
+        if (content_provider == nullptr) {
+            content_provider = std::make_unique<FileSys::ContentProviderUnion>();
+        }
+
+        // Create default implementations of applets if one is not provided.
+        applet_manager.SetDefaultAppletsIfMissing();
+
+        is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();
+
+        kernel.SetMulticore(is_multicore);
+        cpu_manager.SetMulticore(is_multicore);
+        cpu_manager.SetAsyncGpu(is_async_gpu);
+    }
+
+    void ReinitializeIfNecessary(System& system) {
+        if (is_multicore == Settings::values.use_multi_core.GetValue()) {
+            return;
+        }
+
+        LOG_DEBUG(Kernel, "Re-initializing");
+
+        is_multicore = Settings::values.use_multi_core.GetValue();
+
+        Initialize(system);
+    }
+
     SystemResultStatus Run() {
         std::unique_lock<std::mutex> lk(suspend_guard);
         status = SystemResultStatus::Success;
@@ -178,37 +222,14 @@ struct System::Impl {
         debugger = std::make_unique<Debugger>(system, port);
     }
 
-    SystemResultStatus Init(System& system, Frontend::EmuWindow& emu_window) {
+    SystemResultStatus SetupForMainProcess(System& system, Frontend::EmuWindow& emu_window) {
         LOG_DEBUG(Core, "initialized OK");
 
-        device_memory = std::make_unique<Core::DeviceMemory>();
-
-        is_multicore = Settings::values.use_multi_core.GetValue();
-        is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();
-
-        kernel.SetMulticore(is_multicore);
-        cpu_manager.SetMulticore(is_multicore);
-        cpu_manager.SetAsyncGpu(is_async_gpu);
-        core_timing.SetMulticore(is_multicore);
+        // Setting changes may require a full system reinitialization (e.g., disabling multicore).
+        ReinitializeIfNecessary(system);
 
         kernel.Initialize();
         cpu_manager.Initialize();
-        core_timing.Initialize([&system]() { system.RegisterHostThread(); });
-
-        const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
-        const auto current_time =
-            std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
-        Settings::values.custom_rtc_differential =
-            Settings::values.custom_rtc.value_or(current_time) - current_time;
-
-        // Create a default fs if one doesn't already exist.
-        if (virtual_filesystem == nullptr)
-            virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
-        if (content_provider == nullptr)
-            content_provider = std::make_unique<FileSys::ContentProviderUnion>();
-
-        /// Create default implementations of applets if one is not provided.
-        applet_manager.SetDefaultAppletsIfMissing();
 
         /// Reset all glue registrations
         arp_manager.ResetAll();
@@ -253,11 +274,11 @@ struct System::Impl {
             return SystemResultStatus::ErrorGetLoader;
         }
 
-        SystemResultStatus init_result{Init(system, emu_window)};
+        SystemResultStatus init_result{SetupForMainProcess(system, emu_window)};
         if (init_result != SystemResultStatus::Success) {
             LOG_CRITICAL(Core, "Failed to initialize system (Error {})!",
                          static_cast<int>(init_result));
-            Shutdown();
+            ShutdownMainProcess();
             return init_result;
         }
 
@@ -276,7 +297,7 @@ struct System::Impl {
         const auto [load_result, load_parameters] = app_loader->Load(*main_process, system);
         if (load_result != Loader::ResultStatus::Success) {
             LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result);
-            Shutdown();
+            ShutdownMainProcess();
 
             return static_cast<SystemResultStatus>(
                 static_cast<u32>(SystemResultStatus::ErrorLoader) + static_cast<u32>(load_result));
@@ -335,7 +356,7 @@ struct System::Impl {
         return status;
     }
 
-    void Shutdown() {
+    void ShutdownMainProcess() {
         SetShuttingDown(true);
 
         // Log last frame performance stats if game was loaded
@@ -369,7 +390,7 @@ struct System::Impl {
         cheat_engine.reset();
         telemetry_session.reset();
         time_manager.Shutdown();
-        core_timing.Shutdown();
+        core_timing.ClearPendingEvents();
         app_loader.reset();
         audio_core.reset();
         gpu_core.reset();
@@ -377,7 +398,6 @@ struct System::Impl {
         perf_stats.reset();
         kernel.Shutdown();
         memory.Reset();
-        applet_manager.ClearAll();
 
         if (auto room_member = room_network.GetRoomMember().lock()) {
             Network::GameInfo game_info{};
@@ -520,6 +540,10 @@ const CpuManager& System::GetCpuManager() const {
     return impl->cpu_manager;
 }
 
+void System::Initialize() {
+    impl->Initialize(*this);
+}
+
 SystemResultStatus System::Run() {
     return impl->Run();
 }
@@ -540,8 +564,8 @@ void System::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
     impl->kernel.InvalidateCpuInstructionCacheRange(addr, size);
 }
 
-void System::Shutdown() {
-    impl->Shutdown();
+void System::ShutdownMainProcess() {
+    impl->ShutdownMainProcess();
 }
 
 bool System::IsShuttingDown() const {
diff --git a/src/core/core.h b/src/core/core.h
index 7843cc8ad9..4ebedffd91 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -142,6 +142,12 @@ public:
     System(System&&) = delete;
     System& operator=(System&&) = delete;
 
+    /**
+     * Initializes the system
+     * This function will initialize core functionality used for system emulation
+     */
+    void Initialize();
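+    // Illustrative frontend lifecycle (an assumption, not enforced by this change):
+    //   system.Initialize();            // one-time core setup
+    //   system.Load(window, filepath);  // boots the main process
+    //   system.ShutdownMainProcess();   // tears down the process; core setup is reused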
+
     /**
      * Run the OS and Application
      * This function will start emulation and run the relevant devices
@@ -166,8 +172,8 @@ public:
 
     void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
 
-    /// Shutdown the emulated system.
-    void Shutdown();
+    /// Shutdown the main emulated process.
+    void ShutdownMainProcess();
 
     /// Check if the core is shutting down.
     [[nodiscard]] bool IsShuttingDown() const;
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index 2678ce5328..0e7b5f9436 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -40,7 +40,9 @@ struct CoreTiming::Event {
 CoreTiming::CoreTiming()
     : clock{Common::CreateBestMatchingClock(Hardware::BASE_CLOCK_RATE, Hardware::CNTFREQ)} {}
 
-CoreTiming::~CoreTiming() = default;
+CoreTiming::~CoreTiming() {
+    Reset();
+}
 
 void CoreTiming::ThreadEntry(CoreTiming& instance) {
     constexpr char name[] = "HostTiming";
@@ -53,6 +55,7 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
 }
 
 void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
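+    // Initialize() may now run more than once per emulation session; stop any previous
+    // timer thread before starting a new one.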
+    Reset();
     on_thread_init = std::move(on_thread_init_);
     event_fifo_id = 0;
     shutting_down = false;
@@ -65,17 +68,8 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
     }
 }
 
-void CoreTiming::Shutdown() {
-    paused = true;
-    shutting_down = true;
-    pause_event.Set();
-    event.Set();
-    if (timer_thread) {
-        timer_thread->join();
-    }
-    ClearPendingEvents();
-    timer_thread.reset();
-    has_started = false;
+void CoreTiming::ClearPendingEvents() {
+    event_queue.clear();
 }
 
 void CoreTiming::Pause(bool is_paused) {
@@ -196,10 +190,6 @@ u64 CoreTiming::GetClockTicks() const {
     return CpuCyclesToClockCycles(ticks);
 }
 
-void CoreTiming::ClearPendingEvents() {
-    event_queue.clear();
-}
-
 void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
     std::scoped_lock lock{basic_lock};
 
@@ -307,6 +297,18 @@ void CoreTiming::ThreadLoop() {
     }
 }
 
+void CoreTiming::Reset() {
+    paused = true;
+    shutting_down = true;
+    pause_event.Set();
+    event.Set();
+    if (timer_thread) {
+        timer_thread->join();
+    }
+    timer_thread.reset();
+    has_started = false;
+}
+
 std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
     if (is_multicore) {
         return clock->GetTimeNS();
diff --git a/src/core/core_timing.h b/src/core/core_timing.h
index 3259397b28..b5925193c7 100644
--- a/src/core/core_timing.h
+++ b/src/core/core_timing.h
@@ -61,19 +61,14 @@ public:
     /// required to end slice - 1 and start slice 0 before the first cycle of code is executed.
     void Initialize(std::function<void()>&& on_thread_init_);
 
-    /// Tears down all timing related functionality.
-    void Shutdown();
+    /// Clear all pending events. This should ONLY be done on exit.
+    void ClearPendingEvents();
 
     /// Sets if emulation is multicore or single core, must be set before Initialize
     void SetMulticore(bool is_multicore_) {
         is_multicore = is_multicore_;
     }
 
-    /// Check if it's using host timing.
-    bool IsHostTiming() const {
-        return is_multicore;
-    }
-
     /// Pauses/Unpauses the execution of the timer thread.
     void Pause(bool is_paused);
 
@@ -136,12 +131,11 @@ public:
 private:
     struct Event;
 
-    /// Clear all pending events. This should ONLY be done on exit.
-    void ClearPendingEvents();
-
     static void ThreadEntry(CoreTiming& instance);
     void ThreadLoop();
 
+    void Reset();
+
     std::unique_ptr<Common::WallClock> clock;
 
     s64 global_timer = 0;
diff --git a/src/core/device_memory.h b/src/core/device_memory.h
index df61b0c0b6..90510733c8 100644
--- a/src/core/device_memory.h
+++ b/src/core/device_memory.h
@@ -31,12 +31,14 @@ public:
                DramMemoryMap::Base;
     }
 
-    u8* GetPointer(PAddr addr) {
-        return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
+    template <typename T>
+    T* GetPointer(PAddr addr) {
+        return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
     }
 
-    const u8* GetPointer(PAddr addr) const {
-        return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
+    template <typename T>
+    const T* GetPointer(PAddr addr) const {
+        return reinterpret_cast<const T*>(buffer.BackingBasePointer() +
+                                          (addr - DramMemoryMap::Base));
     }
 
     Common::HostMemory buffer;
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 9b6b284d08..c84d36c8c2 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -94,8 +94,8 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
     // TODO(bunnei): Fix this once we support the kernel virtual memory layout.
 
     if (size > 0) {
-        void* backing_kernel_memory{
-            system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))};
+        void* backing_kernel_memory{system.DeviceMemory().GetPointer<void>(
+            TranslateSlabAddrToPhysical(memory_layout, start))};
 
         const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
         ASSERT(region != nullptr);
@@ -181,7 +181,7 @@ void InitializeKPageBufferSlabHeap(Core::System& system) {
     ASSERT(slab_address != 0);
 
     // Initialize the slabheap.
-    KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address),
+    KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address),
                                     slab_size);
 }
 
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index da57ceb21e..4b1c134d40 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -34,7 +34,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
 
     // Clear the memory.
     for (const auto& block : m_page_group.Nodes()) {
-        std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
+        std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
     }
 
     // Set remaining tracking members.
diff --git a/src/core/hle/kernel/k_dynamic_page_manager.h b/src/core/hle/kernel/k_dynamic_page_manager.h
new file mode 100644
index 0000000000..9076c8fa3c
--- /dev/null
+++ b/src/core/hle/kernel/k_dynamic_page_manager.h
@@ -0,0 +1,136 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/alignment.h"
+#include "common/common_types.h"
+#include "core/hle/kernel/k_page_bitmap.h"
+#include "core/hle/kernel/k_spin_lock.h"
+#include "core/hle/kernel/memory_types.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+class KDynamicPageManager {
+public:
+    class PageBuffer {
+    private:
+        u8 m_buffer[PageSize];
+    };
+    static_assert(sizeof(PageBuffer) == PageSize);
+
+public:
+    KDynamicPageManager() = default;
+
+    template <typename T>
+    T* GetPointer(VAddr addr) {
+        return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
+    }
+
+    template <typename T>
+    const T* GetPointer(VAddr addr) const {
+        return reinterpret_cast<const T*>(m_backing_memory.data() + (addr - m_address));
+    }
+
+    Result Initialize(VAddr addr, size_t sz) {
+        // We need to have positive size.
+        R_UNLESS(sz > 0, ResultOutOfMemory);
+        m_backing_memory.resize(sz);
+
+        // Calculate management overhead.
+        const size_t management_size =
+            KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer));
+        const size_t allocatable_size = sz - management_size;
+
+        // Set tracking fields.
+        m_address = addr;
+        m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer));
+        m_count = allocatable_size / sizeof(PageBuffer);
+        R_UNLESS(m_count > 0, ResultOutOfMemory);
+
+        // Clear the management region.
+        u64* management_ptr = GetPointer<u64>(m_address + allocatable_size);
+        std::memset(management_ptr, 0, management_size);
+
+        // Initialize the bitmap.
+        m_page_bitmap.Initialize(management_ptr, m_count);
+
+        // Free the pages to the bitmap.
+        for (size_t i = 0; i < m_count; i++) {
+            // Ensure the freed page is all-zero.
+            std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize);
+
+            // Set the bit for the free page.
+            m_page_bitmap.SetBit(i);
+        }
+
+        R_SUCCEED();
+    }
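+
+    // Resulting layout (illustrative): [ m_count zeroed PageBuffers | bitmap management words ],
+    // with the management region carved from the tail of the backing allocation.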
+
+    VAddr GetAddress() const {
+        return m_address;
+    }
+    size_t GetSize() const {
+        return m_size;
+    }
+    size_t GetUsed() const {
+        return m_used;
+    }
+    size_t GetPeak() const {
+        return m_peak;
+    }
+    size_t GetCount() const {
+        return m_count;
+    }
+
+    PageBuffer* Allocate() {
+        // Take the lock.
+        // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+        KScopedSpinLock lk(m_lock);
+
+        // Find a random free block.
+        s64 soffset = m_page_bitmap.FindFreeBlock(true);
+        if (soffset < 0) [[unlikely]] {
+            return nullptr;
+        }
+
+        const size_t offset = static_cast<size_t>(soffset);
+
+        // Update our tracking.
+        m_page_bitmap.ClearBit(offset);
+        m_peak = std::max(m_peak, (++m_used));
+
+        return GetPointer<PageBuffer>(m_address) + offset;
+    }
+
+    void Free(PageBuffer* pb) {
+        // Ensure all pages in the heap are zero.
+        std::memset(pb, 0, PageSize);
+
+        // Take the lock.
+        // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+        KScopedSpinLock lk(m_lock);
+
+        // Set the bit for the free page. Compute the offset against the backing allocation,
+        // since pb is a host pointer rather than an emulated virtual address.
+        const size_t offset = static_cast<size_t>(pb - this->GetPointer<PageBuffer>(m_address));
+        m_page_bitmap.SetBit(offset);
+
+        // Decrement our used count.
+        --m_used;
+    }
+
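+    // Illustrative usage (sketch; 'addr' and 'size' describe a valid backing range):
+    //   KDynamicPageManager pages;
+    //   ASSERT(R_SUCCEEDED(pages.Initialize(addr, size)));
+    //   PageBuffer* pb = pages.Allocate(); // zeroed page, or nullptr on exhaustion
+    //   pages.Free(pb);                    // re-zeroes and returns it to the bitmap
+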
+private:
+    KSpinLock m_lock;
+    KPageBitmap m_page_bitmap;
+    size_t m_used{};
+    size_t m_peak{};
+    size_t m_count{};
+    VAddr m_address{};
+    size_t m_size{};
+
+    // TODO(bunnei): Back by host memory until we emulate kernel virtual address space.
+    std::vector<u8> m_backing_memory;
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_dynamic_resource_manager.h b/src/core/hle/kernel/k_dynamic_resource_manager.h
new file mode 100644
index 0000000000..1ce517e8e9
--- /dev/null
+++ b/src/core/hle/kernel/k_dynamic_resource_manager.h
@@ -0,0 +1,58 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/common_funcs.h"
+#include "core/hle/kernel/k_dynamic_slab_heap.h"
+#include "core/hle/kernel/k_memory_block.h"
+
+namespace Kernel {
+
+template <typename T, bool ClearNode = false>
+class KDynamicResourceManager {
+    YUZU_NON_COPYABLE(KDynamicResourceManager);
+    YUZU_NON_MOVEABLE(KDynamicResourceManager);
+
+public:
+    using DynamicSlabType = KDynamicSlabHeap<T, ClearNode>;
+
+public:
+    constexpr KDynamicResourceManager() = default;
+
+    constexpr size_t GetSize() const {
+        return m_slab_heap->GetSize();
+    }
+    constexpr size_t GetUsed() const {
+        return m_slab_heap->GetUsed();
+    }
+    constexpr size_t GetPeak() const {
+        return m_slab_heap->GetPeak();
+    }
+    constexpr size_t GetCount() const {
+        return m_slab_heap->GetCount();
+    }
+
+    void Initialize(KDynamicPageManager* page_allocator, DynamicSlabType* slab_heap) {
+        m_page_allocator = page_allocator;
+        m_slab_heap = slab_heap;
+    }
+
+    T* Allocate() const {
+        return m_slab_heap->Allocate(m_page_allocator);
+    }
+
+    void Free(T* t) const {
+        m_slab_heap->Free(t);
+    }
+
+private:
+    KDynamicPageManager* m_page_allocator{};
+    DynamicSlabType* m_slab_heap{};
+};
+
+class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {};
+
+using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType;
+
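+// Illustrative wiring (hypothetical setup; the real initialization lives in kernel init code):
+//   KDynamicPageManager page_allocator;    // backing pages
+//   KMemoryBlockSlabHeap block_heap;       // slab heap carved from those pages
+//   KMemoryBlockSlabManager block_manager; // per-process facade
+//   page_allocator.Initialize(addr, size);
+//   block_heap.Initialize(&page_allocator, num_blocks);
+//   block_manager.Initialize(&page_allocator, &block_heap);
+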
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_dynamic_slab_heap.h b/src/core/hle/kernel/k_dynamic_slab_heap.h
new file mode 100644
index 0000000000..3a0ddd0500
--- /dev/null
+++ b/src/core/hle/kernel/k_dynamic_slab_heap.h
@@ -0,0 +1,122 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <atomic>
+#include <memory>
+
+#include "common/common_funcs.h"
+#include "core/hle/kernel/k_dynamic_page_manager.h"
+#include "core/hle/kernel/k_slab_heap.h"
+
+namespace Kernel {
+
+template <typename T, bool ClearNode = false>
+class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
+    YUZU_NON_COPYABLE(KDynamicSlabHeap);
+    YUZU_NON_MOVEABLE(KDynamicSlabHeap);
+
+public:
+    constexpr KDynamicSlabHeap() = default;
+
+    constexpr VAddr GetAddress() const {
+        return m_address;
+    }
+    constexpr size_t GetSize() const {
+        return m_size;
+    }
+    constexpr size_t GetUsed() const {
+        return m_used.load();
+    }
+    constexpr size_t GetPeak() const {
+        return m_peak.load();
+    }
+    constexpr size_t GetCount() const {
+        return m_count.load();
+    }
+
+    constexpr bool IsInRange(VAddr addr) const {
+        return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
+    }
+
+    void Initialize(KDynamicPageManager* page_allocator, size_t num_objects) {
+        ASSERT(page_allocator != nullptr);
+
+        // Initialize members.
+        m_address = page_allocator->GetAddress();
+        m_size = page_allocator->GetSize();
+
+        // Initialize the base allocator.
+        KSlabHeapImpl::Initialize();
+
+        // Allocate until we have the correct number of objects.
+        while (m_count.load() < num_objects) {
+            auto* allocated = reinterpret_cast<T*>(page_allocator->Allocate());
+            ASSERT(allocated != nullptr);
+
+            for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) {
+                KSlabHeapImpl::Free(allocated + i);
+            }
+
+            m_count += sizeof(PageBuffer) / sizeof(T);
+        }
+    }
+
+    T* Allocate(KDynamicPageManager* page_allocator) {
+        T* allocated = static_cast<T*>(KSlabHeapImpl::Allocate());
+
+        // If we successfully allocated and we should clear the node, do so.
+        if constexpr (ClearNode) {
+            if (allocated != nullptr) [[likely]] {
+                reinterpret_cast<KSlabHeapImpl::Node*>(allocated)->next = nullptr;
+            }
+        }
+
+        // If we fail to allocate, try to get a new page from our next allocator.
+        if (allocated == nullptr) [[unlikely]] {
+            if (page_allocator != nullptr) {
+                allocated = reinterpret_cast<T*>(page_allocator->Allocate());
+                if (allocated != nullptr) {
+                    // If we succeeded in getting a page, free the rest to our slab.
+                    for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
+                        KSlabHeapImpl::Free(allocated + i);
+                    }
+                    m_count += sizeof(PageBuffer) / sizeof(T);
+                }
+            }
+        }
+
+        if (allocated != nullptr) [[likely]] {
+            // Construct the object.
+            std::construct_at(allocated);
+
+            // Update our tracking.
+            const size_t used = ++m_used;
+            size_t peak = m_peak.load();
+            while (peak < used) {
+                if (m_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) {
+                    break;
+                }
+            }
+        }
+
+        return allocated;
+    }
+
+    void Free(T* t) {
+        KSlabHeapImpl::Free(t);
+        --m_used;
+    }
+
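+    // Design note: the common Allocate() path is a single pop of the free list; new pages are
+    // pulled from the KDynamicPageManager only on exhaustion, and m_peak is raised with a
+    // compare-exchange loop because several threads may race to update it.
+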
+private:
+    using PageBuffer = KDynamicPageManager::PageBuffer;
+
+private:
+    std::atomic<size_t> m_used{};
+    std::atomic<size_t> m_peak{};
+    std::atomic<size_t> m_count{};
+    VAddr m_address{};
+    size_t m_size{};
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp
index 1b577a5b3e..4a6b60d268 100644
--- a/src/core/hle/kernel/k_interrupt_manager.cpp
+++ b/src/core/hle/kernel/k_interrupt_manager.cpp
@@ -11,29 +11,34 @@
 namespace Kernel::KInterruptManager {
 
 void HandleInterrupt(KernelCore& kernel, s32 core_id) {
-    auto* process = kernel.CurrentProcess();
-    if (!process) {
-        return;
-    }
-
     // Acknowledge the interrupt.
     kernel.PhysicalCore(core_id).ClearInterrupt();
 
     auto& current_thread = GetCurrentThread(kernel);
 
-    // If the user disable count is set, we may need to pin the current thread.
-    if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
-        KScopedSchedulerLock sl{kernel};
+    if (auto* process = kernel.CurrentProcess(); process) {
+        // If the user disable count is set, we may need to pin the current thread.
+        if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
+            KScopedSchedulerLock sl{kernel};
 
-        // Pin the current thread.
-        process->PinCurrentThread(core_id);
+            // Pin the current thread.
+            process->PinCurrentThread(core_id);
 
-        // Set the interrupt flag for the thread.
-        GetCurrentThread(kernel).SetInterruptFlag();
+            // Set the interrupt flag for the thread.
+            GetCurrentThread(kernel).SetInterruptFlag();
+        }
     }
 
     // Request interrupt scheduling.
     kernel.CurrentScheduler()->RequestScheduleOnInterrupt();
 }
 
+void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask) {
+    for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) {
+        if (core_mask & (1ULL << core_id)) {
+            kernel.PhysicalCore(core_id).Interrupt();
+        }
+    }
+}
+
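+// Example (illustrative): bit n of core_mask targets core n, so cores 0 and 2 are
+// interrupted with:
+//   KInterruptManager::SendInterProcessorInterrupt(kernel, 0b101);
+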
 } // namespace Kernel::KInterruptManager
diff --git a/src/core/hle/kernel/k_interrupt_manager.h b/src/core/hle/kernel/k_interrupt_manager.h
index f103dfe3f1..803dc92117 100644
--- a/src/core/hle/kernel/k_interrupt_manager.h
+++ b/src/core/hle/kernel/k_interrupt_manager.h
@@ -11,6 +11,8 @@ class KernelCore;
 
 namespace KInterruptManager {
 void HandleInterrupt(KernelCore& kernel, s32 core_id);
-}
+void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask);
+
+} // namespace KInterruptManager
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index 18df1f836a..9444f6bd28 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -6,6 +6,7 @@
 #include "common/alignment.h"
 #include "common/assert.h"
 #include "common/common_types.h"
+#include "common/intrusive_red_black_tree.h"
 #include "core/hle/kernel/memory_types.h"
 #include "core/hle/kernel/svc_types.h"
 
@@ -168,9 +169,8 @@ constexpr KMemoryPermission ConvertToKMemoryPermission(Svc::MemoryPermission per
 
 enum class KMemoryAttribute : u8 {
     None = 0x00,
-    Mask = 0x7F,
-    All = Mask,
-    DontCareMask = 0x80,
+    All = 0xFF,
+    UserMask = All,
 
     Locked = static_cast<u8>(Svc::MemoryAttribute::Locked),
     IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
@@ -178,76 +178,112 @@ enum class KMemoryAttribute : u8 {
     Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
 
     SetMask = Uncached,
-
-    IpcAndDeviceMapped = IpcLocked | DeviceShared,
-    LockedAndIpcLocked = Locked | IpcLocked,
-    DeviceSharedAndUncached = DeviceShared | Uncached
 };
 DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);
 
-static_assert((static_cast<u8>(KMemoryAttribute::Mask) &
-               static_cast<u8>(KMemoryAttribute::DontCareMask)) == 0);
+enum class KMemoryBlockDisableMergeAttribute : u8 {
+    None = 0,
+    Normal = (1u << 0),
+    DeviceLeft = (1u << 1),
+    IpcLeft = (1u << 2),
+    Locked = (1u << 3),
+    DeviceRight = (1u << 4),
+
+    AllLeft = Normal | DeviceLeft | IpcLeft | Locked,
+    AllRight = DeviceRight,
+};
+DECLARE_ENUM_FLAG_OPERATORS(KMemoryBlockDisableMergeAttribute);
 
 struct KMemoryInfo {
-    VAddr addr{};
-    std::size_t size{};
-    KMemoryState state{};
-    KMemoryPermission perm{};
-    KMemoryAttribute attribute{};
-    KMemoryPermission original_perm{};
-    u16 ipc_lock_count{};
-    u16 device_use_count{};
+    uintptr_t m_address;
+    size_t m_size;
+    KMemoryState m_state;
+    u16 m_device_disable_merge_left_count;
+    u16 m_device_disable_merge_right_count;
+    u16 m_ipc_lock_count;
+    u16 m_device_use_count;
+    u16 m_ipc_disable_merge_count;
+    KMemoryPermission m_permission;
+    KMemoryAttribute m_attribute;
+    KMemoryPermission m_original_permission;
+    KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
 
     constexpr Svc::MemoryInfo GetSvcMemoryInfo() const {
         return {
-            addr,
-            size,
-            static_cast<Svc::MemoryState>(state & KMemoryState::Mask),
-            static_cast<Svc::MemoryAttribute>(attribute & KMemoryAttribute::Mask),
-            static_cast<Svc::MemoryPermission>(perm & KMemoryPermission::UserMask),
-            ipc_lock_count,
-            device_use_count,
+            .addr = m_address,
+            .size = m_size,
+            .state = static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask),
+            .attr = static_cast<Svc::MemoryAttribute>(m_attribute & KMemoryAttribute::UserMask),
+            .perm = static_cast<Svc::MemoryPermission>(m_permission & KMemoryPermission::UserMask),
+            .ipc_refcount = m_ipc_lock_count,
+            .device_refcount = m_device_use_count,
+            .padding = {},
         };
     }
 
-    constexpr VAddr GetAddress() const {
-        return addr;
+    constexpr uintptr_t GetAddress() const {
+        return m_address;
     }
-    constexpr std::size_t GetSize() const {
-        return size;
+
+    constexpr size_t GetSize() const {
+        return m_size;
     }
-    constexpr std::size_t GetNumPages() const {
-        return GetSize() / PageSize;
+
+    constexpr size_t GetNumPages() const {
+        return this->GetSize() / PageSize;
     }
-    constexpr VAddr GetEndAddress() const {
-        return GetAddress() + GetSize();
+
+    constexpr uintptr_t GetEndAddress() const {
+        return this->GetAddress() + this->GetSize();
     }
-    constexpr VAddr GetLastAddress() const {
-        return GetEndAddress() - 1;
+
+    constexpr uintptr_t GetLastAddress() const {
+        return this->GetEndAddress() - 1;
     }
+
+    constexpr u16 GetIpcLockCount() const {
+        return m_ipc_lock_count;
+    }
+
+    constexpr u16 GetIpcDisableMergeCount() const {
+        return m_ipc_disable_merge_count;
+    }
+
     constexpr KMemoryState GetState() const {
-        return state;
-    }
-    constexpr KMemoryAttribute GetAttribute() const {
-        return attribute;
+        return m_state;
     }
+
     constexpr KMemoryPermission GetPermission() const {
-        return perm;
+        return m_permission;
+    }
+
+    constexpr KMemoryPermission GetOriginalPermission() const {
+        return m_original_permission;
+    }
+
+    constexpr KMemoryAttribute GetAttribute() const {
+        return m_attribute;
+    }
+
+    constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
+        return m_disable_merge_attribute;
     }
 };
 
-class KMemoryBlock final {
-    friend class KMemoryBlockManager;
-
+class KMemoryBlock : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> {
 private:
-    VAddr addr{};
-    std::size_t num_pages{};
-    KMemoryState state{KMemoryState::None};
-    u16 ipc_lock_count{};
-    u16 device_use_count{};
-    KMemoryPermission perm{KMemoryPermission::None};
-    KMemoryPermission original_perm{KMemoryPermission::None};
-    KMemoryAttribute attribute{KMemoryAttribute::None};
+    u16 m_device_disable_merge_left_count;
+    u16 m_device_disable_merge_right_count;
+    VAddr m_address;
+    size_t m_num_pages;
+    KMemoryState m_memory_state;
+    u16 m_ipc_lock_count;
+    u16 m_device_use_count;
+    u16 m_ipc_disable_merge_count;
+    KMemoryPermission m_permission;
+    KMemoryPermission m_original_permission;
+    KMemoryAttribute m_attribute;
+    KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
 
 public:
     static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) {
@@ -261,113 +297,349 @@ public:
     }
 
 public:
-    constexpr KMemoryBlock() = default;
-    constexpr KMemoryBlock(VAddr addr_, std::size_t num_pages_, KMemoryState state_,
-                           KMemoryPermission perm_, KMemoryAttribute attribute_)
-        : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {}
-
     constexpr VAddr GetAddress() const {
-        return addr;
+        return m_address;
     }
 
-    constexpr std::size_t GetNumPages() const {
-        return num_pages;
+    constexpr size_t GetNumPages() const {
+        return m_num_pages;
     }
 
-    constexpr std::size_t GetSize() const {
-        return GetNumPages() * PageSize;
+    constexpr size_t GetSize() const {
+        return this->GetNumPages() * PageSize;
     }
 
     constexpr VAddr GetEndAddress() const {
-        return GetAddress() + GetSize();
+        return this->GetAddress() + this->GetSize();
     }
 
     constexpr VAddr GetLastAddress() const {
-        return GetEndAddress() - 1;
+        return this->GetEndAddress() - 1;
+    }
+
+    constexpr u16 GetIpcLockCount() const {
+        return m_ipc_lock_count;
+    }
+
+    constexpr u16 GetIpcDisableMergeCount() const {
+        return m_ipc_disable_merge_count;
+    }
+
+    constexpr KMemoryPermission GetPermission() const {
+        return m_permission;
+    }
+
+    constexpr KMemoryPermission GetOriginalPermission() const {
+        return m_original_permission;
+    }
+
+    constexpr KMemoryAttribute GetAttribute() const {
+        return m_attribute;
     }
 
     constexpr KMemoryInfo GetMemoryInfo() const {
         return {
-            GetAddress(), GetSize(),     state,          perm,
-            attribute,    original_perm, ipc_lock_count, device_use_count,
+            .m_address = this->GetAddress(),
+            .m_size = this->GetSize(),
+            .m_state = m_memory_state,
+            .m_device_disable_merge_left_count = m_device_disable_merge_left_count,
+            .m_device_disable_merge_right_count = m_device_disable_merge_right_count,
+            .m_ipc_lock_count = m_ipc_lock_count,
+            .m_device_use_count = m_device_use_count,
+            .m_ipc_disable_merge_count = m_ipc_disable_merge_count,
+            .m_permission = m_permission,
+            .m_attribute = m_attribute,
+            .m_original_permission = m_original_permission,
+            .m_disable_merge_attribute = m_disable_merge_attribute,
         };
     }
 
-    void ShareToDevice(KMemoryPermission /*new_perm*/) {
-        ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
-               device_use_count == 0);
-        attribute |= KMemoryAttribute::DeviceShared;
-        const u16 new_use_count{++device_use_count};
-        ASSERT(new_use_count > 0);
+public:
+    explicit KMemoryBlock() = default;
+
+    constexpr KMemoryBlock(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
+                           KMemoryAttribute attr)
+        : Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(),
+          m_device_disable_merge_left_count(), m_device_disable_merge_right_count(),
+          m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0),
+          m_device_use_count(0), m_ipc_disable_merge_count(), m_permission(p),
+          m_original_permission(KMemoryPermission::None), m_attribute(attr),
+          m_disable_merge_attribute() {}
+
+    constexpr void Initialize(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
+                              KMemoryAttribute attr) {
+        m_device_disable_merge_left_count = 0;
+        m_device_disable_merge_right_count = 0;
+        m_address = addr;
+        m_num_pages = np;
+        m_memory_state = ms;
+        m_ipc_lock_count = 0;
+        m_device_use_count = 0;
+        m_permission = p;
+        m_original_permission = KMemoryPermission::None;
+        m_attribute = attr;
+        m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None;
     }
 
-    void UnshareToDevice(KMemoryPermission /*new_perm*/) {
-        ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
-        const u16 prev_use_count{device_use_count--};
-        ASSERT(prev_use_count > 0);
-        if (prev_use_count == 1) {
-            attribute &= ~KMemoryAttribute::DeviceShared;
-        }
-    }
-
-private:
     constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const {
-        constexpr KMemoryAttribute AttributeIgnoreMask{KMemoryAttribute::DontCareMask |
-                                                       KMemoryAttribute::IpcLocked |
-                                                       KMemoryAttribute::DeviceShared};
-        return state == s && perm == p &&
-               (attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
+        constexpr auto AttributeIgnoreMask =
+            KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
+        return m_memory_state == s && m_permission == p &&
+               (m_attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
     }
 
     constexpr bool HasSameProperties(const KMemoryBlock& rhs) const {
-        return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm &&
-               attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count &&
-               device_use_count == rhs.device_use_count;
+        return m_memory_state == rhs.m_memory_state && m_permission == rhs.m_permission &&
+               m_original_permission == rhs.m_original_permission &&
+               m_attribute == rhs.m_attribute && m_ipc_lock_count == rhs.m_ipc_lock_count &&
+               m_device_use_count == rhs.m_device_use_count;
     }
 
-    constexpr bool Contains(VAddr start) const {
-        return GetAddress() <= start && start <= GetEndAddress();
+    constexpr bool CanMergeWith(const KMemoryBlock& rhs) const {
+        return this->HasSameProperties(rhs) &&
+               (m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight) ==
+                   KMemoryBlockDisableMergeAttribute::None &&
+               (rhs.m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft) ==
+                   KMemoryBlockDisableMergeAttribute::None;
     }
 
-    constexpr void Add(std::size_t count) {
-        ASSERT(count > 0);
-        ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1);
-
-        num_pages += count;
+    constexpr bool Contains(VAddr addr) const {
+        return this->GetAddress() <= addr && addr <= this->GetEndAddress();
     }
 
-    constexpr void Update(KMemoryState new_state, KMemoryPermission new_perm,
-                          KMemoryAttribute new_attribute) {
-        ASSERT(original_perm == KMemoryPermission::None);
-        ASSERT((attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);
+    constexpr void Add(const KMemoryBlock& added_block) {
+        ASSERT(added_block.GetNumPages() > 0);
+        ASSERT(this->GetAddress() + added_block.GetSize() - 1 <
+               this->GetEndAddress() + added_block.GetSize() - 1);
 
-        state = new_state;
-        perm = new_perm;
-
-        attribute = static_cast<KMemoryAttribute>(
-            new_attribute |
-            (attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
+        m_num_pages += added_block.GetNumPages();
+        m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+            m_disable_merge_attribute | added_block.m_disable_merge_attribute);
+        m_device_disable_merge_right_count = added_block.m_device_disable_merge_right_count;
     }
 
-    constexpr KMemoryBlock Split(VAddr split_addr) {
-        ASSERT(GetAddress() < split_addr);
-        ASSERT(Contains(split_addr));
-        ASSERT(Common::IsAligned(split_addr, PageSize));
+    constexpr void Update(KMemoryState s, KMemoryPermission p, KMemoryAttribute a,
+                          bool set_disable_merge_attr, u8 set_mask, u8 clear_mask) {
+        ASSERT(m_original_permission == KMemoryPermission::None);
+        ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);
 
-        KMemoryBlock block;
-        block.addr = addr;
-        block.num_pages = (split_addr - GetAddress()) / PageSize;
-        block.state = state;
-        block.ipc_lock_count = ipc_lock_count;
-        block.device_use_count = device_use_count;
-        block.perm = perm;
-        block.original_perm = original_perm;
-        block.attribute = attribute;
+        m_memory_state = s;
+        m_permission = p;
+        m_attribute = static_cast<KMemoryAttribute>(
+            a | (m_attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
 
-        addr = split_addr;
-        num_pages -= block.num_pages;
+        if (set_disable_merge_attr && set_mask != 0) {
+            m_disable_merge_attribute = m_disable_merge_attribute |
+                                        static_cast<KMemoryBlockDisableMergeAttribute>(set_mask);
+        }
+        if (clear_mask != 0) {
+            m_disable_merge_attribute = m_disable_merge_attribute &
+                                        static_cast<KMemoryBlockDisableMergeAttribute>(~clear_mask);
+        }
+    }
 
-        return block;
+    constexpr void Split(KMemoryBlock* block, VAddr addr) {
+        ASSERT(this->GetAddress() < addr);
+        ASSERT(this->Contains(addr));
+        ASSERT(Common::IsAligned(addr, PageSize));
+
+        block->m_address = m_address;
+        block->m_num_pages = (addr - this->GetAddress()) / PageSize;
+        block->m_memory_state = m_memory_state;
+        block->m_ipc_lock_count = m_ipc_lock_count;
+        block->m_device_use_count = m_device_use_count;
+        block->m_permission = m_permission;
+        block->m_original_permission = m_original_permission;
+        block->m_attribute = m_attribute;
+        block->m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+            m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft);
+        block->m_ipc_disable_merge_count = m_ipc_disable_merge_count;
+        block->m_device_disable_merge_left_count = m_device_disable_merge_left_count;
+        block->m_device_disable_merge_right_count = 0;
+
+        m_address = addr;
+        m_num_pages -= block->m_num_pages;
+
+        m_ipc_disable_merge_count = 0;
+        m_device_disable_merge_left_count = 0;
+        m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+            m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight);
+    }
+
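+    // After Split(block, addr): 'block' covers [GetAddress(), addr) and keeps the left-side
+    // merge state, while 'this' covers [addr, end) and keeps only the right-side merge state.
+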
+    constexpr void UpdateDeviceDisableMergeStateForShareLeft(
+        [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
+        if (left) {
+            m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+                m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft);
+            const u16 new_device_disable_merge_left_count = ++m_device_disable_merge_left_count;
+            ASSERT(new_device_disable_merge_left_count > 0);
+        }
+    }
+
+    constexpr void UpdateDeviceDisableMergeStateForShareRight(
+        [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
+        if (right) {
+            m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+                m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight);
+            const u16 new_device_disable_merge_right_count = ++m_device_disable_merge_right_count;
+            ASSERT(new_device_disable_merge_right_count > 0);
+        }
+    }
+
+    constexpr void UpdateDeviceDisableMergeStateForShare(KMemoryPermission new_perm, bool left,
+                                                         bool right) {
+        this->UpdateDeviceDisableMergeStateForShareLeft(new_perm, left, right);
+        this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right);
+    }
+
+    constexpr void ShareToDevice(KMemoryPermission new_perm, bool left, bool right) {
+        // We must either be shared or have a zero lock count.
+        ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
+               m_device_use_count == 0);
+
+        // Share.
+        const u16 new_count = ++m_device_use_count;
+        ASSERT(new_count > 0);
+
+        m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::DeviceShared);
+
+        this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right);
+    }
+
+    constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(
+        [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
+        if (left) {
+            if (!m_device_disable_merge_left_count) {
+                return;
+            }
+            --m_device_disable_merge_left_count;
+        }
+
+        m_device_disable_merge_left_count =
+            std::min(m_device_disable_merge_left_count, m_device_use_count);
+
+        if (m_device_disable_merge_left_count == 0) {
+            m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+                m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceLeft);
+        }
+    }
+
+    constexpr void UpdateDeviceDisableMergeStateForUnshareRight(
+        [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
+        if (right) {
+            const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--;
+            ASSERT(old_device_disable_merge_right_count > 0);
+            if (old_device_disable_merge_right_count == 1) {
+                m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+                    m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceRight);
+            }
+        }
+    }
+
+    constexpr void UpdateDeviceDisableMergeStateForUnshare(KMemoryPermission new_perm, bool left,
+                                                           bool right) {
+        this->UpdateDeviceDisableMergeStateForUnshareLeft(new_perm, left, right);
+        this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
+    }
+
+    constexpr void UnshareToDevice(KMemoryPermission new_perm, bool left, bool right) {
+        // We must be shared.
+        ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
+
+        // Unshare.
+        const u16 old_count = m_device_use_count--;
+        ASSERT(old_count > 0);
+
+        if (old_count == 1) {
+            m_attribute =
+                static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
+        }
+
+        this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right);
+    }
+
+    constexpr void UnshareToDeviceRight(KMemoryPermission new_perm, bool left, bool right) {
+        // We must be shared.
+        ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
+
+        // Unshare.
+        const u16 old_count = m_device_use_count--;
+        ASSERT(old_count > 0);
+
+        if (old_count == 1) {
+            m_attribute =
+                static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
+        }
+
+        this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
+    }
+
+    constexpr void LockForIpc(KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
+        // We must either be locked or have a zero lock count.
+        ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked ||
+               m_ipc_lock_count == 0);
+
+        // Lock.
+        const u16 new_lock_count = ++m_ipc_lock_count;
+        ASSERT(new_lock_count > 0);
+
+        // If this is our first lock, update our permissions.
+        if (new_lock_count == 1) {
+            ASSERT(m_original_permission == KMemoryPermission::None);
+            ASSERT((m_permission | new_perm | KMemoryPermission::NotMapped) ==
+                   (m_permission | KMemoryPermission::NotMapped));
+            ASSERT((m_permission & KMemoryPermission::UserExecute) !=
+                       KMemoryPermission::UserExecute ||
+                   (new_perm == KMemoryPermission::UserRead));
+            m_original_permission = m_permission;
+            m_permission = static_cast<KMemoryPermission>(
+                (new_perm & KMemoryPermission::IpcLockChangeMask) |
+                (m_original_permission & ~KMemoryPermission::IpcLockChangeMask));
+        }
+        m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::IpcLocked);
+
+        if (left) {
+            m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+                m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::IpcLeft);
+            const u16 new_ipc_disable_merge_count = ++m_ipc_disable_merge_count;
+            ASSERT(new_ipc_disable_merge_count > 0);
+        }
+    }
+
+    constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left,
+                                [[maybe_unused]] bool right) {
+        // We must be locked.
+        ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked);
+
+        // Unlock.
+        const u16 old_lock_count = m_ipc_lock_count--;
+        ASSERT(old_lock_count > 0);
+
+        // If this is our last unlock, update our permissions.
+        if (old_lock_count == 1) {
+            ASSERT(m_original_permission != KMemoryPermission::None);
+            m_permission = m_original_permission;
+            m_original_permission = KMemoryPermission::None;
+            m_attribute = static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::IpcLocked);
+        }
+
+        if (left) {
+            const u16 old_ipc_disable_merge_count = m_ipc_disable_merge_count--;
+            ASSERT(old_ipc_disable_merge_count > 0);
+            if (old_ipc_disable_merge_count == 1) {
+                m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+                    m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::IpcLeft);
+            }
+        }
+    }
+
+    constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
+        return m_disable_merge_attribute;
     }
 };
 static_assert(std::is_trivially_destructible<KMemoryBlock>::value);
diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp
index 3ddb9984fa..cf4c1e371b 100644
--- a/src/core/hle/kernel/k_memory_block_manager.cpp
+++ b/src/core/hle/kernel/k_memory_block_manager.cpp
@@ -2,221 +2,336 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/memory_types.h"
 
 namespace Kernel {
 
-KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_)
-    : start_addr{start_addr_}, end_addr{end_addr_} {
-    const u64 num_pages{(end_addr - start_addr) / PageSize};
-    memory_block_tree.emplace_back(start_addr, num_pages, KMemoryState::Free,
-                                   KMemoryPermission::None, KMemoryAttribute::None);
+KMemoryBlockManager::KMemoryBlockManager() = default;
+
+Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) {
+    // Allocate a block to encapsulate the address space, insert it into the tree.
+    KMemoryBlock* start_block = slab_manager->Allocate();
+    R_UNLESS(start_block != nullptr, ResultOutOfResource);
+
+    // Set our start and end.
+    m_start_address = st;
+    m_end_address = nd;
+    ASSERT(Common::IsAligned(m_start_address, PageSize));
+    ASSERT(Common::IsAligned(m_end_address, PageSize));
+
+    // Initialize and insert the block.
+    start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize,
+                            KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None);
+    m_memory_block_tree.insert(*start_block);
+
+    R_SUCCEED();
 }
 
-KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) {
-    auto node{memory_block_tree.begin()};
-    while (node != end()) {
-        const VAddr node_end_addr{node->GetNumPages() * PageSize + node->GetAddress()};
-        if (node->GetAddress() <= addr && node_end_addr - 1 >= addr) {
-            return node;
-        }
-        node = std::next(node);
+void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager,
+                                   HostUnmapCallback&& host_unmap_callback) {
+    // Erase every block until we have none left.
+    auto it = m_memory_block_tree.begin();
+    while (it != m_memory_block_tree.end()) {
+        KMemoryBlock* block = std::addressof(*it);
+        it = m_memory_block_tree.erase(it);
+        slab_manager->Free(block);
+        host_unmap_callback(block->GetAddress(), block->GetSize());
     }
-    return end();
+
+    ASSERT(m_memory_block_tree.empty());
 }
 
-VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
-                                        std::size_t num_pages, std::size_t align,
-                                        std::size_t offset, std::size_t guard_pages) {
-    if (num_pages == 0) {
-        return {};
-    }
+VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages,
+                                        size_t num_pages, size_t alignment, size_t offset,
+                                        size_t guard_pages) const {
+    if (num_pages > 0) {
+        const VAddr region_end = region_start + region_num_pages * PageSize;
+        const VAddr region_last = region_end - 1;
+        for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend();
+             it++) {
+            const KMemoryInfo info = it->GetMemoryInfo();
+            if (region_last < info.GetAddress()) {
+                break;
+            }
+            if (info.m_state != KMemoryState::Free) {
+                continue;
+            }
 
-    const VAddr region_end{region_start + region_num_pages * PageSize};
-    const VAddr region_last{region_end - 1};
-    for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) {
-        const auto info{it->GetMemoryInfo()};
-        if (region_last < info.GetAddress()) {
-            break;
-        }
+            VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress();
+            area += guard_pages * PageSize;
 
-        if (info.state != KMemoryState::Free) {
-            continue;
-        }
+            const VAddr offset_area = Common::AlignDown(area, alignment) + offset;
+            area = (area <= offset_area) ? offset_area : offset_area + alignment;
 
-        VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()};
-        area += guard_pages * PageSize;
+            const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize;
+            const VAddr area_last = area_end - 1;
 
-        const VAddr offset_area{Common::AlignDown(area, align) + offset};
-        area = (area <= offset_area) ? offset_area : offset_area + align;
-
-        const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize};
-        const VAddr area_last{area_end - 1};
-
-        if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
-            area_last <= info.GetLastAddress()) {
-            return area;
+            if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
+                area_last <= info.GetLastAddress()) {
+                return area;
+            }
         }
     }
 
     return {};
 }
 
-void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
-                                 KMemoryPermission prev_perm, KMemoryAttribute prev_attribute,
-                                 KMemoryState state, KMemoryPermission perm,
-                                 KMemoryAttribute attribute) {
-    const VAddr update_end_addr{addr + num_pages * PageSize};
-    iterator node{memory_block_tree.begin()};
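+// Merge adjacent blocks with identical properties after an update, so the tree
+// never contains two sequential blocks that could be coalesced.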
+void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator,
+                                            VAddr address, size_t num_pages) {
+    // Find the iterator now that we've updated.
+    iterator it = this->FindIterator(address);
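+    // Step back one block (when possible) so the front edge of the updated
+    // range can also merge with its predecessor.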
+    if (address != m_start_address) {
+        it--;
+    }
 
-    prev_attribute |= KMemoryAttribute::IpcAndDeviceMapped;
-
-    while (node != memory_block_tree.end()) {
-        KMemoryBlock* block{&(*node)};
-        iterator next_node{std::next(node)};
-        const VAddr cur_addr{block->GetAddress()};
-        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
-
-        if (addr < cur_end_addr && cur_addr < update_end_addr) {
-            if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) {
-                node = next_node;
-                continue;
-            }
-
-            iterator new_node{node};
-            if (addr > cur_addr) {
-                memory_block_tree.insert(node, block->Split(addr));
-            }
-
-            if (update_end_addr < cur_end_addr) {
-                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
-            }
-
-            new_node->Update(state, perm, attribute);
-
-            MergeAdjacent(new_node, next_node);
-        }
-
-        if (cur_end_addr - 1 >= update_end_addr - 1) {
+    // Coalesce blocks that we can.
+    while (true) {
+        iterator prev = it++;
+        if (it == m_memory_block_tree.end()) {
             break;
         }
 
-        node = next_node;
-    }
-}
-
-void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState state,
-                                 KMemoryPermission perm, KMemoryAttribute attribute) {
-    const VAddr update_end_addr{addr + num_pages * PageSize};
-    iterator node{memory_block_tree.begin()};
-
-    while (node != memory_block_tree.end()) {
-        KMemoryBlock* block{&(*node)};
-        iterator next_node{std::next(node)};
-        const VAddr cur_addr{block->GetAddress()};
-        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
-
-        if (addr < cur_end_addr && cur_addr < update_end_addr) {
-            iterator new_node{node};
-
-            if (addr > cur_addr) {
-                memory_block_tree.insert(node, block->Split(addr));
-            }
-
-            if (update_end_addr < cur_end_addr) {
-                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
-            }
-
-            new_node->Update(state, perm, attribute);
-
-            MergeAdjacent(new_node, next_node);
+        if (prev->CanMergeWith(*it)) {
+            KMemoryBlock* block = std::addressof(*it);
+            m_memory_block_tree.erase(it);
+            prev->Add(*block);
+            allocator->Free(block);
+            it = prev;
         }
 
-        if (cur_end_addr - 1 >= update_end_addr - 1) {
+        if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) {
             break;
         }
-
-        node = next_node;
     }
 }
 
-void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
+void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+                                 size_t num_pages, KMemoryState state, KMemoryPermission perm,
+                                 KMemoryAttribute attr,
+                                 KMemoryBlockDisableMergeAttribute set_disable_attr,
+                                 KMemoryBlockDisableMergeAttribute clear_disable_attr) {
+    // Ensure for auditing that we never end up with an invalid tree.
+    KScopedMemoryBlockManagerAuditor auditor(this);
+    ASSERT(Common::IsAligned(address, PageSize));
+    ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
+           KMemoryAttribute::None);
+
+    VAddr cur_address = address;
+    size_t remaining_pages = num_pages;
+    iterator it = this->FindIterator(address);
+
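+    // Walk the affected range block by block, splitting blocks at the range
+    // edges as needed.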
+    while (remaining_pages > 0) {
+        const size_t remaining_size = remaining_pages * PageSize;
+        KMemoryInfo cur_info = it->GetMemoryInfo();
+        if (it->HasProperties(state, perm, attr)) {
+            // If we already have the right properties, just advance.
+            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
+                remaining_pages = 0;
+                cur_address += remaining_size;
+            } else {
+                remaining_pages =
+                    (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
+                cur_address = cur_info.GetEndAddress();
+            }
+        } else {
+            // If we need to, create a new block before and insert it.
+            if (cur_info.GetAddress() != cur_address) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address);
+                it = m_memory_block_tree.insert(*new_block);
+                it++;
+
+                cur_info = it->GetMemoryInfo();
+                cur_address = cur_info.GetAddress();
+            }
+
+            // If we need to, create a new block after and insert it.
+            if (cur_info.GetSize() > remaining_size) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address + remaining_size);
+                it = m_memory_block_tree.insert(*new_block);
+
+                cur_info = it->GetMemoryInfo();
+            }
+
+            // Update block state.
+            it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr),
+                       static_cast<u8>(clear_disable_attr));
+            cur_address += cur_info.GetSize();
+            remaining_pages -= cur_info.GetNumPages();
+        }
+        it++;
+    }
+
+    this->CoalesceForUpdate(allocator, address, num_pages);
+}
+
+void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator,
+                                        VAddr address, size_t num_pages, KMemoryState test_state,
+                                        KMemoryPermission test_perm, KMemoryAttribute test_attr,
+                                        KMemoryState state, KMemoryPermission perm,
+                                        KMemoryAttribute attr) {
+    // Ensure for auditing that we never end up with an invalid tree.
+    KScopedMemoryBlockManagerAuditor auditor(this);
+    ASSERT(Common::IsAligned(address, PageSize));
+    ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
+           KMemoryAttribute::None);
+
+    VAddr cur_address = address;
+    size_t remaining_pages = num_pages;
+    iterator it = this->FindIterator(address);
+
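+    // Walk the range, updating only blocks that match the test properties and
+    // do not already have the new ones.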
+    while (remaining_pages > 0) {
+        const size_t remaining_size = remaining_pages * PageSize;
+        KMemoryInfo cur_info = it->GetMemoryInfo();
+        if (it->HasProperties(test_state, test_perm, test_attr) &&
+            !it->HasProperties(state, perm, attr)) {
+            // If we need to, create a new block before and insert it.
+            if (cur_info.GetAddress() != cur_address) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address);
+                it = m_memory_block_tree.insert(*new_block);
+                it++;
+
+                cur_info = it->GetMemoryInfo();
+                cur_address = cur_info.GetAddress();
+            }
+
+            // If we need to, create a new block after and insert it.
+            if (cur_info.GetSize() > remaining_size) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address + remaining_size);
+                it = m_memory_block_tree.insert(*new_block);
+
+                cur_info = it->GetMemoryInfo();
+            }
+
+            // Update block state.
+            it->Update(state, perm, attr, false, 0, 0);
+            cur_address += cur_info.GetSize();
+            remaining_pages -= cur_info.GetNumPages();
+        } else {
+            // If we already have the right properties, just advance.
+            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
+                remaining_pages = 0;
+                cur_address += remaining_size;
+            } else {
+                remaining_pages =
+                    (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
+                cur_address = cur_info.GetEndAddress();
+            }
+        }
+        it++;
+    }
+
+    this->CoalesceForUpdate(allocator, address, num_pages);
+}
+
+void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+                                     size_t num_pages, MemoryBlockLockFunction lock_func,
                                      KMemoryPermission perm) {
-    const VAddr update_end_addr{addr + num_pages * PageSize};
-    iterator node{memory_block_tree.begin()};
+    // Ensure for auditing that we never end up with an invalid tree.
+    KScopedMemoryBlockManagerAuditor auditor(this);
+    ASSERT(Common::IsAligned(address, PageSize));
 
-    while (node != memory_block_tree.end()) {
-        KMemoryBlock* block{&(*node)};
-        iterator next_node{std::next(node)};
-        const VAddr cur_addr{block->GetAddress()};
-        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
+    VAddr cur_address = address;
+    size_t remaining_pages = num_pages;
+    iterator it = this->FindIterator(address);
 
-        if (addr < cur_end_addr && cur_addr < update_end_addr) {
-            iterator new_node{node};
+    const VAddr end_address = address + (num_pages * PageSize);
 
-            if (addr > cur_addr) {
-                memory_block_tree.insert(node, block->Split(addr));
-            }
+    while (remaining_pages > 0) {
+        const size_t remaining_size = remaining_pages * PageSize;
+        KMemoryInfo cur_info = it->GetMemoryInfo();
 
-            if (update_end_addr < cur_end_addr) {
-                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
-            }
+        // If we need to, create a new block before and insert it.
+        if (cur_info.m_address != cur_address) {
+            KMemoryBlock* new_block = allocator->Allocate();
 
-            lock_func(new_node, perm);
+            it->Split(new_block, cur_address);
+            it = m_memory_block_tree.insert(*new_block);
+            it++;
 
-            MergeAdjacent(new_node, next_node);
+            cur_info = it->GetMemoryInfo();
+            cur_address = cur_info.GetAddress();
         }
 
-        if (cur_end_addr - 1 >= update_end_addr - 1) {
-            break;
+        if (cur_info.GetSize() > remaining_size) {
+            // If we need to, create a new block after and insert it.
+            KMemoryBlock* new_block = allocator->Allocate();
+
+            it->Split(new_block, cur_address + remaining_size);
+            it = m_memory_block_tree.insert(*new_block);
+
+            cur_info = it->GetMemoryInfo();
         }
 
-        node = next_node;
+        // Call the locked update function.
+        (std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address,
+                                          cur_info.GetEndAddress() == end_address);
+        cur_address += cur_info.GetSize();
+        remaining_pages -= cur_info.GetNumPages();
+        it++;
     }
+
+    this->CoalesceForUpdate(allocator, address, num_pages);
 }
 
-void KMemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
-    const_iterator it{FindIterator(start)};
-    KMemoryInfo info{};
-    do {
-        info = it->GetMemoryInfo();
-        func(info);
-        it = std::next(it);
-    } while (info.addr + info.size - 1 < end - 1 && it != cend());
-}
+// Debug.
+bool KMemoryBlockManager::CheckState() const {
+    // Loop over every block, ensuring that we are sorted and coalesced.
+    auto it = m_memory_block_tree.cbegin();
+    auto prev = it++;
+    while (it != m_memory_block_tree.cend()) {
+        const KMemoryInfo prev_info = prev->GetMemoryInfo();
+        const KMemoryInfo cur_info = it->GetMemoryInfo();
 
-void KMemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
-    KMemoryBlock* block{&(*it)};
-
-    auto EraseIt = [&](const iterator it_to_erase) {
-        if (next_it == it_to_erase) {
-            next_it = std::next(next_it);
+        // Sequential blocks which can be merged should be merged.
+        if (prev->CanMergeWith(*it)) {
+            return false;
         }
-        memory_block_tree.erase(it_to_erase);
-    };
 
-    if (it != memory_block_tree.begin()) {
-        KMemoryBlock* prev{&(*std::prev(it))};
+        // Adjacent blocks in the tree must be contiguous in the address space.
+        if (prev_info.GetEndAddress() != cur_info.GetAddress()) {
+            return false;
+        }
 
-        if (block->HasSameProperties(*prev)) {
-            const iterator prev_it{std::prev(it)};
+        // If the block is ipc locked, it must have a count.
+        if ((cur_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
+            cur_info.m_ipc_lock_count == 0) {
+            return false;
+        }
 
-            prev->Add(block->GetNumPages());
-            EraseIt(it);
+        // If the block is device shared, it must have a count.
+        if ((cur_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
+            cur_info.m_device_use_count == 0) {
+            return false;
+        }
 
-            it = prev_it;
-            block = prev;
+        // Advance the iterator.
+        prev = it++;
+    }
+
+    // The loop above may not have checked the last block (e.g. when there is
+    // only one), so check it here.
+    if (prev != m_memory_block_tree.cend()) {
+        const KMemoryInfo prev_info = prev->GetMemoryInfo();
+        // If the block is ipc locked, it must have a count.
+        if ((prev_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
+            prev_info.m_ipc_lock_count == 0) {
+            return false;
+        }
+
+        // If the block is device shared, it must have a count.
+        if ((prev_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
+            prev_info.m_device_use_count == 0) {
+            return false;
         }
     }
 
-    if (it != cend()) {
-        const KMemoryBlock* const next{&(*std::next(it))};
-
-        if (block->HasSameProperties(*next)) {
-            block->Add(next->GetNumPages());
-            EraseIt(std::next(it));
-        }
-    }
+    return true;
 }
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h
index e14741b898..9b5873883d 100644
--- a/src/core/hle/kernel/k_memory_block_manager.h
+++ b/src/core/hle/kernel/k_memory_block_manager.h
@@ -4,63 +4,154 @@
 #pragma once
 
 #include <functional>
-#include <list>
 
+#include "common/common_funcs.h"
 #include "common/common_types.h"
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
 #include "core/hle/kernel/k_memory_block.h"
 
 namespace Kernel {
 
+class KMemoryBlockManagerUpdateAllocator {
+public:
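+    // An update can split an existing block at most twice (once at each end of
+    // the affected range), so two preallocated blocks suffice per update.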
+    static constexpr size_t MaxBlocks = 2;
+
+private:
+    KMemoryBlock* m_blocks[MaxBlocks];
+    size_t m_index;
+    KMemoryBlockSlabManager* m_slab_manager;
+
+private:
+    Result Initialize(size_t num_blocks) {
+        // Check num blocks.
+        ASSERT(num_blocks <= MaxBlocks);
+
+        // Set index.
+        m_index = MaxBlocks - num_blocks;
+
+        // Allocate the blocks.
+        for (size_t i = 0; i < num_blocks && i < MaxBlocks; ++i) {
+            m_blocks[m_index + i] = m_slab_manager->Allocate();
+            R_UNLESS(m_blocks[m_index + i] != nullptr, ResultOutOfResource);
+        }
+
+        R_SUCCEED();
+    }
+
+public:
+    KMemoryBlockManagerUpdateAllocator(Result* out_result, KMemoryBlockSlabManager* sm,
+                                       size_t num_blocks = MaxBlocks)
+        : m_blocks(), m_index(MaxBlocks), m_slab_manager(sm) {
+        *out_result = this->Initialize(num_blocks);
+    }
+
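+    // Any blocks that were never handed out (or were returned via Free) go
+    // back to the slab manager.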
+    ~KMemoryBlockManagerUpdateAllocator() {
+        for (const auto& block : m_blocks) {
+            if (block != nullptr) {
+                m_slab_manager->Free(block);
+            }
+        }
+    }
+
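+    // Hand out the next preallocated block; ownership passes to the caller.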
+    KMemoryBlock* Allocate() {
+        ASSERT(m_index < MaxBlocks);
+        ASSERT(m_blocks[m_index] != nullptr);
+        KMemoryBlock* block = nullptr;
+        std::swap(block, m_blocks[m_index++]);
+        return block;
+    }
+
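+    // Return a block to the local cache, or to the slab manager if the cache
+    // is already full.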
+    void Free(KMemoryBlock* block) {
+        ASSERT(m_index <= MaxBlocks);
+        ASSERT(block != nullptr);
+        if (m_index == 0) {
+            m_slab_manager->Free(block);
+        } else {
+            m_blocks[--m_index] = block;
+        }
+    }
+};
+
 class KMemoryBlockManager final {
 public:
-    using MemoryBlockTree = std::list<KMemoryBlock>;
+    using MemoryBlockTree =
+        Common::IntrusiveRedBlackTreeBaseTraits<KMemoryBlock>::TreeType<KMemoryBlock>;
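+    // Member function invoked by UpdateLock on each block; the booleans flag
+    // whether the block touches the left/right edge of the locked range.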
+    using MemoryBlockLockFunction = void (KMemoryBlock::*)(KMemoryPermission new_perm, bool left,
+                                                           bool right);
     using iterator = MemoryBlockTree::iterator;
     using const_iterator = MemoryBlockTree::const_iterator;
 
 public:
-    KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_);
+    KMemoryBlockManager();
+
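+    // Invoked by Finalize for each erased block, so the backing host mapping
+    // can be torn down.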
+    using HostUnmapCallback = std::function<void(VAddr, u64)>;
+
+    Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager);
+    void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback);
 
     iterator end() {
-        return memory_block_tree.end();
+        return m_memory_block_tree.end();
     }
     const_iterator end() const {
-        return memory_block_tree.end();
+        return m_memory_block_tree.end();
     }
     const_iterator cend() const {
-        return memory_block_tree.cend();
+        return m_memory_block_tree.cend();
     }
 
-    iterator FindIterator(VAddr addr);
+    VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
+                       size_t alignment, size_t offset, size_t guard_pages) const;
 
-    VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
-                       std::size_t align, std::size_t offset, std::size_t guard_pages);
+    void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
+                KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
+                KMemoryBlockDisableMergeAttribute set_disable_attr,
+                KMemoryBlockDisableMergeAttribute clear_disable_attr);
+    void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
+                    MemoryBlockLockFunction lock_func, KMemoryPermission perm);
 
-    void Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
-                KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, KMemoryState state,
-                KMemoryPermission perm, KMemoryAttribute attribute);
+    void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+                       size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
+                       KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
+                       KMemoryAttribute attr);
 
-    void Update(VAddr addr, std::size_t num_pages, KMemoryState state,
-                KMemoryPermission perm = KMemoryPermission::None,
-                KMemoryAttribute attribute = KMemoryAttribute::None);
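+    // Find the block containing the given address. Note: the one-page dummy
+    // key assumes the tree comparator treats overlapping ranges as equivalent.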
+    iterator FindIterator(VAddr address) const {
+        return m_memory_block_tree.find(KMemoryBlock(
+            address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None));
+    }
 
-    using LockFunc = std::function<void(iterator, KMemoryPermission)>;
-    void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
-                    KMemoryPermission perm);
+    const KMemoryBlock* FindBlock(VAddr address) const {
+        if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) {
+            return std::addressof(*it);
+        }
 
-    using IterateFunc = std::function<void(const KMemoryInfo&)>;
-    void IterateForRange(VAddr start, VAddr end, IterateFunc&& func);
+        return nullptr;
+    }
 
-    KMemoryBlock& FindBlock(VAddr addr) {
-        return *FindIterator(addr);
+    // Debug.
+    bool CheckState() const;
+
+private:
+    void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+                           size_t num_pages);
+
+    MemoryBlockTree m_memory_block_tree;
+    VAddr m_start_address{};
+    VAddr m_end_address{};
+};
+
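+// RAII auditor: asserts the block tree satisfies CheckState() on both entry to
+// and exit from a scope that mutates it.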
+class KScopedMemoryBlockManagerAuditor {
+public:
+    explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager* m) : m_manager(m) {
+        ASSERT(m_manager->CheckState());
+    }
+    explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager& m)
+        : KScopedMemoryBlockManagerAuditor(std::addressof(m)) {}
+    ~KScopedMemoryBlockManagerAuditor() {
+        ASSERT(m_manager->CheckState());
     }
 
 private:
-    void MergeAdjacent(iterator it, iterator& next_it);
-
-    [[maybe_unused]] const VAddr start_addr;
-    [[maybe_unused]] const VAddr end_addr;
-
-    MemoryBlockTree memory_block_tree;
+    KMemoryBlockManager* m_manager;
 };
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 5b0a9963a8..6467115056 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -331,7 +331,7 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag
 
     // Set all the allocated memory.
     for (const auto& block : out->Nodes()) {
-        std::memset(system.DeviceMemory().GetPointer(block.GetAddress()), fill_pattern,
+        std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
                     block.GetSize());
     }
 
diff --git a/src/core/hle/kernel/k_page_buffer.cpp b/src/core/hle/kernel/k_page_buffer.cpp
index 1a0bf44393..0c16dded4b 100644
--- a/src/core/hle/kernel/k_page_buffer.cpp
+++ b/src/core/hle/kernel/k_page_buffer.cpp
@@ -12,7 +12,7 @@ namespace Kernel {
 
 KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) {
     ASSERT(Common::IsAligned(phys_addr, PageSize));
-    return reinterpret_cast<KPageBuffer*>(system.DeviceMemory().GetPointer(phys_addr));
+    return system.DeviceMemory().GetPointer<KPageBuffer>(phys_addr);
 }
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index d975de8449..307e491cb5 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -25,7 +25,7 @@ namespace {
 
 using namespace Common::Literals;
 
-constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
+constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
     switch (as_type) {
     case FileSys::ProgramAddressSpaceType::Is32Bit:
     case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
@@ -43,27 +43,29 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT
 } // namespace
 
 KPageTable::KPageTable(Core::System& system_)
-    : general_lock{system_.Kernel()}, map_physical_memory_lock{system_.Kernel()}, system{system_} {}
+    : m_general_lock{system_.Kernel()},
+      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {}
 
 KPageTable::~KPageTable() = default;
 
 Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
-                                        VAddr code_addr, std::size_t code_size,
+                                        VAddr code_addr, size_t code_size,
+                                        KMemoryBlockSlabManager* mem_block_slab_manager,
                                         KMemoryManager::Pool pool) {
 
     const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
-        return KAddressSpaceInfo::GetAddressSpaceStart(address_space_width, type);
+        return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
     };
     const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) {
-        return KAddressSpaceInfo::GetAddressSpaceSize(address_space_width, type);
+        return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
     };
 
     //  Set our width and heap/alias sizes
-    address_space_width = GetAddressSpaceWidthFromType(as_type);
+    m_address_space_width = GetAddressSpaceWidthFromType(as_type);
     const VAddr start = 0;
-    const VAddr end{1ULL << address_space_width};
-    std::size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
-    std::size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
+    const VAddr end{1ULL << m_address_space_width};
+    size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
+    size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
 
     ASSERT(code_addr < code_addr + code_size);
     ASSERT(code_addr + code_size - 1 <= end - 1);
@@ -75,66 +77,65 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
     }
 
     // Set code regions and determine remaining
-    constexpr std::size_t RegionAlignment{2_MiB};
+    constexpr size_t RegionAlignment{2_MiB};
     VAddr process_code_start{};
     VAddr process_code_end{};
-    std::size_t stack_region_size{};
-    std::size_t kernel_map_region_size{};
+    size_t stack_region_size{};
+    size_t kernel_map_region_size{};
 
-    if (address_space_width == 39) {
+    if (m_address_space_width == 39) {
         alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
         heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
         stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack);
         kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
-        code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
-        code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
-        alias_code_region_start = code_region_start;
-        alias_code_region_end = code_region_end;
+        m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
+        m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
+        m_alias_code_region_start = m_code_region_start;
+        m_alias_code_region_end = m_code_region_end;
         process_code_start = Common::AlignDown(code_addr, RegionAlignment);
         process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment);
     } else {
         stack_region_size = 0;
         kernel_map_region_size = 0;
-        code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
-        code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
-        stack_region_start = code_region_start;
-        alias_code_region_start = code_region_start;
-        alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
-                                GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
-        stack_region_end = code_region_end;
-        kernel_map_region_start = code_region_start;
-        kernel_map_region_end = code_region_end;
-        process_code_start = code_region_start;
-        process_code_end = code_region_end;
+        m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
+        m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
+        m_stack_region_start = m_code_region_start;
+        m_alias_code_region_start = m_code_region_start;
+        m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
+                                  GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
+        m_stack_region_end = m_code_region_end;
+        m_kernel_map_region_start = m_code_region_start;
+        m_kernel_map_region_end = m_code_region_end;
+        process_code_start = m_code_region_start;
+        process_code_end = m_code_region_end;
     }
 
     // Set other basic fields
-    is_aslr_enabled = enable_aslr;
-    address_space_start = start;
-    address_space_end = end;
-    is_kernel = false;
+    m_enable_aslr = enable_aslr;
+    m_enable_device_address_space_merge = false;
+    m_address_space_start = start;
+    m_address_space_end = end;
+    m_is_kernel = false;
+    m_memory_block_slab_manager = mem_block_slab_manager;
 
     // Determine the region in which we can place the alias, heap, stack, and kernel-map regions
     VAddr alloc_start{};
-    std::size_t alloc_size{};
-    if ((process_code_start - code_region_start) >= (end - process_code_end)) {
-        alloc_start = code_region_start;
-        alloc_size = process_code_start - code_region_start;
+    size_t alloc_size{};
+    if ((process_code_start - m_code_region_start) >= (end - process_code_end)) {
+        alloc_start = m_code_region_start;
+        alloc_size = process_code_start - m_code_region_start;
     } else {
         alloc_start = process_code_end;
         alloc_size = end - process_code_end;
     }
-    const std::size_t needed_size{
-        (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)};
-    if (alloc_size < needed_size) {
-        ASSERT(false);
-        return ResultOutOfMemory;
-    }
+    const size_t needed_size =
+        (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size);
+    R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory);
 
-    const std::size_t remaining_size{alloc_size - needed_size};
+    const size_t remaining_size{alloc_size - needed_size};
 
     // Determine random placements for each region
-    std::size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{};
+    size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{};
     if (enable_aslr) {
         alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
                     RegionAlignment;
@@ -147,117 +148,130 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
     }
 
     // Setup heap and alias regions
-    alias_region_start = alloc_start + alias_rnd;
-    alias_region_end = alias_region_start + alias_region_size;
-    heap_region_start = alloc_start + heap_rnd;
-    heap_region_end = heap_region_start + heap_region_size;
+    m_alias_region_start = alloc_start + alias_rnd;
+    m_alias_region_end = m_alias_region_start + alias_region_size;
+    m_heap_region_start = alloc_start + heap_rnd;
+    m_heap_region_end = m_heap_region_start + heap_region_size;
 
     if (alias_rnd <= heap_rnd) {
-        heap_region_start += alias_region_size;
-        heap_region_end += alias_region_size;
+        m_heap_region_start += alias_region_size;
+        m_heap_region_end += alias_region_size;
     } else {
-        alias_region_start += heap_region_size;
-        alias_region_end += heap_region_size;
+        m_alias_region_start += heap_region_size;
+        m_alias_region_end += heap_region_size;
     }
 
     // Setup stack region
     if (stack_region_size) {
-        stack_region_start = alloc_start + stack_rnd;
-        stack_region_end = stack_region_start + stack_region_size;
+        m_stack_region_start = alloc_start + stack_rnd;
+        m_stack_region_end = m_stack_region_start + stack_region_size;
 
         if (alias_rnd < stack_rnd) {
-            stack_region_start += alias_region_size;
-            stack_region_end += alias_region_size;
+            m_stack_region_start += alias_region_size;
+            m_stack_region_end += alias_region_size;
         } else {
-            alias_region_start += stack_region_size;
-            alias_region_end += stack_region_size;
+            m_alias_region_start += stack_region_size;
+            m_alias_region_end += stack_region_size;
         }
 
         if (heap_rnd < stack_rnd) {
-            stack_region_start += heap_region_size;
-            stack_region_end += heap_region_size;
+            m_stack_region_start += heap_region_size;
+            m_stack_region_end += heap_region_size;
         } else {
-            heap_region_start += stack_region_size;
-            heap_region_end += stack_region_size;
+            m_heap_region_start += stack_region_size;
+            m_heap_region_end += stack_region_size;
         }
     }
 
     // Setup kernel map region
     if (kernel_map_region_size) {
-        kernel_map_region_start = alloc_start + kmap_rnd;
-        kernel_map_region_end = kernel_map_region_start + kernel_map_region_size;
+        m_kernel_map_region_start = alloc_start + kmap_rnd;
+        m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size;
 
         if (alias_rnd < kmap_rnd) {
-            kernel_map_region_start += alias_region_size;
-            kernel_map_region_end += alias_region_size;
+            m_kernel_map_region_start += alias_region_size;
+            m_kernel_map_region_end += alias_region_size;
         } else {
-            alias_region_start += kernel_map_region_size;
-            alias_region_end += kernel_map_region_size;
+            m_alias_region_start += kernel_map_region_size;
+            m_alias_region_end += kernel_map_region_size;
         }
 
         if (heap_rnd < kmap_rnd) {
-            kernel_map_region_start += heap_region_size;
-            kernel_map_region_end += heap_region_size;
+            m_kernel_map_region_start += heap_region_size;
+            m_kernel_map_region_end += heap_region_size;
         } else {
-            heap_region_start += kernel_map_region_size;
-            heap_region_end += kernel_map_region_size;
+            m_heap_region_start += kernel_map_region_size;
+            m_heap_region_end += kernel_map_region_size;
         }
 
         if (stack_region_size) {
             if (stack_rnd < kmap_rnd) {
-                kernel_map_region_start += stack_region_size;
-                kernel_map_region_end += stack_region_size;
+                m_kernel_map_region_start += stack_region_size;
+                m_kernel_map_region_end += stack_region_size;
             } else {
-                stack_region_start += kernel_map_region_size;
-                stack_region_end += kernel_map_region_size;
+                m_stack_region_start += kernel_map_region_size;
+                m_stack_region_end += kernel_map_region_size;
             }
         }
     }
 
     // Set heap members
-    current_heap_end = heap_region_start;
-    max_heap_size = 0;
-    max_physical_memory_size = 0;
+    m_current_heap_end = m_heap_region_start;
+    m_max_heap_size = 0;
+    m_max_physical_memory_size = 0;
 
     // Ensure that the regions we chose are inside our address space
     auto IsInAddressSpace = [&](VAddr addr) {
-        return address_space_start <= addr && addr <= address_space_end;
+        return m_address_space_start <= addr && addr <= m_address_space_end;
     };
-    ASSERT(IsInAddressSpace(alias_region_start));
-    ASSERT(IsInAddressSpace(alias_region_end));
-    ASSERT(IsInAddressSpace(heap_region_start));
-    ASSERT(IsInAddressSpace(heap_region_end));
-    ASSERT(IsInAddressSpace(stack_region_start));
-    ASSERT(IsInAddressSpace(stack_region_end));
-    ASSERT(IsInAddressSpace(kernel_map_region_start));
-    ASSERT(IsInAddressSpace(kernel_map_region_end));
+    ASSERT(IsInAddressSpace(m_alias_region_start));
+    ASSERT(IsInAddressSpace(m_alias_region_end));
+    ASSERT(IsInAddressSpace(m_heap_region_start));
+    ASSERT(IsInAddressSpace(m_heap_region_end));
+    ASSERT(IsInAddressSpace(m_stack_region_start));
+    ASSERT(IsInAddressSpace(m_stack_region_end));
+    ASSERT(IsInAddressSpace(m_kernel_map_region_start));
+    ASSERT(IsInAddressSpace(m_kernel_map_region_end));
 
     // Ensure that we selected regions that don't overlap
-    const VAddr alias_start{alias_region_start};
-    const VAddr alias_last{alias_region_end - 1};
-    const VAddr heap_start{heap_region_start};
-    const VAddr heap_last{heap_region_end - 1};
-    const VAddr stack_start{stack_region_start};
-    const VAddr stack_last{stack_region_end - 1};
-    const VAddr kmap_start{kernel_map_region_start};
-    const VAddr kmap_last{kernel_map_region_end - 1};
+    const VAddr alias_start{m_alias_region_start};
+    const VAddr alias_last{m_alias_region_end - 1};
+    const VAddr heap_start{m_heap_region_start};
+    const VAddr heap_last{m_heap_region_end - 1};
+    const VAddr stack_start{m_stack_region_start};
+    const VAddr stack_last{m_stack_region_end - 1};
+    const VAddr kmap_start{m_kernel_map_region_start};
+    const VAddr kmap_last{m_kernel_map_region_end - 1};
     ASSERT(alias_last < heap_start || heap_last < alias_start);
     ASSERT(alias_last < stack_start || stack_last < alias_start);
     ASSERT(alias_last < kmap_start || kmap_last < alias_start);
     ASSERT(heap_last < stack_start || stack_last < heap_start);
     ASSERT(heap_last < kmap_start || kmap_last < heap_start);
 
-    current_heap_end = heap_region_start;
-    max_heap_size = 0;
-    mapped_physical_memory_size = 0;
-    memory_pool = pool;
+    m_current_heap_end = m_heap_region_start;
+    m_max_heap_size = 0;
+    m_mapped_physical_memory_size = 0;
+    m_memory_pool = pool;
 
-    page_table_impl.Resize(address_space_width, PageBits);
+    m_page_table_impl = std::make_unique<Common::PageTable>();
+    m_page_table_impl->Resize(m_address_space_width, PageBits);
 
-    return InitializeMemoryLayout(start, end);
+    // Initialize our memory block manager.
+    R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
+                                               m_memory_block_slab_manager));
 }
 
-Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state,
+void KPageTable::Finalize() {
+    // Finalize memory blocks.
+    m_memory_block_manager.Finalize(m_memory_block_slab_manager, [&](VAddr addr, u64 size) {
+        m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size);
+    });
+
+    // Close the backing page table, as the destructor is not called for guest objects.
+    m_page_table_impl.reset();
+}
+
+Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState state,
                                   KMemoryPermission perm) {
     const u64 size{num_pages * PageSize};
 
@@ -265,52 +279,76 @@ Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryStat
     R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Verify that the destination memory is unmapped.
     R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
                                  KMemoryPermission::None, KMemoryPermission::None,
                                  KMemoryAttribute::None, KMemoryAttribute::None));
+
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager);
+    R_TRY(allocator_result);
+
+    // Allocate and open.
     KPageGroup pg;
-    R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
+    R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, num_pages,
-        KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option)));
+        KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option)));
 
     R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup));
 
-    block_manager->Update(addr, num_pages, state, perm);
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
+Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size) {
     // Validate the mapping request.
     R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
              ResultInvalidMemoryRegion);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Verify that the source memory is normal heap.
     KMemoryState src_state{};
     KMemoryPermission src_perm{};
-    std::size_t num_src_allocator_blocks{};
+    size_t num_src_allocator_blocks{};
     R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks,
                                  src_address, size, KMemoryState::All, KMemoryState::Normal,
                                  KMemoryPermission::All, KMemoryPermission::UserReadWrite,
                                  KMemoryAttribute::All, KMemoryAttribute::None));
 
     // Verify that the destination memory is unmapped.
-    std::size_t num_dst_allocator_blocks{};
+    size_t num_dst_allocator_blocks{};
     R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All,
                                  KMemoryState::Free, KMemoryPermission::None,
                                  KMemoryPermission::None, KMemoryAttribute::None,
                                  KMemoryAttribute::None));
 
+    // Create an update allocator for the source.
+    Result src_allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_src_allocator_blocks);
+    R_TRY(src_allocator_result);
+
+    // Create an update allocator for the destination.
+    Result dst_allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_dst_allocator_blocks);
+    R_TRY(dst_allocator_result);
+
     // Map the code memory.
     {
         // Determine the number of pages being operated on.
-        const std::size_t num_pages = size / PageSize;
+        const size_t num_pages = size / PageSize;
 
         // Create page groups for the memory being mapped.
         KPageGroup pg;
@@ -335,33 +373,37 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size
         unprot_guard.Cancel();
 
         // Apply the memory block updates.
-        block_manager->Update(src_address, num_pages, src_state, new_perm,
-                              KMemoryAttribute::Locked);
-        block_manager->Update(dst_address, num_pages, KMemoryState::AliasCode, new_perm,
-                              KMemoryAttribute::None);
+        m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
+                                      src_state, new_perm, KMemoryAttribute::Locked,
+                                      KMemoryBlockDisableMergeAttribute::Locked,
+                                      KMemoryBlockDisableMergeAttribute::None);
+        m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
+                                      KMemoryState::AliasCode, new_perm, KMemoryAttribute::None,
+                                      KMemoryBlockDisableMergeAttribute::Normal,
+                                      KMemoryBlockDisableMergeAttribute::None);
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
                                    ICacheInvalidationStrategy icache_invalidation_strategy) {
     // Validate the mapping request.
     R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
              ResultInvalidMemoryRegion);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Verify that the source memory is locked normal heap.
-    std::size_t num_src_allocator_blocks{};
+    size_t num_src_allocator_blocks{};
     R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
                                  KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
                                  KMemoryPermission::None, KMemoryAttribute::All,
                                  KMemoryAttribute::Locked));
 
     // Verify that the destination memory is aliasable code.
-    std::size_t num_dst_allocator_blocks{};
+    size_t num_dst_allocator_blocks{};
     R_TRY(this->CheckMemoryStateContiguous(
         std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
         KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
@@ -370,7 +412,7 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
     // Determine whether any pages being unmapped are code.
     bool any_code_pages = false;
     {
-        KMemoryBlockManager::const_iterator it = block_manager->FindIterator(dst_address);
+        KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address);
         while (true) {
             // Get the memory info.
             const KMemoryInfo info = it->GetMemoryInfo();
@@ -396,9 +438,9 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
     SCOPE_EXIT({
         if (reprotected_pages && any_code_pages) {
             if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) {
-                system.InvalidateCpuInstructionCacheRange(dst_address, size);
+                m_system.InvalidateCpuInstructionCacheRange(dst_address, size);
             } else {
-                system.InvalidateCpuInstructionCaches();
+                m_system.InvalidateCpuInstructionCaches();
             }
         }
     });
@@ -406,7 +448,21 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
     // Unmap.
     {
         // Determine the number of pages being operated on.
-        const std::size_t num_pages = size / PageSize;
+        const size_t num_pages = size / PageSize;
+
+        // Create an update allocator for the source.
+        Result src_allocator_result{ResultSuccess};
+        KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+                                                         m_memory_block_slab_manager,
+                                                         num_src_allocator_blocks);
+        R_TRY(src_allocator_result);
+
+        // Create an update allocator for the destination.
+        Result dst_allocator_result{ResultSuccess};
+        KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+                                                         m_memory_block_slab_manager,
+                                                         num_dst_allocator_blocks);
+        R_TRY(dst_allocator_result);
 
         // Unmap the aliased copy of the pages.
         R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
@@ -416,73 +472,34 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
                       OperationType::ChangePermissions));
 
         // Apply the memory block updates.
-        block_manager->Update(dst_address, num_pages, KMemoryState::None);
-        block_manager->Update(src_address, num_pages, KMemoryState::Normal,
-                              KMemoryPermission::UserReadWrite);
+        m_memory_block_manager.Update(
+            std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
+            KMemoryPermission::None, KMemoryAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
+        m_memory_block_manager.Update(
+            std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal,
+            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
 
         // Note that we reprotected pages.
         reprotected_pages = true;
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
-                               std::size_t num_pages, std::size_t alignment, std::size_t offset,
-                               std::size_t guard_pages) {
+VAddr KPageTable::FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
+                               size_t alignment, size_t offset, size_t guard_pages) {
     VAddr address = 0;
 
     if (num_pages <= region_num_pages) {
         if (this->IsAslrEnabled()) {
-            // Try to directly find a free area up to 8 times.
-            for (std::size_t i = 0; i < 8; i++) {
-                const std::size_t random_offset =
-                    KSystemControl::GenerateRandomRange(
-                        0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) *
-                    alignment;
-                const VAddr candidate =
-                    Common::AlignDown((region_start + random_offset), alignment) + offset;
-
-                KMemoryInfo info = this->QueryInfoImpl(candidate);
-
-                if (info.state != KMemoryState::Free) {
-                    continue;
-                }
-                if (region_start > candidate) {
-                    continue;
-                }
-                if (info.GetAddress() + guard_pages * PageSize > candidate) {
-                    continue;
-                }
-
-                const VAddr candidate_end = candidate + (num_pages + guard_pages) * PageSize - 1;
-                if (candidate_end > info.GetLastAddress()) {
-                    continue;
-                }
-                if (candidate_end > region_start + region_num_pages * PageSize - 1) {
-                    continue;
-                }
-
-                address = candidate;
-                break;
-            }
-            // Fall back to finding the first free area with a random offset.
-            if (address == 0) {
-                // NOTE: Nintendo does not account for guard pages here.
-                // This may theoretically cause an offset to be chosen that cannot be mapped. We
-                // will account for guard pages.
-                const std::size_t offset_pages = KSystemControl::GenerateRandomRange(
-                    0, region_num_pages - num_pages - guard_pages);
-                address = block_manager->FindFreeArea(region_start + offset_pages * PageSize,
-                                                      region_num_pages - offset_pages, num_pages,
-                                                      alignment, offset, guard_pages);
-            }
+            UNIMPLEMENTED();
         }
-
         // Find the first free area.
         if (address == 0) {
-            address = block_manager->FindFreeArea(region_start, region_num_pages, num_pages,
-                                                  alignment, offset, guard_pages);
+            address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages,
+                                                          alignment, offset, guard_pages);
         }
     }
 
@@ -500,7 +517,8 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     // Begin traversal.
     Common::PageTable::TraversalContext context;
     Common::PageTable::TraversalEntry next_entry;
-    R_UNLESS(page_table_impl.BeginTraversal(next_entry, context, addr), ResultInvalidCurrentMemory);
+    R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, addr),
+             ResultInvalidCurrentMemory);
 
     // Prepare tracking variables.
     PAddr cur_addr = next_entry.phys_addr;
@@ -508,9 +526,9 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     size_t tot_size = cur_size;
 
     // Iterate, adding to group as we go.
-    const auto& memory_layout = system.Kernel().MemoryLayout();
+    const auto& memory_layout = m_system.Kernel().MemoryLayout();
     while (tot_size < size) {
-        R_UNLESS(page_table_impl.ContinueTraversal(next_entry, context),
+        R_UNLESS(m_page_table_impl->ContinueTraversal(next_entry, context),
                  ResultInvalidCurrentMemory);
 
         if (next_entry.phys_addr != (cur_addr + cur_size)) {
@@ -538,7 +556,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
     R_TRY(pg.AddBlock(cur_addr, cur_pages));
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
@@ -546,7 +564,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
 
     const size_t size = num_pages * PageSize;
     const auto& pg = pg_ll.Nodes();
-    const auto& memory_layout = system.Kernel().MemoryLayout();
+    const auto& memory_layout = m_system.Kernel().MemoryLayout();
 
     // Empty groups are necessarily invalid.
     if (pg.empty()) {
@@ -573,7 +591,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
     // Begin traversal.
     Common::PageTable::TraversalContext context;
     Common::PageTable::TraversalEntry next_entry;
-    if (!page_table_impl.BeginTraversal(next_entry, context, addr)) {
+    if (!m_page_table_impl->BeginTraversal(next_entry, context, addr)) {
         return false;
     }
 
@@ -584,7 +602,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
 
     // Iterate, comparing expected to actual.
     while (tot_size < size) {
-        if (!page_table_impl.ContinueTraversal(next_entry, context)) {
+        if (!m_page_table_impl->ContinueTraversal(next_entry, context)) {
             return false;
         }
 
@@ -630,11 +648,11 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
     return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
 }
 
-Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
+Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
                                       VAddr src_addr) {
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
-    const std::size_t num_pages{size / PageSize};
+    const size_t num_pages{size / PageSize};
 
     // Check that the memory is mapped in the destination process.
     size_t num_allocator_blocks;
@@ -649,43 +667,51 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTab
                                           KMemoryPermission::None, KMemoryAttribute::All,
                                           KMemoryAttribute::None));
 
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
 
     // Apply the memory block update.
-    block_manager->Update(dst_addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
-                          KMemoryAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages,
+                                  KMemoryState::Free, KMemoryPermission::None,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Normal);
 
-    system.InvalidateCpuInstructionCaches();
+    m_system.InvalidateCpuInstructionCaches();
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
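
A recurring shape in this diff: `CheckMemoryState` now also reports how many memory blocks an update may need (`num_allocator_blocks`), a `KMemoryBlockManagerUpdateAllocator` preallocates that many blocks from the slab manager, and only then is `m_memory_block_manager.Update(...)` invoked, so the block-list update itself can no longer fail partway through. A minimal sketch of that preallocate-then-commit idea, with hypothetical `Slab`/`Node` types (the real allocator differs in detail):

```cpp
#include <array>
#include <cstddef>

// Hypothetical free-list slab of fixed-size nodes.
struct Node {
    Node* next = nullptr;
};

class Slab {
public:
    Node* Allocate() {
        if (m_head == nullptr) {
            return nullptr; // exhausted
        }
        Node* n = m_head;
        m_head = n->next;
        return n;
    }
    void Free(Node* n) {
        n->next = m_head;
        m_head = n;
    }

private:
    Node* m_head = nullptr;
};

class UpdateAllocator {
public:
    static constexpr size_t MaxBlocks = 2;

    // Acquire every node up front: the only failure point is construction,
    // never the later list update that consumes the nodes.
    UpdateAllocator(bool* out_ok, Slab& slab, size_t num_blocks = MaxBlocks) : m_slab{slab} {
        for (size_t i = 0; i < num_blocks; ++i) {
            m_blocks[m_count] = m_slab.Allocate();
            if (m_blocks[m_count] == nullptr) {
                *out_ok = false;
                return;
            }
            ++m_count;
        }
        *out_ok = true;
    }

    // Anything the update did not consume goes back to the slab.
    ~UpdateAllocator() {
        while (m_count > 0) {
            m_slab.Free(m_blocks[--m_count]);
        }
    }

    // Called by the block manager while splitting/merging blocks.
    Node* Pop() {
        return m_blocks[--m_count];
    }

private:
    Slab& m_slab;
    std::array<Node*, MaxBlocks> m_blocks{};
    size_t m_count = 0;
};
```

This is why nearly every call site checks `allocator_result` with `R_TRY` before touching the block manager: the error is surfaced while it is still safe to bail out.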
 
-Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
+Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     // Lock the physical memory lock.
-    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+    KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
 
     // Calculate the last address for convenience.
     const VAddr last_address = address + size - 1;
 
     // Define iteration variables.
     VAddr cur_address;
-    std::size_t mapped_size;
+    size_t mapped_size;
 
     // The entire mapping process can be retried.
     while (true) {
         // Check if the memory is already mapped.
         {
             // Lock the table.
-            KScopedLightLock lk(general_lock);
+            KScopedLightLock lk(m_general_lock);
 
             // Iterate over the memory.
             cur_address = address;
             mapped_size = 0;
 
-            auto it = block_manager->FindIterator(cur_address);
+            auto it = m_memory_block_manager.FindIterator(cur_address);
             while (true) {
                 // Check that the iterator is valid.
-                ASSERT(it != block_manager->end());
+                ASSERT(it != m_memory_block_manager.end());
 
                 // Get the memory info.
                 const KMemoryInfo info = it->GetMemoryInfo();
@@ -716,20 +742,20 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
         {
             // Reserve the memory from the process resource limit.
             KScopedResourceReservation memory_reservation(
-                system.Kernel().CurrentProcess()->GetResourceLimit(),
+                m_system.Kernel().CurrentProcess()->GetResourceLimit(),
                 LimitableResource::PhysicalMemory, size - mapped_size);
             R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
             // Allocate pages for the new memory.
             KPageGroup pg;
-            R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
+            R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess(
                 &pg, (size - mapped_size) / PageSize,
-                KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
+                KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
 
             // Map the memory.
             {
                 // Lock the table.
-                KScopedLightLock lk(general_lock);
+                KScopedLightLock lk(m_general_lock);
 
                 size_t num_allocator_blocks = 0;
 
@@ -739,10 +765,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
                     size_t checked_mapped_size = 0;
                     cur_address = address;
 
-                    auto it = block_manager->FindIterator(cur_address);
+                    auto it = m_memory_block_manager.FindIterator(cur_address);
                     while (true) {
                         // Check that the iterator is valid.
-                        ASSERT(it != block_manager->end());
+                        ASSERT(it != m_memory_block_manager.end());
 
                         // Get the memory info.
                         const KMemoryInfo info = it->GetMemoryInfo();
@@ -782,6 +808,14 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
                     }
                 }
 
+                // Create an update allocator.
+                ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+                Result allocator_result{ResultSuccess};
+                KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                             m_memory_block_slab_manager,
+                                                             num_allocator_blocks);
+                R_TRY(allocator_result);
+
                 // Reset the current tracking address, and make sure we clean up on failure.
                 cur_address = address;
                 auto unmap_guard = detail::ScopeExit([&] {
@@ -791,10 +825,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
                         // Iterate, unmapping the pages.
                         cur_address = address;
 
-                        auto it = block_manager->FindIterator(cur_address);
+                        auto it = m_memory_block_manager.FindIterator(cur_address);
                         while (true) {
                             // Check that the iterator is valid.
-                            ASSERT(it != block_manager->end());
+                            ASSERT(it != m_memory_block_manager.end());
 
                             // Get the memory info.
                             const KMemoryInfo info = it->GetMemoryInfo();
@@ -830,10 +864,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
                 PAddr pg_phys_addr = pg_it->GetAddress();
                 size_t pg_pages = pg_it->GetNumPages();
 
-                auto it = block_manager->FindIterator(cur_address);
+                auto it = m_memory_block_manager.FindIterator(cur_address);
                 while (true) {
                     // Check that the iterator is valid.
-                    ASSERT(it != block_manager->end());
+                    ASSERT(it != m_memory_block_manager.end());
 
                     // Get the memory info.
                     const KMemoryInfo info = it->GetMemoryInfo();
@@ -886,37 +920,37 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
                 memory_reservation.Commit();
 
                 // Increase our tracked mapped size.
-                mapped_physical_memory_size += (size - mapped_size);
+                m_mapped_physical_memory_size += (size - mapped_size);
 
                 // Update the relevant memory blocks.
-                block_manager->Update(address, size / PageSize, KMemoryState::Free,
-                                      KMemoryPermission::None, KMemoryAttribute::None,
-                                      KMemoryState::Normal, KMemoryPermission::UserReadWrite,
-                                      KMemoryAttribute::None);
+                m_memory_block_manager.UpdateIfMatch(
+                    std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
+                    KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
+                    KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
 
                 // Cancel our guard.
                 unmap_guard.Cancel();
 
-                return ResultSuccess;
+                R_SUCCEED();
             }
         }
     }
 }
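
MapPhysicalMemory takes the table lock twice: once to measure what is already mapped, and again, after allocating pages with the lock dropped, to re-walk the region before committing. A hedged sketch of that optimistic structure, assuming (as the outer `while (true)` suggests) that a mismatch between the two scans restarts the operation; `Measure`/`AllocatePages`/`CommitMapping` are stand-ins, not the real KPageTable interface:

```cpp
#include <cstddef>
#include <mutex>

class Table {
public:
    bool MapPhysical() {
        while (true) {
            size_t mapped_size = 0;
            {
                std::scoped_lock lk{m_lock};
                mapped_size = Measure(); // first scan, under the table lock
            }
            AllocatePages(); // slow path runs without the lock held
            {
                std::scoped_lock lk{m_lock};
                if (Measure() != mapped_size) {
                    continue; // the region changed while unlocked: retry
                }
                CommitMapping(); // map the pages and update the block list
                return true;
            }
        }
    }

private:
    size_t Measure() const { return m_mapped; }
    void AllocatePages() {}
    void CommitMapping() { m_mapped = m_total; }

    std::mutex m_lock;
    size_t m_mapped = 0;
    size_t m_total = 0x10000;
};
```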
 
-Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
+Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
     // Lock the physical memory lock.
-    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+    KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Calculate the last address for convenience.
     const VAddr last_address = address + size - 1;
 
     // Define iteration variables.
     VAddr cur_address = 0;
-    std::size_t mapped_size = 0;
-    std::size_t num_allocator_blocks = 0;
+    size_t mapped_size = 0;
+    size_t num_allocator_blocks = 0;
 
     // Check if the memory is mapped.
     {
@@ -924,10 +958,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
         cur_address = address;
         mapped_size = 0;
 
-        auto it = block_manager->FindIterator(cur_address);
+        auto it = m_memory_block_manager.FindIterator(cur_address);
         while (true) {
             // Check that the iterator is valid.
-            ASSERT(it != block_manager->end());
+            ASSERT(it != m_memory_block_manager.end());
 
             // Get the memory info.
             const KMemoryInfo info = it->GetMemoryInfo();
@@ -1022,6 +1056,13 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
     }
     ASSERT(pg.GetNumPages() == mapped_size / PageSize);
 
+    // Create an update allocator.
+    ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     // Reset the current tracking address, and make sure we clean up on failure.
     cur_address = address;
     auto remap_guard = detail::ScopeExit([&] {
@@ -1030,7 +1071,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
             cur_address = address;
 
             // Iterate over the memory we unmapped.
-            auto it = block_manager->FindIterator(cur_address);
+            auto it = m_memory_block_manager.FindIterator(cur_address);
             auto pg_it = pg.Nodes().begin();
             PAddr pg_phys_addr = pg_it->GetAddress();
             size_t pg_pages = pg_it->GetNumPages();
@@ -1085,10 +1126,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
     });
 
     // Iterate over the memory, unmapping as we go.
-    auto it = block_manager->FindIterator(cur_address);
+    auto it = m_memory_block_manager.FindIterator(cur_address);
     while (true) {
         // Check that the iterator is valid.
-        ASSERT(it != block_manager->end());
+        ASSERT(it != m_memory_block_manager.end());
 
         // Get the memory info.
         const KMemoryInfo info = it->GetMemoryInfo();
@@ -1115,104 +1156,159 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
     }
 
     // Release the memory resource.
-    mapped_physical_memory_size -= mapped_size;
-    auto process{system.Kernel().CurrentProcess()};
+    m_mapped_physical_memory_size -= mapped_size;
+    auto process{m_system.Kernel().CurrentProcess()};
     process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
 
     // Update memory blocks.
-    block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None,
-                          KMemoryAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
+                                  KMemoryState::Free, KMemoryPermission::None,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None);
 
     // TODO(bunnei): This is a workaround until the next set of changes, where we add reference
     // counting for mapped pages. Until then, we must manually close the reference to the page
     // group.
-    system.Kernel().MemoryManager().Close(pg);
+    m_system.Kernel().MemoryManager().Close(pg);
 
     // We succeeded.
     remap_guard.Cancel();
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
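
Both physical-memory paths lean on `detail::ScopeExit` guards (`unmap_guard`, `remap_guard`) that roll back partial work on any early `R_TRY`/`R_UNLESS` return and are cancelled only once everything has succeeded. A minimal scope guard in that spirit:

```cpp
#include <utility>

// Runs the stored lambda on scope exit unless Cancel() was called first.
template <typename F>
class ScopeExit {
public:
    explicit ScopeExit(F&& f) : m_f{std::forward<F>(f)} {}
    ~ScopeExit() {
        if (m_active) {
            m_f();
        }
    }
    void Cancel() {
        m_active = false;
    }

private:
    F m_f;
    bool m_active = true;
};

// Usage: auto guard = ScopeExit{[&] { Rollback(); }}; ... guard.Cancel();
```

Every early return taken between the guard's construction and `Cancel()` triggers the rollback automatically; the success path cancels the guard as its last step.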
 
-Result KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    KScopedLightLock lk(general_lock);
+Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) {
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
 
-    KMemoryState src_state{};
-    CASCADE_CODE(CheckMemoryState(
-        &src_state, nullptr, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias,
-        KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
-        KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
+    // Validate that the source address's state is valid.
+    KMemoryState src_state;
+    size_t num_src_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr,
+                                 std::addressof(num_src_allocator_blocks), src_address, size,
+                                 KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
+                                 KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+                                 KMemoryAttribute::All, KMemoryAttribute::None));
 
-    if (IsRegionMapped(dst_addr, size)) {
-        return ResultInvalidCurrentMemory;
-    }
+    // Validate that the dst address's state is valid.
+    size_t num_dst_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
+                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::None,
+                                 KMemoryAttribute::None));
 
+    // Create an update allocator for the source.
+    Result src_allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_src_allocator_blocks);
+    R_TRY(src_allocator_result);
+
+    // Create an update allocator for the destination.
+    Result dst_allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_dst_allocator_blocks);
+    R_TRY(dst_allocator_result);
+
+    // Map the memory.
     KPageGroup page_linked_list;
-    const std::size_t num_pages{size / PageSize};
-
-    AddRegionToPages(src_addr, num_pages, page_linked_list);
+    const size_t num_pages{size / PageSize};
+    const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
+        KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
+    const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
 
+    AddRegionToPages(src_address, num_pages, page_linked_list);
     {
+        // Reprotect the source as kernel-read/not mapped.
         auto block_guard = detail::ScopeExit([&] {
-            Operate(src_addr, num_pages, KMemoryPermission::UserReadWrite,
+            Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
                     OperationType::ChangePermissions);
         });
-
-        CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::None,
-                             OperationType::ChangePermissions));
-        CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::UserReadWrite));
+        R_TRY(Operate(src_address, num_pages, new_src_perm, OperationType::ChangePermissions));
+        R_TRY(MapPages(dst_address, page_linked_list, KMemoryPermission::UserReadWrite));
 
         block_guard.Cancel();
     }
 
-    block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::None,
-                          KMemoryAttribute::Locked);
-    block_manager->Update(dst_addr, num_pages, KMemoryState::Stack,
-                          KMemoryPermission::UserReadWrite);
+    // Apply the memory block updates.
+    m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
+                                  new_src_perm, new_src_attr,
+                                  KMemoryBlockDisableMergeAttribute::Locked,
+                                  KMemoryBlockDisableMergeAttribute::None);
+    m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
+                                  KMemoryState::Stack, KMemoryPermission::UserReadWrite,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
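
This function also completes the migration from `CASCADE_CODE`/`return ResultSuccess` to the `R_*` result macros. A hedged sketch of what such macros conventionally expand to; yuzu's actual definitions live in src/core/hle/result.h and may differ in detail:

```cpp
// Conventional expansions of the result-handling macros used in this diff.
#define R_SUCCEED() return ResultSuccess

#define R_RETURN(expr) return (expr)

#define R_SUCCEED_IF(cond)                                                                         \
    do {                                                                                           \
        if (cond) {                                                                                \
            R_SUCCEED();                                                                           \
        }                                                                                          \
    } while (0)

#define R_TRY(expr)                                                                                \
    do {                                                                                           \
        if (const auto _r = (expr); _r.IsError()) {                                                \
            return _r;                                                                             \
        }                                                                                          \
    } while (0)

#define R_UNLESS(cond, res)                                                                        \
    do {                                                                                           \
        if (!(cond)) {                                                                             \
            return (res);                                                                          \
        }                                                                                          \
    } while (0)
```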
 
-Result KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    KScopedLightLock lk(general_lock);
+Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size) {
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
 
-    KMemoryState src_state{};
-    CASCADE_CODE(CheckMemoryState(
-        &src_state, nullptr, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias,
-        KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::None,
-        KMemoryAttribute::Mask, KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
+    // Validate that the source address's state is valid.
+    KMemoryState src_state;
+    size_t num_src_allocator_blocks;
+    R_TRY(this->CheckMemoryState(
+        std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks),
+        src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
+        KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead,
+        KMemoryAttribute::All, KMemoryAttribute::Locked));
 
-    KMemoryPermission dst_perm{};
-    CASCADE_CODE(CheckMemoryState(nullptr, &dst_perm, nullptr, nullptr, dst_addr, size,
-                                  KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None,
-                                  KMemoryPermission::None, KMemoryAttribute::Mask,
-                                  KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
+    // Validate that the dst address's state is valid.
+    KMemoryPermission dst_perm;
+    size_t num_dst_allocator_blocks;
+    R_TRY(this->CheckMemoryState(
+        nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks),
+        dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None,
+        KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
+
+    // Create an update allocator for the source.
+    Result src_allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_src_allocator_blocks);
+    R_TRY(src_allocator_result);
+
+    // Create an update allocator for the destination.
+    Result dst_allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_dst_allocator_blocks);
+    R_TRY(dst_allocator_result);
 
     KPageGroup src_pages;
     KPageGroup dst_pages;
-    const std::size_t num_pages{size / PageSize};
+    const size_t num_pages{size / PageSize};
 
-    AddRegionToPages(src_addr, num_pages, src_pages);
-    AddRegionToPages(dst_addr, num_pages, dst_pages);
+    AddRegionToPages(src_address, num_pages, src_pages);
+    AddRegionToPages(dst_address, num_pages, dst_pages);
 
-    if (!dst_pages.IsEqual(src_pages)) {
-        return ResultInvalidMemoryRegion;
-    }
+    R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion);
 
     {
-        auto block_guard = detail::ScopeExit([&] { MapPages(dst_addr, dst_pages, dst_perm); });
+        auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
 
-        CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
-        CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::UserReadWrite,
-                             OperationType::ChangePermissions));
+        R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
+        R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
+                      OperationType::ChangePermissions));
 
         block_guard.Cancel();
     }
 
-    block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::UserReadWrite);
-    block_manager->Update(dst_addr, num_pages, KMemoryState::Free);
+    // Apply the memory block updates.
+    m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
+                                  KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Locked);
+    m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
+                                  KMemoryState::None, KMemoryPermission::None,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Normal);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
@@ -1225,48 +1321,54 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
         if (const auto result{
                 Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
             result.IsError()) {
-            const std::size_t num_pages{(addr - cur_addr) / PageSize};
+            const size_t num_pages{(addr - cur_addr) / PageSize};
 
             ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)
                        .IsSuccess());
 
-            return result;
+            R_RETURN(result);
         }
 
         cur_addr += node.GetNumPages() * PageSize;
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state,
                             KMemoryPermission perm) {
     // Check that the map is in range.
-    const std::size_t num_pages{page_linked_list.GetNumPages()};
-    const std::size_t size{num_pages * PageSize};
+    const size_t num_pages{page_linked_list.GetNumPages()};
+    const size_t size{num_pages * PageSize};
     R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Check the memory state.
     R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
                                  KMemoryPermission::None, KMemoryPermission::None,
                                  KMemoryAttribute::None, KMemoryAttribute::None));
 
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager);
+    R_TRY(allocator_result);
+
     // Map the pages.
     R_TRY(MapPages(address, page_linked_list, perm));
 
     // Update the blocks.
-    block_manager->Update(address, num_pages, state, perm);
+    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
-                            PAddr phys_addr, bool is_pa_valid, VAddr region_start,
-                            std::size_t region_num_pages, KMemoryState state,
-                            KMemoryPermission perm) {
+Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
+                            bool is_pa_valid, VAddr region_start, size_t region_num_pages,
+                            KMemoryState state, KMemoryPermission perm) {
     ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
 
     // Ensure this is a valid map request.
@@ -1275,7 +1377,7 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t
     R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Find a random address to map at.
     VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
@@ -1288,6 +1390,11 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t
                                   KMemoryAttribute::None, KMemoryAttribute::None)
                .IsSuccess());
 
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager);
+    R_TRY(allocator_result);
+
     // Perform mapping operation.
     if (is_pa_valid) {
         R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr));
@@ -1296,11 +1403,13 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t
     }
 
     // Update the blocks.
-    block_manager->Update(addr, num_pages, state, perm);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
 
     // We successfully mapped the pages.
     *out_addr = addr;
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
@@ -1312,60 +1421,80 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
         if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
                                       OperationType::Unmap)};
             result.IsError()) {
-            return result;
+            R_RETURN(result);
         }
 
         cur_addr += node.GetNumPages() * PageSize;
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KPageTable::UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state) {
+Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) {
     // Check that the unmap is in range.
-    const std::size_t num_pages{page_linked_list.GetNumPages()};
-    const std::size_t size{num_pages * PageSize};
-    R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
-
-    // Lock the table.
-    KScopedLightLock lk(general_lock);
-
-    // Check the memory state.
-    R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, state, KMemoryPermission::None,
-                                 KMemoryPermission::None, KMemoryAttribute::All,
-                                 KMemoryAttribute::None));
-
-    // Perform the unmap.
-    R_TRY(UnmapPages(addr, page_linked_list));
-
-    // Update the blocks.
-    block_manager->Update(addr, num_pages, state, KMemoryPermission::None);
-
-    return ResultSuccess;
-}
-
-Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) {
-    // Check that the unmap is in range.
-    const std::size_t size = num_pages * PageSize;
+    const size_t num_pages{page_linked_list.GetNumPages()};
+    const size_t size{num_pages * PageSize};
     R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Check the memory state.
-    std::size_t num_allocator_blocks{};
+    size_t num_allocator_blocks;
     R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
                                  KMemoryState::All, state, KMemoryPermission::None,
                                  KMemoryPermission::None, KMemoryAttribute::All,
                                  KMemoryAttribute::None));
 
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // Perform the unmap.
+    R_TRY(UnmapPages(address, page_linked_list));
+
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
+                                  KMemoryPermission::None, KMemoryAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Normal);
+
+    R_SUCCEED();
+}
+
+Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState state) {
+    // Check that the unmap is in range.
+    const size_t size = num_pages * PageSize;
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check the memory state.
+    size_t num_allocator_blocks{};
+    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+                                 KMemoryState::All, state, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::All,
+                                 KMemoryAttribute::None));
+
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     // Perform the unmap.
     R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap));
 
     // Update the blocks.
-    block_manager->Update(address, num_pages, KMemoryState::Free, KMemoryPermission::None);
+    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
+                                  KMemoryPermission::None, KMemoryAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Normal);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
@@ -1380,7 +1509,7 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n
     R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Check if state allows us to create the group.
     R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
@@ -1390,15 +1519,15 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n
     // Create a new page group for the region.
     R_TRY(this->MakePageGroup(*out, address, num_pages));
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
+Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size,
                                               Svc::MemoryPermission svc_perm) {
     const size_t num_pages = size / PageSize;
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Verify we can change the memory permission.
     KMemoryState old_state;
@@ -1435,105 +1564,101 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
     // Succeed if there's nothing to do.
     R_SUCCEED_IF(old_perm == new_perm && old_state == new_state);
 
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     // Perform mapping operation.
     const auto operation =
         was_x ? OperationType::ChangePermissionsAndRefresh : OperationType::ChangePermissions;
     R_TRY(Operate(addr, num_pages, new_perm, operation));
 
     // Update the blocks.
-    block_manager->Update(addr, num_pages, new_state, new_perm, KMemoryAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None);
 
     // Ensure cache coherency, if we're setting pages as executable.
     if (is_x) {
-        system.InvalidateCpuInstructionCacheRange(addr, size);
+        m_system.InvalidateCpuInstructionCacheRange(addr, size);
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) {
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
-    return block_manager->FindBlock(addr).GetMemoryInfo();
+    return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo();
 }
 
 KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
     if (!Contains(addr, 1)) {
-        return {address_space_end,       0 - address_space_end,  KMemoryState::Inaccessible,
-                KMemoryPermission::None, KMemoryAttribute::None, KMemoryPermission::None};
+        return {
+            .m_address = m_address_space_end,
+            .m_size = 0 - m_address_space_end,
+            .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
+            .m_device_disable_merge_left_count = 0,
+            .m_device_disable_merge_right_count = 0,
+            .m_ipc_lock_count = 0,
+            .m_device_use_count = 0,
+            .m_ipc_disable_merge_count = 0,
+            .m_permission = KMemoryPermission::None,
+            .m_attribute = KMemoryAttribute::None,
+            .m_original_permission = KMemoryPermission::None,
+            .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None,
+        };
     }
 
     return QueryInfoImpl(addr);
 }
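
In the sentinel block returned for out-of-range queries, `.m_size = 0 - m_address_space_end` relies on unsigned wraparound: the result is 2^64 minus the address-space end, i.e. the span from the end of the address space to the top of the 64-bit range. For example, with a hypothetical 39-bit address-space end of 0x80'0000'0000:

```cpp
#include <cstdint>

// Unsigned wraparound: 0 - end == 2^64 - end.
static_assert(static_cast<uint64_t>(0) - 0x80'0000'0000ULL == 0xFFFF'FF80'0000'0000ULL);
```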
 
-Result KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) {
-    KScopedLightLock lk(general_lock);
-
-    KMemoryState state{};
-    KMemoryAttribute attribute{};
-
-    R_TRY(CheckMemoryState(&state, nullptr, &attribute, nullptr, addr, size,
-                           KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
-                           KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
-                           KMemoryPermission::All, KMemoryPermission::UserReadWrite,
-                           KMemoryAttribute::Mask, KMemoryAttribute::None,
-                           KMemoryAttribute::IpcAndDeviceMapped));
-
-    block_manager->Update(addr, size / PageSize, state, perm, attribute | KMemoryAttribute::Locked);
-
-    return ResultSuccess;
-}
-
-Result KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
-    KScopedLightLock lk(general_lock);
-
-    KMemoryState state{};
-
-    R_TRY(CheckMemoryState(&state, nullptr, nullptr, nullptr, addr, size,
-                           KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
-                           KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
-                           KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::Mask,
-                           KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
-
-    block_manager->Update(addr, size / PageSize, state, KMemoryPermission::UserReadWrite);
-    return ResultSuccess;
-}
-
-Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
-                                       Svc::MemoryPermission svc_perm) {
+Result KPageTable::SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm) {
     const size_t num_pages = size / PageSize;
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Verify we can change the memory permission.
     KMemoryState old_state;
     KMemoryPermission old_perm;
-    R_TRY(this->CheckMemoryState(
-        std::addressof(old_state), std::addressof(old_perm), nullptr, nullptr, addr, size,
-        KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, KMemoryPermission::None,
-        KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
+                                 std::addressof(num_allocator_blocks), addr, size,
+                                 KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect,
+                                 KMemoryPermission::None, KMemoryPermission::None,
+                                 KMemoryAttribute::All, KMemoryAttribute::None));
 
     // Determine new perm.
     const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
     R_SUCCEED_IF(old_perm == new_perm);
 
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     // Perform mapping operation.
     R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
 
     // Update the blocks.
-    block_manager->Update(addr, num_pages, old_state, new_perm, KMemoryAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) {
+Result KPageTable::SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr) {
     const size_t num_pages = size / PageSize;
     ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
            KMemoryAttribute::SetMask);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Verify we can change the memory attribute.
     KMemoryState old_state;
@@ -1548,6 +1673,12 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3
         KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
         AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
 
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     // Determine the new attribute.
     const KMemoryAttribute new_attr =
         static_cast<KMemoryAttribute>(((old_attr & static_cast<KMemoryAttribute>(~mask)) |
@@ -1557,123 +1688,142 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3
     this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh);
 
     // Update the blocks.
-    block_manager->Update(addr, num_pages, old_state, old_perm, new_attr);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm,
+                                  new_attr, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KPageTable::SetMaxHeapSize(std::size_t size) {
+Result KPageTable::SetMaxHeapSize(size_t size) {
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Only process page tables are allowed to set heap size.
     ASSERT(!this->IsKernel());
 
-    max_heap_size = size;
+    m_max_heap_size = size;
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
+Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
     // Lock the physical memory mutex.
-    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+    KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
 
     // Try to perform a reduction in heap, instead of an extension.
     VAddr cur_address{};
-    std::size_t allocation_size{};
+    size_t allocation_size{};
     {
         // Lock the table.
-        KScopedLightLock lk(general_lock);
+        KScopedLightLock lk(m_general_lock);
 
         // Validate that setting heap size is possible at all.
-        R_UNLESS(!is_kernel, ResultOutOfMemory);
-        R_UNLESS(size <= static_cast<std::size_t>(heap_region_end - heap_region_start),
+        R_UNLESS(!m_is_kernel, ResultOutOfMemory);
+        R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start),
                  ResultOutOfMemory);
-        R_UNLESS(size <= max_heap_size, ResultOutOfMemory);
+        R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory);
 
         if (size < GetHeapSize()) {
             // The size being requested is less than the current size, so we need to free the end of
             // the heap.
 
             // Validate memory state.
-            std::size_t num_allocator_blocks;
+            size_t num_allocator_blocks;
             R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
-                                         heap_region_start + size, GetHeapSize() - size,
+                                         m_heap_region_start + size, GetHeapSize() - size,
                                          KMemoryState::All, KMemoryState::Normal,
                                          KMemoryPermission::All, KMemoryPermission::UserReadWrite,
                                          KMemoryAttribute::All, KMemoryAttribute::None));
 
+            // Create an update allocator.
+            Result allocator_result{ResultSuccess};
+            KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                         m_memory_block_slab_manager,
+                                                         num_allocator_blocks);
+            R_TRY(allocator_result);
+
             // Unmap the end of the heap.
             const auto num_pages = (GetHeapSize() - size) / PageSize;
-            R_TRY(Operate(heap_region_start + size, num_pages, KMemoryPermission::None,
+            R_TRY(Operate(m_heap_region_start + size, num_pages, KMemoryPermission::None,
                           OperationType::Unmap));
 
             // Release the memory from the resource limit.
-            system.Kernel().CurrentProcess()->GetResourceLimit()->Release(
+            m_system.Kernel().CurrentProcess()->GetResourceLimit()->Release(
                 LimitableResource::PhysicalMemory, num_pages * PageSize);
 
             // Apply the memory block update.
-            block_manager->Update(heap_region_start + size, num_pages, KMemoryState::Free,
-                                  KMemoryPermission::None, KMemoryAttribute::None);
+            m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
+                                          num_pages, KMemoryState::Free, KMemoryPermission::None,
+                                          KMemoryAttribute::None,
+                                          KMemoryBlockDisableMergeAttribute::None,
+                                          size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
+                                                    : KMemoryBlockDisableMergeAttribute::None);
 
             // Update the current heap end.
-            current_heap_end = heap_region_start + size;
+            m_current_heap_end = m_heap_region_start + size;
 
             // Set the output.
-            *out = heap_region_start;
-            return ResultSuccess;
+            *out = m_heap_region_start;
+            R_SUCCEED();
         } else if (size == GetHeapSize()) {
             // The size requested is exactly the current size.
-            *out = heap_region_start;
-            return ResultSuccess;
+            *out = m_heap_region_start;
+            R_SUCCEED();
         } else {
             // We have to allocate memory. Determine how much to allocate and where while the table
             // is locked.
-            cur_address = current_heap_end;
+            cur_address = m_current_heap_end;
             allocation_size = size - GetHeapSize();
         }
     }
 
     // Reserve memory for the heap extension.
     KScopedResourceReservation memory_reservation(
-        system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
+        m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
         allocation_size);
     R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
     // Allocate pages for the heap extension.
     KPageGroup pg;
-    R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
+    R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, allocation_size / PageSize,
-        KMemoryManager::EncodeOption(memory_pool, allocation_option)));
+        KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
 
     // Clear all the newly allocated pages.
     for (const auto& it : pg.Nodes()) {
-        std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value,
+        std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
                     it.GetSize());
     }
 
     // Map the pages.
     {
         // Lock the table.
-        KScopedLightLock lk(general_lock);
+        KScopedLightLock lk(m_general_lock);
 
         // Ensure that the heap hasn't changed since we began executing.
-        ASSERT(cur_address == current_heap_end);
+        ASSERT(cur_address == m_current_heap_end);
 
         // Check the memory state.
-        std::size_t num_allocator_blocks{};
-        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), current_heap_end,
+        size_t num_allocator_blocks{};
+        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end,
                                      allocation_size, KMemoryState::All, KMemoryState::Free,
                                      KMemoryPermission::None, KMemoryPermission::None,
                                      KMemoryAttribute::None, KMemoryAttribute::None));
 
+        // Create an update allocator.
+        Result allocator_result{ResultSuccess};
+        KMemoryBlockManagerUpdateAllocator allocator(
+            std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
+        R_TRY(allocator_result);
+
         // Map the pages.
         const auto num_pages = allocation_size / PageSize;
-        R_TRY(Operate(current_heap_end, num_pages, pg, OperationType::MapGroup));
+        R_TRY(Operate(m_current_heap_end, num_pages, pg, OperationType::MapGroup));
 
         // Clear all the newly allocated pages.
-        for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
-            std::memset(system.Memory().GetPointer(current_heap_end + (cur_page * PageSize)), 0,
+        for (size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
+            std::memset(m_system.Memory().GetPointer(m_current_heap_end + (cur_page * PageSize)), 0,
                         PageSize);
         }
 
@@ -1681,133 +1831,172 @@ Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
         memory_reservation.Commit();
 
         // Apply the memory block update.
-        block_manager->Update(current_heap_end, num_pages, KMemoryState::Normal,
-                              KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
+        m_memory_block_manager.Update(
+            std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal,
+            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+            m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
+                                                      : KMemoryBlockDisableMergeAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None);
 
         // Update the current heap end.
-        current_heap_end = heap_region_start + size;
+        m_current_heap_end = m_heap_region_start + size;
 
         // Set the output.
-        *out = heap_region_start;
-        return ResultSuccess;
+        *out = m_heap_region_start;
+        R_SUCCEED();
     }
 }
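
SetHeapSize thus has three outcomes under the physical-memory lock: shrink (unmap the tail and release the resource reservation), no-op (exact match), or extend (allocate, clear, and map from the current end). A compressed sketch of that control flow, with the locking, resource accounting, and block updates elided:

```cpp
#include <cstdint>

uint64_t SetHeapSizeSketch(uint64_t heap_start, uint64_t& cur_end, uint64_t new_size) {
    const uint64_t cur_size = cur_end - heap_start;
    if (new_size < cur_size) {
        // Shrink: unmap [heap_start + new_size, cur_end) and release memory.
        cur_end = heap_start + new_size;
    } else if (new_size > cur_size) {
        // Extend: allocate, zero, and map [cur_end, heap_start + new_size).
        cur_end = heap_start + new_size;
    }
    // In every case the address returned to the caller is the heap base.
    return heap_start;
}
```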
 
-ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
+ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_t align,
                                                   bool is_map_only, VAddr region_start,
-                                                  std::size_t region_num_pages, KMemoryState state,
+                                                  size_t region_num_pages, KMemoryState state,
                                                   KMemoryPermission perm, PAddr map_addr) {
-    KScopedLightLock lk(general_lock);
-
-    if (!CanContain(region_start, region_num_pages * PageSize, state)) {
-        return ResultInvalidCurrentMemory;
-    }
-
-    if (region_num_pages <= needed_num_pages) {
-        return ResultOutOfMemory;
-    }
+    KScopedLightLock lk(m_general_lock);
 
+    R_UNLESS(CanContain(region_start, region_num_pages * PageSize, state),
+             ResultInvalidCurrentMemory);
+    R_UNLESS(region_num_pages > needed_num_pages, ResultOutOfMemory);
     const VAddr addr{
         AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)};
-    if (!addr) {
-        return ResultOutOfMemory;
-    }
+    R_UNLESS(addr, ResultOutOfMemory);
+
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager);
+    R_TRY(allocator_result);
 
     if (is_map_only) {
         R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
     } else {
         KPageGroup page_group;
-        R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
+        R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess(
             &page_group, needed_num_pages,
-            KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
+            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
         R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
     }
 
-    block_manager->Update(addr, needed_num_pages, state, perm);
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
 
     return addr;
 }
 
-Result KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
-    KScopedLightLock lk(general_lock);
+Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm,
+                                                bool is_aligned) {
+    // Lightly validate the range before doing anything else.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
 
-    KMemoryPermission perm{};
-    if (const Result result{CheckMemoryState(
-            nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute,
-            KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
-            KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
-            KMemoryAttribute::DeviceSharedAndUncached)};
-        result.IsError()) {
-        return result;
-    }
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
 
-    block_manager->UpdateLock(
-        addr, size / PageSize,
-        [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
-            block->ShareToDevice(permission);
-        },
-        perm);
+    // Check the memory state.
+    const auto test_state =
+        (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap);
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state,
+                                 test_state, perm, perm,
+                                 KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,
+                                 KMemoryAttribute::None, KMemoryAttribute::DeviceShared));
 
-    return ResultSuccess;
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // Update the memory blocks.
+    m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
+                                      &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
+
+    R_SUCCEED();
 }
 
-Result KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) {
-    KScopedLightLock lk(general_lock);
+Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) {
+    // Lightly validate the range before doing anything else.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
 
-    KMemoryPermission perm{};
-    if (const Result result{CheckMemoryState(
-            nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute,
-            KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
-            KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
-            KMemoryAttribute::DeviceSharedAndUncached)};
-        result.IsError()) {
-        return result;
-    }
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
 
-    block_manager->UpdateLock(
-        addr, size / PageSize,
-        [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
-            block->UnshareToDevice(permission);
-        },
-        perm);
+    // Check the memory state.
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryStateContiguous(
+        std::addressof(num_allocator_blocks), address, size,
+        KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap,
+        KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap,
+        KMemoryPermission::None, KMemoryPermission::None,
+        KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
 
-    return ResultSuccess;
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // Update the memory blocks.
+    const KMemoryBlockManager::MemoryBlockLockFunction lock_func =
+        m_enable_device_address_space_merge
+            ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare
+            : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
+    m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func,
+                                      KMemoryPermission::None);
+
+    R_SUCCEED();
 }
 
-Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size) {
-    return this->LockMemoryAndOpen(
+Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) {
+    // Lightly validate the range before doing anything else.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check the memory state.
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryStateContiguous(
+        std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap,
+        KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None,
+        KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // Update the memory blocks.
+    m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
+                                      &KMemoryBlock::UnshareToDevice, KMemoryPermission::None);
+
+    R_SUCCEED();
+}
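
All three device-map paths above share one shape: validate the range, count how many block splits the update could require, reserve that many nodes through an update allocator, and only then mutate, so the commit step cannot fail halfway. A self-contained sketch of that reserve-then-commit idea, with hypothetical std-only types standing in for the kernel's slab-backed allocator and block tree:

    #include <cstddef>
    #include <iostream>
    #include <list>
    #include <vector>

    struct Block {
        std::size_t first; // first page of the run
        std::size_t count; // pages in the run
        int state;
    };

    // Stand-in for the slab-backed update allocator: every node the update
    // could possibly need is acquired before any block is touched, so the
    // mutation phase below cannot fail.
    class UpdateAllocator {
    public:
        explicit UpdateAllocator(std::size_t needed) : pool_(needed) {}
        Block& Allocate() { return pool_.at(next_++); } // throws only on a logic bug
    private:
        std::vector<Block> pool_;
        std::size_t next_ = 0;
    };

    // Commit phase: set new_state on [first, first + count), splitting at
    // most one block at each boundary.
    void Update(std::list<Block>& blocks, UpdateAllocator& alloc,
                std::size_t first, std::size_t count, int new_state) {
        const std::size_t last = first + count;
        for (auto it = blocks.begin(); it != blocks.end(); ++it) {
            const std::size_t b_last = it->first + it->count;
            if (b_last <= first || last <= it->first) {
                continue; // no overlap with the update range
            }
            if (it->first < first) { // keep the prefix in its old state
                Block& prefix = alloc.Allocate();
                prefix = {it->first, first - it->first, it->state};
                it->count = b_last - first;
                it->first = first;
                blocks.insert(it, prefix);
            }
            if (last < it->first + it->count) { // keep the suffix in its old state
                Block& suffix = alloc.Allocate();
                suffix = {last, (it->first + it->count) - last, it->state};
                it->count = last - it->first;
                blocks.insert(std::next(it), suffix);
            }
            it->state = new_state;
        }
    }

    int main() {
        std::list<Block> blocks{{0, 16, 0}}; // one 16-page run in state 0
        UpdateAllocator alloc(2);            // worst case: one split per endpoint
        Update(blocks, alloc, 4, 8, 1);      // re-state pages [4, 12)
        for (const auto& b : blocks) {
            std::cout << b.first << "+" << b.count << " state " << b.state << "\n";
        } // 0+4 state 0 / 4+8 state 1 / 12+4 state 0
    }
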
+
+Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) {
+    R_RETURN(this->LockMemoryAndOpen(
         out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
         KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
         KMemoryAttribute::None,
         static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
                                        KMemoryPermission::KernelReadWrite),
-        KMemoryAttribute::Locked);
+        KMemoryAttribute::Locked));
 }
 
-Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg) {
-    return this->UnlockMemory(
+Result KPageTable::UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg) {
+    R_RETURN(this->UnlockMemory(
         addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
         KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
-        KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg);
-}
-
-Result KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
-    block_manager = std::make_unique<KMemoryBlockManager>(start, end);
-
-    return ResultSuccess;
-}
-
-bool KPageTable::IsRegionMapped(VAddr address, u64 size) {
-    return CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
-                            KMemoryPermission::All, KMemoryPermission::None, KMemoryAttribute::Mask,
-                            KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)
-        .IsError();
+        KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg));
 }
 
 bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
-    auto start_ptr = system.Memory().GetPointer(addr);
+    auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(addr);
     for (u64 offset{}; offset < size; offset += PageSize) {
-        if (start_ptr != system.Memory().GetPointer(addr + offset)) {
+        if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(addr + offset)) {
             return false;
         }
         start_ptr += PageSize;
@@ -1815,8 +2004,7 @@ bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
     return true;
 }
 
-void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
-                                  KPageGroup& page_linked_list) {
+void KPageTable::AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list) {
     VAddr addr{start};
     while (addr < start + (num_pages * PageSize)) {
         const PAddr paddr{GetPhysicalAddr(addr)};
@@ -1826,16 +2014,16 @@ void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
     }
 }
 
-VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_pages,
-                                        u64 needed_num_pages, std::size_t align) {
-    if (is_aslr_enabled) {
+VAddr KPageTable::AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
+                                        size_t align) {
+    if (m_enable_aslr) {
         UNIMPLEMENTED();
     }
-    return block_manager->FindFreeArea(start, region_num_pages, needed_num_pages, align, 0,
-                                       IsKernel() ? 1 : 4);
+    return m_memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0,
+                                               IsKernel() ? 1 : 4);
 }
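
FindFreeArea's body is not part of this hunk, but the call above pins down its contract: search region_num_pages pages from start for a needed_num_pages run at the requested alignment, keeping guard pages around the mapping (one for kernel tables, four for user ones). A minimal first-fit sketch under those assumed semantics, with a hypothetical free-run list in place of the real block tree:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    struct FreeRun {
        std::size_t first; // first free page
        std::size_t count; // free pages in the run
    };

    constexpr std::size_t AlignUp(std::size_t v, std::size_t a) {
        return (v + a - 1) / a * a;
    }

    // Returns the first page of a suitable run, or 0 if none fits.
    std::size_t FindFreeArea(const std::vector<FreeRun>& free_runs,
                             std::size_t num_pages, std::size_t align_pages,
                             std::size_t guard_pages) {
        for (const auto& run : free_runs) {
            // Leave guard pages unused on both sides of the mapping.
            const std::size_t first = AlignUp(run.first + guard_pages, align_pages);
            const std::size_t last = first + num_pages + guard_pages;
            if (first >= run.first && last <= run.first + run.count) {
                return first;
            }
        }
        return 0;
    }

    int main() {
        std::vector<FreeRun> runs{{1, 6}, {10, 64}};
        // Need 16 pages, 4-page aligned, 4 guard pages each side (user process).
        std::cout << FindFreeArea(runs, 16, 4, 4) << "\n"; // prints 16
    }
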
 
-Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
+Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
                            OperationType operation) {
     ASSERT(this->IsLockedByCurrentThread());
 
@@ -1844,11 +2032,11 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup&
     ASSERT(num_pages == page_group.GetNumPages());
 
     for (const auto& node : page_group.Nodes()) {
-        const std::size_t size{node.GetNumPages() * PageSize};
+        const size_t size{node.GetNumPages() * PageSize};
 
         switch (operation) {
         case OperationType::MapGroup:
-            system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress());
+            m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
             break;
         default:
             ASSERT(false);
@@ -1857,10 +2045,10 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup&
         addr += size;
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
+Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
                            OperationType operation, PAddr map_addr) {
     ASSERT(this->IsLockedByCurrentThread());
 
@@ -1870,12 +2058,12 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission
 
     switch (operation) {
     case OperationType::Unmap:
-        system.Memory().UnmapRegion(page_table_impl, addr, num_pages * PageSize);
+        m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
         break;
     case OperationType::Map: {
         ASSERT(map_addr);
         ASSERT(Common::IsAligned(map_addr, PageSize));
-        system.Memory().MapMemoryRegion(page_table_impl, addr, num_pages * PageSize, map_addr);
+        m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
         break;
     }
     case OperationType::ChangePermissions:
@@ -1884,25 +2072,25 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission
     default:
         ASSERT(false);
     }
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
     switch (state) {
     case KMemoryState::Free:
     case KMemoryState::Kernel:
-        return address_space_start;
+        return m_address_space_start;
     case KMemoryState::Normal:
-        return heap_region_start;
+        return m_heap_region_start;
     case KMemoryState::Ipc:
     case KMemoryState::NonSecureIpc:
     case KMemoryState::NonDeviceIpc:
-        return alias_region_start;
+        return m_alias_region_start;
     case KMemoryState::Stack:
-        return stack_region_start;
+        return m_stack_region_start;
     case KMemoryState::Static:
     case KMemoryState::ThreadLocal:
-        return kernel_map_region_start;
+        return m_kernel_map_region_start;
     case KMemoryState::Io:
     case KMemoryState::Shared:
     case KMemoryState::AliasCode:
@@ -1913,31 +2101,31 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
     case KMemoryState::GeneratedCode:
     case KMemoryState::CodeOut:
     case KMemoryState::Coverage:
-        return alias_code_region_start;
+        return m_alias_code_region_start;
     case KMemoryState::Code:
     case KMemoryState::CodeData:
-        return code_region_start;
+        return m_code_region_start;
     default:
         UNREACHABLE();
     }
 }
 
-std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
+size_t KPageTable::GetRegionSize(KMemoryState state) const {
     switch (state) {
     case KMemoryState::Free:
     case KMemoryState::Kernel:
-        return address_space_end - address_space_start;
+        return m_address_space_end - m_address_space_start;
     case KMemoryState::Normal:
-        return heap_region_end - heap_region_start;
+        return m_heap_region_end - m_heap_region_start;
     case KMemoryState::Ipc:
     case KMemoryState::NonSecureIpc:
     case KMemoryState::NonDeviceIpc:
-        return alias_region_end - alias_region_start;
+        return m_alias_region_end - m_alias_region_start;
     case KMemoryState::Stack:
-        return stack_region_end - stack_region_start;
+        return m_stack_region_end - m_stack_region_start;
     case KMemoryState::Static:
     case KMemoryState::ThreadLocal:
-        return kernel_map_region_end - kernel_map_region_start;
+        return m_kernel_map_region_end - m_kernel_map_region_start;
     case KMemoryState::Io:
     case KMemoryState::Shared:
     case KMemoryState::AliasCode:
@@ -1948,16 +2136,16 @@ std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
     case KMemoryState::GeneratedCode:
     case KMemoryState::CodeOut:
     case KMemoryState::Coverage:
-        return alias_code_region_end - alias_code_region_start;
+        return m_alias_code_region_end - m_alias_code_region_start;
     case KMemoryState::Code:
     case KMemoryState::CodeData:
-        return code_region_end - code_region_start;
+        return m_code_region_end - m_code_region_start;
     default:
         UNREACHABLE();
     }
 }
 
-bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const {
+bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const {
     const VAddr end = addr + size;
     const VAddr last = end - 1;
 
@@ -1966,10 +2154,10 @@ bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) co
 
     const bool is_in_region =
         region_start <= addr && addr < end && last <= region_start + region_size - 1;
-    const bool is_in_heap = !(end <= heap_region_start || heap_region_end <= addr ||
-                              heap_region_start == heap_region_end);
-    const bool is_in_alias = !(end <= alias_region_start || alias_region_end <= addr ||
-                               alias_region_start == alias_region_end);
+    const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr ||
+                              m_heap_region_start == m_heap_region_end);
+    const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
+                               m_alias_region_start == m_alias_region_end);
     switch (state) {
     case KMemoryState::Free:
     case KMemoryState::Kernel:
@@ -2008,23 +2196,23 @@ Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_
                                     KMemoryPermission perm, KMemoryAttribute attr_mask,
                                     KMemoryAttribute attr) const {
     // Validate the states match expectation.
-    R_UNLESS((info.state & state_mask) == state, ResultInvalidCurrentMemory);
-    R_UNLESS((info.perm & perm_mask) == perm, ResultInvalidCurrentMemory);
-    R_UNLESS((info.attribute & attr_mask) == attr, ResultInvalidCurrentMemory);
+    R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory);
+    R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory);
+    R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
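
The three R_UNLESS lines above are the invariant check used throughout this file: a field matches when its masked bits equal the expected pattern exactly. A tiny stand-alone illustration of the mask/value idiom with a flag enum (names hypothetical):

    #include <cstdint>
    #include <iostream>

    enum class Attr : std::uint32_t {
        None = 0,
        Locked = 1 << 0,
        IpcLocked = 1 << 1,
        DeviceShared = 1 << 2,
    };

    constexpr Attr operator|(Attr a, Attr b) {
        return static_cast<Attr>(static_cast<std::uint32_t>(a) |
                                 static_cast<std::uint32_t>(b));
    }
    constexpr Attr operator&(Attr a, Attr b) {
        return static_cast<Attr>(static_cast<std::uint32_t>(a) &
                                 static_cast<std::uint32_t>(b));
    }

    // True iff every bit selected by `mask` has exactly the value in `expected`.
    constexpr bool Matches(Attr field, Attr mask, Attr expected) {
        return (field & mask) == expected;
    }

    int main() {
        const Attr attr = Attr::DeviceShared | Attr::Locked;
        // Require DeviceShared set and IpcLocked clear; don't care about Locked.
        std::cout << Matches(attr, Attr::DeviceShared | Attr::IpcLocked,
                             Attr::DeviceShared)
                  << "\n"; // prints 1
    }
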
 
-Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
-                                              std::size_t size, KMemoryState state_mask,
-                                              KMemoryState state, KMemoryPermission perm_mask,
-                                              KMemoryPermission perm, KMemoryAttribute attr_mask,
+Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
+                                              KMemoryState state_mask, KMemoryState state,
+                                              KMemoryPermission perm_mask, KMemoryPermission perm,
+                                              KMemoryAttribute attr_mask,
                                               KMemoryAttribute attr) const {
     ASSERT(this->IsLockedByCurrentThread());
 
     // Get information about the first block.
     const VAddr last_addr = addr + size - 1;
-    KMemoryBlockManager::const_iterator it = block_manager->FindIterator(addr);
+    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
     KMemoryInfo info = it->GetMemoryInfo();
 
     // If the start address isn't aligned, we need a block.
@@ -2042,7 +2230,7 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VA
 
         // Advance our iterator.
         it++;
-        ASSERT(it != block_manager->cend());
+        ASSERT(it != m_memory_block_manager.cend());
         info = it->GetMemoryInfo();
     }
 
@@ -2054,12 +2242,12 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VA
         *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
-                                    KMemoryAttribute* out_attr, std::size_t* out_blocks_needed,
-                                    VAddr addr, std::size_t size, KMemoryState state_mask,
+                                    KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+                                    VAddr addr, size_t size, KMemoryState state_mask,
                                     KMemoryState state, KMemoryPermission perm_mask,
                                     KMemoryPermission perm, KMemoryAttribute attr_mask,
                                     KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
@@ -2067,7 +2255,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
 
     // Get information about the first block.
     const VAddr last_addr = addr + size - 1;
-    KMemoryBlockManager::const_iterator it = block_manager->FindIterator(addr);
+    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
     KMemoryInfo info = it->GetMemoryInfo();
 
     // If the start address isn't aligned, we need a block.
@@ -2075,14 +2263,14 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
         (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0;
 
     // Validate all blocks in the range have correct state.
-    const KMemoryState first_state = info.state;
-    const KMemoryPermission first_perm = info.perm;
-    const KMemoryAttribute first_attr = info.attribute;
+    const KMemoryState first_state = info.m_state;
+    const KMemoryPermission first_perm = info.m_permission;
+    const KMemoryAttribute first_attr = info.m_attribute;
     while (true) {
         // Validate the current block.
-        R_UNLESS(info.state == first_state, ResultInvalidCurrentMemory);
-        R_UNLESS(info.perm == first_perm, ResultInvalidCurrentMemory);
-        R_UNLESS((info.attribute | ignore_attr) == (first_attr | ignore_attr),
+        R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory);
+        R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory);
+        R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr),
                  ResultInvalidCurrentMemory);
 
         // Validate against the provided masks.
@@ -2095,7 +2283,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
 
         // Advance our iterator.
         it++;
-        ASSERT(it != block_manager->cend());
+        ASSERT(it != m_memory_block_manager.cend());
         info = it->GetMemoryInfo();
     }
 
@@ -2116,7 +2304,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
     if (out_blocks_needed != nullptr) {
         *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
     }
-    return ResultSuccess;
+    R_SUCCEED();
 }
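
The blocks_for_start_align and blocks_for_end_align counts computed in this function feed the update allocators seen earlier: a range update needs one spare block node for each endpoint that does not already fall on a block boundary, so at most two per range. A short sketch of that counting rule, assuming page-granular blocks and hypothetical helper names:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    constexpr std::size_t PageSize = 0x1000;

    constexpr std::size_t AlignDown(std::size_t v, std::size_t a) {
        return v / a * a;
    }

    // block_starts: sorted, page-aligned start addresses of existing blocks.
    std::size_t BlocksNeeded(const std::vector<std::size_t>& block_starts,
                             std::size_t addr, std::size_t size) {
        auto on_boundary = [&](std::size_t a) {
            for (std::size_t s : block_starts) {
                if (s == a) return true;
            }
            return false;
        };
        std::size_t needed = 0;
        // The start needs a split unless it coincides with a block start.
        if (!on_boundary(AlignDown(addr, PageSize))) ++needed;
        // The page after the last byte needs a split unless it is a boundary.
        const std::size_t end_page = AlignDown(addr + size - 1, PageSize) + PageSize;
        if (!on_boundary(end_page)) ++needed;
        return needed;
    }

    int main() {
        // Blocks start at 0x0000 and 0x8000; update covers [0x2000, 0x6000).
        std::cout << BlocksNeeded({0x0, 0x8000}, 0x2000, 0x4000) << "\n"; // 2
    }
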
 
 Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
@@ -2134,7 +2322,7 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
     R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Check that the output page group is empty, if it exists.
     if (out_pg) {
@@ -2162,6 +2350,12 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
         R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
     }
 
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     // Decide on new perm and attr.
     new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
     KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr);
@@ -2172,9 +2366,11 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
     }
 
     // Apply the memory block updates.
-    block_manager->Update(addr, num_pages, old_state, new_perm, new_attr);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+                                  new_attr, KMemoryBlockDisableMergeAttribute::Locked,
+                                  KMemoryBlockDisableMergeAttribute::None);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
@@ -2191,7 +2387,7 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask
     R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Check the state.
     KMemoryState old_state{};
@@ -2213,15 +2409,23 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask
     new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
     KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr);
 
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     // Update permission, if we need to.
     if (new_perm != old_perm) {
         R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
     }
 
     // Apply the memory block updates.
-    block_manager->Update(addr, num_pages, old_state, new_perm, new_attr);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+                                  new_attr, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Locked);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
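
LockMemoryAndOpen and UnlockMemory are mirror images: locking ORs the lock attribute in and may downgrade permissions, unlocking masks the attribute back out and restores them. A compact sketch of that pairing, using plain integers for the flag sets and hypothetical names:

    #include <cstdint>
    #include <iostream>

    struct PageRange {
        std::uint32_t perm; // current permissions
        std::uint32_t attr; // current attributes
    };

    constexpr std::uint32_t PermRW = 0x3;
    constexpr std::uint32_t PermNone = 0x0;
    constexpr std::uint32_t AttrLocked = 0x1;

    void Lock(PageRange& r, std::uint32_t new_perm, std::uint32_t lock_attr) {
        r.perm = new_perm;   // e.g. drop to kernel-only while locked
        r.attr |= lock_attr; // mark the range as locked
    }

    void Unlock(PageRange& r, std::uint32_t restored_perm, std::uint32_t lock_attr) {
        r.attr &= ~lock_attr; // clear exactly the lock attribute
        r.perm = restored_perm; // give the original permissions back
    }

    int main() {
        PageRange r{PermRW, 0};
        Lock(r, PermNone, AttrLocked);
        std::cout << std::hex << r.perm << " " << r.attr << "\n"; // 0 1
        Unlock(r, PermRW, AttrLocked);
        std::cout << std::hex << r.perm << " " << r.attr << "\n"; // 3 0
    }
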
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 25774f2321..c6aeacd96c 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -9,8 +9,10 @@
 #include "common/common_types.h"
 #include "common/page_table.h"
 #include "core/file_sys/program_metadata.h"
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
 #include "core/hle/kernel/k_light_lock.h"
 #include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_memory_block_manager.h"
 #include "core/hle/kernel/k_memory_layout.h"
 #include "core/hle/kernel/k_memory_manager.h"
 #include "core/hle/result.h"
@@ -34,58 +36,66 @@ public:
     ~KPageTable();
 
     Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
-                                VAddr code_addr, std::size_t code_size, KMemoryManager::Pool pool);
-    Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
+                                VAddr code_addr, size_t code_size,
+                                KMemoryBlockSlabManager* mem_block_slab_manager,
+                                KMemoryManager::Pool pool);
+
+    void Finalize();
+
+    Result MapProcessCode(VAddr addr, size_t pages_count, KMemoryState state,
                           KMemoryPermission perm);
-    Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
-    Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+    Result MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size);
+    Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
                            ICacheInvalidationStrategy icache_invalidation_strategy);
-    Result UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
+    Result UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
                               VAddr src_addr);
-    Result MapPhysicalMemory(VAddr addr, std::size_t size);
-    Result UnmapPhysicalMemory(VAddr addr, std::size_t size);
-    Result MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
-    Result UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
+    Result MapPhysicalMemory(VAddr addr, size_t size);
+    Result UnmapPhysicalMemory(VAddr addr, size_t size);
+    Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
+    Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
     Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
                     KMemoryPermission perm);
-    Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
+    Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
                     KMemoryState state, KMemoryPermission perm) {
-        return this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
-                              this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize,
-                              state, perm);
+        R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
+                                this->GetRegionAddress(state),
+                                this->GetRegionSize(state) / PageSize, state, perm));
     }
     Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
-    Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
-    Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm);
+    Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state);
+    Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm);
     KMemoryInfo QueryInfo(VAddr addr);
-    Result ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
-    Result ResetTransferMemory(VAddr addr, std::size_t size);
-    Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
-    Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr);
-    Result SetMaxHeapSize(std::size_t size);
-    Result SetHeapSize(VAddr* out, std::size_t size);
-    ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
-                                          bool is_map_only, VAddr region_start,
-                                          std::size_t region_num_pages, KMemoryState state,
-                                          KMemoryPermission perm, PAddr map_addr = 0);
-    Result LockForDeviceAddressSpace(VAddr addr, std::size_t size);
-    Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
-    Result LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size);
-    Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg);
+    Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm);
+    Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr);
+    Result SetMaxHeapSize(size_t size);
+    Result SetHeapSize(VAddr* out, size_t size);
+    ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only,
+                                          VAddr region_start, size_t region_num_pages,
+                                          KMemoryState state, KMemoryPermission perm,
+                                          PAddr map_addr = 0);
+
+    Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm,
+                                        bool is_aligned);
+    Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size);
+
+    Result UnlockForDeviceAddressSpace(VAddr addr, size_t size);
+
+    Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size);
+    Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg);
     Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
                                 KMemoryState state_mask, KMemoryState state,
                                 KMemoryPermission perm_mask, KMemoryPermission perm,
                                 KMemoryAttribute attr_mask, KMemoryAttribute attr);
 
     Common::PageTable& PageTableImpl() {
-        return page_table_impl;
+        return *m_page_table_impl;
     }
 
     const Common::PageTable& PageTableImpl() const {
-        return page_table_impl;
+        return *m_page_table_impl;
     }
 
-    bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const;
+    bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
 
 private:
     enum class OperationType : u32 {
@@ -96,67 +106,65 @@ private:
         ChangePermissionsAndRefresh,
     };
 
-    static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = KMemoryAttribute::DontCareMask |
-                                                                KMemoryAttribute::IpcLocked |
-                                                                KMemoryAttribute::DeviceShared;
+    static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
+        KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
 
-    Result InitializeMemoryLayout(VAddr start, VAddr end);
     Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
-    Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
-                    bool is_pa_valid, VAddr region_start, std::size_t region_num_pages,
+    Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
+                    bool is_pa_valid, VAddr region_start, size_t region_num_pages,
                     KMemoryState state, KMemoryPermission perm);
     Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
-    bool IsRegionMapped(VAddr address, u64 size);
     bool IsRegionContiguous(VAddr addr, u64 size) const;
-    void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list);
+    void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list);
     KMemoryInfo QueryInfoImpl(VAddr addr);
-    VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
-                                std::size_t align);
-    Result Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
+    VAddr AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
+                                size_t align);
+    Result Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
                    OperationType operation);
-    Result Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
-                   OperationType operation, PAddr map_addr = 0);
+    Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation,
+                   PAddr map_addr = 0);
     VAddr GetRegionAddress(KMemoryState state) const;
-    std::size_t GetRegionSize(KMemoryState state) const;
+    size_t GetRegionSize(KMemoryState state) const;
 
-    VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
-                       std::size_t alignment, std::size_t offset, std::size_t guard_pages);
+    VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
+                       size_t alignment, size_t offset, size_t guard_pages);
 
-    Result CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
+    Result CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
                                       KMemoryState state_mask, KMemoryState state,
                                       KMemoryPermission perm_mask, KMemoryPermission perm,
                                       KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
-    Result CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask,
+    Result CheckMemoryStateContiguous(VAddr addr, size_t size, KMemoryState state_mask,
                                       KMemoryState state, KMemoryPermission perm_mask,
                                       KMemoryPermission perm, KMemoryAttribute attr_mask,
                                       KMemoryAttribute attr) const {
-        return this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
-                                                perm, attr_mask, attr);
+        R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
+                                                  perm, attr_mask, attr));
     }
 
     Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
                             KMemoryPermission perm_mask, KMemoryPermission perm,
                             KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
     Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
-                            KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, VAddr addr,
-                            std::size_t size, KMemoryState state_mask, KMemoryState state,
+                            KMemoryAttribute* out_attr, size_t* out_blocks_needed, VAddr addr,
+                            size_t size, KMemoryState state_mask, KMemoryState state,
                             KMemoryPermission perm_mask, KMemoryPermission perm,
                             KMemoryAttribute attr_mask, KMemoryAttribute attr,
                             KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
-    Result CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
+    Result CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size,
                             KMemoryState state_mask, KMemoryState state,
                             KMemoryPermission perm_mask, KMemoryPermission perm,
                             KMemoryAttribute attr_mask, KMemoryAttribute attr,
                             KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
-        return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
-                                state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
+        R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
+                                  state_mask, state, perm_mask, perm, attr_mask, attr,
+                                  ignore_attr));
     }
-    Result CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
-                            KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+    Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
+                            KMemoryPermission perm_mask, KMemoryPermission perm,
                             KMemoryAttribute attr_mask, KMemoryAttribute attr,
                             KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
-        return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
-                                      attr_mask, attr, ignore_attr);
+        R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
+                                        attr_mask, attr, ignore_attr));
     }
 
     Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
@@ -174,13 +182,13 @@ private:
     bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages);
 
     bool IsLockedByCurrentThread() const {
-        return general_lock.IsLockedByCurrentThread();
+        return m_general_lock.IsLockedByCurrentThread();
     }
 
     bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) {
         ASSERT(this->IsLockedByCurrentThread());
 
-        return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr);
+        return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
     }
 
     bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
@@ -191,95 +199,93 @@ private:
         return *out != 0;
     }
 
-    mutable KLightLock general_lock;
-    mutable KLightLock map_physical_memory_lock;
-
-    std::unique_ptr<KMemoryBlockManager> block_manager;
+    mutable KLightLock m_general_lock;
+    mutable KLightLock m_map_physical_memory_lock;
 
 public:
     constexpr VAddr GetAddressSpaceStart() const {
-        return address_space_start;
+        return m_address_space_start;
     }
     constexpr VAddr GetAddressSpaceEnd() const {
-        return address_space_end;
+        return m_address_space_end;
     }
-    constexpr std::size_t GetAddressSpaceSize() const {
-        return address_space_end - address_space_start;
+    constexpr size_t GetAddressSpaceSize() const {
+        return m_address_space_end - m_address_space_start;
     }
     constexpr VAddr GetHeapRegionStart() const {
-        return heap_region_start;
+        return m_heap_region_start;
     }
     constexpr VAddr GetHeapRegionEnd() const {
-        return heap_region_end;
+        return m_heap_region_end;
     }
-    constexpr std::size_t GetHeapRegionSize() const {
-        return heap_region_end - heap_region_start;
+    constexpr size_t GetHeapRegionSize() const {
+        return m_heap_region_end - m_heap_region_start;
     }
     constexpr VAddr GetAliasRegionStart() const {
-        return alias_region_start;
+        return m_alias_region_start;
     }
     constexpr VAddr GetAliasRegionEnd() const {
-        return alias_region_end;
+        return m_alias_region_end;
     }
-    constexpr std::size_t GetAliasRegionSize() const {
-        return alias_region_end - alias_region_start;
+    constexpr size_t GetAliasRegionSize() const {
+        return m_alias_region_end - m_alias_region_start;
     }
     constexpr VAddr GetStackRegionStart() const {
-        return stack_region_start;
+        return m_stack_region_start;
     }
     constexpr VAddr GetStackRegionEnd() const {
-        return stack_region_end;
+        return m_stack_region_end;
     }
-    constexpr std::size_t GetStackRegionSize() const {
-        return stack_region_end - stack_region_start;
+    constexpr size_t GetStackRegionSize() const {
+        return m_stack_region_end - m_stack_region_start;
     }
     constexpr VAddr GetKernelMapRegionStart() const {
-        return kernel_map_region_start;
+        return m_kernel_map_region_start;
     }
     constexpr VAddr GetKernelMapRegionEnd() const {
-        return kernel_map_region_end;
+        return m_kernel_map_region_end;
     }
     constexpr VAddr GetCodeRegionStart() const {
-        return code_region_start;
+        return m_code_region_start;
     }
     constexpr VAddr GetCodeRegionEnd() const {
-        return code_region_end;
+        return m_code_region_end;
     }
     constexpr VAddr GetAliasCodeRegionStart() const {
-        return alias_code_region_start;
+        return m_alias_code_region_start;
     }
     constexpr VAddr GetAliasCodeRegionSize() const {
-        return alias_code_region_end - alias_code_region_start;
+        return m_alias_code_region_end - m_alias_code_region_start;
     }
-    std::size_t GetNormalMemorySize() {
-        KScopedLightLock lk(general_lock);
-        return GetHeapSize() + mapped_physical_memory_size;
+    size_t GetNormalMemorySize() {
+        KScopedLightLock lk(m_general_lock);
+        return GetHeapSize() + m_mapped_physical_memory_size;
     }
-    constexpr std::size_t GetAddressSpaceWidth() const {
-        return address_space_width;
+    constexpr size_t GetAddressSpaceWidth() const {
+        return m_address_space_width;
     }
-    constexpr std::size_t GetHeapSize() const {
-        return current_heap_end - heap_region_start;
+    constexpr size_t GetHeapSize() const {
+        return m_current_heap_end - m_heap_region_start;
     }
-    constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const {
-        return address_space_start <= address && address + size - 1 <= address_space_end - 1;
+    constexpr bool IsInsideAddressSpace(VAddr address, size_t size) const {
+        return m_address_space_start <= address && address + size - 1 <= m_address_space_end - 1;
     }
-    constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const {
-        return alias_region_start > address || address + size - 1 > alias_region_end - 1;
+    constexpr bool IsOutsideAliasRegion(VAddr address, size_t size) const {
+        return m_alias_region_start > address || address + size - 1 > m_alias_region_end - 1;
     }
-    constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const {
-        return stack_region_start > address || address + size - 1 > stack_region_end - 1;
+    constexpr bool IsOutsideStackRegion(VAddr address, size_t size) const {
+        return m_stack_region_start > address || address + size - 1 > m_stack_region_end - 1;
     }
-    constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const {
+    constexpr bool IsInvalidRegion(VAddr address, size_t size) const {
         return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
     }
-    constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const {
-        return address + size > heap_region_start && heap_region_end > address;
+    constexpr bool IsInsideHeapRegion(VAddr address, size_t size) const {
+        return address + size > m_heap_region_start && m_heap_region_end > address;
     }
-    constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const {
-        return address + size > alias_region_start && alias_region_end > address;
+    constexpr bool IsInsideAliasRegion(VAddr address, size_t size) const {
+        return address + size > m_alias_region_start && m_alias_region_end > address;
     }
-    constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const {
+    constexpr bool IsOutsideASLRRegion(VAddr address, size_t size) const {
         if (IsInvalidRegion(address, size)) {
             return true;
         }
@@ -291,73 +297,78 @@ public:
         }
         return {};
     }
-    constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
+    constexpr bool IsInsideASLRRegion(VAddr address, size_t size) const {
         return !IsOutsideASLRRegion(address, size);
     }
-    constexpr std::size_t GetNumGuardPages() const {
+    constexpr size_t GetNumGuardPages() const {
         return IsKernel() ? 1 : 4;
     }
     PAddr GetPhysicalAddr(VAddr addr) const {
-        const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits];
+        const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits];
         ASSERT(backing_addr);
         return backing_addr + addr;
     }
     constexpr bool Contains(VAddr addr) const {
-        return address_space_start <= addr && addr <= address_space_end - 1;
+        return m_address_space_start <= addr && addr <= m_address_space_end - 1;
     }
-    constexpr bool Contains(VAddr addr, std::size_t size) const {
-        return address_space_start <= addr && addr < addr + size &&
-               addr + size - 1 <= address_space_end - 1;
+    constexpr bool Contains(VAddr addr, size_t size) const {
+        return m_address_space_start <= addr && addr < addr + size &&
+               addr + size - 1 <= m_address_space_end - 1;
     }
 
 private:
     constexpr bool IsKernel() const {
-        return is_kernel;
+        return m_is_kernel;
     }
     constexpr bool IsAslrEnabled() const {
-        return is_aslr_enabled;
+        return m_enable_aslr;
     }
 
-    constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const {
-        return (address_space_start <= addr) &&
-               (num_pages <= (address_space_end - address_space_start) / PageSize) &&
-               (addr + num_pages * PageSize - 1 <= address_space_end - 1);
+    constexpr bool ContainsPages(VAddr addr, size_t num_pages) const {
+        return (m_address_space_start <= addr) &&
+               (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
+               (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
     }
 
 private:
-    VAddr address_space_start{};
-    VAddr address_space_end{};
-    VAddr heap_region_start{};
-    VAddr heap_region_end{};
-    VAddr current_heap_end{};
-    VAddr alias_region_start{};
-    VAddr alias_region_end{};
-    VAddr stack_region_start{};
-    VAddr stack_region_end{};
-    VAddr kernel_map_region_start{};
-    VAddr kernel_map_region_end{};
-    VAddr code_region_start{};
-    VAddr code_region_end{};
-    VAddr alias_code_region_start{};
-    VAddr alias_code_region_end{};
+    VAddr m_address_space_start{};
+    VAddr m_address_space_end{};
+    VAddr m_heap_region_start{};
+    VAddr m_heap_region_end{};
+    VAddr m_current_heap_end{};
+    VAddr m_alias_region_start{};
+    VAddr m_alias_region_end{};
+    VAddr m_stack_region_start{};
+    VAddr m_stack_region_end{};
+    VAddr m_kernel_map_region_start{};
+    VAddr m_kernel_map_region_end{};
+    VAddr m_code_region_start{};
+    VAddr m_code_region_end{};
+    VAddr m_alias_code_region_start{};
+    VAddr m_alias_code_region_end{};
 
-    std::size_t mapped_physical_memory_size{};
-    std::size_t max_heap_size{};
-    std::size_t max_physical_memory_size{};
-    std::size_t address_space_width{};
+    size_t m_mapped_physical_memory_size{};
+    size_t m_max_heap_size{};
+    size_t m_max_physical_memory_size{};
+    size_t m_address_space_width{};
 
-    bool is_kernel{};
-    bool is_aslr_enabled{};
+    KMemoryBlockManager m_memory_block_manager;
 
-    u32 heap_fill_value{};
-    const KMemoryRegion* cached_physical_heap_region{};
+    bool m_is_kernel{};
+    bool m_enable_aslr{};
+    bool m_enable_device_address_space_merge{};
 
-    KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application};
-    KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront};
+    KMemoryBlockSlabManager* m_memory_block_slab_manager{};
 
-    Common::PageTable page_table_impl;
+    u32 m_heap_fill_value{};
+    const KMemoryRegion* m_cached_physical_heap_region{};
 
-    Core::System& system;
+    KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application};
+    KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront};
+
+    std::unique_ptr<Common::PageTable> m_page_table_impl;
+
+    Core::System& m_system;
 };
 
 } // namespace Kernel
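
One detail worth noting in the predicates above: Contains(addr, size) requires addr < addr + size and compares last bytes (end - 1) rather than one-past-the-end values, so a range whose addr + size would wrap around the address space is rejected instead of silently passing. A stand-alone demonstration of why the naive form is unsafe:

    #include <cstdint>
    #include <iostream>

    constexpr std::uint64_t space_start = 0x0000'0000'0000'1000ULL;
    constexpr std::uint64_t space_end = 0xFFFF'FFFF'FFFF'F000ULL; // exclusive

    // Naive: breaks when addr + size wraps past 2^64.
    constexpr bool ContainsNaive(std::uint64_t addr, std::uint64_t size) {
        return space_start <= addr && addr + size <= space_end;
    }

    // Overflow-safe: reject wrap, then compare last byte against last byte.
    constexpr bool ContainsSafe(std::uint64_t addr, std::uint64_t size) {
        return space_start <= addr && addr < addr + size &&
               addr + size - 1 <= space_end - 1;
    }

    int main() {
        const std::uint64_t addr = 0xFFFF'FFFF'FFFF'0000ULL;
        const std::uint64_t size = 0x2'0000ULL; // wraps around
        std::cout << ContainsNaive(addr, size) << "\n"; // 1: wrongly accepted
        std::cout << ContainsSafe(addr, size) << "\n";  // 0: correctly rejected
    }
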
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index d3e99665f8..8c3495e5a5 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -72,7 +72,8 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
 
     process->name = std::move(process_name);
     process->resource_limit = res_limit;
-    process->status = ProcessStatus::Created;
+    process->system_resource_address = 0;
+    process->state = State::Created;
     process->program_id = 0;
     process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
                                                               : kernel.CreateNewUserProcessID();
@@ -92,11 +93,12 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
     process->exception_thread = nullptr;
     process->is_suspended = false;
     process->schedule_count = 0;
+    process->is_handle_table_initialized = false;
 
     // Open a reference to the resource limit.
     process->resource_limit->Open();
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 void KProcess::DoWorkerTaskImpl() {
@@ -121,9 +123,9 @@ void KProcess::DecrementRunningThreadCount() {
     }
 }
 
-u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
+u64 KProcess::GetTotalPhysicalMemoryAvailable() {
     const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
-                       page_table->GetNormalMemorySize() + GetSystemResourceSize() + image_size +
+                       page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size +
                        main_thread_stack_size};
     if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
         capacity != pool_size) {
@@ -135,16 +137,16 @@ u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
     return memory_usage_capacity;
 }
 
-u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
+u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() {
     return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize();
 }
 
-u64 KProcess::GetTotalPhysicalMemoryUsed() const {
-    return image_size + main_thread_stack_size + page_table->GetNormalMemorySize() +
+u64 KProcess::GetTotalPhysicalMemoryUsed() {
+    return image_size + main_thread_stack_size + page_table.GetNormalMemorySize() +
            GetSystemResourceSize();
 }
 
-u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
+u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
     return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
 }
 
@@ -244,7 +246,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad
     shmem->Open();
     shemen_info->Open();
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
@@ -289,12 +291,12 @@ Result KProcess::Reset() {
     KScopedSchedulerLock sl{kernel};
 
     // Validate that we're in a state that we can reset.
-    R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);
+    R_UNLESS(state != State::Terminated, ResultInvalidState);
     R_UNLESS(is_signaled, ResultInvalidState);
 
     // Clear signaled.
     is_signaled = false;
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KProcess::SetActivity(ProcessActivity activity) {
@@ -304,15 +306,13 @@ Result KProcess::SetActivity(ProcessActivity activity) {
     KScopedSchedulerLock sl{kernel};
 
     // Validate our state.
-    R_UNLESS(status != ProcessStatus::Exiting, ResultInvalidState);
-    R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);
+    R_UNLESS(state != State::Terminating, ResultInvalidState);
+    R_UNLESS(state != State::Terminated, ResultInvalidState);
 
     // Either pause or resume.
     if (activity == ProcessActivity::Paused) {
         // Verify that we're not suspended.
-        if (is_suspended) {
-            return ResultInvalidState;
-        }
+        R_UNLESS(!is_suspended, ResultInvalidState);
 
         // Suspend all threads.
         for (auto* thread : GetThreadList()) {
@@ -325,9 +325,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
         ASSERT(activity == ProcessActivity::Runnable);
 
         // Verify that we're suspended.
-        if (!is_suspended) {
-            return ResultInvalidState;
-        }
+        R_UNLESS(is_suspended, ResultInvalidState);
 
         // Resume all threads.
         for (auto* thread : GetThreadList()) {
@@ -338,7 +336,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
         SetSuspended(false);
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) {
@@ -348,35 +346,38 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
     system_resource_size = metadata.GetSystemResourceSize();
     image_size = code_size;
 
+    // We currently do not support process-specific system resources.
+    UNIMPLEMENTED_IF(system_resource_size != 0);
+
     KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
                                                   code_size + system_resource_size);
     if (!memory_reservation.Succeeded()) {
         LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
                   code_size + system_resource_size);
-        return ResultLimitReached;
+        R_RETURN(ResultLimitReached);
     }
     // Initialize process address space
-    if (const Result result{page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false,
-                                                             0x8000000, code_size,
-                                                             KMemoryManager::Pool::Application)};
+    if (const Result result{page_table.InitializeForProcess(
+            metadata.GetAddressSpaceType(), false, 0x8000000, code_size,
+            &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)};
         result.IsError()) {
-        return result;
+        R_RETURN(result);
     }
 
     // Map process code region
-    if (const Result result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
-                                                       code_size / PageSize, KMemoryState::Code,
-                                                       KMemoryPermission::None)};
+    if (const Result result{page_table.MapProcessCode(page_table.GetCodeRegionStart(),
+                                                      code_size / PageSize, KMemoryState::Code,
+                                                      KMemoryPermission::None)};
         result.IsError()) {
-        return result;
+        R_RETURN(result);
     }
 
     // Initialize process capabilities
     const auto& caps{metadata.GetKernelCapabilities()};
     if (const Result result{
-            capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)};
+            capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)};
         result.IsError()) {
-        return result;
+        R_RETURN(result);
     }
 
     // Set memory usage capacity
@@ -384,12 +385,12 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
     case FileSys::ProgramAddressSpaceType::Is32Bit:
     case FileSys::ProgramAddressSpaceType::Is36Bit:
     case FileSys::ProgramAddressSpaceType::Is39Bit:
-        memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart();
+        memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart();
         break;
 
     case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
-        memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart() +
-                                page_table->GetAliasRegionEnd() - page_table->GetAliasRegionStart();
+        memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart() +
+                                page_table.GetAliasRegionEnd() - page_table.GetAliasRegionStart();
         break;
 
     default:
@@ -397,10 +398,10 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
     }
 
     // Create TLS region
-    R_TRY(this->CreateThreadLocalRegion(std::addressof(tls_region_address)));
+    R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address)));
     memory_reservation.Commit();
 
-    return handle_table.Initialize(capabilities.GetHandleTableSize());
+    R_RETURN(handle_table.Initialize(capabilities.GetHandleTableSize()));
 }
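
LoadFromMetadata leans on KScopedResourceReservation: reserve the memory up front, Commit() only once every initialization step has succeeded, and let the destructor release the reservation on any early R_RETURN. A minimal RAII sketch of that commit-or-rollback shape, with a hypothetical ResourceLimit stand-in:

    #include <cstddef>
    #include <iostream>

    struct ResourceLimit {
        std::size_t free_bytes;
        bool Reserve(std::size_t n) {
            if (n > free_bytes) return false;
            free_bytes -= n;
            return true;
        }
        void Release(std::size_t n) { free_bytes += n; }
    };

    class ScopedReservation {
    public:
        ScopedReservation(ResourceLimit& limit, std::size_t n)
            : limit_{limit}, amount_{n}, succeeded_{limit.Reserve(n)} {}
        ~ScopedReservation() {
            // Not committed: give the memory back on any early-out path.
            if (succeeded_ && !committed_) limit_.Release(amount_);
        }
        bool Succeeded() const { return succeeded_; }
        void Commit() { committed_ = true; }

    private:
        ResourceLimit& limit_;
        std::size_t amount_;
        bool succeeded_;
        bool committed_ = false;
    };

    int main() {
        ResourceLimit limit{0x10000};
        {
            ScopedReservation r(limit, 0x4000);
            // An error path returns here; the destructor releases the bytes.
        }
        std::cout << std::hex << limit.free_bytes << "\n"; // 10000 again
        {
            ScopedReservation r(limit, 0x4000);
            r.Commit(); // all initialization steps succeeded
        }
        std::cout << std::hex << limit.free_bytes << "\n"; // c000
    }
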
 
 void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
@@ -409,15 +410,15 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
     resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);
 
     const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};
-    ASSERT(!page_table->SetMaxHeapSize(heap_capacity).IsError());
+    ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError());
 
-    ChangeStatus(ProcessStatus::Running);
+    ChangeState(State::Running);
 
     SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top);
 }
 
 void KProcess::PrepareForTermination() {
-    ChangeStatus(ProcessStatus::Exiting);
+    ChangeState(State::Terminating);
 
     const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
         for (auto* thread : in_thread_list) {
@@ -437,15 +438,15 @@ void KProcess::PrepareForTermination() {
 
     stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());
 
-    this->DeleteThreadLocalRegion(tls_region_address);
-    tls_region_address = 0;
+    this->DeleteThreadLocalRegion(plr_address);
+    plr_address = 0;
 
     if (resource_limit) {
         resource_limit->Release(LimitableResource::PhysicalMemory,
                                 main_thread_stack_size + image_size);
     }
 
-    ChangeStatus(ProcessStatus::Exited);
+    ChangeState(State::Terminated);
 }
 
 void KProcess::Finalize() {
@@ -474,7 +475,7 @@ void KProcess::Finalize() {
     }
 
     // Finalize the page table.
-    page_table.reset();
+    page_table.Finalize();
 
     // Perform inherited finalization.
     KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
@@ -499,7 +500,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
             }
 
             *out = tlr;
-            return ResultSuccess;
+            R_SUCCEED();
         }
     }
 
@@ -528,7 +529,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
     // We succeeded!
     tlp_guard.Cancel();
     *out = tlr;
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
@@ -576,7 +577,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
         KThreadLocalPage::Free(kernel, page_to_free);
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
@@ -628,7 +629,7 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
 void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
     const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
                                       Svc::MemoryPermission permission) {
-        page_table->SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
+        page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
     };
 
     kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
@@ -645,19 +646,18 @@ bool KProcess::IsSignaled() const {
 }
 
 KProcess::KProcess(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{std::make_unique<KPageTable>(
-                                                        kernel_.System())},
+    : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{kernel_.System()},
       handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
       state_lock{kernel_}, list_lock{kernel_} {}
 
 KProcess::~KProcess() = default;
 
-void KProcess::ChangeStatus(ProcessStatus new_status) {
-    if (status == new_status) {
+void KProcess::ChangeState(State new_state) {
+    if (state == new_state) {
         return;
     }
 
-    status = new_status;
+    state = new_state;
     is_signaled = true;
     NotifyAvailable();
 }
@@ -668,17 +668,17 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
     // The kernel always ensures that the given stack size is page aligned.
     main_thread_stack_size = Common::AlignUp(stack_size, PageSize);
 
-    const VAddr start{page_table->GetStackRegionStart()};
-    const std::size_t size{page_table->GetStackRegionEnd() - start};
+    const VAddr start{page_table.GetStackRegionStart()};
+    const std::size_t size{page_table.GetStackRegionEnd() - start};
 
     CASCADE_RESULT(main_thread_stack_top,
-                   page_table->AllocateAndMapMemory(
+                   page_table.AllocateAndMapMemory(
                        main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
                        KMemoryState::Stack, KMemoryPermission::UserReadWrite));
 
     main_thread_stack_top += main_thread_stack_size;
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 } // namespace Kernel
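
A note on the ChangeState helper above: it only signals when the state actually transitions, setting is_signaled and calling NotifyAvailable(), which makes the process object waitable on state changes. A minimal sketch of how a caller could block until a process signals (e.g. on termination), reusing the KSynchronizationObject::Wait call and Svc::WaitInfinite constant that appear later in this diff; the helper name WaitForProcessExit is illustrative only, not part of the change:

    // Illustrative sketch, not part of the change: blocks until `process`
    // is signaled, which ChangeState triggers on each state transition.
    Result WaitForProcessExit(KernelCore& kernel, KProcess* process) {
        s32 index{};
        KSynchronizationObject* objects[]{process};
        R_RETURN(KSynchronizationObject::Wait(kernel, std::addressof(index),
                                              objects, 1, Svc::WaitInfinite));
    }
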
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index d56d73bab1..2e0cc3d0bc 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -13,6 +13,7 @@
 #include "core/hle/kernel/k_auto_object.h"
 #include "core/hle/kernel/k_condition_variable.h"
 #include "core/hle/kernel/k_handle_table.h"
+#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/k_thread_local_page.h"
 #include "core/hle/kernel/k_worker_task.h"
@@ -31,7 +32,6 @@ class ProgramMetadata;
 namespace Kernel {
 
 class KernelCore;
-class KPageTable;
 class KResourceLimit;
 class KThread;
 class KSharedMemoryInfo;
@@ -45,24 +45,6 @@ enum class MemoryRegion : u16 {
     BASE = 3,
 };
 
-/**
- * Indicates the status of a Process instance.
- *
- * @note These match the values as used by kernel,
- *       so new entries should only be added if RE
- *       shows that a new value has been introduced.
- */
-enum class ProcessStatus {
-    Created,
-    CreatedWithDebuggerAttached,
-    Running,
-    WaitingForDebuggerToAttach,
-    DebuggerAttached,
-    Exiting,
-    Exited,
-    DebugBreak,
-};
-
 enum class ProcessActivity : u32 {
     Runnable,
     Paused,
@@ -89,6 +71,17 @@ public:
     explicit KProcess(KernelCore& kernel_);
     ~KProcess() override;
 
+    enum class State {
+        Created = static_cast<u32>(Svc::ProcessState::Created),
+        CreatedAttached = static_cast<u32>(Svc::ProcessState::CreatedAttached),
+        Running = static_cast<u32>(Svc::ProcessState::Running),
+        Crashed = static_cast<u32>(Svc::ProcessState::Crashed),
+        RunningAttached = static_cast<u32>(Svc::ProcessState::RunningAttached),
+        Terminating = static_cast<u32>(Svc::ProcessState::Terminating),
+        Terminated = static_cast<u32>(Svc::ProcessState::Terminated),
+        DebugBreak = static_cast<u32>(Svc::ProcessState::DebugBreak),
+    };
+
     enum : u64 {
         /// Lowest allowed process ID for a kernel initial process.
         InitialKIPIDMin = 1,
@@ -114,12 +107,12 @@ public:
 
     /// Gets a reference to the process' page table.
     KPageTable& PageTable() {
-        return *page_table;
+        return page_table;
     }
 
-    /// Gets const a reference to the process' page table.
+    /// Gets a const reference to the process' page table.
     const KPageTable& PageTable() const {
-        return *page_table;
+        return page_table;
     }
 
     /// Gets a reference to the process' handle table.
@@ -145,26 +138,25 @@ public:
     }
 
     Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
-        return condition_var.Wait(address, cv_key, tag, ns);
+        R_RETURN(condition_var.Wait(address, cv_key, tag, ns));
     }
 
     Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
-        return address_arbiter.SignalToAddress(address, signal_type, value, count);
+        R_RETURN(address_arbiter.SignalToAddress(address, signal_type, value, count));
     }
 
     Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
                               s64 timeout) {
-        return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
+        R_RETURN(address_arbiter.WaitForAddress(address, arb_type, value, timeout));
     }
 
-    /// Gets the address to the process' dedicated TLS region.
-    VAddr GetTLSRegionAddress() const {
-        return tls_region_address;
+    VAddr GetProcessLocalRegionAddress() const {
+        return plr_address;
     }
 
-    /// Gets the current status of the process
-    ProcessStatus GetStatus() const {
-        return status;
+    /// Gets the current state of the process
+    State GetState() const {
+        return state;
     }
 
     /// Gets the unique ID that identifies this particular process.
@@ -286,18 +278,18 @@ public:
     }
 
     /// Retrieves the total physical memory available to this process in bytes.
-    u64 GetTotalPhysicalMemoryAvailable() const;
+    u64 GetTotalPhysicalMemoryAvailable();
 
     /// Retrieves the total physical memory available to this process in bytes,
     /// without the size of the personal system resource heap added to it.
-    u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource() const;
+    u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource();
 
     /// Retrieves the total physical memory used by this process in bytes.
-    u64 GetTotalPhysicalMemoryUsed() const;
+    u64 GetTotalPhysicalMemoryUsed();
 
     /// Retrieves the total physical memory used by this process in bytes,
     /// without the size of the personal system resource heap added to it.
-    u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const;
+    u64 GetTotalPhysicalMemoryUsedWithoutSystemResource();
 
     /// Gets the list of all threads created with this process as their owner.
     std::list<KThread*>& GetThreadList() {
@@ -415,19 +407,24 @@ private:
         pinned_threads[core_id] = nullptr;
     }
 
-    /// Changes the process status. If the status is different
-    /// from the current process status, then this will trigger
-    /// a process signal.
-    void ChangeStatus(ProcessStatus new_status);
+    void FinalizeHandleTable() {
+        // Finalize the table.
+        handle_table.Finalize();
+
+        // Note that the table is finalized.
+        is_handle_table_initialized = false;
+    }
+
+    void ChangeState(State new_state);
 
     /// Allocates the main thread stack for the process, given the stack size in bytes.
     Result AllocateMainThreadStack(std::size_t stack_size);
 
     /// Memory manager for this process
-    std::unique_ptr<KPageTable> page_table;
+    KPageTable page_table;
 
-    /// Current status of the process
-    ProcessStatus status{};
+    /// Current state of the process
+    State state{};
 
     /// The ID of this process
     u64 process_id = 0;
@@ -443,6 +440,8 @@ private:
     /// Resource limit descriptor for this process
     KResourceLimit* resource_limit{};
 
+    VAddr system_resource_address{};
+
     /// The ideal CPU core for this process, threads are scheduled on this core by default.
     u8 ideal_core = 0;
 
@@ -469,7 +468,7 @@ private:
     KConditionVariable condition_var;
 
-    /// Address indicating the location of the process' dedicated TLS region.
-    VAddr tls_region_address = 0;
+    /// Address indicating the location of the process local region (PLR).
+    VAddr plr_address = 0;
 
     /// Random values for svcGetInfo RandomEntropy
     std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
@@ -495,8 +494,12 @@ private:
     /// Schedule count of this process
     s64 schedule_count{};
 
+    size_t memory_release_hint{};
+
     bool is_signaled{};
     bool is_suspended{};
+    bool is_immortal{};
+    bool is_handle_table_initialized{};
     bool is_initialized{};
 
     std::atomic<u16> num_running_threads{};
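
Each KProcess::State enumerator above is defined by casting the matching Svc::ProcessState value, which is what lets GetProcessInfo in svc.cpp (later in this diff) return the state with a bare static_cast to u64. A compile-time guard along these lines (an assumption, not part of the change) would pin that invariant down:

    // Illustrative static_asserts: the kernel-side enum must mirror the
    // SVC-visible ProcessState values one-for-one.
    static_assert(static_cast<u32>(KProcess::State::Created) ==
                  static_cast<u32>(Svc::ProcessState::Created));
    static_assert(static_cast<u32>(KProcess::State::Terminating) ==
                  static_cast<u32>(Svc::ProcessState::Terminating));
    static_assert(static_cast<u32>(KProcess::State::Terminated) ==
                  static_cast<u32>(Svc::ProcessState::Terminated));
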
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 8ff1545b6c..a039cc591c 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -50,7 +50,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     is_initialized = true;
 
     // Clear all pages in the memory.
-    std::memset(device_memory_.GetPointer(physical_address_), 0, size_);
+    std::memset(device_memory_.GetPointer<void>(physical_address_), 0, size_);
 
     return ResultSuccess;
 }
diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h
index 34cb984564..5620c3660a 100644
--- a/src/core/hle/kernel/k_shared_memory.h
+++ b/src/core/hle/kernel/k_shared_memory.h
@@ -54,7 +54,7 @@ public:
      * @return A pointer to the shared memory block from the specified offset
      */
     u8* GetPointer(std::size_t offset = 0) {
-        return device_memory->GetPointer(physical_address + offset);
+        return device_memory->GetPointer<u8>(physical_address + offset);
     }
 
     /**
@@ -63,7 +63,7 @@ public:
      * @return A pointer to the shared memory block from the specified offset
      */
     const u8* GetPointer(std::size_t offset = 0) const {
-        return device_memory->GetPointer(physical_address + offset);
+        return device_memory->GetPointer<u8>(physical_address + offset);
     }
 
     void Finalize() override;
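
The call-site changes above follow from DeviceMemory::GetPointer becoming a function template over the pointee type, so callers now spell out u8, void, etc. instead of always receiving u8*. Roughly the shape implied, with a hypothetical body for illustration only (TranslateToHostPointer is an assumed helper, not the actual implementation):

    // Hypothetical sketch of the templated accessor the call sites assume.
    template <typename T>
    T* GetPointer(PAddr addr) {
        // Translate the physical address into the host backing buffer and
        // hand back a typed pointer; the translation step is illustrative.
        return reinterpret_cast<T*>(TranslateToHostPointer(addr));
    }
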
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 174afc80d7..b7bfcdce31 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -30,6 +30,7 @@
 #include "core/hle/kernel/k_worker_task_manager.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/svc_results.h"
+#include "core/hle/kernel/svc_types.h"
 #include "core/hle/result.h"
 #include "core/memory.h"
 
@@ -38,6 +39,9 @@
 #endif
 
 namespace {
+
+constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1;
+
 static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
                                  u32 entry_point, u32 arg) {
     context = {};
@@ -241,7 +245,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
         }
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
@@ -254,7 +258,7 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_
     thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func));
     thread->is_single_core = !Settings::values.use_multi_core.GetValue();
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KThread::InitializeDummyThread(KThread* thread) {
@@ -264,31 +268,32 @@ Result KThread::InitializeDummyThread(KThread* thread) {
     // Initialize emulation parameters.
     thread->stack_parameters.disable_count = 0;
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) {
-    return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
-                            system.GetCpuManager().GetGuestActivateFunc());
+    R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
+                              ThreadType::Main, system.GetCpuManager().GetGuestActivateFunc()));
 }
 
 Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
-    return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
-                            system.GetCpuManager().GetIdleThreadStartFunc());
+    R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
+                              ThreadType::Main, system.GetCpuManager().GetIdleThreadStartFunc()));
 }
 
 Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
                                              KThreadFunction func, uintptr_t arg, s32 virt_core) {
-    return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority,
-                            system.GetCpuManager().GetShutdownThreadStartFunc());
+    R_RETURN(InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr,
+                              ThreadType::HighPriority,
+                              system.GetCpuManager().GetShutdownThreadStartFunc()));
 }
 
 Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
                                      uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
                                      KProcess* owner) {
     system.Kernel().GlobalSchedulerContext().AddThread(thread);
-    return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
-                            ThreadType::User, system.GetCpuManager().GetGuestThreadFunc());
+    R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
+                              ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()));
 }
 
 void KThread::PostDestroy(uintptr_t arg) {
@@ -538,7 +543,7 @@ Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
     *out_ideal_core = virtual_ideal_core_id;
     *out_affinity_mask = virtual_affinity_mask;
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
@@ -554,7 +559,7 @@ Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask)
         *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask();
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
@@ -666,7 +671,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
         } while (retry_update);
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 void KThread::SetBasePriority(s32 value) {
@@ -839,7 +844,7 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
         } while (thread_is_current);
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KThread::GetThreadContext3(std::vector<u8>& out) {
@@ -874,7 +879,7 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
         }
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 void KThread::AddWaiterImpl(KThread* thread) {
@@ -1038,7 +1043,7 @@ Result KThread::Run() {
         // Set our state and finish.
         SetState(ThreadState::Runnable);
 
-        return ResultSuccess;
+        R_SUCCEED();
     }
 }
 
@@ -1073,6 +1078,78 @@ void KThread::Exit() {
     UNREACHABLE_MSG("KThread::Exit() would return");
 }
 
+Result KThread::Terminate() {
+    ASSERT(this != GetCurrentThreadPointer(kernel));
+
+    // Request that the thread terminate if it hasn't already.
+    if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) {
+        // If the thread isn't terminated, wait for it to terminate.
+        s32 index;
+        KSynchronizationObject* objects[] = {this};
+        R_TRY(KSynchronizationObject::Wait(kernel, std::addressof(index), objects, 1,
+                                           Svc::WaitInfinite));
+    }
+
+    R_SUCCEED();
+}
+
+ThreadState KThread::RequestTerminate() {
+    ASSERT(this != GetCurrentThreadPointer(kernel));
+
+    KScopedSchedulerLock sl{kernel};
+
+    // Determine if this is the first termination request.
+    const bool first_request = [&]() -> bool {
+        // Perform an atomic compare-and-swap from false to true.
+        bool expected = false;
+        return termination_requested.compare_exchange_strong(expected, true);
+    }();
+
+    // If this is the first request, start the termination procedure.
+    if (first_request) {
+        // If the thread is in the initialized state, just change its state to terminated.
+        if (this->GetState() == ThreadState::Initialized) {
+            thread_state = ThreadState::Terminated;
+            return ThreadState::Terminated;
+        }
+
+        // Register the terminating dpc.
+        this->RegisterDpc(DpcFlag::Terminating);
+
+        // If the thread is pinned, unpin it.
+        if (this->GetStackParameters().is_pinned) {
+            this->GetOwnerProcess()->UnpinThread(this);
+        }
+
+        // If the thread is suspended, continue it.
+        if (this->IsSuspended()) {
+            suspend_allowed_flags = 0;
+            this->UpdateState();
+        }
+
+        // Change the thread's priority to be higher than any system thread's.
+        if (this->GetBasePriority() >= Svc::SystemThreadPriorityHighest) {
+            this->SetBasePriority(TerminatingThreadPriority);
+        }
+
+        // If the thread is runnable, send a termination interrupt to other cores.
+        if (this->GetState() == ThreadState::Runnable) {
+            if (const u64 core_mask =
+                    physical_affinity_mask.GetAffinityMask() & ~(1ULL << GetCurrentCoreId(kernel));
+                core_mask != 0) {
+                Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask);
+            }
+        }
+
+        // Wake up the thread.
+        if (this->GetState() == ThreadState::Waiting) {
+            wait_queue->CancelWait(this, ResultTerminationRequested, true);
+        }
+    }
+
+    return this->GetState();
+}
+
 Result KThread::Sleep(s64 timeout) {
     ASSERT(!kernel.GlobalSchedulerContext().IsLocked());
     ASSERT(this == GetCurrentThreadPointer(kernel));
@@ -1086,7 +1163,7 @@ Result KThread::Sleep(s64 timeout) {
         // Check if the thread should terminate.
         if (this->IsTerminationRequested()) {
             slp.CancelSleep();
-            return ResultTerminationRequested;
+            R_THROW(ResultTerminationRequested);
         }
 
         // Wait for the sleep to end.
@@ -1094,7 +1171,7 @@ Result KThread::Sleep(s64 timeout) {
         SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 void KThread::IfDummyThreadTryWait() {
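
The first-request gate in RequestTerminate above hinges on an atomic compare-and-swap: exactly one racing caller flips termination_requested from false to true, and only that caller performs the teardown (DPC registration, unpinning, priority boost, IPI, wait cancellation). The idiom in isolation, as plain standard C++:

    #include <atomic>

    std::atomic<bool> termination_requested{false};

    // Returns true for exactly one caller, however many threads race here;
    // every other caller sees the flag already set and gets false.
    bool IsFirstTerminationRequest() {
        bool expected = false;
        return termination_requested.compare_exchange_strong(expected, true);
    }
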
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index 9ee20208eb..e2a27d6036 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -180,6 +180,10 @@ public:
 
     void Exit();
 
+    Result Terminate();
+
+    ThreadState RequestTerminate();
+
     [[nodiscard]] u32 GetSuspendFlags() const {
         return suspend_allowed_flags & suspend_request_flags;
     }
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 9251f29ad7..eed2dc9f3e 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -24,6 +24,7 @@
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/init/init_slab_setup.h"
 #include "core/hle/kernel/k_client_port.h"
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
 #include "core/hle/kernel/k_handle_table.h"
 #include "core/hle/kernel/k_memory_layout.h"
 #include "core/hle/kernel/k_memory_manager.h"
@@ -73,8 +74,16 @@ struct KernelCore::Impl {
         InitializeMemoryLayout();
         Init::InitializeKPageBufferSlabHeap(system);
         InitializeShutdownThreads();
-        InitializePreemption(kernel);
         InitializePhysicalCores();
+        InitializePreemption(kernel);
+
+        // Initialize the Dynamic Slab Heaps.
+        {
+            const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion();
+            ASSERT(pt_heap_region.GetEndAddress() != 0);
+
+            InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize());
+        }
 
         RegisterHostThread();
     }
@@ -86,6 +95,15 @@ struct KernelCore::Impl {
         }
     }
 
+    void CloseCurrentProcess() {
+        (*current_process).Finalize();
+        // current_process->Close();
+        // TODO: The current process should be destroyed based on accurate ref counting after
+        // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
+        (*current_process).Destroy();
+        current_process = nullptr;
+    }
+
     void Shutdown() {
         is_shutting_down.store(true, std::memory_order_relaxed);
         SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
@@ -99,10 +117,6 @@ struct KernelCore::Impl {
         next_user_process_id = KProcess::ProcessIDMin;
         next_thread_id = 1;
 
-        for (auto& core : cores) {
-            core = nullptr;
-        }
-
         global_handle_table->Finalize();
         global_handle_table.reset();
 
@@ -152,15 +166,7 @@ struct KernelCore::Impl {
             }
         }
 
-        // Shutdown all processes.
-        if (current_process) {
-            (*current_process).Finalize();
-            // current_process->Close();
-            // TODO: The current process should be destroyed based on accurate ref counting after
-            // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
-            (*current_process).Destroy();
-            current_process = nullptr;
-        }
+        CloseCurrentProcess();
 
         // Track kernel objects that were not freed on shutdown
         {
@@ -257,6 +263,18 @@ struct KernelCore::Impl {
         system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
     }
 
+    void InitializeResourceManagers(VAddr address, size_t size) {
+        dynamic_page_manager = std::make_unique<KDynamicPageManager>();
+        memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>();
+        app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>();
+
+        dynamic_page_manager->Initialize(address, size);
+        static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000;
+        memory_block_heap->Initialize(dynamic_page_manager.get(),
+                                      ApplicationMemoryBlockSlabHeapSize);
+        app_memory_block_manager->Initialize(nullptr, memory_block_heap.get());
+    }
+
     void InitializeShutdownThreads() {
         for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
             shutdown_threads[core_id] = KThread::Create(system.Kernel());
@@ -344,11 +362,6 @@ struct KernelCore::Impl {
     static inline thread_local KThread* current_thread{nullptr};
 
     KThread* GetCurrentEmuThread() {
-        // If we are shutting down the kernel, none of this is relevant anymore.
-        if (IsShuttingDown()) {
-            return {};
-        }
-
         const auto thread_id = GetCurrentHostThreadID();
         if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
             return GetHostDummyThread();
@@ -770,6 +783,11 @@ struct KernelCore::Impl {
     // Kernel memory management
     std::unique_ptr<KMemoryManager> memory_manager;
 
+    // Dynamic slab managers
+    std::unique_ptr<KDynamicPageManager> dynamic_page_manager;
+    std::unique_ptr<KMemoryBlockSlabHeap> memory_block_heap;
+    std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager;
+
     // Shared memory for services
     Kernel::KSharedMemory* hid_shared_mem{};
     Kernel::KSharedMemory* font_shared_mem{};
@@ -853,6 +871,10 @@ const KProcess* KernelCore::CurrentProcess() const {
     return impl->current_process;
 }
 
+void KernelCore::CloseCurrentProcess() {
+    impl->CloseCurrentProcess();
+}
+
 const std::vector<KProcess*>& KernelCore::GetProcessList() const {
     return impl->process_list;
 }
@@ -1041,6 +1063,14 @@ const KMemoryManager& KernelCore::MemoryManager() const {
     return *impl->memory_manager;
 }
 
+KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() {
+    return *impl->app_memory_block_manager;
+}
+
+const KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() const {
+    return *impl->app_memory_block_manager;
+}
+
 Kernel::KSharedMemory& KernelCore::GetHidSharedMem() {
     return *impl->hid_shared_mem;
 }
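
InitializeResourceManagers above builds the allocator stack bottom-up: the dynamic page manager is backed by the page-table heap region, the memory block slab heap carves those pages into KMemoryBlock slots (20000 of them), and the slab manager fronts the heap for the application process. The ordering is load-bearing, since each Initialize consumes the object set up before it; schematically, using only the calls shown in the diff:

    // Sketch of the dependency chain, same signatures as in the diff.
    void SetUpAppBlockManager(VAddr addr, size_t size, KDynamicPageManager& pages,
                              KMemoryBlockSlabHeap& heap,
                              KMemoryBlockSlabManager& manager) {
        pages.Initialize(addr, size);        // back the allocator with the heap region
        heap.Initialize(&pages, 20000);      // carve pages into memory-block slots
        manager.Initialize(nullptr, &heap);  // front the heap for the application
    }
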
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 0847cbcbf7..6eded95393 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -37,6 +37,7 @@ class KClientSession;
 class KEvent;
 class KHandleTable;
 class KLinkedListNode;
+class KMemoryBlockSlabManager;
 class KMemoryLayout;
 class KMemoryManager;
 class KPageBuffer;
@@ -130,6 +131,9 @@ public:
     /// Retrieves a const pointer to the current process.
     const KProcess* CurrentProcess() const;
 
+    /// Closes the current process.
+    void CloseCurrentProcess();
+
     /// Retrieves the list of processes.
     const std::vector<KProcess*>& GetProcessList() const;
 
@@ -238,6 +242,12 @@ public:
     /// Gets the virtual memory manager for the kernel.
     const KMemoryManager& MemoryManager() const;
 
+    /// Gets the application memory block manager for the kernel.
+    KMemoryBlockSlabManager& GetApplicationMemoryBlockManager();
+
+    /// Gets the application memory block manager for the kernel.
+    const KMemoryBlockSlabManager& GetApplicationMemoryBlockManager() const;
+
     /// Gets the shared memory object for HID services.
     Kernel::KSharedMemory& GetHidSharedMem();
 
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 1d145ea91e..b07ae3f027 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -933,7 +933,7 @@ static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle han
             return ResultSuccess;
 
         case GetInfoType::UserExceptionContextAddr:
-            *result = process->GetTLSRegionAddress();
+            *result = process->GetProcessLocalRegionAddress();
             return ResultSuccess;
 
         case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource:
@@ -1888,7 +1888,7 @@ static void ExitProcess(Core::System& system) {
     auto* current_process = system.Kernel().CurrentProcess();
 
     LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
-    ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running,
+    ASSERT_MSG(current_process->GetState() == KProcess::State::Running,
                "Process has already exited");
 
     system.Exit();
@@ -2557,7 +2557,7 @@ static Result GetProcessInfo(Core::System& system, u64* out, Handle process_hand
         return ResultInvalidEnumValue;
     }
 
-    *out = static_cast<u64>(process->GetStatus());
+    *out = static_cast<u64>(process->GetState());
     return ResultSuccess;
 }
 
diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h
index 95750c3ebe..85506710ef 100644
--- a/src/core/hle/kernel/svc_common.h
+++ b/src/core/hle/kernel/svc_common.h
@@ -14,8 +14,11 @@ namespace Kernel::Svc {
 
 using namespace Common::Literals;
 
-constexpr s32 ArgumentHandleCountMax = 0x40;
-constexpr u32 HandleWaitMask{1u << 30};
+constexpr inline s32 ArgumentHandleCountMax = 0x40;
+
+constexpr inline u32 HandleWaitMask = 1u << 30;
+
+constexpr inline s64 WaitInfinite = -1;
 
 constexpr inline std::size_t HeapSizeAlignment = 2_MiB;
 
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 79e15183aa..abb9847fe8 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -95,6 +95,19 @@ constexpr inline s32 IdealCoreNoUpdate = -3;
 constexpr inline s32 LowestThreadPriority = 63;
 constexpr inline s32 HighestThreadPriority = 0;
 
+constexpr inline s32 SystemThreadPriorityHighest = 16;
+
+enum class ProcessState : u32 {
+    Created = 0,
+    CreatedAttached = 1,
+    Running = 2,
+    Crashed = 3,
+    RunningAttached = 4,
+    Terminating = 5,
+    Terminated = 6,
+    DebugBreak = 7,
+};
+
 constexpr inline size_t ThreadLocalRegionSize = 0x200;
 
 } // namespace Kernel::Svc
diff --git a/src/core/hle/result.h b/src/core/hle/result.h
index d67e68bae3..ef4b2d4173 100644
--- a/src/core/hle/result.h
+++ b/src/core/hle/result.h
@@ -135,6 +135,14 @@ union Result {
     [[nodiscard]] constexpr bool IsFailure() const {
         return !IsSuccess();
     }
+
+    [[nodiscard]] constexpr u32 GetInnerValue() const {
+        return static_cast<u32>(module.Value()) | (description << module.bits);
+    }
+
+    [[nodiscard]] constexpr bool Includes(Result result) const {
+        return GetInnerValue() == result.GetInnerValue();
+    }
 };
 static_assert(std::is_trivial_v<Result>);
 
@@ -462,9 +470,6 @@ constexpr inline Result __TmpCurrentResultReference = ResultSuccess;
 #define R_UNLESS(expr, res)                                                                        \
     {                                                                                              \
         if (!(expr)) {                                                                             \
-            if (res.IsError()) {                                                                   \
-                LOG_ERROR(Kernel, "Failed with result: {}", res.raw);                              \
-            }                                                                                      \
             R_THROW(res);                                                                          \
         }                                                                                          \
     }
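
This diff also converts bare return statements to the result macros throughout (R_SUCCEED, R_RETURN, R_THROW alongside the existing R_TRY/R_UNLESS). Reduced to their observable control flow, and ignoring whatever bookkeeping the real macros do beyond it, they behave roughly like this sketch:

    // Simplified semantics only; the actual macro definitions are richer.
    #define R_SUCCEED()    return ResultSuccess   // succeed from a Result function
    #define R_THROW(res)   return (res)           // fail with a specific result
    #define R_RETURN(res)  return (res)           // propagate a callee's result
    #define R_TRY(expr)                                                   \
        {                                                                 \
            if (const Result _r_try_rc = (expr); _r_try_rc.IsError()) {   \
                R_THROW(_r_try_rc);                                       \
            }                                                             \
        }

The new Result::Includes helper, for its part, compares the module and description fields of two results via GetInnerValue, so two results encoding the same module and description compare as equal.
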
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index becd6d1b9f..652441bc29 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -290,7 +290,7 @@ public:
         const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize};
         const auto start_info{page_table.QueryInfo(start - 1)};
 
-        if (start_info.state != Kernel::KMemoryState::Free) {
+        if (start_info.GetState() != Kernel::KMemoryState::Free) {
             return {};
         }
 
@@ -300,7 +300,7 @@ public:
 
         const auto end_info{page_table.QueryInfo(start + size)};
 
-        if (end_info.state != Kernel::KMemoryState::Free) {
+        if (end_info.GetState() != Kernel::KMemoryState::Free) {
             return {};
         }
 
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index ddf273b5ec..b606790217 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -128,7 +128,8 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output)
     }
     ASSERT(system.CurrentProcess()
                ->PageTable()
-               .LockForDeviceAddressSpace(handle_description->address, handle_description->size)
+               .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size,
+                                             Kernel::KMemoryPermission::None, true)
                .IsSuccess());
     std::memcpy(output.data(), &params, sizeof(params));
     return result;
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 2ac792566e..9637cb5b1e 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -65,7 +65,7 @@ struct Memory::Impl {
             return {};
         }
 
-        return system.DeviceMemory().GetPointer(paddr) + vaddr;
+        return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
     }
 
     [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const {
@@ -75,7 +75,7 @@ struct Memory::Impl {
             return {};
         }
 
-        return system.DeviceMemory().GetPointer(paddr) + vaddr;
+        return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
     }
 
     u8 Read8(const VAddr addr) {
@@ -499,7 +499,7 @@ struct Memory::Impl {
         } else {
             while (base != end) {
                 page_table.pointers[base].Store(
-                    system.DeviceMemory().GetPointer(target) - (base << YUZU_PAGEBITS), type);
+                    system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS), type);
                 page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS);
 
                 ASSERT_MSG(page_table.pointers[base].Pointer(),
diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp
index 7c432a63c5..284b2ae66f 100644
--- a/src/tests/core/core_timing.cpp
+++ b/src/tests/core/core_timing.cpp
@@ -40,9 +40,6 @@ struct ScopeInit final {
         core_timing.SetMulticore(true);
         core_timing.Initialize([]() {});
     }
-    ~ScopeInit() {
-        core_timing.Shutdown();
-    }
 
     Core::Timing::CoreTiming core_timing;
 };
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index 7cb02631c4..4b15c0f85b 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -59,10 +59,11 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
         std::find_if(pools.begin(), pools.end(), [query_pool = query.first](vk::QueryPool& pool) {
             return query_pool == *pool;
         });
-    ASSERT(it != std::end(pools));
 
-    const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
-    usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
+    if (it != std::end(pools)) {
+        const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
+        usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
+    }
 }
 
 QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_,
diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp
index 24251247d2..6acfb7b06c 100644
--- a/src/yuzu/bootmanager.cpp
+++ b/src/yuzu/bootmanager.cpp
@@ -120,8 +120,8 @@ void EmuThread::run() {
         }
     }
 
-    // Shutdown the core emulation
-    system.Shutdown();
+    // Shut down the main emulated process
+    system.ShutdownMainProcess();
 
 #if MICROPROFILE_ENABLED
     MicroProfileOnThreadExit();
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index a94624be63..501c342551 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -294,6 +294,7 @@ GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan
 #ifdef __linux__
     SetupSigInterrupts();
 #endif
+    system->Initialize();
 
     Common::Log::Initialize();
     LoadTranslation();
diff --git a/src/yuzu_cmd/yuzu.cpp b/src/yuzu_cmd/yuzu.cpp
index 3a0f33cba1..e16f79eb40 100644
--- a/src/yuzu_cmd/yuzu.cpp
+++ b/src/yuzu_cmd/yuzu.cpp
@@ -302,6 +302,8 @@ int main(int argc, char** argv) {
     }
 
     Core::System system{};
+    system.Initialize();
+
     InputCommon::InputSubsystem input_subsystem{};
 
     // Apply the command line arguments
@@ -392,7 +394,7 @@ int main(int argc, char** argv) {
     }
     system.DetachDebugger();
     void(system.Pause());
-    system.Shutdown();
+    system.ShutdownMainProcess();
 
     detached_tasks.WaitForAllTasks();
     return 0;