diff --git a/libraries/config/board/nintendo/switch/board.mk b/libraries/config/board/nintendo/nx/board.mk similarity index 60% rename from libraries/config/board/nintendo/switch/board.mk rename to libraries/config/board/nintendo/nx/board.mk index facdf9632..0a7acfca7 100644 --- a/libraries/config/board/nintendo/switch/board.mk +++ b/libraries/config/board/nintendo/nx/board.mk @@ -1,4 +1,4 @@ -export ATMOSPHERE_DEFINES += -DATMOSPHERE_BOARD_NINTENDO_SWITCH -D__SWITCH__ +export ATMOSPHERE_DEFINES += -DATMOSPHERE_BOARD_NINTENDO_NX -D__SWITCH__ export ATMOSPHERE_SETTINGS += export ATMOSPHERE_CFLAGS += export ATMOSPHERE_CXXFLAGS += diff --git a/libraries/config/common.mk b/libraries/config/common.mk index 937e10d2c..b05082292 100644 --- a/libraries/config/common.mk +++ b/libraries/config/common.mk @@ -24,11 +24,11 @@ export ATMOSPHERE_ASFLAGS := ifeq ($(ATMOSPHERE_BOARD),nx-hac-001) export ATMOSPHERE_ARCH_DIR := arch/arm64 -export ATMOSPHERE_BOARD_DIR := board/nintendo/switch +export ATMOSPHERE_BOARD_DIR := board/nintendo/nx export ATMOSPHERE_OS_DIR := os/horizon export ATMOSPHERE_ARCH_NAME := arm64 -export ATMOSPHERE_BOARD_NAME := nintendo_switch +export ATMOSPHERE_BOARD_NAME := nintendo_nx export ATMOSPHERE_OS_NAME := horizon endif @@ -76,14 +76,22 @@ TARGET := $(notdir $(CURDIR)) BUILD := build DATA := data INCLUDES := include -SOURCES ?= $(foreach d,$(filter-out source/arch source/board,$(wildcard source)),$(call DIR_WILDCARD,$d) $d) +SOURCES ?= source $(foreach d,$(filter-out source/arch source/board source,$(wildcard source/*)),$(if $(wildcard $d/.),$(call DIR_WILDCARD,$d) $d,)) ifneq ($(strip $(wildcard source/$(ATMOSPHERE_ARCH_DIR)/.*)),) -SOURCES += $(call DIR_WILDCARD,source/$(ATMOSPHERE_ARCH_DIR)) +SOURCES += source/$(ATMOSPHERE_ARCH_DIR) $(call DIR_WILDCARD,source/$(ATMOSPHERE_ARCH_DIR)) endif ifneq ($(strip $(wildcard source/$(ATMOSPHERE_BOARD_DIR)/.*)),) -SOURCES += $(call DIR_WILDCARD,source/$(ATMOSPHERE_BOARD_DIR)) +SOURCES += source/$(ATMOSPHERE_BOARD_DIR) $(call DIR_WILDCARD,source/$(ATMOSPHERE_BOARD_DIR)) endif ifneq ($(strip $(wildcard source/$(ATMOSPHERE_OS_DIR)/.*)),) -SOURCES += $(call DIR_WILDCARD,source/$(ATMOSPHERE_OS_DIR)) +SOURCES += source/$(ATMOSPHERE_OS_DIR) $(call DIR_WILDCARD,source/$(ATMOSPHERE_OS_DIR)) endif + +#--------------------------------------------------------------------------------- +# Rules for compiling pre-compiled headers +#--------------------------------------------------------------------------------- +%.gch: %.hpp + @echo $< + $(CXX) -w -x c++-header -MMD -MP -MF $(DEPSDIR)/$*.d $(CXXFLAGS) -c $< -o $@ $(ERROR_FILTER) + @cp $@ $(<).gch diff --git a/libraries/config/templates/mesosphere.mk b/libraries/config/templates/mesosphere.mk index 39301c66a..a8a36c9e6 100644 --- a/libraries/config/templates/mesosphere.mk +++ b/libraries/config/templates/mesosphere.mk @@ -7,12 +7,12 @@ include $(dir $(abspath $(lastword $(MAKEFILE_LIST))))/../common.mk # options for code generation #--------------------------------------------------------------------------------- export DEFINES := $(ATMOSPHERE_DEFINES) -DATMOSPHERE_IS_MESOSPHERE -export SETTINGS := $(ATMOSPHERE_SETTINGS) -O2 -mgeneral-regs-only -ffixed-x18 -Werror +export SETTINGS := $(ATMOSPHERE_SETTINGS) -O2 -mgeneral-regs-only -ffixed-x18 -Werror -fno-non-call-exceptions export CFLAGS := $(ATMOSPHERE_CFLAGS) $(SETTINGS) $(DEFINES) $(INCLUDE) -export CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) +export CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) -fno-use-cxa-atexit export ASFLAGS := 
$(ATMOSPHERE_ASFLAGS) $(SETTINGS) $(DEFINES) -export LDFLAGS = -specs=$(TOPDIR)/kernel_ldr.specs -nostdlib -nostartfiles -g $(SETTINGS) -Wl,-Map,$(notdir $*.map) +export LDFLAGS = -specs=$(TOPDIR)/$(notdir $(TOPDIR)).specs -nostdlib -nostartfiles -g $(SETTINGS) -Wl,-Map,$(notdir $*.map) -Wl,-z,relro,-z,now export CXXWRAPS := -Wl,--wrap,__cxa_pure_virtual \ -Wl,--wrap,__cxa_throw \ @@ -25,7 +25,6 @@ export CXXWRAPS := -Wl,--wrap,__cxa_pure_virtual \ -Wl,--wrap,__cxa_call_terminate \ -Wl,--wrap,__gxx_personality_v0 \ -Wl,--wrap,_Unwind_Resume \ - -Wl,--wrap,_Unwind_Resume \ -Wl,--wrap,_ZSt19__throw_logic_errorPKc \ -Wl,--wrap,_ZSt20__throw_length_errorPKc \ -Wl,--wrap,_ZNSt11logic_errorC2EPKc diff --git a/libraries/libmesosphere/Makefile b/libraries/libmesosphere/Makefile index e3047967b..be6fb94d6 100644 --- a/libraries/libmesosphere/Makefile +++ b/libraries/libmesosphere/Makefile @@ -6,10 +6,12 @@ include $(dir $(abspath $(lastword $(MAKEFILE_LIST))))/../config/common.mk #--------------------------------------------------------------------------------- # options for code generation #--------------------------------------------------------------------------------- +PRECOMPILED_HEADERS := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))/include/mesosphere.hpp + DEFINES := $(ATMOSPHERE_DEFINES) -DATMOSPHERE_IS_MESOSPHERE -SETTINGS := $(ATMOSPHERE_SETTINGS) -O2 -mgeneral-regs-only -ffixed-x18 -Werror +SETTINGS := $(ATMOSPHERE_SETTINGS) -O2 -mgeneral-regs-only -ffixed-x18 -Werror -fno-non-call-exceptions CFLAGS := $(ATMOSPHERE_CFLAGS) $(SETTINGS) $(DEFINES) $(INCLUDE) -CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) -flto +CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) -fno-use-cxa-atexit -flto ASFLAGS := $(ATMOSPHERE_ASFLAGS) $(SETTINGS) LIBS := @@ -27,7 +29,7 @@ LIBDIRS := $(ATMOSPHERE_LIBRARIES_DIR)/libvapours ifneq ($(BUILD),$(notdir $(CURDIR))) #--------------------------------------------------------------------------------- -export VPATH := $(foreach dir,$(SOURCES),$(CURDIR)/$(dir)) \ +export VPATH := $(foreach dir,$(SOURCES),$(CURDIR)/$(dir)) $(CURDIR)/include \ $(foreach dir,$(DATA),$(CURDIR)/$(dir)) CFILES := $(foreach dir,$(SOURCES),$(filter-out $(notdir $(wildcard $(dir)/*.arch.*.c)) $(notdir $(wildcard $(dir)/*.board.*.c)) $(notdir $(wildcard $(dir)/*.os.*.c)), \ @@ -64,6 +66,7 @@ endif export OFILES_BIN := $(addsuffix .o,$(BINFILES)) export OFILES_SRC := $(CPPFILES:.cpp=.o) $(CFILES:.c=.o) $(SFILES:.s=.o) +export GCH_FILES := $(foreach hdr,$(PRECOMPILED_HEADERS:.hpp=.gch),$(notdir $(hdr))) export OFILES := $(OFILES_BIN) $(OFILES_SRC) export HFILES_BIN := $(addsuffix .h,$(subst .,_,$(BINFILES))) @@ -105,13 +108,15 @@ clean: #--------------------------------------------------------------------------------- else -DEPENDS := $(OFILES:.o=.d) +DEPENDS := $(OFILES:.o=.d) $(GCH_FILES:.gch=.d) #--------------------------------------------------------------------------------- # main targets #--------------------------------------------------------------------------------- $(OUTPUT) : $(OFILES) +$(filter-out kern_svc_tables.o, $(OFILES)) : $(GCH_FILES) + $(OFILES_SRC) : $(HFILES_BIN) #--------------------------------------------------------------------------------- diff --git a/libraries/libmesosphere/include/mesosphere.hpp b/libraries/libmesosphere/include/mesosphere.hpp index f155cebc1..ab2d02bcd 100644 --- a/libraries/libmesosphere/include/mesosphere.hpp +++ b/libraries/libmesosphere/include/mesosphere.hpp @@ -19,20 +19,70 @@ #include /* First, pull in core macros (panic, etc). 
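This header is also the root of the new precompiled header (see PRECOMPILED_HEADERS in libmesosphere's Makefile), so every header the kernel needs is pulled in here, in dependency order.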
*/ -#include "mesosphere/kern_panic.hpp" +#include +#include /* Primitive types. */ -#include "mesosphere/kern_k_typed_address.hpp" -#include "mesosphere/kern_initial_process.hpp" +#include +#include +#include /* Core pre-initialization includes. */ -#include "mesosphere/kern_select_cpu.hpp" +#include +#include +#include /* Initialization headers. */ -#include "mesosphere/init/kern_init_elf.hpp" -#include "mesosphere/init/kern_init_layout.hpp" -#include "mesosphere/init/kern_init_page_table_select.hpp" +#include +#include +#include +#include +#include +#include /* Core functionality. */ -#include "mesosphere/kern_select_interrupts.hpp" -#include "mesosphere/kern_select_k_system_control.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Miscellaneous objects. */ +#include +#include + +/* Auto Objects. */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* More Miscellaneous objects. */ +#include +#include + +/* Supervisor Calls. */ +#include + +/* Main functionality. */ +#include diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp new file mode 100644 index 000000000..b0334cb14 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once + +namespace ams::kern::init { + + struct KInitArguments { + u64 ttbr0; + u64 ttbr1; + u64 tcr; + u64 mair; + u64 cpuactlr; + u64 cpuectlr; + u64 sctlr; + u64 sp; + u64 entrypoint; + u64 argument; + u64 setup_function; + }; + +} \ No newline at end of file diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp index 96d7296b5..bfa6528ce 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp @@ -15,173 +15,12 @@ */ #pragma once #include -#include +#include #include -#include "../kern_cpu.hpp" - -namespace ams::kern::init { - - constexpr size_t PageSize = 0x1000; - constexpr size_t L1BlockSize = 0x40000000; - constexpr size_t L2BlockSize = 0x200000; - constexpr size_t L2ContiguousBlockSize = 0x10 * L2BlockSize; - constexpr size_t L3BlockSize = 0x1000; - constexpr size_t L3ContiguousBlockSize = 0x10 * L3BlockSize; - - class PageTableEntry { - public: - enum Permission : u64 { - Permission_KernelRWX = ((0ul << 53) | (1ul << 54) | (0ul << 6)), - Permission_KernelRX = ((0ul << 53) | (1ul << 54) | (2ul << 6)), - Permission_KernelR = ((1ul << 53) | (1ul << 54) | (2ul << 6)), - Permission_KernelRW = ((1ul << 53) | (1ul << 54) | (0ul << 6)), - - Permission_UserRX = ((1ul << 53) | (0ul << 54) | (3ul << 6)), - Permission_UserR = ((1ul << 53) | (1ul << 54) | (3ul << 6)), - Permission_UserRW = ((1ul << 53) | (1ul << 54) | (1ul << 6)), - }; - - enum Shareable : u64 { - Shareable_NonShareable = (0 << 8), - Shareable_OuterShareable = (2 << 8), - Shareable_InnerShareable = (3 << 8), - }; - - /* Official attributes are: */ - /* 0x00, 0x04, 0xFF, 0x44. 4-7 are unused. */ - enum PageAttribute : u64 { - PageAttribute_Device_nGnRnE = (0 << 2), - PageAttribute_Device_nGnRE = (1 << 2), - PageAttribute_NormalMemory = (2 << 2), - PageAttribute_NormalMemoryNotCacheable = (3 << 2), - }; - - enum AccessFlag : u64 { - AccessFlag_NotAccessed = (0 << 10), - AccessFlag_Accessed = (1 << 10), - }; - protected: - u64 attributes; - public: - /* Take in a raw attribute. */ - constexpr ALWAYS_INLINE PageTableEntry(u64 attr) : attributes(attr) { /* ... */ } - - /* Extend a previous attribute. */ - constexpr ALWAYS_INLINE PageTableEntry(const PageTableEntry &rhs, u64 new_attr) : attributes(rhs.attributes | new_attr) { /* ... */ } - - /* Construct a new attribute. */ - constexpr ALWAYS_INLINE PageTableEntry(Permission perm, PageAttribute p_a, Shareable share) - : attributes(static_cast(perm) | static_cast(AccessFlag_Accessed) | static_cast(p_a) | static_cast(share)) - { - /* ... 
*/ - } - protected: - constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const { - return (this->attributes >> offset) & ((1ul << count) - 1); - } - - constexpr ALWAYS_INLINE u64 SelectBits(size_t offset, size_t count) const { - return this->attributes & (((1ul << count) - 1) << offset); - } - public: - constexpr ALWAYS_INLINE bool IsUserExecuteNever() const { return this->GetBits(54, 1) != 0; } - constexpr ALWAYS_INLINE bool IsPrivilegedExecuteNever() const { return this->GetBits(53, 1) != 0; } - constexpr ALWAYS_INLINE bool IsContiguous() const { return this->GetBits(52, 1) != 0; } - constexpr ALWAYS_INLINE AccessFlag GetAccessFlag() const { return static_cast(this->GetBits(10, 1)); } - constexpr ALWAYS_INLINE Shareable GetShareable() const { return static_cast(this->GetBits(8, 2)); } - constexpr ALWAYS_INLINE PageAttribute GetPageAttribute() const { return static_cast(this->GetBits(2, 3)); } - constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBits(5, 1) != 0; } - constexpr ALWAYS_INLINE bool IsBlock() const { return this->GetBits(0, 2) == 0x1; } - constexpr ALWAYS_INLINE bool IsTable() const { return this->GetBits(0, 2) == 0x3; } - - /* Should not be called except by derived classes. */ - constexpr ALWAYS_INLINE u64 GetRawAttributes() const { - return this->attributes; - } - }; - - static_assert(sizeof(PageTableEntry) == sizeof(u64)); - - constexpr PageTableEntry InvalidPageTableEntry = PageTableEntry(0); - - constexpr size_t MaxPageTableEntries = PageSize / sizeof(PageTableEntry); - - class L1PageTableEntry : public PageTableEntry { - public: - constexpr ALWAYS_INLINE L1PageTableEntry(KPhysicalAddress phys_addr, bool pxn) - : PageTableEntry((0x3ul << 60) | (static_cast(pxn) << 59) | GetInteger(phys_addr) | 0x3) - { - /* ... */ - } - - constexpr ALWAYS_INLINE L1PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig) - : PageTableEntry(attr, (static_cast(contig) << 52) | GetInteger(phys_addr) | 0x1) - { - /* ... */ - } - - constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const { - return this->SelectBits(30, 18); - } - - constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const { - return this->SelectBits(12, 36); - } - - constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const { - /* Check whether this has the same permission/etc as the desired attributes. */ - return L1PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes(); - } - }; - - class L2PageTableEntry : public PageTableEntry { - public: - constexpr ALWAYS_INLINE L2PageTableEntry(KPhysicalAddress phys_addr, bool pxn) - : PageTableEntry((0x3ul << 60) | (static_cast(pxn) << 59) | GetInteger(phys_addr) | 0x3) - { - /* ... */ - } - - constexpr ALWAYS_INLINE L2PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig) - : PageTableEntry(attr, (static_cast(contig) << 52) | GetInteger(phys_addr) | 0x1) - { - /* ... */ - } - - constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const { - return this->SelectBits(21, 27); - } - - constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const { - return this->SelectBits(12, 36); - } - - constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const { - /* Check whether this has the same permission/etc as the desired attributes. 
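We do this by constructing the entry that the desired attributes would produce for this block and comparing raw attribute bits, which validates permissions, shareability, and memory attributes in a single comparison.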
*/ - return L2PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes(); - } - }; - - class L3PageTableEntry : public PageTableEntry { - public: - constexpr ALWAYS_INLINE L3PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig) - : PageTableEntry(attr, (static_cast(contig) << 52) | GetInteger(phys_addr) | 0x3) - { - /* ... */ - } - - constexpr ALWAYS_INLINE bool IsBlock() const { return this->GetBits(0, 2) == 0x3; } - - constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const { - return this->SelectBits(12, 36); - } - - constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const { - /* Check whether this has the same permission/etc as the desired attributes. */ - return L3PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes(); - } - }; +#include +#include +namespace ams::kern::arch::arm64::init { class KInitialPageTable { public: @@ -190,10 +29,14 @@ namespace ams::kern::init { virtual KPhysicalAddress Allocate() { return Null; } virtual void Free(KPhysicalAddress phys_addr) { /* Nothing to do here. */ (void)(phys_addr); } }; + + struct NoClear{}; private: KPhysicalAddress l1_table; public: - constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1) : l1_table(l1) { + constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1, NoClear) : l1_table(l1) { /* ... */ } + + constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1) : KInitialPageTable(l1, NoClear{}) { ClearNewPageTable(this->l1_table); } @@ -224,9 +67,9 @@ namespace ams::kern::init { public: void NOINLINE Map(KVirtualAddress virt_addr, size_t size, KPhysicalAddress phys_addr, const PageTableEntry &attr, IPageAllocator &allocator) { /* Ensure that addresses and sizes are page aligned. */ - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(phys_addr), PageSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(phys_addr), PageSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize)); /* Iteratively map pages until the requested region is mapped. */ while (size > 0) { @@ -309,10 +152,37 @@ namespace ams::kern::init { } } + KPhysicalAddress GetPhysicalAddress(KVirtualAddress virt_addr) const { + /* Get the L1 entry. */ + const L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr); + + if (l1_entry->IsBlock()) { + return l1_entry->GetBlock() + (GetInteger(virt_addr) & (L1BlockSize - 1)); + } + + MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable()); + + /* Get the L2 entry. */ + const L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr); + + if (l2_entry->IsBlock()) { + return l2_entry->GetBlock() + (GetInteger(virt_addr) & (L2BlockSize - 1)); + } + + MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable()); + + /* Get the L3 entry. */ + const L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr); + + MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock()); + + return l3_entry->GetBlock() + (GetInteger(virt_addr) & (L3BlockSize - 1)); + } + bool IsFree(KVirtualAddress virt_addr, size_t size) { /* Ensure that addresses and sizes are page aligned. 
*/ - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize)); const KVirtualAddress end_virt_addr = virt_addr + size; while (virt_addr < end_virt_addr) { @@ -360,8 +230,8 @@ namespace ams::kern::init { cpu::DataSynchronizationBarrierInnerShareable(); /* Ensure that addresses and sizes are page aligned. */ - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize)); /* Iteratively reprotect pages until the requested region is reprotected. */ while (size > 0) { @@ -371,9 +241,9 @@ namespace ams::kern::init { if (l1_entry->IsBlock()) { /* Ensure that we are allowed to have an L1 block here. */ const KPhysicalAddress block = l1_entry->GetBlock(); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L1BlockSize)); - MESOSPHERE_ABORT_UNLESS(l1_entry->IsCompatibleWithAttribute(attr_before, false)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L1BlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsCompatibleWithAttribute(attr_before, false)); /* Invalidate the existing L1 block. */ *static_cast(l1_entry) = InvalidPageTableEntry; @@ -389,7 +259,7 @@ namespace ams::kern::init { } /* Not a block, so we must be a table. */ - MESOSPHERE_ABORT_UNLESS(l1_entry->IsTable()); + MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable()); L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr); if (l2_entry->IsBlock()) { @@ -397,14 +267,14 @@ namespace ams::kern::init { if (l2_entry->IsContiguous()) { /* Ensure that we are allowed to have a contiguous L2 block here. */ - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2ContiguousBlockSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L2ContiguousBlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2ContiguousBlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L2ContiguousBlockSize)); /* Invalidate the existing contiguous L2 block. */ for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) { /* Ensure that the entry is valid. */ - MESOSPHERE_ABORT_UNLESS(l2_entry[i].IsCompatibleWithAttribute(attr_before, true)); + MESOSPHERE_INIT_ABORT_UNLESS(l2_entry[i].IsCompatibleWithAttribute(attr_before, true)); static_cast(l2_entry)[i] = InvalidPageTableEntry; } cpu::DataSynchronizationBarrierInnerShareable(); @@ -419,10 +289,10 @@ namespace ams::kern::init { size -= L2ContiguousBlockSize; } else { /* Ensure that we are allowed to have an L2 block here. 
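(The virtual address, the physical block, and the remaining size must all be aligned to the 2MB L2 block size, and the existing entry must carry exactly the attributes we expect to replace.)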
*/ - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2BlockSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2BlockSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L2BlockSize)); - MESOSPHERE_ABORT_UNLESS(l2_entry->IsCompatibleWithAttribute(attr_before, false)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2BlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2BlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L2BlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsCompatibleWithAttribute(attr_before, false)); /* Invalidate the existing L2 block. */ *static_cast(l2_entry) = InvalidPageTableEntry; @@ -440,23 +310,23 @@ namespace ams::kern::init { } /* Not a block, so we must be a table. */ - MESOSPHERE_ABORT_UNLESS(l2_entry->IsTable()); + MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable()); /* We must have a mapped l3 entry to reprotect. */ L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr); - MESOSPHERE_ABORT_UNLESS(l3_entry->IsBlock()); + MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock()); const KPhysicalAddress block = l3_entry->GetBlock(); if (l3_entry->IsContiguous()) { /* Ensure that we are allowed to have a contiguous L3 block here. */ - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3ContiguousBlockSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L3ContiguousBlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3ContiguousBlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L3ContiguousBlockSize)); /* Invalidate the existing contiguous L3 block. */ for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) { /* Ensure that the entry is valid. */ - MESOSPHERE_ABORT_UNLESS(l3_entry[i].IsCompatibleWithAttribute(attr_before, true)); + MESOSPHERE_INIT_ABORT_UNLESS(l3_entry[i].IsCompatibleWithAttribute(attr_before, true)); static_cast(l3_entry)[i] = InvalidPageTableEntry; } cpu::DataSynchronizationBarrierInnerShareable(); @@ -471,10 +341,10 @@ namespace ams::kern::init { size -= L3ContiguousBlockSize; } else { /* Ensure that we are allowed to have an L3 block here. */ - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3BlockSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3BlockSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L3BlockSize)); - MESOSPHERE_ABORT_UNLESS(l3_entry->IsCompatibleWithAttribute(attr_before, false)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3BlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3BlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L3BlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsCompatibleWithAttribute(attr_before, false)); /* Invalidate the existing L3 block. */ *static_cast(l3_entry) = InvalidPageTableEntry; @@ -495,4 +365,35 @@ namespace ams::kern::init { }; + class KInitialPageAllocator : public KInitialPageTable::IPageAllocator { + private: + uintptr_t next_address; + public: + constexpr ALWAYS_INLINE KInitialPageAllocator() : next_address(Null) { /* ... 
*/ } + + ALWAYS_INLINE void Initialize(uintptr_t address) { + this->next_address = address; + } + + ALWAYS_INLINE uintptr_t GetFinalNextAddress() { + const uintptr_t final_address = this->next_address; + this->next_address = Null; + return final_address; + } + + ALWAYS_INLINE uintptr_t GetFinalState() { + return this->GetFinalNextAddress(); + } + public: + virtual KPhysicalAddress Allocate() override { + MESOSPHERE_INIT_ABORT_UNLESS(this->next_address != Null); + const uintptr_t allocated = this->next_address; + this->next_address += PageSize; + std::memset(reinterpret_cast(allocated), 0, PageSize); + return allocated; + } + + /* No need to override free. The default does nothing, and so would we. */ + }; + } diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp index 1e1610c95..03311a65d 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp @@ -15,9 +15,27 @@ */ #pragma once #include -#include "kern_cpu_system_registers.hpp" +#include +#include -namespace ams::kern::arm64::cpu { +namespace ams::kern::arch::arm64::cpu { + +#if defined(ATMOSPHERE_CPU_ARM_CORTEX_A57) || defined(ATMOSPHERE_CPU_ARM_CORTEX_A53) + constexpr inline size_t InstructionCacheLineSize = 0x40; + constexpr inline size_t DataCacheLineSize = 0x40; + constexpr inline size_t NumPerformanceCounters = 6; +#else + #error "Unknown CPU for cache line sizes" +#endif + +#if defined(ATMOSPHERE_BOARD_NINTENDO_NX) + constexpr inline size_t NumCores = 4; +#else + #error "Unknown Board for cpu::NumCores" +#endif + + /* Initialization. */ + NOINLINE void InitializeInterruptThreads(s32 core_id); /* Helpers for managing memory state. */ ALWAYS_INLINE void DataSynchronizationBarrier() { @@ -46,13 +64,154 @@ namespace ams::kern::arm64::cpu { EnsureInstructionConsistency(); } + ALWAYS_INLINE void SwitchProcess(u64 ttbr, u32 proc_id) { + SetTtbr0El1(ttbr); + ContextIdRegisterAccessor(0).SetProcId(proc_id).Store(); + InstructionMemoryBarrier(); + } + + /* Performance counter helpers. */ + ALWAYS_INLINE u64 GetCycleCounter() { + return cpu::GetPmcCntrEl0(); + } + + ALWAYS_INLINE u32 GetPerformanceCounter(s32 n) { + u64 counter = 0; + if (n < static_cast(NumPerformanceCounters)) { + switch (n) { + case 0: + counter = cpu::GetPmevCntr0El0(); + break; + case 1: + counter = cpu::GetPmevCntr1El0(); + break; + case 2: + counter = cpu::GetPmevCntr2El0(); + break; + case 3: + counter = cpu::GetPmevCntr3El0(); + break; + case 4: + counter = cpu::GetPmevCntr4El0(); + break; + case 5: + counter = cpu::GetPmevCntr5El0(); + break; + default: + break; + } + } + return static_cast(counter); + } + + /* Helper for address access. 
*/ + ALWAYS_INLINE bool GetPhysicalAddressWritable(KPhysicalAddress *out, KVirtualAddress addr, bool privileged = false) { + const uintptr_t va = GetInteger(addr); + + if (privileged) { + __asm__ __volatile__("at s1e1w, %[va]" :: [va]"r"(va) : "memory"); + } else { + __asm__ __volatile__("at s1e0w, %[va]" :: [va]"r"(va) : "memory"); + } + InstructionMemoryBarrier(); + + u64 par = GetParEl1(); + + if (par & 0x1) { + return false; + } + + if (out) { + *out = KPhysicalAddress((par & 0xFFFFFFFFF000ull) | (va & 0xFFFull)); + } + return true; + } + + ALWAYS_INLINE bool GetPhysicalAddressReadable(KPhysicalAddress *out, KVirtualAddress addr, bool privileged = false) { + const uintptr_t va = GetInteger(addr); + + if (privileged) { + __asm__ __volatile__("at s1e1r, %[va]" :: [va]"r"(va) : "memory"); + } else { + __asm__ __volatile__("at s1e0r, %[va]" :: [va]"r"(va) : "memory"); + } + InstructionMemoryBarrier(); + + u64 par = GetParEl1(); + + if (par & 0x1) { + return false; + } + + if (out) { + *out = KPhysicalAddress((par & 0xFFFFFFFFF000ull) | (va & 0xFFFull)); + } + return true; + } + + /* Synchronization helpers. */ + NOINLINE void SynchronizeAllCores(); + /* Cache management helpers. */ - void FlushEntireDataCacheShared(); - void FlushEntireDataCacheLocal(); + void ClearPageToZeroImpl(void *); + void FlushEntireDataCacheSharedForInit(); + void FlushEntireDataCacheLocalForInit(); + + void FlushEntireDataCache(); + + Result InvalidateDataCache(void *addr, size_t size); + Result StoreDataCache(const void *addr, size_t size); + Result FlushDataCache(const void *addr, size_t size); + Result InvalidateInstructionCache(void *addr, size_t size); + + ALWAYS_INLINE void ClearPageToZero(void *page) { + MESOSPHERE_ASSERT(util::IsAligned(reinterpret_cast<uintptr_t>(page), PageSize)); + MESOSPHERE_ASSERT(page != nullptr); + ClearPageToZeroImpl(page); + } + + ALWAYS_INLINE void InvalidateTlbByAsid(u32 asid) { + const u64 value = (static_cast<u64>(asid) << 48); + __asm__ __volatile__("tlbi aside1is, %[value]" :: [value]"r"(value) : "memory"); + EnsureInstructionConsistency(); + } + + ALWAYS_INLINE void InvalidateTlbByAsidAndVa(u32 asid, KProcessAddress virt_addr) { + const u64 value = (static_cast<u64>(asid) << 48) | ((GetInteger(virt_addr) >> 12) & 0xFFFFFFFFFFFul); + __asm__ __volatile__("tlbi aside1is, %[value]" :: [value]"r"(value) : "memory"); + EnsureInstructionConsistency(); + } ALWAYS_INLINE void InvalidateEntireTlb() { __asm__ __volatile__("tlbi vmalle1is" ::: "memory"); EnsureInstructionConsistency(); } + ALWAYS_INLINE void InvalidateEntireTlbDataOnly() { + __asm__ __volatile__("tlbi vmalle1is" ::: "memory"); + DataSynchronizationBarrier(); + } + + ALWAYS_INLINE void InvalidateTlbByVaDataOnly(KProcessAddress virt_addr) { + const u64 value = ((GetInteger(virt_addr) >> 12) & 0xFFFFFFFFFFFul); + __asm__ __volatile__("tlbi vaae1is, %[value]" :: [value]"r"(value) : "memory"); + DataSynchronizationBarrier(); + } + + ALWAYS_INLINE uintptr_t GetCoreLocalRegionAddress() { + register uintptr_t x18 asm("x18"); + __asm__ __volatile__("" : [x18]"=r"(x18)); + return x18; + } + + ALWAYS_INLINE void SetCoreLocalRegionAddress(uintptr_t value) { + register uintptr_t x18 asm("x18") = value; + __asm__ __volatile__("":: [x18]"r"(x18)); + SetTpidrEl1(value); + } + + ALWAYS_INLINE void SwitchThreadLocalRegion(uintptr_t tlr) { + cpu::SetTpidrRoEl0(tlr); + } + } diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp
b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp index d7677f9f4..7912bea3f 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp @@ -16,7 +16,7 @@ #pragma once #include -namespace ams::kern::arm64::cpu { +namespace ams::kern::arch::arm64::cpu { #define MESOSPHERE_CPU_GET_SYSREG(name) \ ({ \ @@ -37,8 +37,14 @@ namespace ams::kern::arm64::cpu { MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Ttbr0El1, ttbr0_el1) MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Ttbr1El1, ttbr1_el1) - MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(MairEl1, mair_el1) MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TcrEl1, tcr_el1) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(MairEl1, mair_el1) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TpidrEl1, tpidr_el1) + + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(VbarEl1, vbar_el1) + + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(FarEl1, far_el1) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(ParEl1, par_el1) MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(SctlrEl1, sctlr_el1) @@ -46,21 +52,215 @@ namespace ams::kern::arm64::cpu { MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CpuEctlrEl1, s3_1_c15_c2_1) MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CsselrEl1, csselr_el1) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CcsidrEl1, ccsidr_el1) + + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(OslarEl1, oslar_el1) + + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TpidrRoEl0, tpidrro_el0) + + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(EsrEl1, esr_el1) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Afsr0El1, afsr0_el1) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Afsr1El1, afsr1_el1) + + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmUserEnrEl0, pmuserenr_el0) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmcCntrEl0, pmccntr_el0) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevCntr0El0, pmevcntr0_el0) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevCntr1El0, pmevcntr1_el0) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevCntr2El0, pmevcntr2_el0) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevCntr3El0, pmevcntr3_el0) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevCntr4El0, pmevcntr4_el0) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevCntr5El0, pmevcntr5_el0) + + #define FOR_I_IN_0_TO_15(HANDLER, ...) \ + HANDLER(0, ## __VA_ARGS__) HANDLER(1, ## __VA_ARGS__) HANDLER(2, ## __VA_ARGS__) HANDLER(3, ## __VA_ARGS__) \ + HANDLER(4, ## __VA_ARGS__) HANDLER(5, ## __VA_ARGS__) HANDLER(6, ## __VA_ARGS__) HANDLER(7, ## __VA_ARGS__) \ + HANDLER(8, ## __VA_ARGS__) HANDLER(9, ## __VA_ARGS__) HANDLER(10, ## __VA_ARGS__) HANDLER(11, ## __VA_ARGS__) \ + HANDLER(12, ## __VA_ARGS__) HANDLER(13, ## __VA_ARGS__) HANDLER(14, ## __VA_ARGS__) HANDLER(15, ## __VA_ARGS__) \ + + #define MESOSPHERE_CPU_DEFINE_DBG_SYSREG_ACCESSORS(ID, ...) \ + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(DbgWcr##ID##El1, dbgwcr##ID##_el1) \ + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(DbgWvr##ID##El1, dbgwvr##ID##_el1) \ + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(DbgBcr##ID##El1, dbgbcr##ID##_el1) \ + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(DbgBvr##ID##El1, dbgbvr##ID##_el1) + + FOR_I_IN_0_TO_15(MESOSPHERE_CPU_DEFINE_DBG_SYSREG_ACCESSORS) + + #undef MESOSPHERE_CPU_DEFINE_DBG_SYSREG_ACCESSORS /* Base class for register accessors. */ - class GenericRegisterAccessor { + class GenericRegisterAccessorBase { + NON_COPYABLE(GenericRegisterAccessorBase); + NON_MOVEABLE(GenericRegisterAccessorBase); private: u64 value; public: - ALWAYS_INLINE GenericRegisterAccessor(u64 v) : value(v) { /* ... 
*/ } + constexpr ALWAYS_INLINE GenericRegisterAccessorBase(u64 v) : value(v) { /* ... */ } protected: + constexpr ALWAYS_INLINE u64 GetValue() const { + return this->value; + } + constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const { return (this->value >> offset) & ((1ul << count) - 1); } + + constexpr ALWAYS_INLINE void SetBits(size_t offset, size_t count, u64 value) { + const u64 mask = ((1ul << count) - 1) << offset; + this->value &= ~mask; + this->value |= (value & (mask >> offset)) << offset; + } + + constexpr ALWAYS_INLINE void SetBitsDirect(size_t offset, size_t count, u64 value) { + const u64 mask = ((1ul << count) - 1) << offset; + this->value &= ~mask; + this->value |= (value & mask); + } + + constexpr ALWAYS_INLINE void SetBit(size_t offset, bool enabled) { + const u64 mask = 1ul << offset; + if (enabled) { + this->value |= mask; + } else { + this->value &= ~mask; + } + } }; - /* Special code for main id register. */ - class MainIdRegisterAccessor : public GenericRegisterAccessor { + template<typename Derived> + class GenericRegisterAccessor : public GenericRegisterAccessorBase { + public: + constexpr ALWAYS_INLINE GenericRegisterAccessor(u64 v) : GenericRegisterAccessorBase(v) { /* ... */ } + protected: + ALWAYS_INLINE void Store() const { + static_cast<const Derived *>(this)->Store(); + } + }; + + #define MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(name) class name##RegisterAccessor : public GenericRegisterAccessor<name##RegisterAccessor> + + #define MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(accessor, reg_name) \ + ALWAYS_INLINE accessor##RegisterAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(reg_name)) { /* ... */ } \ + constexpr ALWAYS_INLINE accessor##RegisterAccessor(u64 v) : GenericRegisterAccessor(v) { /* ... */ } \ + \ + ALWAYS_INLINE void Store() { const u64 v = this->GetValue(); MESOSPHERE_CPU_SET_SYSREG(reg_name, v); } + + /* Accessors. 
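Each accessor below wraps a single system register: the default constructor performs the mrs read, the bitfield helpers edit the cached value, and Store() writes it back with msr.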
*/ + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MemoryAccessIndirection) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MemoryAccessIndirection, mair_el1) + }; + + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(TranslationControl) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(TranslationControl, tcr_el1) + + constexpr ALWAYS_INLINE size_t GetT1Size() const { + const size_t shift_value = this->GetBits(16, 6); + return size_t(1) << (size_t(64) - shift_value); + } + }; + + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(ArchitecturalFeatureAccessControl) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(ArchitecturalFeatureAccessControl, cpacr_el1) + + constexpr ALWAYS_INLINE decltype(auto) SetFpEnabled(bool en) { + if (en) { + this->SetBits(20, 2, 0x3); + } else { + this->SetBits(20, 2, 0x0); + } + return *this; + } + + constexpr ALWAYS_INLINE bool IsFpEnabled() { + return this->GetBits(20, 2) != 0; + } + }; + + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(DebugFeature) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(DebugFeature, id_aa64dfr0_el1) + + constexpr ALWAYS_INLINE size_t GetNumWatchpoints() const { + return this->GetBits(20, 4); + } + + constexpr ALWAYS_INLINE size_t GetNumBreakpoints() const { + return this->GetBits(12, 4); + } + }; + + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MonitorDebugSystemControl) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MonitorDebugSystemControl, mdscr_el1) + + constexpr ALWAYS_INLINE bool GetMde() const { + return this->GetBits(15, 1) != 0; + } + + constexpr ALWAYS_INLINE size_t GetTdcc() const { + return this->GetBits(12, 1) != 0; + } + + constexpr ALWAYS_INLINE decltype(auto) SetMde(bool set) { + this->SetBit(15, set); + return *this; + } + + constexpr ALWAYS_INLINE decltype(auto) SetTdcc(bool set) { + this->SetBit(12, set); + return *this; + } + }; + + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MultiprocessorAffinity) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MultiprocessorAffinity, mpidr_el1) + + constexpr ALWAYS_INLINE u64 GetAff0() const { + return this->GetBits(0, 8); + } + + constexpr ALWAYS_INLINE u64 GetAff1() const { + return this->GetBits(8, 8); + } + + constexpr ALWAYS_INLINE u64 GetAff2() const { + return this->GetBits(16, 8); + } + + constexpr ALWAYS_INLINE u64 GetAff3() const { + return this->GetBits(32, 8); + } + + constexpr ALWAYS_INLINE u64 GetCpuOnArgument() const { + constexpr u64 Mask = 0x000000FF00FFFF00ul; + return this->GetValue() & Mask; + } + }; + + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(ThreadId) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(ThreadId, tpidr_el1) + }; + + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(OsLockAccess) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(OsLockAccess, oslar_el1) + }; + + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(ContextId) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(ContextId, contextidr_el1) + + constexpr ALWAYS_INLINE decltype(auto) SetProcId(u32 proc_id) { + this->SetBits(0, BITSIZEOF(proc_id), proc_id); + return *this; + } + }; + + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MainId) { public: enum class Implementer { ArmLimited = 0x41, @@ -70,7 +270,7 @@ namespace ams::kern::arm64::cpu { CortexA57 = 0xD07, }; public: - ALWAYS_INLINE MainIdRegisterAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(midr_el1)) { /* ... 
*/ } + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MainId, midr_el1) public: constexpr ALWAYS_INLINE Implementer GetImplementer() const { return static_cast(this->GetBits(24, 8)); @@ -93,10 +293,69 @@ namespace ams::kern::arm64::cpu { } }; - /* Accessors for cache registers. */ - class CacheLineIdAccessor : public GenericRegisterAccessor { + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(SystemControl) { public: - ALWAYS_INLINE CacheLineIdAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(clidr_el1)) { /* ... */ } + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(SystemControl, sctlr_el1) + + constexpr ALWAYS_INLINE decltype(auto) SetWxn(bool en) { + this->SetBit(19, en); + return *this; + } + }; + + /* Accessors for timer registers. */ + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CounterTimerKernelControl) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CounterTimerKernelControl, cntkctl_el1) + + constexpr ALWAYS_INLINE decltype(auto) SetEl0PctEn(bool en) { + this->SetBit(0, en); + return *this; + } + }; + + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CounterTimerPhysicalTimerControl) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CounterTimerPhysicalTimerControl, cntp_ctl_el0) + + constexpr ALWAYS_INLINE decltype(auto) SetEnable(bool en) { + this->SetBit(0, en); + return *this; + } + + constexpr ALWAYS_INLINE decltype(auto) SetIMask(bool en) { + this->SetBit(1, en); + return *this; + } + }; + + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CounterTimerPhysicalTimerCompareValue) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CounterTimerPhysicalTimerCompareValue, cntp_cval_el0) + + constexpr ALWAYS_INLINE u64 GetCompareValue() { + return this->GetValue(); + } + + constexpr ALWAYS_INLINE decltype(auto) SetCompareValue(u64 value) { + this->SetBits(0, BITSIZEOF(value), value); + return *this; + } + }; + + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CounterTimerPhysicalCountValue) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CounterTimerPhysicalCountValue, cntpct_el0) + + constexpr ALWAYS_INLINE u64 GetCount() { + return this->GetValue(); + } + }; + + /* Accessors for cache registers. */ + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CacheLineId) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CacheLineId, clidr_el1) public: constexpr ALWAYS_INLINE int GetLevelsOfCoherency() const { return static_cast(this->GetBits(24, 3)); @@ -109,9 +368,9 @@ namespace ams::kern::arm64::cpu { /* TODO: Other bitfield accessors? */ }; - class CacheSizeIdAccessor : public GenericRegisterAccessor { + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CacheSizeId) { public: - ALWAYS_INLINE CacheSizeIdAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(ccsidr_el1)) { /* ... */ } + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CacheSizeId, ccsidr_el1) public: constexpr ALWAYS_INLINE int GetNumberOfSets() const { return static_cast(this->GetBits(13, 15)); @@ -128,6 +387,9 @@ namespace ams::kern::arm64::cpu { /* TODO: Other bitfield accessors? 
*/ }; + #undef FOR_I_IN_0_TO_15 + #undef MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS + #undef MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS #undef MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS #undef MESOSPHERE_CPU_GET_SYSREG #undef MESOSPHERE_CPU_SET_SYSREG diff --git a/libraries/libmesosphere/include/mesosphere/board/nintendo/switch/kern_k_system_control.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_debug.hpp similarity index 54% rename from libraries/libmesosphere/include/mesosphere/board/nintendo/switch/kern_k_system_control.hpp rename to libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_debug.hpp index 449af1f0e..8bf3c8f6f 100644 --- a/libraries/libmesosphere/include/mesosphere/board/nintendo/switch/kern_k_system_control.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_debug.hpp @@ -14,25 +14,23 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once -#include +#include +#include +#include namespace ams::kern { - class KSystemControl { - public: - class Init { - public: - /* Initialization. */ - static KPhysicalAddress GetKernelPhysicalBaseAddress(uintptr_t base_address); - static bool ShouldIncreaseThreadResourceLimit(); + class KThread; + class KProcess; - /* Randomness. */ - static void GenerateRandomBytes(void *dst, size_t size); - static u64 GenerateRandomRange(u64 min, u64 max); - }; +} + +namespace ams::kern::arch::arm64 { + + class KDebug final : public KAutoObjectWithSlabHeapAndContainer<KDebug, KSynchronizationObject> { + MESOSPHERE_AUTOOBJECT_TRAITS(KDebug, KSynchronizationObject); public: - /* Panic. */ - static NORETURN void StopSystem(); + /* TODO: This is a placeholder definition. */ }; -} \ No newline at end of file +} diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_exception_context.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_exception_context.hpp new file mode 100644 index 000000000..9d99e334c --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_exception_context.hpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#pragma once +#include + +namespace ams::kern::arch::arm64 { + + struct KExceptionContext { + u64 x[(30 - 0) + 1]; + u64 sp; + u64 pc; + u64 psr; + u64 tpidr; + u64 reserved; + }; + static_assert(sizeof(KExceptionContext) == 0x120); + +} \ No newline at end of file diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_hardware_timer.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_hardware_timer.hpp new file mode 100644 index 000000000..f69e55891 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_hardware_timer.hpp @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#pragma once +#include +#include + +namespace ams::kern::arch::arm64 { + + namespace impl { + + class KHardwareTimerInterruptTask; + + } + + class KHardwareTimer : public KHardwareTimerBase { + public: + constexpr KHardwareTimer() : KHardwareTimerBase() { /* ... */ } + public: + /* Public API. */ + NOINLINE void Initialize(s32 core_id); + NOINLINE void Finalize(); + + static s64 GetTick() { + return GetCount(); + } + + void RegisterAbsoluteTask(KTimerTask *task, s64 task_time) { + KScopedDisableDispatch dd; + KScopedSpinLock lk(this->GetLock()); + + if (this->RegisterAbsoluteTaskImpl(task, task_time)) { + SetCompareValue(task_time); + EnableInterrupt(); + } + } + private: + friend class impl::KHardwareTimerInterruptTask; + NOINLINE void DoInterruptTask(); + private: + /* Hardware register accessors. */ + static ALWAYS_INLINE void InitializeGlobalTimer() { + /* Set kernel control. */ + cpu::CounterTimerKernelControlRegisterAccessor(0).SetEl0PctEn(true).Store(); + + /* Disable the physical timer. */ + cpu::CounterTimerPhysicalTimerControlRegisterAccessor(0).SetEnable(false).SetIMask(false).Store(); + + /* Set the compare value to the maximum. */ + cpu::CounterTimerPhysicalTimerCompareValueRegisterAccessor(0).SetCompareValue(std::numeric_limits<s64>::max()).Store(); + + /* Enable the physical timer, with interrupt masked. */ + cpu::CounterTimerPhysicalTimerControlRegisterAccessor(0).SetEnable(true).SetIMask(true).Store(); + } + + static ALWAYS_INLINE void EnableInterrupt() { + cpu::CounterTimerPhysicalTimerControlRegisterAccessor(0).SetEnable(true).SetIMask(false).Store(); + } + + static ALWAYS_INLINE void DisableInterrupt() { + cpu::CounterTimerPhysicalTimerControlRegisterAccessor(0).SetEnable(true).SetIMask(true).Store(); + } + + static ALWAYS_INLINE void StopTimer() { + /* Set the compare value to the maximum. */ + cpu::CounterTimerPhysicalTimerCompareValueRegisterAccessor(0).SetCompareValue(std::numeric_limits<s64>::max()).Store(); + + /* Disable the physical timer. */ + cpu::CounterTimerPhysicalTimerControlRegisterAccessor(0).SetEnable(false).SetIMask(false).Store(); + } + + static ALWAYS_INLINE s64 GetCount() { + return cpu::CounterTimerPhysicalCountValueRegisterAccessor().GetCount(); + } + + static ALWAYS_INLINE void SetCompareValue(s64 value) { + cpu::CounterTimerPhysicalTimerCompareValueRegisterAccessor(0).SetCompareValue(static_cast<u64>(value)).Store(); + } + + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_controller.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_controller.hpp new file mode 100644 index 000000000..a6d7a180e --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_controller.hpp @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern::arch::arm64 { + + struct GicDistributor { + u32 ctlr; + u32 typer; + u32 iidr; + u32 reserved_0x0c; + u32 statusr; + u32 reserved_0x14[3]; + u32 impldef_0x20[8]; + u32 setspi_nsr; + u32 reserved_0x44; + u32 clrspi_nsr; + u32 reserved_0x4c; + u32 setspi_sr; + u32 reserved_0x54; + u32 clrspi_sr; + u32 reserved_0x5c[9]; + u32 igroupr[32]; + u32 isenabler[32]; + u32 icenabler[32]; + u32 ispendr[32]; + u32 icpendr[32]; + u32 isactiver[32]; + u32 icactiver[32]; + union { + u8 bytes[1020]; + u32 words[255]; + } ipriorityr; + u32 _0x7fc; + union { + u8 bytes[1020]; + u32 words[255]; + } itargetsr; + u32 _0xbfc; + u32 icfgr[64]; + u32 igrpmodr[32]; + u32 _0xd80[32]; + u32 nsacr[64]; + u32 sgir; + u32 _0xf04[3]; + u32 cpendsgir[4]; + u32 spendsgir[4]; + u32 reserved_0xf30[52]; + + static constexpr size_t SgirCpuTargetListShift = 16; + + enum SgirTargetListFilter : u32 { + SgirTargetListFilter_CpuTargetList = (0 << 24), + SgirTargetListFilter_Others = (1 << 24), + SgirTargetListFilter_Self = (2 << 24), + SgirTargetListFilter_Reserved = (3 << 24), + }; + }; + static_assert(std::is_pod::value); + static_assert(sizeof(GicDistributor) == 0x1000); + + struct GicCpuInterface { + u32 ctlr; + u32 pmr; + u32 bpr; + u32 iar; + u32 eoir; + u32 rpr; + u32 hppir; + u32 abpr; + u32 aiar; + u32 aeoir; + u32 ahppir; + u32 statusr; + u32 reserved_30[4]; + u32 impldef_40[36]; + u32 apr[4]; + u32 nsapr[4]; + u32 reserved_f0[3]; + u32 iidr; + u32 reserved_100[960]; + u32 dir; + u32 _0x1004[1023]; + }; + static_assert(std::is_pod::value); + static_assert(sizeof(GicCpuInterface) == 0x2000); + + struct KInterruptController { + NON_COPYABLE(KInterruptController); + NON_MOVEABLE(KInterruptController); + public: + static constexpr s32 NumSoftwareInterrupts = 16; + static constexpr s32 NumLocalInterrupts = NumSoftwareInterrupts + 16; + static constexpr s32 NumGlobalInterrupts = 988; + static constexpr s32 NumInterrupts = NumLocalInterrupts + NumGlobalInterrupts; + static constexpr s32 NumPriorityLevels = 4; + public: + struct LocalState { + u32 local_isenabler[NumLocalInterrupts / 32]; + u32 local_ipriorityr[NumLocalInterrupts / 4]; + u32 local_targetsr[NumLocalInterrupts / 4]; + u32 local_icfgr[NumLocalInterrupts / 16]; + }; + + struct GlobalState { + u32 global_isenabler[NumGlobalInterrupts / 32]; + u32 global_ipriorityr[NumGlobalInterrupts / 4]; + u32 global_targetsr[NumGlobalInterrupts / 4]; + u32 global_icfgr[NumGlobalInterrupts / 16]; + }; + + enum PriorityLevel : u8 { + PriorityLevel_High = 0, + PriorityLevel_Low = NumPriorityLevels - 1, + + PriorityLevel_Timer = 1, + PriorityLevel_Scheduler = 2, + }; + private: + static inline u32 s_mask[cpu::NumCores]; + private: + volatile GicDistributor *gicd; + volatile GicCpuInterface *gicc; + public: + constexpr KInterruptController() : gicd(nullptr), gicc(nullptr) { /* ... */ } + + void Initialize(s32 core_id); + void Finalize(s32 core_id); + public: + u32 GetIrq() const { + return this->gicc->iar; + } + + static constexpr s32 ConvertRawIrq(u32 irq) { + return (irq == 0x3FF) ? 
-1 : (irq & 0x3FF); + } + + void Enable(s32 irq) const { + this->gicd->isenabler[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32))); + } + + void Disable(s32 irq) const { + this->gicd->icenabler[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32))); + } + + void Clear(s32 irq) const { + this->gicd->icpendr[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32))); + } + + void SetTarget(s32 irq, s32 core_id) const { + this->gicd->itargetsr.bytes[irq] |= GetGicMask(core_id); + } + + void ClearTarget(s32 irq, s32 core_id) const { + this->gicd->itargetsr.bytes[irq] &= ~GetGicMask(core_id); + } + + void SetPriorityLevel(s32 irq, s32 level) const { + MESOSPHERE_ASSERT(PriorityLevel_High <= level && level <= PriorityLevel_Low); + this->gicd->ipriorityr.bytes[irq] = ToGicPriorityValue(level); + } + + s32 GetPriorityLevel(s32 irq) const { + return FromGicPriorityValue(this->gicd->ipriorityr.bytes[irq]); + } + + void SetPriorityLevel(s32 level) const { + MESOSPHERE_ASSERT(PriorityLevel_High <= level && level <= PriorityLevel_Low); + this->gicc->pmr = ToGicPriorityValue(level); + } + + void SetEdge(s32 irq) const { + u32 cfg = this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)]; + cfg &= ~(0x3 << (2 * (irq % (BITSIZEOF(u32) / 2)))); + cfg |= (0x2 << (2 * (irq % (BITSIZEOF(u32) / 2)))); + this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)] = cfg; + } + + void SetLevel(s32 irq) const { + u32 cfg = this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)]; + cfg &= ~(0x3 << (2 * (irq % (BITSIZEOF(u32) / 2)))); + cfg |= (0x0 << (2 * (irq % (BITSIZEOF(u32) / 2)))); + this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)] = cfg; + } + + void SendInterProcessorInterrupt(s32 irq, u64 core_mask) { + MESOSPHERE_ASSERT(IsSoftware(irq)); + this->gicd->sgir = GetCpuTargetListMask(irq, core_mask); + } + + void SendInterProcessorInterrupt(s32 irq) { + MESOSPHERE_ASSERT(IsSoftware(irq)); + this->gicd->sgir = GicDistributor::SgirTargetListFilter_Others | irq; + } + + void EndOfInterrupt(u32 irq) const { + this->gicc->eoir = irq; + } + + bool IsInterruptDefined(s32 irq) { + const s32 num_interrupts = std::min(32 + 32 * (this->gicd->typer & 0x1F), static_cast(NumInterrupts)); + return (0 <= irq && irq < num_interrupts); + } + + /* TODO: Implement more KInterruptController functionality. 
*/ + public: + static constexpr ALWAYS_INLINE bool IsSoftware(s32 id) { + MESOSPHERE_ASSERT(0 <= id && id < NumInterrupts); + return id < NumSoftwareInterrupts; + } + + static constexpr ALWAYS_INLINE bool IsLocal(s32 id) { + MESOSPHERE_ASSERT(0 <= id && id < NumInterrupts); + return id < NumLocalInterrupts; + } + + static constexpr ALWAYS_INLINE bool IsGlobal(s32 id) { + MESOSPHERE_ASSERT(0 <= id && id < NumInterrupts); + return NumLocalInterrupts <= id; + } + + static constexpr size_t GetGlobalInterruptIndex(s32 id) { + MESOSPHERE_ASSERT(IsGlobal(id)); + return id - NumLocalInterrupts; + } + + static constexpr size_t GetLocalInterruptIndex(s32 id) { + MESOSPHERE_ASSERT(IsLocal(id)); + return id; + } + private: + static constexpr size_t PriorityShift = BITSIZEOF(u8) - __builtin_ctz(NumPriorityLevels); + static_assert(PriorityShift < BITSIZEOF(u8)); + + static constexpr ALWAYS_INLINE u8 ToGicPriorityValue(s32 level) { + return (level << PriorityShift) | ((1 << PriorityShift) - 1); + } + + static constexpr ALWAYS_INLINE s32 FromGicPriorityValue(u8 priority) { + return (priority >> PriorityShift) & (NumPriorityLevels - 1); + } + + static constexpr ALWAYS_INLINE s32 GetCpuTargetListMask(s32 irq, u64 core_mask) { + MESOSPHERE_ASSERT(IsSoftware(irq)); + MESOSPHERE_ASSERT(core_mask < (1ul << cpu::NumCores)); + return GicDistributor::SgirTargetListFilter_CpuTargetList | irq | (static_cast(core_mask) << GicDistributor::SgirCpuTargetListShift); + } + + static ALWAYS_INLINE s32 GetGicMask(s32 core_id) { + return s_mask[core_id]; + } + + ALWAYS_INLINE void SetGicMask(s32 core_id) const { + s_mask[core_id] = this->gicd->itargetsr.bytes[0]; + } + + NOINLINE void SetupInterruptLines(s32 core_id) const; + }; +} diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_manager.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_manager.hpp new file mode 100644 index 000000000..2ecf5ebd5 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_manager.hpp @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include +#include +#include + +namespace ams::kern::arch::arm64 { + + class KInterruptManager { + NON_COPYABLE(KInterruptManager); + NON_MOVEABLE(KInterruptManager); + private: + struct KCoreLocalInterruptEntry { + KInterruptHandler *handler; + bool manually_cleared; + bool needs_clear; + u8 priority; + + constexpr KCoreLocalInterruptEntry() + : handler(nullptr), manually_cleared(false), needs_clear(false), priority(KInterruptController::PriorityLevel_Low) + { + /* ... */ + } + }; + + struct KGlobalInterruptEntry { + KInterruptHandler *handler; + bool manually_cleared; + bool needs_clear; + + constexpr KGlobalInterruptEntry() : handler(nullptr), manually_cleared(false), needs_clear(false) { /* ... 
+            };
+        private:
+            static KSpinLock s_lock;
+            static std::array<KGlobalInterruptEntry, KInterruptController::NumGlobalInterrupts> s_global_interrupts;
+            static KInterruptController::GlobalState s_global_state;
+            static bool s_global_state_saved;
+        private:
+            KCoreLocalInterruptEntry core_local_interrupts[KInterruptController::NumLocalInterrupts];
+            KInterruptController interrupt_controller;
+            KInterruptController::LocalState local_state;
+            bool local_state_saved;
+        private:
+            static ALWAYS_INLINE KSpinLock &GetLock() { return s_lock; }
+            static ALWAYS_INLINE KGlobalInterruptEntry &GetGlobalInterruptEntry(s32 irq) { return s_global_interrupts[KInterruptController::GetGlobalInterruptIndex(irq)]; }
+            ALWAYS_INLINE KCoreLocalInterruptEntry &GetLocalInterruptEntry(s32 irq) { return this->core_local_interrupts[KInterruptController::GetLocalInterruptIndex(irq)]; }
+
+            bool OnHandleInterrupt();
+        public:
+            constexpr KInterruptManager() : core_local_interrupts(), interrupt_controller(), local_state(), local_state_saved(false) { /* ... */ }
+            NOINLINE void Initialize(s32 core_id);
+            NOINLINE void Finalize(s32 core_id);
+
+            bool IsInterruptDefined(s32 irq) {
+                return this->interrupt_controller.IsInterruptDefined(irq);
+            }
+
+            NOINLINE Result BindHandler(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level);
+            NOINLINE Result UnbindHandler(s32 irq, s32 core);
+
+            NOINLINE Result ClearInterrupt(s32 irq);
+            NOINLINE Result ClearInterrupt(s32 irq, s32 core_id);
+
+            ALWAYS_INLINE void SendInterProcessorInterrupt(s32 irq, u64 core_mask) {
+                this->interrupt_controller.SendInterProcessorInterrupt(irq, core_mask);
+            }
+
+            ALWAYS_INLINE void SendInterProcessorInterrupt(s32 irq) {
+                this->interrupt_controller.SendInterProcessorInterrupt(irq);
+            }
+
+            static void HandleInterrupt(bool user_mode);
+
+            /* TODO: Implement more KInterruptManager functionality.
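The DAIF helpers declared further down (DisableInterrupts/RestoreInterrupts) are the usual save-and-restore idiom; a hypothetical RAII wrapper shows the intended usage pattern (the wrapper's name and existence are assumptions for exposition, not the kernel's own type):

```cpp
// Illustrative only: a scoped guard over KInterruptManager's DAIF save/restore.
class KScopedInterruptDisable {
    private:
        u32 prev_intr_state;
    public:
        ALWAYS_INLINE  KScopedInterruptDisable() : prev_intr_state(KInterruptManager::DisableInterrupts()) { /* ... */ }
        ALWAYS_INLINE ~KScopedInterruptDisable() { KInterruptManager::RestoreInterrupts(prev_intr_state); }
};
```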
*/ + private: + Result BindGlobal(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level); + Result BindLocal(KInterruptHandler *handler, s32 irq, s32 priority, bool manual_clear); + Result UnbindGlobal(s32 irq); + Result UnbindLocal(s32 irq); + Result ClearGlobal(s32 irq); + Result ClearLocal(s32 irq); + public: + static ALWAYS_INLINE u32 DisableInterrupts() { + u64 intr_state; + __asm__ __volatile__("mrs %[intr_state], daif\n" + "msr daifset, #2" + : [intr_state]"=r"(intr_state) + :: "memory"); + return intr_state; + } + + static ALWAYS_INLINE u32 EnableInterrupts() { + u64 intr_state; + __asm__ __volatile__("mrs %[intr_state], daif\n" + "msr daifclr, #2" + : [intr_state]"=r"(intr_state) + :: "memory"); + return intr_state; + } + + static ALWAYS_INLINE void RestoreInterrupts(u32 intr_state) { + u64 cur_state; + __asm__ __volatile__("mrs %[cur_state], daif" : [cur_state]"=r"(cur_state)); + __asm__ __volatile__("msr daif, %[intr_state]" :: [intr_state]"r"((cur_state & ~0x80ul) | (intr_state & 0x80))); + } + + static ALWAYS_INLINE bool AreInterruptsEnabled() { + u64 intr_state; + __asm__ __volatile__("mrs %[intr_state], daif" : [intr_state]"=r"(intr_state)); + return (intr_state & 0x80) == 0; + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_name.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_name.hpp new file mode 100644 index 000000000..97900023d --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_name.hpp @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once + +namespace ams::kern::arch::arm64 { + + namespace interrupt_name { + + enum KInterruptName : s32 { + /* SGIs */ + KInterruptName_ThreadTerminate = 4, + KInterruptName_CacheOperation = 5, + KInterruptName_Scheduler = 6, + + KInterruptName_PerformanceCounter = 8, + + /* PPIs */ + #if defined(ATMOSPHERE_BOARD_NINTENDO_NX) + KInterruptName_VirtualMaintenance = 25, + KInterruptName_HypervisorTimer = 26, + KInterruptName_VirtualTimer = 27, + KInterruptName_LegacyNFiq = 38, + KInterruptName_SecurePhysicalTimer = 29, + KInterruptName_NonSecurePhysicalTimer = 30, + KInterruptName_LegacyNIrq = 31, + #endif + + #if defined(ATMOSPHERE_BOARD_NINTENDO_NX) + KInterruptName_MemoryController = 109, + #endif + }; + + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp new file mode 100644 index 000000000..bb8fcb3e9 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include
+#include
+#include
+#include
+#include
+
+namespace ams::kern::arch::arm64 {
+
+    class KPageTable : public KPageTableBase {
+        NON_COPYABLE(KPageTable);
+        NON_MOVEABLE(KPageTable);
+        public:
+            using TraversalEntry = KPageTableImpl::TraversalEntry;
+            using TraversalContext = KPageTableImpl::TraversalContext;
+
+            enum BlockType {
+                BlockType_L3Block,
+                BlockType_L3ContiguousBlock,
+                BlockType_L2Block,
+
+#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
+                BlockType_L2TegraSmmuBlock,
+#endif
+
+                BlockType_L2ContiguousBlock,
+                BlockType_L1Block,
+
+                BlockType_Count,
+            };
+            static_assert(L3BlockSize == PageSize);
+            static constexpr size_t ContiguousPageSize = L3ContiguousBlockSize;
+
+#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
+            static constexpr size_t L2TegraSmmuBlockSize = 2 * L2BlockSize;
+#endif
+            static constexpr size_t BlockSizes[BlockType_Count] = {
+                [BlockType_L3Block]           = L3BlockSize,
+                [BlockType_L3ContiguousBlock] = L3ContiguousBlockSize,
+                [BlockType_L2Block]           = L2BlockSize,
+#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
+                [BlockType_L2TegraSmmuBlock]  = L2TegraSmmuBlockSize,
+#endif
+                [BlockType_L2ContiguousBlock] = L2ContiguousBlockSize,
+                [BlockType_L1Block]           = L1BlockSize,
+            };
+
+            static constexpr BlockType GetMaxBlockType() {
+                return BlockType_L1Block;
+            }
+
+            static constexpr size_t GetBlockSize(BlockType type) {
+                return BlockSizes[type];
+            }
+
+            static constexpr BlockType GetBlockType(size_t size) {
+                switch (size) {
+                    case L3BlockSize:           return BlockType_L3Block;
+                    case L3ContiguousBlockSize: return BlockType_L3ContiguousBlock;
+                    case L2BlockSize:           return BlockType_L2Block;
+#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
+                    case L2TegraSmmuBlockSize:  return BlockType_L2TegraSmmuBlock;
+#endif
+                    case L2ContiguousBlockSize: return BlockType_L2ContiguousBlock;
+                    case L1BlockSize:           return BlockType_L1Block;
+                    MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+                }
+            }
+
+            static constexpr size_t GetSmallerAlignment(size_t alignment) {
+                MESOSPHERE_ASSERT(alignment > L3BlockSize);
+                return KPageTable::GetBlockSize(static_cast<BlockType>(KPageTable::GetBlockType(alignment) - 1));
+            }
+
+            static constexpr size_t GetLargerAlignment(size_t alignment) {
+                MESOSPHERE_ASSERT(alignment < L1BlockSize);
+                return KPageTable::GetBlockSize(static_cast<BlockType>(KPageTable::GetBlockType(alignment) + 1));
+            }
+        private:
+            KPageTableManager *manager;
+            u64 ttbr;
+            u8 asid;
+        protected:
+            virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
+            virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
+            virtual void FinalizeUpdate(PageLinkedList *page_list) override;
+
+            KPageTableManager &GetPageTableManager() const { return *this->manager; }
+        private:
+            constexpr PageTableEntry GetEntryTemplate(const KPageProperties properties) const {
+                /* Set basic attributes.
*/ + PageTableEntry entry; + entry.SetPrivilegedExecuteNever(true); + entry.SetAccessFlag(PageTableEntry::AccessFlag_Accessed); + entry.SetShareable(PageTableEntry::Shareable_InnerShareable); + + if (!this->IsKernel()) { + entry.SetGlobal(false); + } + + /* Set page attribute. */ + if (properties.io) { + MESOSPHERE_ABORT_UNLESS(!properties.uncached); + MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0); + + entry.SetPageAttribute(PageTableEntry::PageAttribute_Device_nGnRnE) + .SetUserExecuteNever(true); + } else if (properties.uncached) { + MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0); + + entry.SetPageAttribute(PageTableEntry::PageAttribute_NormalMemoryNotCacheable); + } else { + entry.SetPageAttribute(PageTableEntry::PageAttribute_NormalMemory); + } + + /* Set user execute never bit. */ + if (properties.perm != KMemoryPermission_UserReadExecute) { + MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0); + entry.SetUserExecuteNever(true); + } + + /* Set can be contiguous. */ + entry.SetContiguousAllowed(!properties.non_contiguous); + + /* Set AP[1] based on perm. */ + switch (properties.perm & KMemoryPermission_UserReadWrite) { + case KMemoryPermission_UserReadWrite: + case KMemoryPermission_UserRead: + entry.SetUserAccessible(true); + break; + case KMemoryPermission_KernelReadWrite: + case KMemoryPermission_KernelRead: + entry.SetUserAccessible(false); + break; + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + + /* Set AP[2] based on perm. */ + switch (properties.perm & KMemoryPermission_UserReadWrite) { + case KMemoryPermission_UserReadWrite: + case KMemoryPermission_KernelReadWrite: + entry.SetReadOnly(false); + break; + case KMemoryPermission_KernelRead: + case KMemoryPermission_UserRead: + entry.SetReadOnly(true); + break; + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + + return entry; + } + public: + constexpr KPageTable() : KPageTableBase(), manager(), ttbr(), asid() { /* ... 
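The two switch statements in GetEntryTemplate above split the AArch64 access permissions across AP[1] (EL0-accessible, descriptor bit 6) and AP[2] (read-only, descriptor bit 7); a worked sketch of the resulting bit patterns, matching SetUserAccessible/SetReadOnly below:

```cpp
// How the two switches combine into AP[2:1] (descriptor bits 7:6):
//   UserReadWrite   -> AP[1]=1, AP[2]=0 -> 0b01
//   KernelReadWrite -> AP[1]=0, AP[2]=0 -> 0b00
//   UserRead        -> AP[1]=1, AP[2]=1 -> 0b11
//   KernelRead      -> AP[1]=0, AP[2]=1 -> 0b10
constexpr unsigned ApBits(bool user_accessible, bool read_only) {
    return (static_cast<unsigned>(read_only) << 7) | (static_cast<unsigned>(user_accessible) << 6);
}
static_assert(ApBits(true, false) == 0x40); // user RW
static_assert(ApBits(true, true)  == 0xC0); // user RO
```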
*/ }
+
+            static NOINLINE void Initialize(s32 core_id);
+
+            ALWAYS_INLINE void Activate(u32 proc_id) {
+                cpu::DataSynchronizationBarrier();
+                cpu::SwitchProcess(this->ttbr, proc_id);
+            }
+
+            NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end);
+            NOINLINE Result InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager);
+            Result Finalize();
+        private:
+            Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll);
+            Result Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll);
+
+            Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, size_t page_size, PageLinkedList *page_list, bool reuse_ll) {
+                switch (page_size) {
+                    case L1BlockSize:
+#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
+                    case L2TegraSmmuBlockSize:
+#endif
+                    case L2BlockSize:
+                    case L3BlockSize:
+                        break;
+                    case L2ContiguousBlockSize:
+                    case L3ContiguousBlockSize:
+                        entry_template.SetContiguous(true);
+                        break;
+                    MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+                }
+                return this->Map(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll);
+            }
+
+            Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll);
+            Result MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll);
+
+            bool MergePages(KProcessAddress virt_addr, PageLinkedList *page_list);
+
+            ALWAYS_INLINE Result SeparatePagesImpl(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
+            Result SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
+
+            Result ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, bool refresh_mapping, PageLinkedList *page_list, bool reuse_ll);
+
+            static void PteDataSynchronizationBarrier() {
+                cpu::DataSynchronizationBarrierInnerShareable();
+            }
+
+            static void ClearPageTable(KVirtualAddress table) {
+                cpu::ClearPageToZero(GetVoidPointer(table));
+            }
+
+            void OnTableUpdated() const {
+                cpu::InvalidateTlbByAsid(this->asid);
+            }
+
+            void OnKernelTableUpdated() const {
+                cpu::InvalidateEntireTlbDataOnly();
+            }
+
+            void OnKernelTableSinglePageUpdated(KProcessAddress virt_addr) const {
+                cpu::InvalidateTlbByVaDataOnly(virt_addr);
+            }
+
+            void NoteUpdated() const {
+                cpu::DataSynchronizationBarrier();
+
+                if (this->IsKernel()) {
+                    this->OnKernelTableUpdated();
+                } else {
+                    this->OnTableUpdated();
+                }
+            }
+
+            void NoteSingleKernelPageUpdated(KProcessAddress virt_addr) const {
+                MESOSPHERE_ASSERT(this->IsKernel());
+
+                cpu::DataSynchronizationBarrier();
+                this->OnKernelTableSinglePageUpdated(virt_addr);
+            }
+
+            KVirtualAddress AllocatePageTable(PageLinkedList *page_list, bool reuse_ll) const {
+                KVirtualAddress table = this->GetPageTableManager().Allocate();
+
+                if (table == Null<KVirtualAddress>) {
+                    if (reuse_ll && page_list->Peek()) {
+                        table = KVirtualAddress(reinterpret_cast<uintptr_t>(page_list->Pop()));
+                    } else {
+                        return Null<KVirtualAddress>;
+                    }
+                }
+
+                ClearPageTable(table);
+
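+                /* Hand out only zeroed tables; the reference-count check below catches reuse of a live table. */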
MESOSPHERE_ASSERT(this->GetPageTableManager().GetRefCount(table) == 0); + + return table; + } + + void FreePageTable(PageLinkedList *page_list, KVirtualAddress table) const { + MESOSPHERE_ASSERT(this->GetPageTableManager().IsInPageTableHeap(table)); + MESOSPHERE_ASSERT(this->GetPageTableManager().GetRefCount(table) == 0); + page_list->Push(table); + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp new file mode 100644 index 000000000..09d7df979 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp @@ -0,0 +1,288 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern::arch::arm64 { + + constexpr size_t L1BlockSize = 1_GB; + constexpr size_t L1ContiguousBlockSize = 0x10 * L1BlockSize; + constexpr size_t L2BlockSize = 2_MB; + constexpr size_t L2ContiguousBlockSize = 0x10 * L2BlockSize; + constexpr size_t L3BlockSize = PageSize; + constexpr size_t L3ContiguousBlockSize = 0x10 * L3BlockSize; + + class PageTableEntry { + public: + struct InvalidTag{}; + + enum Permission : u64 { + Permission_KernelRWX = ((0ul << 53) | (1ul << 54) | (0ul << 6)), + Permission_KernelRX = ((0ul << 53) | (1ul << 54) | (2ul << 6)), + Permission_KernelR = ((1ul << 53) | (1ul << 54) | (2ul << 6)), + Permission_KernelRW = ((1ul << 53) | (1ul << 54) | (0ul << 6)), + + Permission_UserRX = ((1ul << 53) | (0ul << 54) | (3ul << 6)), + Permission_UserR = ((1ul << 53) | (1ul << 54) | (3ul << 6)), + Permission_UserRW = ((1ul << 53) | (1ul << 54) | (1ul << 6)), + }; + + enum Shareable : u64 { + Shareable_NonShareable = (0 << 8), + Shareable_OuterShareable = (2 << 8), + Shareable_InnerShareable = (3 << 8), + }; + + /* Official attributes are: */ + /* 0x00, 0x04, 0xFF, 0x44. 4-7 are unused. */ + enum PageAttribute : u64 { + PageAttribute_Device_nGnRnE = (0 << 2), + PageAttribute_Device_nGnRE = (1 << 2), + PageAttribute_NormalMemory = (2 << 2), + PageAttribute_NormalMemoryNotCacheable = (3 << 2), + }; + + enum AccessFlag : u64 { + AccessFlag_NotAccessed = (0 << 10), + AccessFlag_Accessed = (1 << 10), + }; + + enum Type : u64 { + Type_None = 0x0, + Type_L1Block = 0x1, + Type_L1Table = 0x3, + Type_L2Block = 0x1, + Type_L2Table = 0x3, + Type_L3Block = 0x3, + }; + + enum ContigType : u64 { + ContigType_NotContiguous = (0x0ul << 52), + ContigType_Contiguous = (0x1ul << 52), + }; + protected: + u64 attributes; + public: + /* Take in a raw attribute. */ + constexpr ALWAYS_INLINE PageTableEntry() : attributes() { /* ... */ } + constexpr ALWAYS_INLINE PageTableEntry(u64 attr) : attributes(attr) { /* ... */ } + + constexpr ALWAYS_INLINE PageTableEntry(InvalidTag) : attributes(0) { /* ... */ } + + /* Extend a previous attribute. 
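For instance, the block-descriptor constructors further below extend a prebuilt attribute template with the contiguous hint (bit 52) and the valid/type bits; a small sketch of that pattern using the enums above (values illustrative):

```cpp
// Build a base attribute, then overlay extra bits via the extension constructor.
constexpr PageTableEntry base(PageTableEntry::Permission_KernelRW,
                              PageTableEntry::PageAttribute_NormalMemory,
                              PageTableEntry::Shareable_InnerShareable);
constexpr PageTableEntry extended(base, (1ul << 52) | 0x1); // mark contiguous, valid block
```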
*/
+            constexpr ALWAYS_INLINE PageTableEntry(const PageTableEntry &rhs, u64 new_attr) : attributes(rhs.attributes | new_attr) { /* ... */ }
+
+            /* Construct a new attribute. */
+            constexpr ALWAYS_INLINE PageTableEntry(Permission perm, PageAttribute p_a, Shareable share)
+                : attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share))
+            {
+                /* ... */
+            }
+        protected:
+            constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
+                return (this->attributes >> offset) & ((1ul << count) - 1);
+            }
+
+            constexpr ALWAYS_INLINE u64 SelectBits(size_t offset, size_t count) const {
+                return this->attributes & (((1ul << count) - 1) << offset);
+            }
+
+            constexpr ALWAYS_INLINE void SetBits(size_t offset, size_t count, u64 value) {
+                const u64 mask = ((1ul << count) - 1) << offset;
+                this->attributes &= ~mask;
+                this->attributes |= (value & (mask >> offset)) << offset;
+            }
+
+            constexpr ALWAYS_INLINE void SetBitsDirect(size_t offset, size_t count, u64 value) {
+                const u64 mask = ((1ul << count) - 1) << offset;
+                this->attributes &= ~mask;
+                this->attributes |= (value & mask);
+            }
+
+            constexpr ALWAYS_INLINE void SetBit(size_t offset, bool enabled) {
+                const u64 mask = 1ul << offset;
+                if (enabled) {
+                    this->attributes |= mask;
+                } else {
+                    this->attributes &= ~mask;
+                }
+            }
+        public:
+            constexpr ALWAYS_INLINE bool IsContiguousAllowed() const { return this->GetBits(55, 1) != 0; }
+            constexpr ALWAYS_INLINE bool IsUserExecuteNever() const { return this->GetBits(54, 1) != 0; }
+            constexpr ALWAYS_INLINE bool IsPrivilegedExecuteNever() const { return this->GetBits(53, 1) != 0; }
+            constexpr ALWAYS_INLINE bool IsContiguous() const { return this->GetBits(52, 1) != 0; }
+            constexpr ALWAYS_INLINE bool IsGlobal() const { return this->GetBits(11, 1) == 0; }
+            constexpr ALWAYS_INLINE AccessFlag GetAccessFlag() const { return static_cast<AccessFlag>(this->GetBits(10, 1)); }
+            constexpr ALWAYS_INLINE Shareable GetShareable() const { return static_cast<Shareable>(this->GetBits(8, 2)); }
+            constexpr ALWAYS_INLINE PageAttribute GetPageAttribute() const { return static_cast<PageAttribute>(this->GetBits(2, 3)); }
+            constexpr ALWAYS_INLINE bool IsReadOnly() const { return this->GetBits(7, 1) != 0; }
+            constexpr ALWAYS_INLINE bool IsUserAccessible() const { return this->GetBits(6, 1) != 0; }
+            constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBits(5, 1) != 0; }
+            constexpr ALWAYS_INLINE bool IsBlock() const { return this->GetBits(0, 2) == 0x1; }
+            constexpr ALWAYS_INLINE bool IsTable() const { return this->GetBits(0, 2) == 0x3; }
+
+            constexpr ALWAYS_INLINE decltype(auto) SetContiguousAllowed(bool en) { this->SetBit(55, !en); return *this; }
+            constexpr ALWAYS_INLINE decltype(auto) SetUserExecuteNever(bool en) { this->SetBit(54, en); return *this; }
+            constexpr ALWAYS_INLINE decltype(auto) SetPrivilegedExecuteNever(bool en) { this->SetBit(53, en); return *this; }
+            constexpr ALWAYS_INLINE decltype(auto) SetContiguous(bool en) { this->SetBit(52, en); return *this; }
+            constexpr ALWAYS_INLINE decltype(auto) SetGlobal(bool en) { this->SetBit(11, !en); return *this; }
+            constexpr ALWAYS_INLINE decltype(auto) SetAccessFlag(AccessFlag f) { this->SetBitsDirect(10, 1, f); return *this; }
+            constexpr ALWAYS_INLINE decltype(auto) SetShareable(Shareable s) { this->SetBitsDirect(8, 2, s); return *this; }
+            constexpr ALWAYS_INLINE decltype(auto) SetReadOnly(bool en) { this->SetBit(7, en); return *this; }
+            constexpr ALWAYS_INLINE decltype(auto) SetUserAccessible(bool en) { this->SetBit(6, en); return *this; }
+            constexpr ALWAYS_INLINE decltype(auto) SetPageAttribute(PageAttribute a) { this->SetBitsDirect(2, 3, a); return *this; }
+
+            constexpr ALWAYS_INLINE u64 GetEntryTemplate() const {
+                constexpr u64 Mask = (0xFFF0000000000FFFul & ~u64(0x3ul | (0x1ul << 52)));
+                return this->attributes & Mask;
+            }
+
+            constexpr ALWAYS_INLINE bool Is(u64 attr) const {
+                return this->attributes == attr;
+            }
+        protected:
+            constexpr ALWAYS_INLINE u64 GetRawAttributes() const {
+                return this->attributes;
+            }
+    };
+
+    static_assert(sizeof(PageTableEntry) == sizeof(u64));
+
+    constexpr inline PageTableEntry InvalidPageTableEntry = PageTableEntry(PageTableEntry::InvalidTag{});
+
+    constexpr inline size_t MaxPageTableEntries = PageSize / sizeof(PageTableEntry);
+
+    class L1PageTableEntry : public PageTableEntry {
+        public:
+            constexpr ALWAYS_INLINE L1PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }
+
+            constexpr ALWAYS_INLINE L1PageTableEntry(KPhysicalAddress phys_addr, bool pxn)
+                : PageTableEntry((0x3ul << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
+            {
+                /* ... */
+            }
+
+            constexpr ALWAYS_INLINE L1PageTableEntry(KPhysicalAddress phys_addr, bool is_kernel, bool pxn)
+                : PageTableEntry(((is_kernel ? 0x3ul : 0) << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
+            {
+                /* ... */
+            }
+
+            constexpr ALWAYS_INLINE L1PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
+                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1)
+            {
+                /* ... */
+            }
+
+            constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
+                return this->SelectBits(30, 18);
+            }
+
+            constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const {
+                return this->SelectBits(12, 36);
+            }
+
+            constexpr ALWAYS_INLINE bool GetTable(KPhysicalAddress &out) const {
+                if (this->IsTable()) {
+                    out = this->GetTable();
+                    return true;
+                } else {
+                    return false;
+                }
+            }
+
+            constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const {
+                /* Check whether this has the same permission/etc as the desired attributes. */
+                return L1PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
+            }
+    };
+
+    class L2PageTableEntry : public PageTableEntry {
+        public:
+            constexpr ALWAYS_INLINE L2PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }
+
+            constexpr ALWAYS_INLINE L2PageTableEntry(KPhysicalAddress phys_addr, bool pxn)
+                : PageTableEntry((0x3ul << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
+            {
+                /* ... */
+            }
+
+            constexpr ALWAYS_INLINE L2PageTableEntry(KPhysicalAddress phys_addr, bool is_kernel, bool pxn)
+                : PageTableEntry(((is_kernel ? 0x3ul : 0) << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
+            {
+                /* ... */
+            }
+
+            constexpr ALWAYS_INLINE L2PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
+                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1)
+            {
+                /* ... */
+            }
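The block/table accessors that follow simply mask the output-address field; a worked example of the SelectBits(21, 27) decode used by the L2 GetBlock() (values illustrative):

```cpp
#include <cstdint>

// An L2 block descriptor keeps physical-address bits [47:21];
// everything below 2 MB comes from the virtual address at walk time.
constexpr std::uint64_t raw     = 0x80200000ull | 0x1;      // valid L2 block at PA 0x80200000
constexpr std::uint64_t pa_mask = ((1ull << 27) - 1) << 21; // bits [47:21]
static_assert((raw & pa_mask) == 0x80200000ull);            // type bits masked away
```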
*/ + } + + constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const { + return this->SelectBits(21, 27); + } + + constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const { + return this->SelectBits(12, 36); + } + + constexpr ALWAYS_INLINE bool GetTable(KPhysicalAddress &out) const { + if (this->IsTable()) { + out = this->GetTable(); + return true; + } else { + return false; + } + } + + constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const { + /* Check whether this has the same permission/etc as the desired attributes. */ + return L2PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes(); + } + }; + + class L3PageTableEntry : public PageTableEntry { + public: + constexpr ALWAYS_INLINE L3PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ } + + constexpr ALWAYS_INLINE L3PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig) + : PageTableEntry(attr, (static_cast(contig) << 52) | GetInteger(phys_addr) | 0x3) + { + /* ... */ + } + + constexpr ALWAYS_INLINE bool IsBlock() const { return this->GetBits(0, 2) == 0x3; } + + constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const { + return this->SelectBits(12, 36); + } + + constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const { + /* Check whether this has the same permission/etc as the desired attributes. */ + return L3PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes(); + } + }; + + constexpr inline L1PageTableEntry InvalidL1PageTableEntry = L1PageTableEntry(PageTableEntry::InvalidTag{}); + constexpr inline L2PageTableEntry InvalidL2PageTableEntry = L2PageTableEntry(PageTableEntry::InvalidTag{}); + constexpr inline L3PageTableEntry InvalidL3PageTableEntry = L3PageTableEntry(PageTableEntry::InvalidTag{}); + +} diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp new file mode 100644 index 000000000..c1597bf22 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */
+#pragma once
+#include
+#include
+#include
+#include
+#include
+
+namespace ams::kern::arch::arm64 {
+
+    class KPageTableImpl {
+        NON_COPYABLE(KPageTableImpl);
+        NON_MOVEABLE(KPageTableImpl);
+        public:
+            struct TraversalEntry {
+                KPhysicalAddress phys_addr;
+                size_t block_size;
+            };
+
+            struct TraversalContext {
+                const L1PageTableEntry *l1_entry;
+                const L2PageTableEntry *l2_entry;
+                const L3PageTableEntry *l3_entry;
+            };
+        private:
+            static constexpr size_t PageBits  = __builtin_ctzll(PageSize);
+            static constexpr size_t NumLevels = 3;
+            static constexpr size_t LevelBits = 9;
+            static_assert(NumLevels > 0);
+
+            template<size_t Offset, size_t Count>
+            static constexpr ALWAYS_INLINE u64 GetBits(u64 value) {
+                return (value >> Offset) & ((1ul << Count) - 1);
+            }
+
+            template<size_t Offset, size_t Count>
+            constexpr ALWAYS_INLINE u64 SelectBits(u64 value) {
+                return value & (((1ul << Count) - 1) << Offset);
+            }
+
+            static constexpr ALWAYS_INLINE uintptr_t GetL0Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 0), LevelBits>(GetInteger(addr)); }
+            static constexpr ALWAYS_INLINE uintptr_t GetL1Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 1), LevelBits>(GetInteger(addr)); }
+            static constexpr ALWAYS_INLINE uintptr_t GetL2Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 2), LevelBits>(GetInteger(addr)); }
+            static constexpr ALWAYS_INLINE uintptr_t GetL3Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 3), LevelBits>(GetInteger(addr)); }
+
+            static constexpr ALWAYS_INLINE uintptr_t GetL1Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 1)>(GetInteger(addr)); }
+            static constexpr ALWAYS_INLINE uintptr_t GetL2Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 2)>(GetInteger(addr)); }
+            static constexpr ALWAYS_INLINE uintptr_t GetL3Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 3)>(GetInteger(addr)); }
+            static constexpr ALWAYS_INLINE uintptr_t GetContiguousL1Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 1) + 4>(GetInteger(addr)); }
+            static constexpr ALWAYS_INLINE uintptr_t GetContiguousL2Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 2) + 4>(GetInteger(addr)); }
+            static constexpr ALWAYS_INLINE uintptr_t GetContiguousL3Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 3) + 4>(GetInteger(addr)); }
+
+            static ALWAYS_INLINE KVirtualAddress GetPageTableVirtualAddress(KPhysicalAddress addr) {
+                return KMemoryLayout::GetLinearVirtualAddress(addr);
+            }
+
+            ALWAYS_INLINE bool ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const;
+            ALWAYS_INLINE bool ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const;
+            ALWAYS_INLINE bool ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const;
+        private:
+            L1PageTableEntry *table;
+            bool is_kernel;
+            u32 num_entries;
+        public:
+            ALWAYS_INLINE KVirtualAddress GetTableEntry(KVirtualAddress table, size_t index) const {
+                return table + index * sizeof(PageTableEntry);
+            }
+
+            ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KProcessAddress address) const {
+                return GetPointer<L1PageTableEntry>(GetTableEntry(KVirtualAddress(this->table), GetL1Index(address) & (this->num_entries - 1)));
+            }
+
+            ALWAYS_INLINE L2PageTableEntry *GetL2EntryFromTable(KVirtualAddress table, KProcessAddress address) const {
+                return GetPointer<L2PageTableEntry>(GetTableEntry(table, GetL2Index(address)));
+            }
+
+            ALWAYS_INLINE L2PageTableEntry *GetL2Entry(const L1PageTableEntry *entry, KProcessAddress address) const {
+                return GetL2EntryFromTable(KMemoryLayout::GetLinearVirtualAddress(entry->GetTable()), address);
+            }
+
+            ALWAYS_INLINE L3PageTableEntry *GetL3EntryFromTable(KVirtualAddress table, KProcessAddress address) const {
+                return GetPointer<L3PageTableEntry>(GetTableEntry(table, GetL3Index(address)));
+            }
+
+            ALWAYS_INLINE L3PageTableEntry *GetL3Entry(const L2PageTableEntry *entry, KProcessAddress address) const {
+                return GetL3EntryFromTable(KMemoryLayout::GetLinearVirtualAddress(entry->GetTable()), address);
+            }
+        public:
+            constexpr KPageTableImpl() : table(), is_kernel(), num_entries() { /* ... */ }
+
+            NOINLINE void InitializeForKernel(void *tb, KVirtualAddress start, KVirtualAddress end);
+            NOINLINE void InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end);
+            L1PageTableEntry *Finalize();
+
+            bool BeginTraversal(TraversalEntry *out_entry, TraversalContext *out_context, KProcessAddress address) const;
+            bool ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const;
+
+            bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress virt_addr) const;
+    };
+
+}
diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp
new file mode 100644
index 000000000..1376a865f
--- /dev/null
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include
+#include
+
+namespace ams::kern::arch::arm64 {
+
+    class KProcessPageTable {
+        private:
+            KPageTable page_table;
+        public:
+            constexpr KProcessPageTable() : page_table() { /* ... */ }
+
+            void Activate(u64 id) {
+                /* Activate the page table with the specified contextidr.
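cpu::SwitchProcess itself is defined elsewhere; a plausible sketch of what such an activation boils down to on AArch64 (an assumption about the implementation, not the kernel's actual code):

```cpp
#include <cstdint>

// Assumed shape of a TTBR0/CONTEXTIDR switch (illustrative only).
inline void SwitchProcessSketch(std::uint64_t ttbr, std::uint32_t proc_id) {
    __asm__ __volatile__("msr contextidr_el1, %[proc_id]\n" // tag the running process for trace/debug
                         "msr ttbr0_el1, %[ttbr]\n"         // install the new tables + ASID
                         "isb"                              // synchronize before any EL0 fetch
                         :: [proc_id]"r"(static_cast<std::uint64_t>(proc_id)), [ttbr]"r"(ttbr)
                         : "memory");
}
```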
*/ + this->page_table.Activate(id); + } + + Result Initialize(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager) { + return this->page_table.InitializeForProcess(id, as_type, enable_aslr, from_back, pool, code_address, code_size, mem_block_slab_manager, block_info_manager, pt_manager); + } + + void Finalize() { this->page_table.Finalize(); } + + Result SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm) { + return this->page_table.SetMemoryPermission(addr, size, perm); + } + + Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm) { + return this->page_table.SetProcessMemoryPermission(addr, size, perm); + } + + Result SetHeapSize(KProcessAddress *out, size_t size) { + return this->page_table.SetHeapSize(out, size); + } + + Result SetMaxHeapSize(size_t size) { + return this->page_table.SetMaxHeapSize(size); + } + + Result QueryInfo(KMemoryInfo *out_info, ams::svc::PageInfo *out_page_info, KProcessAddress addr) const { + return this->page_table.QueryInfo(out_info, out_page_info, addr); + } + + Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) { + return this->page_table.MapIo(phys_addr, size, perm); + } + + Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) { + return this->page_table.MapStatic(phys_addr, size, perm); + } + + Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) { + return this->page_table.MapRegion(region_type, perm); + } + + Result MapPageGroup(KProcessAddress addr, const KPageGroup &pg, KMemoryState state, KMemoryPermission perm) { + return this->page_table.MapPageGroup(addr, pg, state, perm); + } + + Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) { + return this->page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm); + } + + Result MapPages(KProcessAddress *out_addr, size_t num_pages, KMemoryState state, KMemoryPermission perm) { + return this->page_table.MapPages(out_addr, num_pages, state, perm); + } + + Result UnmapPages(KProcessAddress addr, size_t num_pages, KMemoryState state) { + return this->page_table.UnmapPages(addr, num_pages, state); + } + + bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const { + return this->page_table.GetPhysicalAddress(out, address); + } + + bool Contains(KProcessAddress addr, size_t size) const { return this->page_table.Contains(addr, size); } + bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { return this->page_table.CanContain(addr, size, state); } + + KProcessAddress GetAddressSpaceStart() const { return this->page_table.GetAddressSpaceStart(); } + KProcessAddress GetHeapRegionStart() const { return this->page_table.GetHeapRegionStart(); } + KProcessAddress GetAliasRegionStart() const { return this->page_table.GetAliasRegionStart(); } + KProcessAddress GetStackRegionStart() const { return this->page_table.GetStackRegionStart(); } + KProcessAddress GetKernelMapRegionStart() const { return this->page_table.GetKernelMapRegionStart(); } + + size_t GetAddressSpaceSize() const { return this->page_table.GetAddressSpaceSize(); } + size_t GetHeapRegionSize() const { return 
this->page_table.GetHeapRegionSize(); }
+            size_t GetAliasRegionSize() const { return this->page_table.GetAliasRegionSize(); }
+            size_t GetStackRegionSize() const { return this->page_table.GetStackRegionSize(); }
+            size_t GetKernelMapRegionSize() const { return this->page_table.GetKernelMapRegionSize(); }
+    };
+
+}
diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_spin_lock.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_spin_lock.hpp
new file mode 100644
index 000000000..909d504b2
--- /dev/null
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_spin_lock.hpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include
+#include
+
+namespace ams::kern::arch::arm64 {
+
+    class KNotAlignedSpinLock {
+        private:
+            u32 packed_tickets;
+        public:
+            constexpr KNotAlignedSpinLock() : packed_tickets(0) { /* ... */ }
+
+            void Lock() {
+                u32 tmp0, tmp1;
+
+                __asm__ __volatile__(
+                    "    prfm   pstl1keep, %[packed_tickets]\n"
+                    "1:\n"
+                    "    ldaxr  %w[tmp0], %[packed_tickets]\n"
+                    "    add    %w[tmp0], %w[tmp0], #0x10000\n"
+                    "    stxr   %w[tmp1], %w[tmp0], %[packed_tickets]\n"
+                    "    cbnz   %w[tmp1], 1b\n"
+                    "    \n"
+                    "    and    %w[tmp1], %w[tmp0], #0xFFFF\n"
+                    "    cmp    %w[tmp1], %w[tmp0], lsr #16\n"
+                    "    b.eq   done\n"
+                    "    sevl\n"
+                    "2:\n"
+                    "    wfe\n"
+                    "    ldaxrh %w[tmp1], %[packed_tickets]\n"
+                    "    cmp    %w[tmp1], %w[tmp0], lsr #16\n"
+                    "    b.ne   2b\n"
+                    "done:\n"
+                    : [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [packed_tickets]"+Q"(this->packed_tickets)
+                    :
+                    : "cc", "memory"
+                );
+            }
+
+            void Unlock() {
+                const u32 value = this->packed_tickets + 1;
+                __asm__ __volatile__(
+                    "    stlrh  %w[value], %[packed_tickets]\n"
+                    : [packed_tickets]"+Q"(this->packed_tickets)
+                    : [value]"r"(value)
+                    : "memory"
+                );
+            }
+    };
+    static_assert(sizeof(KNotAlignedSpinLock) == sizeof(u32));
+
+    class KAlignedSpinLock {
+        private:
+            alignas(cpu::DataCacheLineSize) u16 current_ticket;
+            alignas(cpu::DataCacheLineSize) u16 next_ticket;
+        public:
+            constexpr KAlignedSpinLock() : current_ticket(0), next_ticket(0) { /* ...
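The assembly in Lock()/Unlock() above and below implements a ticket lock; the same protocol in portable C++, as a clarifying sketch only (the kernel cannot use std::atomic here):

```cpp
#include <atomic>
#include <cstdint>

class TicketLockSketch {
    private:
        std::atomic<std::uint16_t> current_ticket{0};
        std::atomic<std::uint16_t> next_ticket{0};
    public:
        void Lock() {
            const auto my = next_ticket.fetch_add(1, std::memory_order_relaxed); // the ldaxrh/add/stxrh loop
            while (current_ticket.load(std::memory_order_acquire) != my) { /* spin; hardware version uses wfe */ }
        }
        void Unlock() {
            current_ticket.fetch_add(1, std::memory_order_release); // the stlrh store
        }
};
```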
*/ } + + void Lock() { + u32 tmp0, tmp1, got_lock; + + __asm__ __volatile__( + " prfm pstl1keep, %[next_ticket]\n" + "1:\n" + " ldaxrh %w[tmp0], %[next_ticket]\n" + " add %w[tmp1], %w[tmp0], #0x1\n" + " stxrh %w[got_lock], %w[tmp1], %[next_ticket]\n" + " cbnz %w[got_lock], 1b\n" + " \n" + " sevl\n" + "2:\n" + " wfe\n" + " ldaxrh %w[tmp1], %[current_ticket]\n" + " cmp %w[tmp1], %w[tmp0]\n" + " b.ne 2b\n" + : [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [got_lock]"=&r"(got_lock), [next_ticket]"+Q"(this->next_ticket) + : [current_ticket]"Q"(this->current_ticket) + : "cc", "memory" + ); + } + + void Unlock() { + const u32 value = this->current_ticket + 1; + __asm__ __volatile__( + " stlrh %w[value], %[current_ticket]\n" + : [current_ticket]"+Q"(this->current_ticket) + : [value]"r"(value) + : "memory" + ); + } + }; + static_assert(sizeof(KAlignedSpinLock) == 2 * cpu::DataCacheLineSize); + + using KSpinLock = KAlignedSpinLock; + +} diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp new file mode 100644 index 000000000..0a1f71872 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern::arch::arm64 { + + class KSupervisorPageTable { + private: + KPageTable page_table; + u64 ttbr0[cpu::NumCores]; + public: + constexpr KSupervisorPageTable() : page_table(), ttbr0() { /* ... */ } + + NOINLINE void Initialize(s32 core_id); + + void Activate() { + /* Activate, using process id = 0xFFFFFFFF */ + this->page_table.Activate(0xFFFFFFFF); + } + + void ActivateForInit() { + this->Activate(); + + /* Invalidate entire TLB. 
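cpu::InvalidateEntireTlb is defined elsewhere; on AArch64 it presumably reduces to a broadcast TLBI sequence like the following sketch (an assumption, shown for context):

```cpp
// Assumed shape of a full EL1 TLB invalidation (illustrative only).
inline void InvalidateEntireTlbSketch() {
    __asm__ __volatile__("dsb ishst\n"      // complete outstanding table writes
                         "tlbi vmalle1is\n" // drop all EL1 translations, inner-shareable domain
                         "dsb ish\n"        // wait for the broadcast invalidate
                         "isb"              // resynchronize the pipeline
                         ::: "memory");
}
```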
*/ + cpu::InvalidateEntireTlb(); + } + + void Finalize(s32 core_id); + + Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { + return this->page_table.MapPages(out_addr, num_pages, alignment, phys_addr, region_start, region_num_pages, state, perm); + } + + Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) { + return this->page_table.UnmapPages(address, num_pages, state); + } + + Result MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { + return this->page_table.MapPageGroup(out_addr, pg, region_start, region_num_pages, state, perm); + } + + Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) { + return this->page_table.UnmapPageGroup(address, pg, state); + } + + bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const { + return this->page_table.GetPhysicalAddress(out, address); + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_thread_context.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_thread_context.hpp new file mode 100644 index 000000000..fe46d2f25 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_thread_context.hpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include + +namespace ams::kern { + + class KThread; + +} + +namespace ams::kern::arch::arm64 { + + class KThreadContext { + public: + static constexpr size_t NumCalleeSavedRegisters = (29 - 19) + 1; + static constexpr size_t NumFpuRegisters = 32; + private: + union { + u64 registers[NumCalleeSavedRegisters]; + struct { + u64 x19; + u64 x20; + u64 x21; + u64 x22; + u64 x23; + u64 x24; + u64 x25; + u64 x26; + u64 x27; + u64 x28; + u64 x29; + }; + } callee_saved; + u64 lr; + u64 sp; + u64 cpacr; + u64 fpcr; + u64 fpsr; + alignas(0x10) u128 fpu_registers[NumFpuRegisters]; + bool locked; + private: + static void RestoreFpuRegisters64(const KThreadContext &); + static void RestoreFpuRegisters32(const KThreadContext &); + public: + constexpr explicit KThreadContext() : callee_saved(), lr(), sp(), cpacr(), fpcr(), fpsr(), fpu_registers(), locked() { /* ... 
*/ } + + Result Initialize(KVirtualAddress u_pc, KVirtualAddress k_sp, KVirtualAddress u_sp, uintptr_t arg, bool is_user, bool is_64_bit, bool is_main); + Result Finalize(); + + void SetArguments(uintptr_t arg0, uintptr_t arg1); + + static void FpuContextSwitchHandler(KThread *thread); + + /* TODO: More methods (especially FPU management) */ + }; + +} \ No newline at end of file diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp new file mode 100644 index 000000000..db67b0acb --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::kern::arch::arm64 { + + void UserspaceAccessFunctionAreaBegin(); + + class UserspaceAccess { + public: + static bool CopyMemoryFromUser(void *dst, const void *src, size_t size); + static bool CopyMemoryFromUserAligned32Bit(void *dst, const void *src, size_t size); + static bool CopyMemoryFromUserAligned64Bit(void *dst, const void *src, size_t size); + static bool CopyMemoryFromUserSize32Bit(void *dst, const void *src); + static s32 CopyStringFromUser(void *dst, const void *src, size_t size); + + static bool CopyMemoryToUser(void *dst, const void *src, size_t size); + static bool CopyMemoryToUserAligned32Bit(void *dst, const void *src, size_t size); + static bool CopyMemoryToUserAligned64Bit(void *dst, const void *src, size_t size); + static bool CopyMemoryToUserSize32Bit(void *dst, const void *src); + static s32 CopyStringToUser(void *dst, const void *src, size_t size); + + static bool ClearMemory(void *dst, size_t size); + static bool ClearMemoryAligned32Bit(void *dst, size_t size); + static bool ClearMemoryAligned64Bit(void *dst, size_t size); + static bool ClearMemorySize32Bit(void *dst); + + static bool StoreDataCache(uintptr_t start, uintptr_t end); + static bool FlushDataCache(uintptr_t start, uintptr_t end); + static bool InvalidateDataCache(uintptr_t start, uintptr_t end); + static bool InvalidateInstructionCache(uintptr_t start, uintptr_t end); + + static bool ReadIoMemory32Bit(void *dst, const void *src, size_t size); + static bool ReadIoMemory16Bit(void *dst, const void *src, size_t size); + static bool ReadIoMemory8Bit(void *dst, const void *src, size_t size); + static bool WriteIoMemory32Bit(void *dst, const void *src, size_t size); + static bool WriteIoMemory16Bit(void *dst, const void *src, size_t size); + static bool WriteIoMemory8Bit(void *dst, const void *src, size_t size); + }; + + + void UserspaceAccessFunctionAreaEnd(); + +} \ No newline at end of file diff --git a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_device_page_table.hpp b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_device_page_table.hpp new file mode 100644 index 
000000000..48e5c8e99 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_device_page_table.hpp @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include +#include + +namespace ams::kern::board::nintendo::nx { + + using KDeviceVirtualAddress = u64; + + class KDevicePageTable { + private: + static constexpr size_t TableCount = 4; + private: + KVirtualAddress tables[TableCount]; + u8 table_asids[TableCount]; + u64 attached_device; + u32 attached_value; + u32 detached_value; + u32 hs_attached_value; + u32 hs_detached_value; + private: + static ALWAYS_INLINE KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress addr) { + return KPageTable::GetHeapVirtualAddress(addr); + } + + static ALWAYS_INLINE KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress addr) { + return KPageTable::GetHeapPhysicalAddress(addr); + } + + static ALWAYS_INLINE KVirtualAddress GetPageTableVirtualAddress(KPhysicalAddress addr) { + return KPageTable::GetPageTableVirtualAddress(addr); + } + + static ALWAYS_INLINE KPhysicalAddress GetPageTablePhysicalAddress(KVirtualAddress addr) { + return KPageTable::GetPageTablePhysicalAddress(addr); + } + public: + constexpr KDevicePageTable() : tables(), table_asids(), attached_device(), attached_value(), detached_value(), hs_attached_value(), hs_detached_value() { /* ... */ } + + static void Initialize(); + }; + +} \ No newline at end of file diff --git a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp new file mode 100644 index 000000000..499159bb7 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::kern::board::nintendo::nx { + + class KSystemControl { + public: + class Init { + public: + /* Initialization. 
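GenerateRandomRange's contract (inclusive bounds, uniform distribution) can be met by rejection sampling over GenerateRandomBytes; a sketch under that assumption (not the actual implementation; uses the kernel's u64 and UINT64_MAX):

```cpp
u64 GenerateRandomRangeSketch(u64 min, u64 max) {
    const u64 range = max - min + 1; /* 0 encodes the full 64-bit range */
    u64 value;
    do {
        GenerateRandomBytes(std::addressof(value), sizeof(value));
    } while (range != 0 && value >= (UINT64_MAX / range) * range); /* reject the biased tail */
    return (range != 0) ? (min + value % range) : value;
}
```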
*/ + static size_t GetIntendedMemorySize(); + static KPhysicalAddress GetKernelPhysicalBaseAddress(uintptr_t base_address); + static bool ShouldIncreaseThreadResourceLimit(); + static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg); + static size_t GetApplicationPoolSize(); + static size_t GetAppletPoolSize(); + static size_t GetMinimumNonSecureSystemPoolSize(); + + /* Randomness. */ + static void GenerateRandomBytes(void *dst, size_t size); + static u64 GenerateRandomRange(u64 min, u64 max); + }; + public: + /* Initialization. */ + static NOINLINE void InitializePhase1(); + static NOINLINE void InitializePhase2(); + static NOINLINE u32 GetInitialProcessBinaryPool(); + + /* Randomness. */ + static void GenerateRandomBytes(void *dst, size_t size); + static u64 GenerateRandomRange(u64 min, u64 max); + + /* Privileged Access. */ + static void ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value); + static void ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value); + + static ALWAYS_INLINE u32 ReadRegisterPrivileged(ams::svc::PhysicalAddress address) { + u32 v; + ReadWriteRegisterPrivileged(std::addressof(v), address, 0x00000000u, 0); + return v; + } + + static ALWAYS_INLINE void WriteRegisterPrivileged(ams::svc::PhysicalAddress address, u32 value) { + u32 v; + ReadWriteRegisterPrivileged(std::addressof(v), address, 0xFFFFFFFFu, value); + } + + /* Power management. */ + static void SleepSystem(); + static NORETURN void StopSystem(); + }; + +} \ No newline at end of file diff --git a/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp b/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp new file mode 100644 index 000000000..09360267f --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include + +#ifdef ATMOSPHERE_ARCH_ARM64 + #include +#else + #error "Unknown architecture for KInitArguments" +#endif + +namespace ams::kern::init { + + KPhysicalAddress GetInitArgumentsAddress(s32 core_id); + void SetInitArguments(s32 core_id, KPhysicalAddress address, uintptr_t arg); + void StoreInitArguments(); + +} diff --git a/libraries/libmesosphere/include/mesosphere/init/kern_init_elf.hpp b/libraries/libmesosphere/include/mesosphere/init/kern_init_elf.hpp index 02aee4a5d..294bee9c6 100644 --- a/libraries/libmesosphere/include/mesosphere/init/kern_init_elf.hpp +++ b/libraries/libmesosphere/include/mesosphere/init/kern_init_elf.hpp @@ -17,23 +17,21 @@ #include #ifdef ATMOSPHERE_ARCH_ARM64 - #include "kern_init_elf64.hpp" + #include + + namespace ams::kern::init::Elf { + using namespace ams::kern::init::Elf::Elf64; + + enum RelocationType { + R_ARCHITECTURE_RELATIVE = 0x403, /* Real name R_AARCH64_RELATIVE */ + }; + } #else #error "Unknown Architecture" #endif namespace ams::kern::init::Elf { - #ifdef ATMOSPHERE_ARCH_ARM64 - using namespace ams::kern::init::Elf::Elf64; - - enum RelocationType { - R_ARCHITECTURE_RELATIVE = 0x403, /* Real name R_AARCH64_RELATIVE */ - }; - #else - #error "Unknown Architecture" - #endif - /* API to apply relocations or call init array. */ void ApplyRelocations(uintptr_t base_address, const Dyn *dynamic); void CallInitArrayFuncs(uintptr_t init_array_start, uintptr_t init_array_end); diff --git a/libraries/libmesosphere/include/mesosphere/init/kern_init_layout.hpp b/libraries/libmesosphere/include/mesosphere/init/kern_init_layout.hpp index 904defc41..620ef85fe 100644 --- a/libraries/libmesosphere/include/mesosphere/init/kern_init_layout.hpp +++ b/libraries/libmesosphere/include/mesosphere/init/kern_init_layout.hpp @@ -27,7 +27,7 @@ namespace ams::kern::init { u32 rw_end_offset; u32 bss_offset; u32 bss_end_offset; - u32 ini_end_offset; + u32 ini_load_offset; u32 dynamic_offset; u32 init_array_offset; u32 init_array_end_offset; diff --git a/libraries/libmesosphere/include/mesosphere/init/kern_init_page_table_select.hpp b/libraries/libmesosphere/include/mesosphere/init/kern_init_page_table_select.hpp index d13c8e49c..aa9cb5df4 100644 --- a/libraries/libmesosphere/include/mesosphere/init/kern_init_page_table_select.hpp +++ b/libraries/libmesosphere/include/mesosphere/init/kern_init_page_table_select.hpp @@ -14,9 +14,16 @@ * along with this program. If not, see . */ #pragma once +#include #ifdef ATMOSPHERE_ARCH_ARM64 - #include "../arch/arm64/init/kern_k_init_page_table.hpp" + #include + + namespace ams::kern::init { + using ams::kern::arch::arm64::PageTableEntry; + using ams::kern::arch::arm64::init::KInitialPageTable; + using ams::kern::arch::arm64::init::KInitialPageAllocator; + } #else #error "Unknown architecture for KInitialPageTable" #endif diff --git a/libraries/libmesosphere/include/mesosphere/init/kern_init_slab_setup.hpp b/libraries/libmesosphere/include/mesosphere/init/kern_init_slab_setup.hpp new file mode 100644 index 000000000..8cfdd9f07 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/init/kern_init_slab_setup.hpp @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include + +namespace ams::kern::init { + + struct KSlabResourceCounts { + size_t num_KProcess; + size_t num_KThread; + size_t num_KEvent; + size_t num_KInterruptEvent; + size_t num_KPort; + size_t num_KSharedMemory; + size_t num_KTransferMemory; + size_t num_KCodeMemory; + size_t num_KDeviceAddressSpace; + size_t num_KSession; + size_t num_KLightSession; + size_t num_KObjectName; + size_t num_KResourceLimit; + size_t num_KDebug; + }; + + NOINLINE void InitializeSlabResourceCounts(); + const KSlabResourceCounts &GetSlabResourceCounts(); + + size_t CalculateTotalSlabHeapSize(); + NOINLINE void InitializeKPageBufferSlabHeap(); + NOINLINE void InitializeSlabHeaps(); + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_common.hpp b/libraries/libmesosphere/include/mesosphere/kern_common.hpp index e69de29bb..db9679f6b 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_common.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_common.hpp @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::kern { + + constexpr size_t PageSize = 4_KB; + +} + +#if 1 +#define MESOSPHERE_BUILD_FOR_AUDITING +#endif + +#ifdef MESOSPHERE_BUILD_FOR_AUDITING +#define MESOSPHERE_BUILD_FOR_DEBUGGING +#endif + +#ifdef MESOSPHERE_BUILD_FOR_DEBUGGING +#define MESOSPHERE_ENABLE_ASSERTIONS +#define MESOSPHERE_ENABLE_DEBUG_PRINT +#endif + +#include diff --git a/libraries/libmesosphere/include/mesosphere/kern_debug_log.hpp b/libraries/libmesosphere/include/mesosphere/kern_debug_log.hpp new file mode 100644 index 000000000..94aaabf66 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_debug_log.hpp @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
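The counts above drive slab sizing at boot. Purely as an illustration of how CalculateTotalSlabHeapSize could consume them (the alignment policy and helper are invented here, and the real per-type sizes come from the kernel's slab declarations):

    // Hypothetical sketch; obj_size stands in for each type's slab object size.
    size_t CalculateTotalSlabHeapSizeSketch(const KSlabResourceCounts &counts) {
        size_t size = 0;
        auto add = [&](size_t count, size_t obj_size) {
            size = util::AlignUp(size, PageSize); // assumption: each slab region page-aligned
            size += count * obj_size;
        };
        add(counts.num_KProcess, sizeof(KProcess)); // one add(...) per counter field
        add(counts.num_KThread,  sizeof(KThread));
        // ... remaining counters elided ...
        return size;
    }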
+ */ +#pragma once +#include + +namespace ams::kern { + + class KDebugLog { + private: + static NOINLINE void VSNPrintf(char *dst, const size_t dst_size, const char *format, ::std::va_list vl); + public: + static NOINLINE void Initialize(); + + static NOINLINE void Printf(const char *format, ...) __attribute__((format(printf, 1, 2))); + static NOINLINE void VPrintf(const char *format, ::std::va_list vl); + }; + +} + +#ifndef MESOSPHERE_DEBUG_LOG_SELECTED + + #ifdef ATMOSPHERE_BOARD_NINTENDO_NX + #define MESOSPHERE_DEBUG_LOG_USE_UART_C + #else + #error "Unknown board for Default Debug Log Source" + #endif + + #define MESOSPHERE_DEBUG_LOG_SELECTED + +#endif + +#define MESOSPHERE_RELEASE_LOG(fmt, ...) ::ams::kern::KDebugLog::Printf((fmt), ## __VA_ARGS__) +#define MESOSPHERE_RELEASE_VLOG(fmt, vl) ::ams::kern::KDebugLog::VPrintf((fmt), (vl)) + +#ifdef MESOSPHERE_ENABLE_DEBUG_PRINT +#define MESOSPHERE_LOG(fmt, ...) MESOSPHERE_RELEASE_LOG((fmt), ## __VA_ARGS__) +#define MESOSPHERE_VLOG(fmt, vl) MESOSPHERE_RELEASE_VLOG((fmt), (vl)) +#else +#define MESOSPHERE_LOG(fmt, ...) do { MESOSPHERE_UNUSED(fmt); MESOSPHERE_UNUSED(__VA_ARGS__); } while (0) +#define MESOSPHERE_VLOG(fmt, vl) do { MESOSPHERE_UNUSED(fmt); MESOSPHERE_UNUSED(vl); } while (0) +#endif diff --git a/libraries/libmesosphere/include/mesosphere/kern_initial_process.hpp b/libraries/libmesosphere/include/mesosphere/kern_initial_process.hpp index e4674aa1a..e5e6c2f40 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_initial_process.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_initial_process.hpp @@ -14,8 +14,8 @@ * along with this program. If not, see . */ #pragma once -#include -#include "kern_panic.hpp" +#include +#include namespace ams::kern { @@ -29,4 +29,10 @@ namespace ams::kern { u32 reserved; }; + NOINLINE void CopyInitialProcessBinaryToKernelMemory(); + NOINLINE void CreateAndRunInitialProcesses(); + + u64 GetInitialProcessIdMin(); + u64 GetInitialProcessIdMax(); + } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_address_arbiter.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_address_arbiter.hpp new file mode 100644 index 000000000..14f23b4e4 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_address_arbiter.hpp @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include + +namespace ams::kern { + + class KAddressArbiter { + public: + using ThreadTree = KConditionVariable::ThreadTree; + private: + ThreadTree tree; + public: + constexpr KAddressArbiter() : tree() { /* ... 
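Because the disabled variants still expand to a statement that consumes their arguments, call sites need no #ifdef guards and their arguments never become "unused" in release builds. A small usage sketch (message text invented):

    // Compiles the same whether or not MESOSPHERE_ENABLE_DEBUG_PRINT is defined.
    void LogPoolSizesSketch(size_t application_size, size_t applet_size) {
        MESOSPHERE_LOG("pools: application=0x%zx applet=0x%zx\n", application_size, applet_size);
        MESOSPHERE_RELEASE_LOG("boot: pools configured\n"); // kept in all builds
    }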
*/ } + + Result SignalToAddress(uintptr_t addr, ams::svc::SignalType type, s32 value, s32 count) { + switch (type) { + case ams::svc::SignalType_Signal: + return this->Signal(addr, count); + case ams::svc::SignalType_SignalAndIncrementIfEqual: + return this->SignalAndIncrementIfEqual(addr, value, count); + case ams::svc::SignalType_SignalAndModifyByWaitingCountIfEqual: + return this->SignalAndModifyByWaitingCountIfEqual(addr, value, count); + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + } + + Result WaitForAddress(uintptr_t addr, ams::svc::ArbitrationType type, s32 value, s64 timeout) { + switch (type) { + case ams::svc::ArbitrationType_WaitIfLessThan: + return this->WaitIfLessThan(addr, value, false, timeout); + case ams::svc::ArbitrationType_DecrementAndWaitIfLessThan: + return this->WaitIfLessThan(addr, value, true, timeout); + case ams::svc::ArbitrationType_WaitIfEqual: + return this->WaitIfEqual(addr, value, timeout); + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + } + private: + Result Signal(uintptr_t addr, s32 count); + Result SignalAndIncrementIfEqual(uintptr_t addr, s32 value, s32 count); + Result SignalAndModifyByWaitingCountIfEqual(uintptr_t addr, s32 value, s32 count); + Result WaitIfLessThan(uintptr_t addr, s32 value, bool decrement, s64 timeout); + Result WaitIfEqual(uintptr_t addr, s32 value, s64 timeout); + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_address_space_info.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_address_space_info.hpp new file mode 100644 index 000000000..0457381f6 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_address_space_info.hpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once + +namespace ams::kern { + + + + struct KAddressSpaceInfo { + public: + enum Type { + Type_32Bit = 0, + Type_Small64Bit = 1, + Type_Large64Bit = 2, + Type_Heap = 3, + Type_Stack = 4, + Type_Alias = 5, + + Type_Count, + }; + private: + size_t bit_width; + size_t address; + size_t size; + Type type; + public: + static uintptr_t GetAddressSpaceStart(size_t width, Type type); + static size_t GetAddressSpaceSize(size_t width, Type type); + + constexpr KAddressSpaceInfo(size_t bw, size_t a, size_t s, Type t) : bit_width(bw), address(a), size(s), type(t) { /* ... 
*/ } + + constexpr size_t GetWidth() const { return this->bit_width; } + constexpr size_t GetAddress() const { return this->address; } + constexpr size_t GetSize() const { return this->size; } + constexpr Type GetType() const { return this->type; } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_affinity_mask.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_affinity_mask.hpp new file mode 100644 index 000000000..a3a263fba --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_affinity_mask.hpp @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include + +namespace ams::kern { + + class KAffinityMask { + private: + static constexpr u64 AllowedAffinityMask = (1ul << cpu::NumCores) - 1; + private: + u64 mask; + private: + static constexpr ALWAYS_INLINE u64 GetCoreBit(s32 core) { + MESOSPHERE_ASSERT(0 <= core && core < static_cast(cpu::NumCores)); + return (1ul << core); + } + public: + constexpr ALWAYS_INLINE KAffinityMask() : mask(0) { MESOSPHERE_ASSERT_THIS(); } + + constexpr ALWAYS_INLINE u64 GetAffinityMask() const { return this->mask; } + + constexpr ALWAYS_INLINE void SetAffinityMask(u64 new_mask) { + MESOSPHERE_ASSERT((new_mask & ~AllowedAffinityMask) == 0); + this->mask = new_mask; + } + + constexpr ALWAYS_INLINE bool GetAffinity(s32 core) const { + return this->mask & GetCoreBit(core); + } + + constexpr ALWAYS_INLINE void SetAffinity(s32 core, bool set) { + MESOSPHERE_ASSERT(0 <= core && core < static_cast(cpu::NumCores)); + + if (set) { + this->mask |= GetCoreBit(core); + } else { + this->mask &= ~GetCoreBit(core); + } + } + + constexpr ALWAYS_INLINE void SetAll() { + this->mask = AllowedAffinityMask; + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp new file mode 100644 index 000000000..460dbe0f3 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
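A short usage sketch for the mask above (the chosen cores are arbitrary):

    // Build a mask allowing cores 0 and 2 only.
    u64 MakeExampleAffinityMask() {
        KAffinityMask mask;            // starts with no cores allowed
        mask.SetAffinity(0, true);
        mask.SetAffinity(2, true);
        return mask.GetAffinityMask(); // == 0b101; SetAll() would allow every core
    }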
+ */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KProcess; + + #define MESOSPHERE_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \ + NON_COPYABLE(CLASS); \ + NON_MOVEABLE(CLASS); \ + private: \ + friend class ::ams::kern::KClassTokenGenerator; \ + static constexpr inline auto ObjectType = ::ams::kern::KClassTokenGenerator::ObjectType::CLASS; \ + static constexpr inline const char * const TypeName = #CLASS; \ + static constexpr inline ClassTokenType ClassToken() { return ::ams::kern::ClassToken; } \ + public: \ + using BaseClass = BASE_CLASS; \ + static constexpr ALWAYS_INLINE TypeObj GetStaticTypeObj() { \ + constexpr ClassTokenType Token = ClassToken(); \ + return TypeObj(TypeName, Token); \ + } \ + static constexpr ALWAYS_INLINE const char *GetStaticTypeName() { return TypeName; } \ + virtual TypeObj GetTypeObj() const { return GetStaticTypeObj(); } \ + virtual const char *GetTypeName() { return GetStaticTypeName(); } \ + private: + + + class KAutoObject { + protected: + class TypeObj { + private: + const char *name; + ClassTokenType class_token; + public: + constexpr explicit TypeObj(const char *n, ClassTokenType tok) : name(n), class_token(tok) { /* ... */ } + + constexpr ALWAYS_INLINE const char *GetName() const { return this->name; } + constexpr ALWAYS_INLINE ClassTokenType GetClassToken() const { return this->class_token; } + + constexpr ALWAYS_INLINE bool operator==(const TypeObj &rhs) { + return this->GetClassToken() == rhs.GetClassToken(); + } + + constexpr ALWAYS_INLINE bool operator!=(const TypeObj &rhs) { + return this->GetClassToken() != rhs.GetClassToken(); + } + + constexpr ALWAYS_INLINE bool IsDerivedFrom(const TypeObj &rhs) { + return (this->GetClassToken() | rhs.GetClassToken()) == this->GetClassToken(); + } + }; + private: + MESOSPHERE_AUTOOBJECT_TRAITS(KAutoObject, KAutoObject); + private: + std::atomic ref_count; + public: + static KAutoObject *Create(KAutoObject *ptr); + public: + constexpr ALWAYS_INLINE explicit KAutoObject() : ref_count(0) { MESOSPHERE_ASSERT_THIS(); } + virtual ~KAutoObject() { MESOSPHERE_ASSERT_THIS(); } + + /* Destroy is responsible for destroying the auto object's resources when ref_count hits zero. */ + virtual void Destroy() { MESOSPHERE_ASSERT_THIS(); } + + /* Finalize is responsible for cleaning up resource, but does not destroy the object. */ + virtual void Finalize() { MESOSPHERE_ASSERT_THIS(); } + + virtual KProcess *GetOwner() const { return nullptr; } + + u32 GetReferenceCount() const { + return this->ref_count; + } + + ALWAYS_INLINE bool IsDerivedFrom(const TypeObj &rhs) const { + return this->GetTypeObj().IsDerivedFrom(rhs); + } + + ALWAYS_INLINE bool IsDerivedFrom(const KAutoObject &rhs) const { + return this->IsDerivedFrom(rhs.GetTypeObj()); + } + + template + ALWAYS_INLINE Derived DynamicCast() { + static_assert(std::is_pointer::value); + using DerivedType = typename std::remove_pointer::type; + + if (AMS_LIKELY(this->IsDerivedFrom(DerivedType::GetStaticTypeObj()))) { + return static_cast(this); + } else { + return nullptr; + } + } + + template + ALWAYS_INLINE const Derived DynamicCast() const { + static_assert(std::is_pointer::value); + using DerivedType = typename std::remove_pointer::type; + + if (AMS_LIKELY(this->IsDerivedFrom(DerivedType::GetStaticTypeObj()))) { + return static_cast(this); + } else { + return nullptr; + } + } + + ALWAYS_INLINE bool Open() { + MESOSPHERE_ASSERT_THIS(); + + /* Atomically increment the reference count, only if it's positive. 
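This is the standard try-increment-unless-zero idiom: once an object's count has reached zero it must never be revived, and the weak compare-exchange simply retries under contention. The same loop in stand-alone form, for reference (rc is a placeholder for the member below):

    bool TryOpenSketch(std::atomic<u32> &rc) {
        u32 cur = rc.load(std::memory_order_acquire);
        do {
            if (cur == 0) {
                return false; // already dead; a new reference must not resurrect it
            }
        } while (!rc.compare_exchange_weak(cur, cur + 1, std::memory_order_relaxed));
        return true;
    }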
*/ + u32 cur_ref_count = this->ref_count.load(std::memory_order_acquire); + do { + if (AMS_UNLIKELY(cur_ref_count == 0)) { + return false; + } + MESOSPHERE_ABORT_UNLESS(cur_ref_count < cur_ref_count + 1); + } while (!this->ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count + 1, std::memory_order_relaxed)); + + return true; + } + + ALWAYS_INLINE void Close() { + MESOSPHERE_ASSERT_THIS(); + + /* Atomically decrement the reference count, not allowing it to become negative. */ + u32 cur_ref_count = this->ref_count.load(std::memory_order_acquire); + do { + MESOSPHERE_ABORT_UNLESS(cur_ref_count > 0); + } while (!this->ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1, std::memory_order_relaxed)); + + /* If ref count hits zero, destroy the object. */ + if (cur_ref_count - 1 == 0) { + this->Destroy(); + } + } + }; + + class KAutoObjectWithListContainer; + + class KAutoObjectWithList : public KAutoObject { + private: + friend class KAutoObjectWithListContainer; + private: + util::IntrusiveRedBlackTreeNode list_node; + public: + static ALWAYS_INLINE int Compare(const KAutoObjectWithList &lhs, const KAutoObjectWithList &rhs) { + const u64 lid = lhs.GetId(); + const u64 rid = rhs.GetId(); + + if (lid < rid) { + return -1; + } else if (lid > rid) { + return 1; + } else { + return 0; + } + } + public: + virtual u64 GetId() const { + return reinterpret_cast(this); + } + }; + + template + class KScopedAutoObject { + static_assert(std::is_base_of::value); + NON_COPYABLE(KScopedAutoObject); + private: + T *obj; + private: + constexpr ALWAYS_INLINE void Swap(KScopedAutoObject &rhs) { + /* TODO: C++20 constexpr std::swap */ + T *tmp = rhs.obj; + rhs.obj = this->obj; + this->obj = tmp; + } + public: + constexpr ALWAYS_INLINE KScopedAutoObject() : obj(nullptr) { /* ... */ } + constexpr ALWAYS_INLINE KScopedAutoObject(T *o) : obj(o) { + if (this->obj != nullptr) { + this->obj->Open(); + } + } + + ALWAYS_INLINE ~KScopedAutoObject() { + if (this->obj != nullptr) { + this->obj->Close(); + } + this->obj = nullptr; + } + + constexpr ALWAYS_INLINE KScopedAutoObject(KScopedAutoObject &&rhs) { + this->obj = rhs.obj; + rhs.obj = nullptr; + } + + constexpr ALWAYS_INLINE KScopedAutoObject &operator=(KScopedAutoObject &&rhs) { + rhs.Swap(*this); + return *this; + } + + constexpr ALWAYS_INLINE T *operator->() { return this->obj; } + constexpr ALWAYS_INLINE T &operator*() { return *this->obj; } + + constexpr ALWAYS_INLINE void Reset(T *o) { + KScopedAutoObject(o).Swap(*this); + } + + constexpr ALWAYS_INLINE bool IsNull() const { return this->obj == nullptr; } + constexpr ALWAYS_INLINE bool IsNotNull() const { return this->obj != nullptr; } + }; + + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_auto_object_container.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object_container.hpp new file mode 100644 index 000000000..3d501bdf1 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object_container.hpp @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
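A usage sketch for the scope guard above; KExampleObject is a hypothetical KAutoObject-derived type:

    void UseObjectSketch(KExampleObject *raw) {
        KScopedAutoObject<KExampleObject> obj(raw); // Open()s the reference if non-null
        if (obj.IsNotNull()) {
            obj->GetTypeName(); // any member access goes through operator->
        }
    } // destructor Close()s; the last Close() Destroy()s the object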
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KAutoObjectWithListContainer { + NON_COPYABLE(KAutoObjectWithListContainer); + NON_MOVEABLE(KAutoObjectWithListContainer); + private: + using ListType = util::IntrusiveRedBlackTreeMemberTraits<&KAutoObjectWithList::list_node>::TreeType; + public: + class ListAccessor : public KScopedLightLock { + private: + ListType &list; + public: + explicit ListAccessor(KAutoObjectWithListContainer *container) : KScopedLightLock(container->lock), list(container->object_list) { /* ... */ } + explicit ListAccessor(KAutoObjectWithListContainer &container) : KScopedLightLock(container.lock), list(container.object_list) { /* ... */ } + + typename ListType::iterator begin() const { + return this->list.begin(); + } + + typename ListType::iterator end() const { + return this->list.end(); + } + + typename ListType::iterator find(typename ListType::const_reference ref) const { + return this->list.find(ref); + } + }; + + friend class ListAccessor; + private: + KLightLock lock; + ListType object_list; + public: + constexpr KAutoObjectWithListContainer() : lock(), object_list() { MESOSPHERE_ASSERT_THIS(); } + + void Initialize() { MESOSPHERE_ASSERT_THIS(); } + void Finalize() { MESOSPHERE_ASSERT_THIS(); } + + Result Register(KAutoObjectWithList *obj); + Result Unregister(KAutoObjectWithList *obj); + size_t GetOwnedCount(KProcess *owner); + }; + + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_capabilities.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_capabilities.hpp new file mode 100644 index 000000000..721c145f8 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_capabilities.hpp @@ -0,0 +1,280 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
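A usage sketch: the accessor holds the container's lock for the whole traversal, so iteration stays consistent:

    size_t CountObjectsSketch(KAutoObjectWithListContainer &container) {
        KAutoObjectWithListContainer::ListAccessor accessor(container);
        size_t n = 0;
        for (auto it = accessor.begin(); it != accessor.end(); ++it) {
            n++; // lock held throughout; entries cannot be unregistered mid-walk
        }
        return n;
    }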
+ */ +#pragma once +#include +#include +#include +#include + +namespace ams::kern { + + class KCapabilities { + private: + static constexpr size_t SvcFlagCount = svc::NumSupervisorCalls / BITSIZEOF(u8); + static constexpr size_t IrqFlagCount = /* TODO */0x80; + + enum class CapabilityType : u32 { + CorePriority = (1u << 3) - 1, + SyscallMask = (1u << 4) - 1, + MapRange = (1u << 6) - 1, + MapIoPage = (1u << 7) - 1, + MapRegion = (1u << 10) - 1, + InterruptPair = (1u << 11) - 1, + ProgramType = (1u << 13) - 1, + KernelVersion = (1u << 14) - 1, + HandleTable = (1u << 15) - 1, + DebugFlags = (1u << 16) - 1, + + Invalid = 0u, + Padding = ~0u, + }; + + using RawCapabilityValue = util::BitPack32::Field<0, BITSIZEOF(util::BitPack32), u32>; + + static constexpr CapabilityType GetCapabilityType(const util::BitPack32 cap) { + const u32 value = cap.Get(); + return static_cast((~value & (value + 1)) - 1); + } + + static constexpr u32 GetCapabilityFlag(CapabilityType type) { + return static_cast(type) + 1; + } + + static constexpr u32 CountTrailingZero(u32 flag) { + for (u32 i = 0; i < BITSIZEOF(u32); i++) { + if (flag & (1u << i)) { + return i; + } + } + return BITSIZEOF(u32); + } + + static constexpr u32 GetCapabilityId(CapabilityType type) { + const u32 flag = GetCapabilityFlag(type); + if (true /* C++20: std::is_constant_evaluated() */) { + return CountTrailingZero(flag); + } else { + return static_cast(__builtin_ctz(flag)); + } + } + + template + using Field = util::BitPack32::Field; + + #define DEFINE_FIELD(name, prev, ...) using name = Field + + template + static constexpr inline u32 CapabilityFlag = []() -> u32 { + return static_cast(Type) + 1; + }(); + + template + static constexpr inline u32 CapabilityId = []() -> u32 { + const u32 flag = static_cast(Type) + 1; + if (true /* C++20: std::is_constant_evaluated() */) { + for (u32 i = 0; i < BITSIZEOF(u32); i++) { + if (flag & (1u << i)) { + return i; + } + } + return BITSIZEOF(u32); + } else { + return __builtin_ctz(flag); + } + }(); + + struct CorePriority { + using IdBits = Field<0, CapabilityId + 1>; + + DEFINE_FIELD(LowestThreadPriority, IdBits, 6); + DEFINE_FIELD(HighestThreadPriority, LowestThreadPriority, 6); + DEFINE_FIELD(MinimumCoreId, HighestThreadPriority, 8); + DEFINE_FIELD(MaximumCoreId, MinimumCoreId, 8); + }; + + struct SyscallMask { + using IdBits = Field<0, CapabilityId + 1>; + + DEFINE_FIELD(Mask, IdBits, 24); + DEFINE_FIELD(Index, Mask, 3); + }; + + static constexpr u64 PhysicalMapAllowedMask = (1ul << 36) - 1; + + struct MapRange { + using IdBits = Field<0, CapabilityId + 1>; + + DEFINE_FIELD(Address, IdBits, 24); + DEFINE_FIELD(ReadOnly, Address, 1, bool); + }; + + struct MapRangeSize { + using IdBits = Field<0, CapabilityId + 1>; + + DEFINE_FIELD(Pages, IdBits, 20); + DEFINE_FIELD(Reserved, Pages, 4); + DEFINE_FIELD(Normal, Reserved, 1, bool); + }; + + struct MapIoPage { + using IdBits = Field<0, CapabilityId + 1>; + + DEFINE_FIELD(Address, IdBits, 24); + }; + + enum class RegionType : u32 { + None = 0, + KernelTraceBuffer = 1, + OnMemoryBootImage = 2, + DTB = 3, + }; + + struct MapRegion { + using IdBits = Field<0, CapabilityId + 1>; + + DEFINE_FIELD(Region0, IdBits, 6, RegionType); + DEFINE_FIELD(ReadOnly0, Region0, 1, bool); + DEFINE_FIELD(Region1, ReadOnly0, 6, RegionType); + DEFINE_FIELD(ReadOnly1, Region1, 1, bool); + DEFINE_FIELD(Region2, ReadOnly1, 6, RegionType); + DEFINE_FIELD(ReadOnly2, Region2, 1, bool); + }; + + static const u32 PaddingInterruptId = 0x3FF; + + struct InterruptPair { + using IdBits = Field<0, 
CapabilityId + 1>;
+
+ DEFINE_FIELD(InterruptId0, IdBits, 10);
+ DEFINE_FIELD(InterruptId1, InterruptId0, 10);
+ };
+
+
+ struct ProgramType {
+ using IdBits = Field<0, CapabilityId + 1>;
+
+ DEFINE_FIELD(Type, IdBits, 3);
+ DEFINE_FIELD(Reserved, Type, 15);
+ };
+
+ struct KernelVersion {
+ using IdBits = Field<0, CapabilityId + 1>;
+
+ DEFINE_FIELD(MinorVersion, IdBits, 4);
+ DEFINE_FIELD(MajorVersion, MinorVersion, 13);
+ };
+
+ struct HandleTable {
+ using IdBits = Field<0, CapabilityId + 1>;
+
+ DEFINE_FIELD(Size, IdBits, 10);
+ DEFINE_FIELD(Reserved, Size, 6);
+ };
+
+ struct DebugFlags {
+ using IdBits = Field<0, CapabilityId + 1>;
+
+ DEFINE_FIELD(AllowDebug, IdBits, 1, bool);
+ DEFINE_FIELD(ForceDebug, AllowDebug, 1, bool);
+ DEFINE_FIELD(Reserved, ForceDebug, 13);
+ };
+
+ #undef DEFINE_FIELD
+
+ static constexpr u32 InitializeOnceFlags = CapabilityFlag |
+ CapabilityFlag |
+ CapabilityFlag |
+ CapabilityFlag |
+ CapabilityFlag;
+ private:
+ u8 svc_access_flags[SvcFlagCount]{};
+ u8 irq_access_flags[IrqFlagCount]{};
+ u64 core_mask{};
+ u64 priority_mask{};
+ util::BitPack32 debug_capabilities;
+ s32 handle_table_size{};
+ util::BitPack32 intended_kernel_version;
+ u32 program_type{};
+ private:
+ static constexpr ALWAYS_INLINE void SetSvcAllowedImpl(u8 *data, u32 id) {
+ constexpr size_t BitsPerWord = BITSIZEOF(*data);
+ MESOSPHERE_ASSERT(id < svc::SvcId_Count);
+ data[id / BitsPerWord] |= (1ul << (id % BitsPerWord));
+ }
+
+ static constexpr ALWAYS_INLINE void ClearSvcAllowedImpl(u8 *data, u32 id) {
+ constexpr size_t BitsPerWord = BITSIZEOF(*data);
+ MESOSPHERE_ASSERT(id < svc::SvcId_Count);
+ data[id / BitsPerWord] &= ~(1ul << (id % BitsPerWord));
+ }
+
+ bool SetSvcAllowed(u32 id) {
+ if (id < BITSIZEOF(this->svc_access_flags)) {
+ SetSvcAllowedImpl(this->svc_access_flags, id);
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ bool SetInterruptAllowed(u32 id) {
+ constexpr size_t BitsPerWord = BITSIZEOF(this->irq_access_flags[0]);
+ if (id < BITSIZEOF(this->irq_access_flags)) {
+ this->irq_access_flags[id / BitsPerWord] |= (1ul << (id % BitsPerWord));
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ Result SetCorePriorityCapability(const util::BitPack32 cap);
+ Result SetSyscallMaskCapability(const util::BitPack32 cap, u32 &set_svc);
+ Result MapRange(const util::BitPack32 cap, const util::BitPack32 size_cap, KProcessPageTable *page_table);
+ Result MapIoPage(const util::BitPack32 cap, KProcessPageTable *page_table);
+ Result MapRegion(const util::BitPack32 cap, KProcessPageTable *page_table);
+ Result SetInterruptPairCapability(const util::BitPack32 cap);
+ Result SetProgramTypeCapability(const util::BitPack32 cap);
+ Result SetKernelVersionCapability(const util::BitPack32 cap);
+ Result SetHandleTableCapability(const util::BitPack32 cap);
+ Result SetDebugFlagsCapability(const util::BitPack32 cap);
+
+ Result SetCapability(const util::BitPack32 cap, u32 &set_flags, u32 &set_svc, KProcessPageTable *page_table);
+ Result SetCapabilities(const u32 *caps, s32 num_caps, KProcessPageTable *page_table);
+ public:
+ constexpr KCapabilities() : debug_capabilities(0), intended_kernel_version(0) { /* ...
*/ } + + Result Initialize(const u32 *caps, s32 num_caps, KProcessPageTable *page_table); + + constexpr u64 GetCoreMask() const { return this->core_mask; } + constexpr u64 GetPriorityMask() const { return this->priority_mask; } + constexpr s32 GetHandleTableSize() const { return this->handle_table_size; } + + ALWAYS_INLINE void CopySvcPermissionsTo(KThread::StackParameters &sp) const { + static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission)); + std::memcpy(sp.svc_permission, this->svc_access_flags, sizeof(this->svc_access_flags)); + + /* Clear specific SVCs based on our state. */ + ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException); + ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_SynchronizePreemptionState); + if (sp.is_preemption_state_pinned) { + ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_GetInfo); + } + } + + /* TODO: Member functions. */ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_class_token.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_class_token.hpp new file mode 100644 index 000000000..c37c88f1a --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_class_token.hpp @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include + +namespace ams::kern { + + class KAutoObject; + + class KClassTokenGenerator { + public: + using TokenBaseType = u16; + public: + static constexpr size_t BaseClassBits = 8; + static constexpr size_t FinalClassBits = (sizeof(TokenBaseType) * CHAR_BIT) - BaseClassBits; + /* One bit per base class. */ + static constexpr size_t NumBaseClasses = BaseClassBits; + /* Final classes are permutations of three bits. 
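The decode in GetCapabilityType works because every descriptor identifies its type by a run of trailing set bits followed by a zero, and (~value & (value + 1)) - 1 isolates exactly that run. A worked example (the payload bits are arbitrary):

    // CorePriority is (1u << 3) - 1 = 0b0111: three trailing ones.
    constexpr u32 example_cap = 0x12345607;                     // low bits ...0000'0111
    constexpr u32 decoded = (~example_cap & (example_cap + 1)) - 1;
    static_assert(decoded == ((1u << 3) - 1));                  // CapabilityType::CorePriority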
*/ + static constexpr size_t NumFinalClasses = [] { + TokenBaseType index = 0; + for (size_t i = 0; i < FinalClassBits; i++) { + for (size_t j = i + 1; j < FinalClassBits; j++) { + for (size_t k = j + 1; k < FinalClassBits; k++) { + index++; + } + } + } + return index; + }(); + private: + template + static constexpr inline TokenBaseType BaseClassToken = BIT(Index); + + template + static constexpr inline TokenBaseType FinalClassToken = [] { + TokenBaseType index = 0; + for (size_t i = 0; i < FinalClassBits; i++) { + for (size_t j = i + 1; j < FinalClassBits; j++) { + for (size_t k = j + 1; k < FinalClassBits; k++) { + if ((index++) == Index) { + return ((1ul << i) | (1ul << j) | (1ul << k)) << BaseClassBits; + } + } + } + } + __builtin_unreachable(); + }(); + + template + static constexpr inline TokenBaseType GetClassToken() { + static_assert(std::is_base_of::value); + if constexpr (std::is_same::value) { + static_assert(T::ObjectType == ObjectType::KAutoObject); + return 0; + } else if constexpr (!std::is_final::value) { + static_assert(ObjectType::BaseClassesStart <= T::ObjectType && T::ObjectType < ObjectType::BaseClassesEnd); + constexpr auto ClassIndex = static_cast(T::ObjectType) - static_cast(ObjectType::BaseClassesStart); + return BaseClassToken | GetClassToken(); + } else if constexpr (ObjectType::FinalClassesStart <= T::ObjectType && T::ObjectType < ObjectType::FinalClassesEnd) { + constexpr auto ClassIndex = static_cast(T::ObjectType) - static_cast(ObjectType::FinalClassesStart); + return FinalClassToken | GetClassToken(); + } else { + static_assert(!std::is_same::value, "GetClassToken: Invalid Type"); + } + }; + public: + enum class ObjectType { + KAutoObject, + + BaseClassesStart, + + KSynchronizationObject = BaseClassesStart, + KReadableEvent, + + BaseClassesEnd, + + FinalClassesStart = BaseClassesEnd, + + KInterruptEvent = FinalClassesStart, + KDebug, + KThread, + KServerPort, + KServerSession, + KClientPort, + KClientSession, + KProcess, + KResourceLimit, + KLightSession, + KPort, + KSession, + KSharedMemory, + KEvent, + KWritableEvent, + KLightClientSession, + KLightServerSession, + KTransferMemory, + KDeviceAddressSpace, + KSessionRequest, + KCodeMemory, + + FinalClassesEnd = FinalClassesStart + NumFinalClasses, + }; + + template + static constexpr inline TokenBaseType ClassToken = GetClassToken(); + }; + + using ClassTokenType = KClassTokenGenerator::TokenBaseType; + + template + static constexpr inline ClassTokenType ClassToken = KClassTokenGenerator::ClassToken; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_code_memory.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_code_memory.hpp new file mode 100644 index 000000000..5f3f30afa --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_code_memory.hpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
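The token scheme above makes KAutoObject's IsDerivedFrom a pure bit test: a class's token contains all of its ancestors' bits, so derived-ness is "(mine | theirs) == mine". A worked sketch with invented bit positions:

    constexpr u16 base_token    = 1u << 1;                          // one base-class bit (low 8 bits)
    constexpr u16 derived_token = (0b0000'0111u << 8) | base_token; // final-class bits ORed with base's
    static_assert((derived_token | base_token) == derived_token);   // derived is-a base
    static_assert((base_token | derived_token) != base_token);      // base is-not-a derived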
+ */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KCodeMemory final : public KAutoObjectWithSlabHeapAndContainer { + MESOSPHERE_AUTOOBJECT_TRAITS(KCodeMemory, KAutoObject); + public: + /* TODO: This is a placeholder definition. */ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_condition_variable.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_condition_variable.hpp new file mode 100644 index 000000000..85462c611 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_condition_variable.hpp @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + struct KConditionVariableComparator { + static constexpr ALWAYS_INLINE int Compare(const KThread &lhs, const KThread &rhs) { + const uintptr_t l_key = lhs.GetConditionVariableKey(); + const uintptr_t r_key = rhs.GetConditionVariableKey(); + + if (l_key < r_key) { + /* Sort first by key */ + return -1; + } else if (l_key == r_key && lhs.GetPriority() < rhs.GetPriority()) { + /* And then by priority. */ + return -1; + } else { + return 1; + } + } + }; + + class KConditionVariable { + public: + using ThreadTree = util::IntrusiveRedBlackTreeMemberTraits<&KThread::condvar_arbiter_tree_node>::TreeType; + private: + ThreadTree tree; + public: + constexpr KConditionVariable() : tree() { /* ... */ } + + /* Arbitration. */ + Result SignalToAddress(KProcessAddress addr); + Result WaitForAddress(ams::svc::Handle handle, KProcessAddress addr, u32 value); + + /* Condition variable. */ + void Signal(uintptr_t cv_key, s32 count); + Result Wait(KProcessAddress addr, uintptr_t key, u32 value, s64 timeout); + + ALWAYS_INLINE void BeforeUpdatePriority(KThread *thread) { + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + this->tree.erase(this->tree.iterator_to(*thread)); + } + + ALWAYS_INLINE void AfterUpdatePriority(KThread *thread) { + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + this->tree.insert(*thread); + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_core_local_region.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_core_local_region.hpp new file mode 100644 index 000000000..8b4b74a15 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_core_local_region.hpp @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ams::kern { + + struct KCoreLocalContext { + KCurrentContext current; + KScheduler scheduler; + KInterruptTaskManager interrupt_task_manager; + KInterruptManager interrupt_manager; + KHardwareTimer hardware_timer; + /* Everything after this point is for debugging. */ + /* Retail kernel doesn't even consistently update these fields. */ + u64 num_sw_interrupts; + u64 num_hw_interrupts; + std::atomic num_svc; + u64 num_process_switches; + u64 num_thread_switches; + u64 num_fpu_switches; + u64 num_scheduler_updates; + u64 num_invoked_scheduler_updates; + std::atomic num_specific_svc[0x80]; + u32 perf_counters[6]; + }; + static_assert(sizeof(KCoreLocalContext) < PageSize); + + struct KCoreLocalPage { + KCoreLocalContext context; + u8 padding[PageSize - sizeof(KCoreLocalContext)]; + }; + static_assert(sizeof(KCoreLocalPage) == PageSize); + + struct KCoreLocalRegion { + KCoreLocalPage current; + KCoreLocalPage absolute[cpu::NumCores]; + }; + static_assert(sizeof(KCoreLocalRegion) == PageSize * (1 + cpu::NumCores)); + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_current_context.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_current_context.hpp new file mode 100644 index 000000000..ae8a7947d --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_current_context.hpp @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
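The layout above gives each core a "current" page mapped at a fixed per-core virtual slot, plus an "absolute" array through which any core's page can be reached. A sketch of the cross-core lookup that layout permits:

    // Reach another core's context through the absolute array.
    KCoreLocalContext *GetCoreContextSketch(KCoreLocalRegion *region, s32 core_id) {
        return std::addressof(region->absolute[core_id].context);
    }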
+ */ +#pragma once +#include + +namespace ams::kern { + + class KThread; + class KProcess; + class KScheduler; + class KInterruptTaskManager; + + struct KCurrentContext { + std::atomic current_thread; + std::atomic current_process; + KScheduler *scheduler; + KInterruptTaskManager *interrupt_task_manager; + s32 core_id; + void *exception_stack_top; + }; + static_assert(std::is_pod::value); + static_assert(sizeof(KCurrentContext) <= cpu::DataCacheLineSize); + + namespace impl { + + ALWAYS_INLINE KCurrentContext &GetCurrentContext() { + return *reinterpret_cast(cpu::GetCoreLocalRegionAddress()); + } + + } + + ALWAYS_INLINE KThread *GetCurrentThreadPointer() { + return impl::GetCurrentContext().current_thread.load(std::memory_order_relaxed); + } + + ALWAYS_INLINE KThread &GetCurrentThread() { + return *GetCurrentThreadPointer(); + } + + ALWAYS_INLINE KProcess *GetCurrentProcessPointer() { + return impl::GetCurrentContext().current_process.load(std::memory_order_relaxed); + } + + ALWAYS_INLINE KProcess &GetCurrentProcess() { + return *GetCurrentProcessPointer(); + } + + ALWAYS_INLINE KScheduler *GetCurrentSchedulerPointer() { + return impl::GetCurrentContext().scheduler; + } + + ALWAYS_INLINE KScheduler &GetCurrentScheduler() { + return *GetCurrentSchedulerPointer(); + } + + ALWAYS_INLINE KInterruptTaskManager *GetCurrentInterruptTaskManagerPointer() { + return impl::GetCurrentContext().interrupt_task_manager; + } + + ALWAYS_INLINE KInterruptTaskManager &GetCurrentInterruptTaskManager() { + return *GetCurrentInterruptTaskManagerPointer(); + } + + ALWAYS_INLINE s32 GetCurrentCoreId() { + return impl::GetCurrentContext().core_id; + } + + ALWAYS_INLINE void SetCurrentThread(KThread *new_thread) { + impl::GetCurrentContext().current_thread = new_thread; + } + + ALWAYS_INLINE void SetCurrentProcess(KProcess *new_process) { + impl::GetCurrentContext().current_process = new_process; + } + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_debug_base.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_debug_base.hpp new file mode 100644 index 000000000..b1515b379 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_debug_base.hpp @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include + +namespace ams::kern { + + class KDebugBase : public KSynchronizationObject { + public: + /* TODO: This is a placeholder definition. 
*/ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_device_address_space.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_device_address_space.hpp new file mode 100644 index 000000000..74fe90922 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_device_address_space.hpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include +#include + +namespace ams::kern { + + class KDeviceAddressSpace final : public KAutoObjectWithSlabHeapAndContainer { + MESOSPHERE_AUTOOBJECT_TRAITS(KDeviceAddressSpace, KAutoObject); + public: + static void Initialize(); + + /* TODO: This is a placeholder definition. */ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_dpc_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_dpc_manager.hpp new file mode 100644 index 000000000..2b720140f --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_dpc_manager.hpp @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include + +namespace ams::kern { + + class KDpcManager { + private: + static constexpr s32 DpcManagerNormalThreadPriority = 59; + static constexpr s32 DpcManagerPreemptionThreadPriority = 63; + + static_assert(ams::svc::HighestThreadPriority <= DpcManagerNormalThreadPriority && DpcManagerNormalThreadPriority <= ams::svc::LowestThreadPriority); + static_assert(ams::svc::HighestThreadPriority <= DpcManagerPreemptionThreadPriority && DpcManagerPreemptionThreadPriority <= ams::svc::LowestThreadPriority); + private: + static NOINLINE void Initialize(s32 core_id, s32 priority); + public: + static void Initialize() { + const s32 core_id = GetCurrentCoreId(); + if (core_id == static_cast(cpu::NumCores) - 1) { + Initialize(core_id, DpcManagerPreemptionThreadPriority); + } else { + Initialize(core_id, DpcManagerNormalThreadPriority); + } + } + + static NOINLINE void HandleDpc(); + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_slab_heap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_slab_heap.hpp new file mode 100644 index 000000000..2b8e8fbd2 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_slab_heap.hpp @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include +#include + +namespace ams::kern { + + namespace impl { + + class DynamicSlabHeapPage { + private: + u8 buffer[PageSize]; + }; + static_assert(sizeof(DynamicSlabHeapPage) == PageSize); + + }; + + template + class KDynamicSlabHeap { + NON_COPYABLE(KDynamicSlabHeap); + NON_MOVEABLE(KDynamicSlabHeap); + private: + using Impl = impl::KSlabHeapImpl; + using PageBuffer = impl::DynamicSlabHeapPage; + private: + Impl impl; + KDynamicSlabHeap *next_allocator; + std::atomic used; + std::atomic peak; + std::atomic count; + KVirtualAddress address; + size_t size; + private: + ALWAYS_INLINE Impl *GetImpl() { + return std::addressof(this->impl); + } + ALWAYS_INLINE const Impl *GetImpl() const { + return std::addressof(this->impl); + } + public: + constexpr KDynamicSlabHeap() : impl(), next_allocator(), used(), peak(), count(), address(), size() { /* ... */ } + + constexpr KVirtualAddress GetAddress() const { return this->address; } + constexpr size_t GetSize() const { return this->size; } + constexpr size_t GetUsed() const { return this->used; } + constexpr size_t GetPeak() const { return this->peak; } + constexpr size_t GetCount() const { return this->count; } + + constexpr bool IsInRange(KVirtualAddress addr) const { + return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1; + } + + void Initialize(KVirtualAddress memory, size_t sz) { + /* Set tracking fields. */ + this->address = memory; + this->count = sz / sizeof(T); + this->size = this->count * sizeof(T); + + /* Free blocks to memory. 
*/ + u8 *cur = GetPointer(this->address + this->size); + for (size_t i = 0; i < this->count; i++) { + cur -= sizeof(T); + this->GetImpl()->Free(cur); + } + } + + void Initialize(KDynamicSlabHeap *next) { + this->next_allocator = next; + this->address = next->GetAddress(); + this->size = next->GetSize(); + } + + T *Allocate() { + T *allocated = reinterpret_cast(this->GetImpl()->Allocate()); + + /* If we fail to allocate, try to get a new page from our next allocator. */ + if (AMS_UNLIKELY(allocated == nullptr)) { + if (this->next_allocator != nullptr) { + allocated = reinterpret_cast(this->next_allocator->Allocate()); + if (allocated != nullptr) { + /* If we succeeded in getting a page, free the rest to our slab. */ + for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) { + this->GetImpl()->Free(allocated + i); + } + this->count += sizeof(PageBuffer) / sizeof(T); + } + } + } + + if (AMS_LIKELY(allocated != nullptr)) { + /* Construct the object. */ + new (allocated) T(); + + /* Update our tracking. */ + size_t used = ++this->used; + size_t peak = this->peak; + while (peak < used) { + if (this->peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) { + break; + } + } + } + + return allocated; + } + + void Free(T *t) { + this->GetImpl()->Free(t); + --this->used; + } + }; + + class KDynamicPageManager : public KDynamicSlabHeap{}; + class KBlockInfoManager : public KDynamicSlabHeap{}; + class KMemoryBlockSlabManager : public KDynamicSlabHeap{}; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_event.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_event.hpp new file mode 100644 index 000000000..f1db211e1 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_event.hpp @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include +#include + +namespace ams::kern { + + class KEvent final : public KAutoObjectWithSlabHeapAndContainer { + MESOSPHERE_AUTOOBJECT_TRAITS(KEvent, KAutoObject); + public: + /* TODO: This is a placeholder definition. */ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_event_info.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_event_info.hpp new file mode 100644 index 000000000..c458da021 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_event_info.hpp @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
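The next_allocator link above is what lets a typed heap grow at runtime: on exhaustion it pulls one page from a page-level heap and re-carves it into objects. A hypothetical wiring sketch (names invented; the real setup lives in kernel initialization code):

    KDynamicPageManager g_page_heap_sketch;  // page-granular backing heap
    KBlockInfoManager   g_block_info_sketch; // typed heap that refills from it

    void WireDynamicHeapsSketch(KVirtualAddress memory, size_t size) {
        g_page_heap_sketch.Initialize(memory, size);                        // owns the region
        g_block_info_sketch.Initialize(std::addressof(g_page_heap_sketch)); // chains for refill
    }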
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include + +namespace ams::kern { + + class KEventInfo : public KSlabAllocated, public util::IntrusiveListBaseNode { + public: + /* TODO: This is a placeholder definition. */ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_exception_context.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_exception_context.hpp new file mode 100644 index 000000000..e07ce69eb --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_exception_context.hpp @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once + +#ifdef ATMOSPHERE_ARCH_ARM64 + #include + + namespace ams::kern { + using ams::kern::arch::arm64::KExceptionContext; + } +#else + #error "Unknown architecture for KExceptionContext" +#endif diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_handle_table.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_handle_table.hpp new file mode 100644 index 000000000..6f8ceeebc --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_handle_table.hpp @@ -0,0 +1,315 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include +#include +#include +#include + +namespace ams::kern { + + constexpr ALWAYS_INLINE util::BitPack32 GetHandleBitPack(ams::svc::Handle handle) { + return util::BitPack32{handle}; + } + + class KProcess; + class KThread; + + class KHandleTable { + NON_COPYABLE(KHandleTable); + NON_MOVEABLE(KHandleTable); + public: + static constexpr size_t MaxTableSize = 1024; + private: + using HandleRawValue = util::BitPack32::Field<0, BITSIZEOF(u32), u32>; + using HandleEncoded = util::BitPack32::Field<0, BITSIZEOF(ams::svc::Handle), ams::svc::Handle>; + + using HandleIndex = util::BitPack32::Field<0, 15, u16>; + using HandleLinearId = util::BitPack32::Field; + using HandleReserved = util::BitPack32::Field; + + static constexpr u16 MinLinearId = 1; + static constexpr u16 MaxLinearId = util::BitPack32{std::numeric_limits::max()}.Get(); + + static constexpr ALWAYS_INLINE ams::svc::Handle EncodeHandle(u16 index, u16 linear_id) { + util::BitPack32 pack = {0}; + pack.Set(index); + pack.Set(linear_id); + pack.Set(0); + return pack.Get(); + } + + class Entry { + private: + union { + struct { + u16 linear_id; + u16 type; + } info; + Entry *next_free_entry; + } meta; + KAutoObject *object; + public: + constexpr Entry() : meta(), object(nullptr) { /* ... */ } + + constexpr ALWAYS_INLINE void SetFree(Entry *next) { + this->object = nullptr; + this->meta.next_free_entry = next; + } + + constexpr ALWAYS_INLINE void SetUsed(KAutoObject *obj, u16 linear_id, u16 type) { + this->object = obj; + this->meta.info = { linear_id, type }; + } + + constexpr ALWAYS_INLINE KAutoObject *GetObject() const { return this->object; } + constexpr ALWAYS_INLINE Entry *GetNextFreeEntry() const { return this->meta.next_free_entry; } + constexpr ALWAYS_INLINE u16 GetLinearId() const { return this->meta.info.linear_id; } + constexpr ALWAYS_INLINE u16 GetType() const { return this->meta.info.type; } + }; + private: + mutable KSpinLock lock; + Entry *table; + Entry *free_head; + Entry entries[MaxTableSize]; + u16 table_size; + u16 max_count; + u16 next_linear_id; + u16 count; + public: + constexpr KHandleTable() : + lock(), table(nullptr), free_head(nullptr), entries(), table_size(0), max_count(0), next_linear_id(MinLinearId), count(0) + { MESOSPHERE_ASSERT_THIS(); } + + constexpr NOINLINE Result Initialize(s32 size) { + MESOSPHERE_ASSERT_THIS(); + + R_UNLESS(size <= static_cast(MaxTableSize), svc::ResultOutOfMemory()); + + /* Initialize all fields. */ + this->table = this->entries; + this->table_size = (size <= 0) ? MaxTableSize : size; + this->next_linear_id = MinLinearId; + this->count = 0; + this->max_count = 0; + + /* Free all entries. */ + for (size_t i = 0; i < static_cast(this->table_size - 1); i++) { + this->entries[i].SetFree(std::addressof(this->entries[i + 1])); + } + this->entries[this->table_size - 1].SetFree(nullptr); + + this->free_head = std::addressof(this->entries[0]); + + return ResultSuccess(); + } + + constexpr ALWAYS_INLINE size_t GetTableSize() const { return this->table_size; } + constexpr ALWAYS_INLINE size_t GetCount() const { return this->count; } + constexpr ALWAYS_INLINE size_t GetMaxCount() const { return this->max_count; } + + NOINLINE Result Finalize(); + NOINLINE bool Remove(ams::svc::Handle handle); + + template + ALWAYS_INLINE KScopedAutoObject GetObject(ams::svc::Handle handle) const { + MESOSPHERE_ASSERT_THIS(); + + /* Handle pseudo-handles. 
*/ + if constexpr (std::is_same::value) { + if (handle == ams::svc::PseudoHandle::CurrentProcess) { + return GetCurrentProcessPointer(); + } + } else if constexpr (std::is_same::value) { + if (handle == ams::svc::PseudoHandle::CurrentThread) { + return GetCurrentThreadPointer(); + } + } + + /* Lock and look up in table. */ + KScopedDisableDispatch dd; + KScopedSpinLock lk(this->lock); + + if constexpr (std::is_same::value) { + return this->GetObjectImpl(handle); + } else { + return this->GetObjectImpl(handle)->DynamicCast(); + } + } + + template + ALWAYS_INLINE KScopedAutoObject GetObjectForIpc(ams::svc::Handle handle) const { + static_assert(!std::is_base_of::value); + + /* Handle pseudo-handles. */ + if constexpr (std::is_same::value) { + if (handle == ams::svc::PseudoHandle::CurrentProcess) { + return GetCurrentProcessPointer(); + } + } else if constexpr (std::is_same::value) { + if (handle == ams::svc::PseudoHandle::CurrentThread) { + return GetCurrentThreadPointer(); + } + } + + /* Lock and look up in table. */ + KScopedDisableDispatch dd; + KScopedSpinLock lk(this->lock); + + KAutoObject *obj = this->GetObjectImpl(handle); + if (obj->DynamicCast() != nullptr) { + return nullptr; + } + if constexpr (std::is_same::value) { + return obj; + } else { + return obj->DynamicCast(); + } + } + + ALWAYS_INLINE KScopedAutoObject GetObjectByIndex(ams::svc::Handle *out_handle, size_t index) const { + MESOSPHERE_ASSERT_THIS(); + KScopedDisableDispatch dd; + KScopedSpinLock lk(this->lock); + + return this->GetObjectByIndexImpl(out_handle, index); + } + + NOINLINE Result Reserve(ams::svc::Handle *out_handle); + NOINLINE void Unreserve(ams::svc::Handle handle); + + template + ALWAYS_INLINE Result Add(ams::svc::Handle *out_handle, T *obj) { + static_assert(std::is_base_of::value); + return this->Add(out_handle, obj, obj->GetTypeObj().GetClassToken()); + } + + template + ALWAYS_INLINE void Register(ams::svc::Handle handle, T *obj) { + static_assert(std::is_base_of::value); + return this->Add(handle, obj, obj->GetTypeObj().GetClassToken()); + } + private: + NOINLINE Result Add(ams::svc::Handle *out_handle, KAutoObject *obj, u16 type); + NOINLINE void Register(ams::svc::Handle handle, KAutoObject *obj, u16 type); + + constexpr ALWAYS_INLINE Entry *AllocateEntry() { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(this->count < this->table_size); + + Entry *entry = this->free_head; + this->free_head = entry->GetNextFreeEntry(); + + this->count++; + this->max_count = std::max(this->max_count, this->count); + + return entry; + } + + constexpr ALWAYS_INLINE void FreeEntry(Entry *entry) { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(this->count > 0); + + entry->SetFree(this->free_head); + this->free_head = entry; + + this->count--; + } + + constexpr ALWAYS_INLINE u16 AllocateLinearId() { + const u16 id = this->next_linear_id++; + if (this->next_linear_id > MaxLinearId) { + this->next_linear_id = MinLinearId; + } + return id; + } + + constexpr ALWAYS_INLINE size_t GetEntryIndex(Entry *entry) { + const size_t index = entry - this->table; + MESOSPHERE_ASSERT(index < this->table_size); + return index; + } + + constexpr ALWAYS_INLINE Entry *FindEntry(ams::svc::Handle handle) const { + MESOSPHERE_ASSERT_THIS(); + + /* Unpack the handle. 
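Per the Field definitions above, a handle packs the table index into bits 0-14, the entry's linear id into bits 15-29, and leaves bits 30-31 reserved as zero; for example, EncodeHandle(5, 1) yields 5 | (1 << 15) = 0x00008005. Because MinLinearId is 1, every live handle carries a nonzero linear id, which is why a zero linear id (and the all-zero handle) is rejected below.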
*/ + const auto handle_pack = GetHandleBitPack(handle); + const auto raw_value = handle_pack.Get(); + const auto index = handle_pack.Get(); + const auto linear_id = handle_pack.Get(); + const auto reserved = handle_pack.Get(); + MESOSPHERE_ASSERT(reserved == 0); + + /* Validate our indexing information. */ + if (raw_value == 0) { + return nullptr; + } + if (linear_id == 0) { + return nullptr; + } + if (index >= this->table_size) { + return nullptr; + } + + /* Get the entry, and ensure our serial id is correct. */ + Entry *entry = std::addressof(this->table[index]); + if (entry->GetObject() == nullptr) { + return nullptr; + } + if (entry->GetLinearId() != linear_id) { + return nullptr; + } + + return entry; + } + + constexpr NOINLINE KAutoObject *GetObjectImpl(ams::svc::Handle handle) const { + MESOSPHERE_ASSERT_THIS(); + + /* Handles must not have reserved bits set. */ + if (GetHandleBitPack(handle).Get() != 0) { + return nullptr; + } + + if (Entry *entry = this->FindEntry(handle); entry != nullptr) { + return entry->GetObject(); + } else { + return nullptr; + } + } + + constexpr NOINLINE KAutoObject *GetObjectByIndexImpl(ams::svc::Handle *out_handle, size_t index) const { + MESOSPHERE_ASSERT_THIS(); + + /* Index must be in bounds. */ + if (index >= this->table_size || this->table == nullptr) { + return nullptr; + } + + /* Ensure entry has an object. */ + Entry *entry = std::addressof(this->table[index]); + if (entry->GetObject() == nullptr) { + return nullptr; + } + + *out_handle = EncodeHandle(index, entry->GetLinearId()); + return entry->GetObject(); + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_hardware_timer_base.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_hardware_timer_base.hpp new file mode 100644 index 000000000..c7f29758e --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_hardware_timer_base.hpp @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KHardwareTimerBase { + private: + using TimerTaskTree = util::IntrusiveRedBlackTreeBaseTraits::TreeType; + private: + KSpinLock lock; + TimerTaskTree task_tree; + KTimerTask *next_task; + public: + constexpr ALWAYS_INLINE KHardwareTimerBase() : lock(), task_tree(), next_task(nullptr) { /* ... */ } + private: + ALWAYS_INLINE void RemoveTaskFromTree(KTimerTask *task) { + /* Erase from the tree. */ + auto it = this->task_tree.erase(this->task_tree.iterator_to(*task)); + + /* Clear the task's scheduled time. */ + task->SetTime(0); + + /* Update our next task if relevant. */ + if (this->next_task == task) { + this->next_task = (it != this->task_tree.end()) ? 
std::addressof(*it) : nullptr; + } + } + public: + NOINLINE void CancelTask(KTimerTask *task) { + KScopedDisableDispatch dd; + KScopedSpinLock lk(this->lock); + + if (const s64 task_time = task->GetTime(); task_time > 0) { + this->RemoveTaskFromTree(task); + } + } + protected: + ALWAYS_INLINE KSpinLock &GetLock() { return this->lock; } + + ALWAYS_INLINE s64 DoInterruptTaskImpl(s64 cur_time) { + /* We want to handle all tasks, returning the next time that a task is scheduled. */ + while (true) { + /* Get the next task. If there isn't one, return 0. */ + KTimerTask *task = this->next_task; + if (task == nullptr) { + return 0; + } + + /* If the task needs to be done in the future, do it in the future and not now. */ + if (const s64 task_time = task->GetTime(); task_time > cur_time) { + return task_time; + } + + /* Remove the task from the tree of tasks, and update our next task. */ + this->RemoveTaskFromTree(task); + + /* Handle the task. */ + task->OnTimer(); + } + } + + ALWAYS_INLINE bool RegisterAbsoluteTaskImpl(KTimerTask *task, s64 task_time) { + MESOSPHERE_ASSERT(task_time > 0); + + /* Set the task's time, and insert it into our tree. */ + task->SetTime(task_time); + this->task_tree.insert(*task); + + /* Update our next task if relevant. */ + if (this->next_task != nullptr && this->next_task->GetTime() <= task_time) { + return false; + } + this->next_task = task; + return true; + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_initial_process_reader.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_initial_process_reader.hpp new file mode 100644 index 000000000..bde2dddb3 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_initial_process_reader.hpp @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KInitialProcessHeader { + private: + static constexpr u32 Magic = util::FourCC<'K','I','P','1'>::Code; + private: + u32 magic; + u8 name[12]; + u64 program_id; + u32 version; + u8 priority; + u8 ideal_core_id; + u8 _1E; + u8 flags; + u32 rx_address; + u32 rx_size; + u32 rx_compressed_size; + u32 affinity_mask; + u32 ro_address; + u32 ro_size; + u32 ro_compressed_size; + u32 stack_size; + u32 rw_address; + u32 rw_size; + u32 rw_compressed_size; + u32 _4C; + u32 bss_address; + u32 bss_size; + u32 pad[(0x80 - 0x58) / sizeof(u32)]; + u32 capabilities[0x80 / sizeof(u32)]; + public: + constexpr bool IsValid() const { return this->magic == Magic; } + + constexpr void GetName(char *dst, size_t size) const { + std::memset(dst, 0, size); + std::memcpy(dst, this->name, std::min(sizeof(this->name), size)); + } + + constexpr const u32 *GetCapabilities() const { return this->capabilities; } + constexpr size_t GetNumCapabilities() const { return util::size(this->capabilities); } + + constexpr u64 GetProgramId() const { return this->program_id; } + constexpr u32 GetVersion() const { return this->version; } + constexpr u8 GetPriority() const { return this->priority; } + constexpr u8 GetIdealCoreId() const { return this->ideal_core_id; } + + constexpr bool IsRxCompressed() const { return (this->flags & (1 << 0)); } + constexpr bool IsRoCompressed() const { return (this->flags & (1 << 1)); } + constexpr bool IsRwCompressed() const { return (this->flags & (1 << 2)); } + constexpr bool Is64Bit() const { return (this->flags & (1 << 3)); } + constexpr bool Is64BitAddressSpace() const { return (this->flags & (1 << 4)); } + constexpr bool UsesSecureMemory() const { return (this->flags & (1 << 5)); } + + constexpr u32 GetRxAddress() const { return this->rx_address; } + constexpr u32 GetRxSize() const { return this->rx_size; } + constexpr u32 GetRxCompressedSize() const { return this->rx_compressed_size; } + constexpr u32 GetRoAddress() const { return this->ro_address; } + constexpr u32 GetRoSize() const { return this->ro_size; } + constexpr u32 GetRoCompressedSize() const { return this->ro_compressed_size; } + constexpr u32 GetRwAddress() const { return this->rw_address; } + constexpr u32 GetRwSize() const { return this->rw_size; } + constexpr u32 GetRwCompressedSize() const { return this->rw_compressed_size; } + constexpr u32 GetBssAddress() const { return this->bss_address; } + constexpr u32 GetBssSize() const { return this->bss_size; } + + constexpr u32 GetAffinityMask() const { return this->affinity_mask; } + constexpr u32 GetStackSize() const { return this->stack_size; } + }; + static_assert(sizeof(KInitialProcessHeader) == 0x100); + + class KInitialProcessReader { + private: + KInitialProcessHeader *kip_header; + public: + constexpr KInitialProcessReader() : kip_header() { /* ... 
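Note: the reader simply points at a KIP1 image in place. The 0x100-byte header is followed directly by the compressed rx/ro/rw segments, so GetBinarySize() below is the header size plus the three compressed sizes, while GetSize() gives the in-memory extent, ending at the bss end when a bss is present; e.g., with hypothetical values rw_address = 0x30000, rw_size = 0x2000, bss_address = 0x32000, bss_size = 0x1000, GetSize() returns 0x33000.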
*/ } + + constexpr const u32 *GetCapabilities() const { return this->kip_header->GetCapabilities(); } + constexpr size_t GetNumCapabilities() const { return this->kip_header->GetNumCapabilities(); } + + constexpr size_t GetBinarySize() const { + return sizeof(*kip_header) + this->kip_header->GetRxCompressedSize() + this->kip_header->GetRoCompressedSize() + this->kip_header->GetRwCompressedSize(); + } + + constexpr size_t GetSize() const { + if (const size_t bss_size = this->kip_header->GetBssSize(); bss_size != 0) { + return this->kip_header->GetBssAddress() + this->kip_header->GetBssSize(); + } else { + return this->kip_header->GetRwAddress() + this->kip_header->GetRwSize(); + } + } + + constexpr u8 GetPriority() const { return this->kip_header->GetPriority(); } + constexpr u8 GetIdealCoreId() const { return this->kip_header->GetIdealCoreId(); } + constexpr u32 GetAffinityMask() const { return this->kip_header->GetAffinityMask(); } + constexpr u32 GetStackSize() const { return this->kip_header->GetStackSize(); } + + constexpr bool Is64Bit() const { return this->kip_header->Is64Bit(); } + constexpr bool Is64BitAddressSpace() const { return this->kip_header->Is64BitAddressSpace(); } + constexpr bool UsesSecureMemory() const { return this->kip_header->UsesSecureMemory(); } + + bool Attach(u8 *bin) { + if (KInitialProcessHeader *header = reinterpret_cast<KInitialProcessHeader *>(bin); header->IsValid()) { + this->kip_header = header; + return true; + } else { + return false; + } + } + + Result MakeCreateProcessParameter(ams::svc::CreateProcessParameter *out, bool enable_aslr) const; + Result Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params) const; + Result SetMemoryPermissions(KProcessPageTable &page_table, const ams::svc::CreateProcessParameter &params) const; + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_event.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_event.hpp new file mode 100644 index 000000000..15f102087 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_event.hpp @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#pragma once +#include +#include +#include +#include +#include + +namespace ams::kern { + + class KInterruptEventTask; + + class KInterruptEvent final : public KAutoObjectWithSlabHeapAndContainer<KInterruptEvent, KReadableEvent> { + MESOSPHERE_AUTOOBJECT_TRAITS(KInterruptEvent, KReadableEvent); + public: + /* TODO: This is a placeholder definition. */ + }; + + class KInterruptEventTask : public KSlabAllocated<KInterruptEventTask>, public KInterruptTask { + public: + /* TODO: This is a placeholder definition. 
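When implemented, the event half is the user-visible KReadableEvent bound to an interrupt line, while this slab-allocated task half is what the interrupt layer queues: OnInterrupt runs in interrupt context, and the returned task's DoTask later signals the event from the interrupt task manager's thread.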
*/ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task.hpp new file mode 100644 index 000000000..01b0fae6b --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task.hpp @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once + +namespace ams::kern { + + class KInterruptTask; + + class KInterruptHandler { + public: + virtual KInterruptTask *OnInterrupt(s32 interrupt_id) = 0; + }; + + class KInterruptTask : public KInterruptHandler { + private: + KInterruptTask *next_task; + public: + constexpr ALWAYS_INLINE KInterruptTask() : next_task(nullptr) { /* ... */ } + + constexpr ALWAYS_INLINE KInterruptTask *GetNextTask() const { + return this->next_task; + } + + constexpr ALWAYS_INLINE void SetNextTask(KInterruptTask *t) { + this->next_task = t; + } + + virtual void DoTask() = 0; + }; + + static ALWAYS_INLINE KInterruptTask *GetDummyInterruptTask() { + return reinterpret_cast(1); + } + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp new file mode 100644 index 000000000..eaeec87d7 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::kern { + + class KThread; + + class KInterruptTaskManager { + private: + class TaskQueue { + private: + KInterruptTask *head; + KInterruptTask *tail; + public: + constexpr TaskQueue() : head(nullptr), tail(nullptr) { /* ... */ } + + constexpr KInterruptTask *GetHead() { return this->head; } + constexpr bool IsEmpty() const { return this->head == nullptr; } + constexpr void Clear() { this->head = nullptr; this->tail = nullptr; } + + void Enqueue(KInterruptTask *task); + void Dequeue(); + }; + private: + TaskQueue task_queue; + KThread *thread; + private: + static void ThreadFunction(uintptr_t arg); + void ThreadFunctionImpl(); + public: + constexpr KInterruptTaskManager() : task_queue(), thread(nullptr) { /* ... */ } + + constexpr KThread *GetThread() const { return this->thread; } + + NOINLINE void Initialize(); + void EnqueueTask(KInterruptTask *task); + + /* TODO: Actually implement KInterruptTaskManager. 
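(One detail already visible above: a handler that wants the interrupt acknowledged without deferring real work can return the sentinel from GetDummyInterruptTask(), a non-null placeholder pointer with address 1 that the manager must recognize and skip rather than dereference.)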
This is a placeholder. */ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_light_condition_variable.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_light_condition_variable.hpp new file mode 100644 index 000000000..a05e387a3 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_light_condition_variable.hpp @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include +#include +#include + +namespace ams::kern { + + class KLightConditionVariable { + private: + KThreadQueue thread_queue; + public: + constexpr ALWAYS_INLINE KLightConditionVariable() : thread_queue() { /* ... */ } + private: + void WaitImpl(KLightLock *lock, s64 timeout) { + KThread *owner = GetCurrentThreadPointer(); + KHardwareTimer *timer; + + /* Sleep the thread. */ + { + KScopedSchedulerLockAndSleep lk(&timer, owner, timeout); + lock->Unlock(); + + if (!this->thread_queue.SleepThread(owner)) { + lk.CancelSleep(); + return; + } + } + + /* Cancel the task that the sleep setup. */ + if (timer != nullptr) { + timer->CancelTask(owner); + } + } + public: + void Wait(KLightLock *lock, s64 timeout = -1ll) { + this->WaitImpl(lock, timeout); + lock->Lock(); + } + + void Broadcast() { + KScopedSchedulerLock lk; + while (this->thread_queue.WakeupFrontThread() != nullptr) { + /* We want to signal all threads, and so should continue waking up until there's nothing to wake. */ + } + } + + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp new file mode 100644 index 000000000..0c0f601a7 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include +#include + +namespace ams::kern { + + class KLightLock { + private: + std::atomic tag; + public: + constexpr KLightLock() : tag(0) { /* ... */ } + + void Lock() { + MESOSPHERE_ASSERT_THIS(); + + const uintptr_t cur_thread = reinterpret_cast(GetCurrentThreadPointer()); + + while (true) { + uintptr_t old_tag = this->tag.load(std::memory_order_relaxed); + + while (!this->tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1, std::memory_order_acquire)) { + /* ... 
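The tag word encodes the entire lock state: 0 means free, an owning KThread pointer means held, and the low bit marks contention (KThread objects are aligned, so pointer bit 0 is always available). The compare-exchange above either claims a free lock or sets the contended bit on the current owner; if it observed a free lock we now own it, if it observed us as owner (possible after the slow path hands the lock over) we are done, and otherwise we fall through to LockSlowPath. Unlock's failed owner-to-0 compare-exchange likewise diverts to UnlockSlowPath to wake waiters.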
*/ + } + + if ((old_tag == 0) || ((old_tag | 1) == (cur_thread | 1))) { + break; + } + + this->LockSlowPath(old_tag | 1, cur_thread); + } + } + + void Unlock() { + MESOSPHERE_ASSERT_THIS(); + + const uintptr_t cur_thread = reinterpret_cast(GetCurrentThreadPointer()); + uintptr_t expected = cur_thread; + if (!this->tag.compare_exchange_weak(expected, 0, std::memory_order_release)) { + this->UnlockSlowPath(cur_thread); + } + } + + void LockSlowPath(uintptr_t owner, uintptr_t cur_thread); + void UnlockSlowPath(uintptr_t cur_thread); + + bool IsLocked() const { return this->tag != 0; } + bool IsLockedByCurrentThread() const { return (this->tag | 0x1ul) == (reinterpret_cast(GetCurrentThreadPointer()) | 0x1ul); } + }; + + using KScopedLightLock = KScopedLock; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_light_session.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_light_session.hpp new file mode 100644 index 000000000..5edd88999 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_light_session.hpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KLightSession final : public KAutoObjectWithSlabHeapAndContainer { + MESOSPHERE_AUTOOBJECT_TRAITS(KLightSession, KAutoObject); + public: + /* TODO: This is a placeholder definition. */ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_linked_list.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_linked_list.hpp new file mode 100644 index 000000000..478008056 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_linked_list.hpp @@ -0,0 +1,226 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KLinkedListNode : public util::IntrusiveListBaseNode, public KSlabAllocated { + private: + void *item; + public: + constexpr KLinkedListNode() : util::IntrusiveListBaseNode(), item(nullptr) { MESOSPHERE_ASSERT_THIS(); } + + constexpr void Initialize(void *it) { + MESOSPHERE_ASSERT_THIS(); + this->item = it; + } + + constexpr void *GetItem() const { + return this->item; + } + }; + static_assert(sizeof(KLinkedListNode) == sizeof(util::IntrusiveListNode) + sizeof(void *)); + + template + class KLinkedList : private util::IntrusiveListBaseTraits::ListType { + private: + using BaseList = util::IntrusiveListBaseTraits::ListType; + public: + template + class Iterator; + + using value_type = T; + using size_type = size_t; + using difference_type = ptrdiff_t; + using pointer = value_type *; + using const_pointer = const value_type *; + using reference = value_type &; + using const_reference = const value_type &; + using iterator = Iterator; + using const_iterator = Iterator; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + template + class Iterator { + private: + using BaseIterator = BaseList::Iterator; + friend class KLinkedList; + public: + using iterator_category = std::bidirectional_iterator_tag; + using value_type = typename KLinkedList::value_type; + using difference_type = typename KLinkedList::difference_type; + using pointer = typename std::conditional::type; + using reference = typename std::conditional::type; + private: + BaseIterator base_it; + public: + explicit Iterator(BaseIterator it) : base_it(it) { /* ... */ } + + pointer GetItem() const { + return static_cast(this->base_it->GetItem()); + } + + bool operator==(const Iterator &rhs) const { + return this->base_it == rhs.base_it; + } + + bool operator!=(const Iterator &rhs) const { + return !(*this == rhs); + } + + pointer operator->() const { + return this->GetItem(); + } + + reference operator*() const { + return *this->GetItem(); + } + + Iterator &operator++() { + ++this->base_it; + return *this; + } + + Iterator &operator--() { + --this->base_it; + return *this; + } + + Iterator operator++(int) { + const Iterator it{*this}; + ++(*this); + return it; + } + + Iterator operator--(int) { + const Iterator it{*this}; + --(*this); + return it; + } + + operator Iterator() const { + return Iterator(this->base_it); + } + }; + public: + constexpr KLinkedList() : BaseList() { /* ... */ } + + /* Iterator accessors. */ + iterator begin() { + return iterator(BaseList::begin()); + } + + const_iterator begin() const { + return const_iterator(BaseList::begin()); + } + + iterator end() { + return iterator(BaseList::end()); + } + + const_iterator end() const { + return const_iterator(BaseList::end()); + } + + const_iterator cbegin() const { + return this->begin(); + } + + const_iterator cend() const { + return this->end(); + } + + reverse_iterator rbegin() { + return reverse_iterator(this->end()); + } + + const_reverse_iterator rbegin() const { + return const_reverse_iterator(this->end()); + } + + reverse_iterator rend() { + return reverse_iterator(this->begin()); + } + + const_reverse_iterator rend() const { + return const_reverse_iterator(this->begin()); + } + + const_reverse_iterator crbegin() const { + return this->rbegin(); + } + + const_reverse_iterator crend() const { + return this->rend(); + } + + /* Content management. 
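Unlike the intrusive lists used elsewhere in the kernel, KLinkedList owns a slab-allocated KLinkedListNode per element, so the stored type needs no embedded list hook; the trade-off is that insert() below must allocate (aborting via MESOSPHERE_ABORT_UNLESS if the node slab is exhausted) and erase() must free the node.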
*/ + using BaseList::empty; + using BaseList::size; + + reference back() { + return *(--this->end()); + } + + const_reference back() const { + return *(--this->end()); + } + + reference front() { + return *this->begin(); + } + + const_reference front() const { + return *this->begin(); + } + + iterator insert(const_iterator pos, reference ref) { + KLinkedListNode *node = KLinkedListNode::Allocate(); + MESOSPHERE_ABORT_UNLESS(node != nullptr); + node->Initialize(std::addressof(ref)); + return iterator(BaseList::insert(pos.base_it, *node)); + } + + void push_back(reference ref) { + this->insert(this->end(), ref); + } + + void push_front(reference ref) { + this->insert(this->begin(), ref); + } + + void pop_back() { + this->erase(--this->end()); + } + + void pop_front() { + this->erase(this->begin()); + } + + iterator erase(const iterator pos) { + KLinkedListNode *freed_node = std::addressof(*pos.base_it); + iterator ret = iterator(BaseList::erase(pos.base_it)); + KLinkedListNode::Free(freed_node); + + return ret; + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp new file mode 100644 index 000000000..7162cdb81 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp @@ -0,0 +1,360 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + enum KMemoryState : u32 { + KMemoryState_None = 0, + KMemoryState_Mask = 0xFF, + KMemoryState_All = ~KMemoryState_None, + + KMemoryState_FlagCanReprotect = (1 << 8), + KMemoryState_FlagCanDebug = (1 << 9), + KMemoryState_FlagCanUseIpc = (1 << 10), + KMemoryState_FlagCanUseNonDeviceIpc = (1 << 11), + KMemoryState_FlagCanUseNonSecureIpc = (1 << 12), + KMemoryState_FlagMapped = (1 << 13), + KMemoryState_FlagCode = (1 << 14), + KMemoryState_FlagCanAlias = (1 << 15), + KMemoryState_FlagCanCodeAlias = (1 << 16), + KMemoryState_FlagCanTransfer = (1 << 17), + KMemoryState_FlagCanQueryPhysical = (1 << 18), + KMemoryState_FlagCanDeviceMap = (1 << 19), + KMemoryState_FlagCanAlignedDeviceMap = (1 << 20), + KMemoryState_FlagCanIpcUserBuffer = (1 << 21), + KMemoryState_FlagReferenceCounted = (1 << 22), + KMemoryState_FlagCanMapProcess = (1 << 23), + KMemoryState_FlagCanChangeAttribute = (1 << 24), + KMemoryState_FlagCanCodeMemory = (1 << 25), + + KMemoryState_FlagsData = KMemoryState_FlagCanReprotect | KMemoryState_FlagCanUseIpc | + KMemoryState_FlagCanUseNonDeviceIpc | KMemoryState_FlagCanUseNonSecureIpc | + KMemoryState_FlagMapped | KMemoryState_FlagCanAlias | + KMemoryState_FlagCanTransfer | KMemoryState_FlagCanQueryPhysical | + KMemoryState_FlagCanDeviceMap | KMemoryState_FlagCanAlignedDeviceMap | + KMemoryState_FlagCanIpcUserBuffer | KMemoryState_FlagReferenceCounted | + KMemoryState_FlagCanChangeAttribute, + + KMemoryState_FlagsCode = KMemoryState_FlagCanDebug | KMemoryState_FlagCanUseIpc | + KMemoryState_FlagCanUseNonDeviceIpc | KMemoryState_FlagCanUseNonSecureIpc | + KMemoryState_FlagMapped | KMemoryState_FlagCode | + KMemoryState_FlagCanQueryPhysical | KMemoryState_FlagCanDeviceMap | + KMemoryState_FlagCanAlignedDeviceMap | KMemoryState_FlagReferenceCounted, + + KMemoryState_FlagsMisc = KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | + KMemoryState_FlagCanQueryPhysical | KMemoryState_FlagCanDeviceMap, + + + KMemoryState_Free = ams::svc::MemoryState_Free, + KMemoryState_Io = ams::svc::MemoryState_Io | KMemoryState_FlagMapped, + KMemoryState_Static = ams::svc::MemoryState_Static | KMemoryState_FlagMapped | KMemoryState_FlagCanQueryPhysical, + KMemoryState_Code = ams::svc::MemoryState_Code | KMemoryState_FlagsCode | KMemoryState_FlagCanMapProcess, + KMemoryState_CodeData = ams::svc::MemoryState_CodeData | KMemoryState_FlagsData | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeMemory, + KMemoryState_Normal = ams::svc::MemoryState_Normal | KMemoryState_FlagsData | KMemoryState_FlagCanCodeMemory, + KMemoryState_Shared = ams::svc::MemoryState_Shared | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted, + + /* KMemoryState_Alias was removed after 1.0.0. 
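Each KMemoryState keeps the svc-visible state id in its low byte (KMemoryState_Mask) and ORs capability flags into the upper bits; e.g., KMemoryState_Io == 0x00002001 decomposes as ams::svc::MemoryState_Io (0x01) | KMemoryState_FlagMapped (1 << 13), matching the static_asserts below. The removal of Alias is why the id sequence skips from Shared (0x06) to AliasCode (0x08).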
*/ + + KMemoryState_AliasCode = ams::svc::MemoryState_AliasCode | KMemoryState_FlagsCode | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeAlias, + KMemoryState_AliasCodeData = ams::svc::MemoryState_AliasCodeData | KMemoryState_FlagsData | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeAlias | KMemoryState_FlagCanCodeMemory, + + + KMemoryState_Ipc = ams::svc::MemoryState_Ipc | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap + | KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc, + + KMemoryState_Stack = ams::svc::MemoryState_Stack | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap + | KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc, + + KMemoryState_ThreadLocal = ams::svc::MemoryState_ThreadLocal | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted, + + KMemoryState_Transfered = ams::svc::MemoryState_Transfered | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap | KMemoryState_FlagCanChangeAttribute + | KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc, + + KMemoryState_SharedTransfered = ams::svc::MemoryState_SharedTransfered | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap + | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc, + + KMemoryState_SharedCode = ams::svc::MemoryState_SharedCode | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted + | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc, + + KMemoryState_Inaccessible = ams::svc::MemoryState_Inaccessible, + + KMemoryState_NonSecureIpc = ams::svc::MemoryState_NonSecureIpc | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap + | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc, + + KMemoryState_NonDeviceIpc = ams::svc::MemoryState_NonDeviceIpc | KMemoryState_FlagsMisc | KMemoryState_FlagCanUseNonDeviceIpc, + + + KMemoryState_Kernel = ams::svc::MemoryState_Kernel | KMemoryState_FlagMapped, + + KMemoryState_GeneratedCode = ams::svc::MemoryState_GeneratedCode | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagCanDebug, + KMemoryState_CodeOut = ams::svc::MemoryState_CodeOut | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted, + }; + +#if 1 + static_assert(KMemoryState_Free == 0x00000000); + static_assert(KMemoryState_Io == 0x00002001); + static_assert(KMemoryState_Static == 0x00042002); + static_assert(KMemoryState_Code == 0x00DC7E03); + static_assert(KMemoryState_CodeData == 0x03FEBD04); + static_assert(KMemoryState_Normal == 0x037EBD05); + static_assert(KMemoryState_Shared == 0x00402006); + + static_assert(KMemoryState_AliasCode == 0x00DD7E08); + static_assert(KMemoryState_AliasCodeData == 0x03FFBD09); + static_assert(KMemoryState_Ipc == 0x005C3C0A); + static_assert(KMemoryState_Stack == 0x005C3C0B); + static_assert(KMemoryState_ThreadLocal == 0x0040200C); + static_assert(KMemoryState_Transfered == 0x015C3C0D); + static_assert(KMemoryState_SharedTransfered == 0x005C380E); + static_assert(KMemoryState_SharedCode == 0x0040380F); + static_assert(KMemoryState_Inaccessible == 0x00000010); + static_assert(KMemoryState_NonSecureIpc == 0x005C3811); + static_assert(KMemoryState_NonDeviceIpc == 0x004C2812); + static_assert(KMemoryState_Kernel == 0x00002013); + static_assert(KMemoryState_GeneratedCode == 0x00402214); + static_assert(KMemoryState_CodeOut == 0x00402015); +#endif + 
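To see where the asserted constants come from, sum the flag enumerators above: KMemoryState_FlagsData works out to 0x017EBD00. A minimal sanity-check sketch (redundant with the definitions above, but it spells out the encoding):

    static_assert(KMemoryState_FlagsData == 0x017EBD00);
    static_assert(KMemoryState_Normal == (ams::svc::MemoryState_Normal | KMemoryState_FlagsData | KMemoryState_FlagCanCodeMemory)); /* 0x05 | 0x017EBD00 | (1 << 25) == 0x037EBD05 */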
+ enum KMemoryPermission : u8 { + KMemoryPermission_None = 0, + KMemoryPermission_All = static_cast(~KMemoryPermission_None), + + KMemoryPermission_KernelShift = 3, + + KMemoryPermission_KernelRead = ams::svc::MemoryPermission_Read << KMemoryPermission_KernelShift, + KMemoryPermission_KernelWrite = ams::svc::MemoryPermission_Write << KMemoryPermission_KernelShift, + KMemoryPermission_KernelExecute = ams::svc::MemoryPermission_Execute << KMemoryPermission_KernelShift, + + KMemoryPermission_KernelReadWrite = KMemoryPermission_KernelRead | KMemoryPermission_KernelWrite, + KMemoryPermission_KernelReadExecute = KMemoryPermission_KernelRead | KMemoryPermission_KernelExecute, + + KMemoryPermission_UserRead = ams::svc::MemoryPermission_Read | KMemoryPermission_KernelRead, + KMemoryPermission_UserWrite = ams::svc::MemoryPermission_Write | KMemoryPermission_KernelWrite, + KMemoryPermission_UserExecute = ams::svc::MemoryPermission_Execute, + + KMemoryPermission_UserReadWrite = KMemoryPermission_UserRead | KMemoryPermission_UserWrite, + KMemoryPermission_UserReadExecute = KMemoryPermission_UserRead | KMemoryPermission_UserExecute, + + KMemoryPermission_UserMask = ams::svc::MemoryPermission_Read | ams::svc::MemoryPermission_Write | ams::svc::MemoryPermission_Execute, + }; + + constexpr KMemoryPermission ConvertToKMemoryPermission(ams::svc::MemoryPermission perm) { + return static_cast((perm & KMemoryPermission_UserMask) | KMemoryPermission_KernelRead | ((perm & KMemoryPermission_UserWrite) << KMemoryPermission_KernelShift)); + } + + enum KMemoryAttribute : u8 { + KMemoryAttribute_None = 0x00, + KMemoryAttribute_Mask = 0x7F, + KMemoryAttribute_All = KMemoryAttribute_Mask, + KMemoryAttribute_DontCareMask = 0x80, + + KMemoryAttribute_Locked = ams::svc::MemoryAttribute_Locked, + KMemoryAttribute_IpcLocked = ams::svc::MemoryAttribute_IpcLocked, + KMemoryAttribute_DeviceShared = ams::svc::MemoryAttribute_DeviceShared, + KMemoryAttribute_Uncached = ams::svc::MemoryAttribute_Uncached, + }; + + static_assert((KMemoryAttribute_Mask & KMemoryAttribute_DontCareMask) == 0); + static_assert(static_cast::type>(~(KMemoryAttribute_Mask | KMemoryAttribute_DontCareMask)) == 0); + + struct KMemoryInfo { + uintptr_t address; + size_t size; + KMemoryState state; + KMemoryPermission perm; + KMemoryAttribute attribute; + KMemoryPermission original_perm; + u16 ipc_lock_count; + u16 device_use_count; + + constexpr ams::svc::MemoryInfo GetSvcMemoryInfo() const { + return { + .addr = this->address, + .size = this->size, + .state = static_cast(this->state & KMemoryState_Mask), + .attr = static_cast(this->attribute & KMemoryAttribute_Mask), + .perm = static_cast(this->perm & KMemoryPermission_UserMask), + .ipc_refcount = this->ipc_lock_count, + .device_refcount = this->device_use_count, + }; + } + + constexpr uintptr_t GetAddress() const { + return this->address; + } + + constexpr size_t GetSize() const { + return this->size; + } + + constexpr size_t GetNumPages() const { + return this->GetSize() / PageSize; + } + + constexpr uintptr_t GetEndAddress() const { + return this->GetAddress() + this->GetSize(); + } + + constexpr uintptr_t GetLastAddress() const { + return this->GetEndAddress() - 1; + } + }; + + class KMemoryBlock : public util::IntrusiveRedBlackTreeBaseNode { + private: + KProcessAddress address; + size_t num_pages; + KMemoryState memory_state; + u16 ipc_lock_count; + u16 device_use_count; + KMemoryPermission perm; + KMemoryPermission original_perm; + KMemoryAttribute attribute; + public: + static constexpr ALWAYS_INLINE 
int Compare(const KMemoryBlock &lhs, const KMemoryBlock &rhs) { + if (lhs.GetAddress() < rhs.GetAddress()) { + return -1; + } else if (lhs.GetAddress() <= rhs.GetLastAddress()) { + return 0; + } else { + return 1; + } + } + public: + constexpr KProcessAddress GetAddress() const { + return this->address; + } + + constexpr size_t GetNumPages() const { + return this->num_pages; + } + + constexpr size_t GetSize() const { + return this->GetNumPages() * PageSize; + } + + constexpr KProcessAddress GetEndAddress() const { + return this->GetAddress() + this->GetSize(); + } + + constexpr KProcessAddress GetLastAddress() const { + return this->GetEndAddress() - 1; + } + + constexpr KMemoryInfo GetMemoryInfo() const { + return { + .address = GetInteger(this->GetAddress()), + .size = this->GetSize(), + .state = this->memory_state, + .perm = this->perm, + .attribute = this->attribute, + .original_perm = this->original_perm, + .ipc_lock_count = this->ipc_lock_count, + .device_use_count = this->device_use_count, + }; + } + public: + constexpr KMemoryBlock() + : address(), num_pages(), memory_state(KMemoryState_None), ipc_lock_count(), device_use_count(), perm(), original_perm(), attribute() + { + /* ... */ + } + + constexpr KMemoryBlock(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr) + : address(addr), num_pages(np), memory_state(ms), ipc_lock_count(0), device_use_count(0), perm(p), original_perm(KMemoryPermission_None), attribute(attr) + { + /* ... */ + } + + constexpr void Initialize(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr) { + MESOSPHERE_ASSERT_THIS(); + this->address = addr; + this->num_pages = np; + this->memory_state = ms; + this->ipc_lock_count = 0; + this->device_use_count = 0; + this->perm = p; + this->original_perm = KMemoryPermission_None; + this->attribute = attr; + } + + constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const { + MESOSPHERE_ASSERT_THIS(); + constexpr auto AttributeIgnoreMask = KMemoryAttribute_DontCareMask | KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared; + return this->memory_state == s && this->perm == p && (this->attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask); + } + + constexpr bool HasSameProperties(const KMemoryBlock &rhs) const { + MESOSPHERE_ASSERT_THIS(); + + return this->memory_state == rhs.memory_state && + this->perm == rhs.perm && + this->original_perm == rhs.original_perm && + this->attribute == rhs.attribute && + this->ipc_lock_count == rhs.ipc_lock_count && + this->device_use_count == rhs.device_use_count; + } + + constexpr bool Contains(KProcessAddress addr) const { + MESOSPHERE_ASSERT_THIS(); + + return this->GetAddress() <= addr && addr <= this->GetEndAddress(); + } + + constexpr void Add(size_t np) { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(np > 0); + MESOSPHERE_ASSERT(this->GetAddress() + np * PageSize - 1 < this->GetEndAddress() + np * PageSize - 1); + + this->num_pages += np; + } + + constexpr void Update(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(this->original_perm == KMemoryPermission_None); + MESOSPHERE_ASSERT((this->attribute & KMemoryAttribute_IpcLocked) == 0); + + this->memory_state = s; + this->perm = p; + this->attribute = static_cast(a | (this->attribute & (KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared))); + } + + constexpr void Split(KMemoryBlock *block, KProcessAddress addr) { + 
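/* Split carves the front off of this block: *block receives [this->GetAddress(), addr) with identical properties, and this block shrinks to begin at addr. The caller passes a block preallocated by KMemoryBlockManagerUpdateAllocator, so the split itself cannot fail. */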
MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(this->GetAddress() < addr); + MESOSPHERE_ASSERT(this->Contains(addr)); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), PageSize)); + + block->address = this->address; + block->num_pages = (addr - this->GetAddress()) / PageSize; + block->memory_state = this->memory_state; + block->ipc_lock_count = this->ipc_lock_count; + block->device_use_count = this->device_use_count; + block->perm = this->perm; + block->original_perm = this->original_perm; + block->attribute = this->attribute; + + this->address = addr; + this->num_pages -= block->num_pages; + } + }; + static_assert(std::is_trivially_destructible::value); + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_block_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_block_manager.hpp new file mode 100644 index 000000000..ca24b2b99 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_block_manager.hpp @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KMemoryBlockManagerUpdateAllocator { + public: + static constexpr size_t NumBlocks = 2; + private: + KMemoryBlock *blocks[NumBlocks]; + size_t index; + KMemoryBlockSlabManager *slab_manager; + Result result; + public: + explicit KMemoryBlockManagerUpdateAllocator(KMemoryBlockSlabManager *sm) : blocks(), index(), slab_manager(sm), result(svc::ResultOutOfResource()) { + for (size_t i = 0; i < NumBlocks; i++) { + this->blocks[i] = this->slab_manager->Allocate(); + if (this->blocks[i] == nullptr) { + this->result = svc::ResultOutOfResource(); + return; + } + } + + this->result = ResultSuccess(); + } + + ~KMemoryBlockManagerUpdateAllocator() { + for (size_t i = 0; i < NumBlocks; i++) { + if (this->blocks[i] != nullptr) { + this->slab_manager->Free(this->blocks[i]); + } + } + } + + Result GetResult() const { + return this->result; + } + + KMemoryBlock *Allocate() { + MESOSPHERE_ABORT_UNLESS(this->index < NumBlocks); + MESOSPHERE_ABORT_UNLESS(this->blocks[this->index] != nullptr); + KMemoryBlock *block = nullptr; + std::swap(block, this->blocks[this->index++]); + return block; + } + + void Free(KMemoryBlock *block) { + MESOSPHERE_ABORT_UNLESS(this->index <= NumBlocks); + MESOSPHERE_ABORT_UNLESS(block != nullptr); + if (this->index == 0) { + this->slab_manager->Free(block); + } else { + this->blocks[--this->index] = block; + } + } + }; + + class KMemoryBlockManager { + public: + using MemoryBlockTree = util::IntrusiveRedBlackTreeBaseTraits::TreeType; + using iterator = MemoryBlockTree::iterator; + using const_iterator = MemoryBlockTree::const_iterator; + private: + MemoryBlockTree memory_block_tree; + KProcessAddress start_address; + KProcessAddress end_address; + public: + constexpr KMemoryBlockManager() : memory_block_tree(), start_address(), end_address() { /* ... 
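The manager tracks the address space as a red-black tree of non-overlapping blocks. Update() may have to split an existing block at both the start and the end of the target range, which is why KMemoryBlockManagerUpdateAllocator above preallocates exactly NumBlocks = 2 blocks and surfaces svc::ResultOutOfResource before any tree mutation begins.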
*/ } + + iterator end() { return this->memory_block_tree.end(); } + const_iterator end() const { return this->memory_block_tree.end(); } + const_iterator cend() const { return this->memory_block_tree.cend(); } + + Result Initialize(KProcessAddress st, KProcessAddress nd, KMemoryBlockSlabManager *slab_manager); + void Finalize(KMemoryBlockSlabManager *slab_manager); + + KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const; + + void Update(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr); + + iterator FindIterator(KProcessAddress address) const { + return this->memory_block_tree.find(KMemoryBlock(address, 1, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None)); + } + + const KMemoryBlock *FindBlock(KProcessAddress address) const { + if (const_iterator it = this->FindIterator(address); it != this->memory_block_tree.end()) { + return std::addressof(*it); + } + + return nullptr; + } + + /* Debug. */ + bool CheckState() const; + void DumpBlocks() const; + }; + + class KScopedMemoryBlockManagerAuditor { + private: + KMemoryBlockManager *manager; + public: + explicit ALWAYS_INLINE KScopedMemoryBlockManagerAuditor(KMemoryBlockManager *m) : manager(m) { MESOSPHERE_AUDIT(this->manager->CheckState()); } + explicit ALWAYS_INLINE KScopedMemoryBlockManagerAuditor(KMemoryBlockManager &m) : KScopedMemoryBlockManagerAuditor(std::addressof(m)) { /* ... */ } + ALWAYS_INLINE ~KScopedMemoryBlockManagerAuditor() { MESOSPHERE_AUDIT(this->manager->CheckState()); } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp new file mode 100644 index 000000000..22b593a42 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp @@ -0,0 +1,700 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include + +namespace ams::kern { + + constexpr size_t KernelAslrAlignment = 2_MB; + constexpr size_t KernelVirtualAddressSpaceWidth = size_t(1ul) << 39ul; + constexpr size_t KernelPhysicalAddressSpaceWidth = size_t(1ul) << 48ul; + + constexpr size_t KernelVirtualAddressSpaceBase = 0ul - KernelVirtualAddressSpaceWidth; + constexpr size_t KernelVirtualAddressSpaceEnd = KernelVirtualAddressSpaceBase + (KernelVirtualAddressSpaceWidth - KernelAslrAlignment); + constexpr size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1ul; + constexpr size_t KernelVirtualAddressSpaceSize = KernelVirtualAddressSpaceEnd - KernelVirtualAddressSpaceBase; + + constexpr size_t KernelPhysicalAddressSpaceBase = 0ul; + constexpr size_t KernelPhysicalAddressSpaceEnd = KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceWidth; + constexpr size_t KernelPhysicalAddressSpaceLast = KernelPhysicalAddressSpaceEnd - 1ul; + constexpr size_t KernelPhysicalAddressSpaceSize = KernelPhysicalAddressSpaceEnd - KernelPhysicalAddressSpaceBase; + + enum KMemoryRegionType : u32 { + KMemoryRegionAttr_CarveoutProtected = 0x04000000, + KMemoryRegionAttr_DidKernelMap = 0x08000000, + KMemoryRegionAttr_ShouldKernelMap = 0x10000000, + KMemoryRegionAttr_UserReadOnly = 0x20000000, + KMemoryRegionAttr_NoUserMap = 0x40000000, + KMemoryRegionAttr_LinearMapped = 0x80000000, + + KMemoryRegionType_None = 0, + KMemoryRegionType_Kernel = 1, + KMemoryRegionType_Dram = 2, + KMemoryRegionType_CoreLocal = 4, + + KMemoryRegionType_VirtualKernelPtHeap = 0x2A, + KMemoryRegionType_VirtualKernelTraceBuffer = 0x4A, + KMemoryRegionType_VirtualKernelInitPt = 0x19A, + + KMemoryRegionType_VirtualDramMetadataPool = 0x29A, + KMemoryRegionType_VirtualDramManagedPool = 0x31A, + KMemoryRegionType_VirtualDramApplicationPool = 0x271A, + KMemoryRegionType_VirtualDramAppletPool = 0x1B1A, + KMemoryRegionType_VirtualDramSystemPool = 0x2B1A, + KMemoryRegionType_VirtualDramSystemNonSecurePool = 0x331A, + + KMemoryRegionType_Uart = 0x1D, + KMemoryRegionType_InterruptDistributor = 0x4D | KMemoryRegionAttr_NoUserMap, + KMemoryRegionType_InterruptCpuInterface = 0x2D | KMemoryRegionAttr_NoUserMap, + + KMemoryRegionType_MemoryController = 0x55, + KMemoryRegionType_MemoryController0 = 0x95, + KMemoryRegionType_MemoryController1 = 0x65, + KMemoryRegionType_PowerManagementController = 0x1A5, + + KMemoryRegionType_KernelAutoMap = KMemoryRegionType_Kernel | KMemoryRegionAttr_ShouldKernelMap, + + KMemoryRegionType_KernelTemp = 0x31, + + KMemoryRegionType_KernelCode = 0x19, + KMemoryRegionType_KernelStack = 0x29, + KMemoryRegionType_KernelMisc = 0x49, + KMemoryRegionType_KernelSlab = 0x89, + + KMemoryRegionType_KernelMiscMainStack = 0xB49, + KMemoryRegionType_KernelMiscMappedDevice = 0xD49, + KMemoryRegionType_KernelMiscIdleStack = 0x1349, + KMemoryRegionType_KernelMiscUnknownDebug = 0x1549, + KMemoryRegionType_KernelMiscExceptionStack = 0x2349, + + KMemoryRegionType_DramLinearMapped = KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped, + + KMemoryRegionType_DramReservedEarly = 0x16 | KMemoryRegionAttr_NoUserMap, + KMemoryRegionType_DramPoolPartition = 0x26 | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped, + KMemoryRegionType_DramMetadataPool = 0x166 | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_CarveoutProtected, + + KMemoryRegionType_DramNonKernel = 0x1A6 | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped, + + KMemoryRegionType_DramApplicationPool = 0x7A6 | 
KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped, + KMemoryRegionType_DramAppletPool = 0xBA6 | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped, + KMemoryRegionType_DramSystemNonSecurePool = 0xDA6 | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped, + KMemoryRegionType_DramSystemPool = 0x13A6 | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_CarveoutProtected, + + + + KMemoryRegionType_DramKernel = 0xE | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected, + KMemoryRegionType_DramKernelCode = 0xCE | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected, + KMemoryRegionType_DramKernelSlab = 0x14E | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected, + KMemoryRegionType_DramKernelPtHeap = 0x24E | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_LinearMapped, + KMemoryRegionType_DramKernelInitPt = 0x44E | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_LinearMapped, + + /* These regions aren't normally mapped in retail kernel. */ + KMemoryRegionType_KernelTraceBuffer = 0xA6 | KMemoryRegionAttr_UserReadOnly | KMemoryRegionAttr_LinearMapped, + KMemoryRegionType_OnMemoryBootImage = 0x156, + KMemoryRegionType_DTB = 0x256, + }; + + constexpr ALWAYS_INLINE KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) { + if (type_id == (type_id | KMemoryRegionType_KernelTraceBuffer)) { + return KMemoryRegionType_VirtualKernelTraceBuffer; + } else if (type_id == (type_id | KMemoryRegionType_DramKernelPtHeap)) { + return KMemoryRegionType_VirtualKernelPtHeap; + } else { + return KMemoryRegionType_Dram; + } + } + + class KMemoryRegion : public util::IntrusiveRedBlackTreeBaseNode { + NON_COPYABLE(KMemoryRegion); + NON_MOVEABLE(KMemoryRegion); + private: + uintptr_t address; + uintptr_t pair_address; + size_t region_size; + u32 attributes; + u32 type_id; + public: + static constexpr ALWAYS_INLINE int Compare(const KMemoryRegion &lhs, const KMemoryRegion &rhs) { + if (lhs.GetAddress() < rhs.GetAddress()) { + return -1; + } else if (lhs.GetAddress() <= rhs.GetLastAddress()) { + return 0; + } else { + return 1; + } + } + public: + constexpr ALWAYS_INLINE KMemoryRegion() : address(0), pair_address(0), region_size(0), attributes(0), type_id(0) { /* ... */ } + constexpr ALWAYS_INLINE KMemoryRegion(uintptr_t a, size_t rs, uintptr_t p, u32 r, u32 t) : + address(a), pair_address(p), region_size(rs), attributes(r), type_id(t) + { + /* ... */ + } + constexpr ALWAYS_INLINE KMemoryRegion(uintptr_t a, size_t rs, u32 r, u32 t) : KMemoryRegion(a, rs, std::numeric_limits::max(), r, t) { /* ... 
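Region types form a bit-subset hierarchy: IsDerivedFrom(t) below checks (type | t) == type, i.e. that every bit of t is set in this region's type. For example, KMemoryRegionType_DramKernelPtHeap contains all the bits of KMemoryRegionType_DramKernel (0xE plus the NoUserMap/CarveoutProtected attributes) and so is derived from it; attributes such as KMemoryRegionAttr_LinearMapped occupy the high bits and are tested the same way via HasTypeAttribute.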
*/ } + + constexpr ALWAYS_INLINE uintptr_t GetAddress() const { + return this->address; + } + + constexpr ALWAYS_INLINE uintptr_t GetPairAddress() const { + return this->pair_address; + } + + constexpr ALWAYS_INLINE size_t GetSize() const { + return this->region_size; + } + + constexpr ALWAYS_INLINE uintptr_t GetEndAddress() const { + return this->GetAddress() + this->GetSize(); + } + + constexpr ALWAYS_INLINE uintptr_t GetLastAddress() const { + return this->GetEndAddress() - 1; + } + + constexpr ALWAYS_INLINE u32 GetAttributes() const { + return this->attributes; + } + + constexpr ALWAYS_INLINE u32 GetType() const { + return this->type_id; + } + + constexpr ALWAYS_INLINE void SetType(u32 type) { + MESOSPHERE_INIT_ABORT_UNLESS(this->CanDerive(type)); + this->type_id = type; + } + + constexpr ALWAYS_INLINE bool Contains(uintptr_t address) const { + return this->GetAddress() <= address && address <= this->GetLastAddress(); + } + + constexpr ALWAYS_INLINE bool IsDerivedFrom(u32 type) const { + return (this->GetType() | type) == this->GetType(); + } + + constexpr ALWAYS_INLINE bool HasTypeAttribute(KMemoryRegionType attr) const { + return (this->GetType() | attr) == this->GetType(); + } + + constexpr ALWAYS_INLINE bool CanDerive(u32 type) const { + return (this->GetType() | type) == type; + } + + constexpr ALWAYS_INLINE void SetPairAddress(uintptr_t a) { + this->pair_address = a; + } + + constexpr ALWAYS_INLINE void SetTypeAttribute(KMemoryRegionType attr) { + this->type_id |= attr; + } + }; + static_assert(std::is_trivially_destructible::value); + + class KMemoryRegionTree { + public: + struct DerivedRegionExtents { + const KMemoryRegion *first_region; + const KMemoryRegion *last_region; + + constexpr DerivedRegionExtents() : first_region(nullptr), last_region(nullptr) { /* ... */ } + + constexpr ALWAYS_INLINE uintptr_t GetAddress() const { + return this->first_region->GetAddress(); + } + + constexpr ALWAYS_INLINE uintptr_t GetEndAddress() const { + return this->last_region->GetEndAddress(); + } + + constexpr ALWAYS_INLINE size_t GetSize() const { + return this->GetEndAddress() - this->GetAddress(); + } + + constexpr ALWAYS_INLINE uintptr_t GetLastAddress() const { + return this->GetEndAddress() - 1; + } + }; + private: + using TreeType = util::IntrusiveRedBlackTreeBaseTraits::TreeType; + public: + using value_type = TreeType::value_type; + using size_type = TreeType::size_type; + using difference_type = TreeType::difference_type; + using pointer = TreeType::pointer; + using const_pointer = TreeType::const_pointer; + using reference = TreeType::reference; + using const_reference = TreeType::const_reference; + using iterator = TreeType::iterator; + using const_iterator = TreeType::const_iterator; + private: + TreeType tree; + public: + constexpr ALWAYS_INLINE KMemoryRegionTree() : tree() { /* ... 
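Because KMemoryRegion::Compare treats any address within [GetAddress(), GetLastAddress()] as equal to the region, find() with a one-byte probe acts as a containment query; FindContainingRegion below exploits this by probing with KMemoryRegion(address, 1, 0, 0).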
*/ } + public: + iterator FindContainingRegion(uintptr_t address) { + return this->find(KMemoryRegion(address, 1, 0, 0)); + } + + iterator FindFirstRegionByTypeAttr(u32 type_id, u32 attr = 0) { + for (auto it = this->begin(); it != this->end(); it++) { + if (it->GetType() == type_id && it->GetAttributes() == attr) { + return it; + } + } + MESOSPHERE_INIT_ABORT(); + } + + iterator FindFirstRegionByType(u32 type_id) { + for (auto it = this->begin(); it != this->end(); it++) { + if (it->GetType() == type_id) { + return it; + } + } + MESOSPHERE_INIT_ABORT(); + } + + iterator FindFirstDerivedRegion(u32 type_id) { + for (auto it = this->begin(); it != this->end(); it++) { + if (it->IsDerivedFrom(type_id)) { + return it; + } + } + MESOSPHERE_INIT_ABORT(); + } + + DerivedRegionExtents GetDerivedRegionExtents(u32 type_id) const { + DerivedRegionExtents extents; + + MESOSPHERE_INIT_ABORT_UNLESS(extents.first_region == nullptr); + MESOSPHERE_INIT_ABORT_UNLESS(extents.last_region == nullptr); + + for (auto it = this->cbegin(); it != this->cend(); it++) { + if (it->IsDerivedFrom(type_id)) { + if (extents.first_region == nullptr) { + extents.first_region = std::addressof(*it); + } + extents.last_region = std::addressof(*it); + } + } + + MESOSPHERE_INIT_ABORT_UNLESS(extents.first_region != nullptr); + MESOSPHERE_INIT_ABORT_UNLESS(extents.last_region != nullptr); + + return extents; + } + public: + NOINLINE bool Insert(uintptr_t address, size_t size, u32 type_id, u32 new_attr = 0, u32 old_attr = 0); + NOINLINE KVirtualAddress GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id); + + ALWAYS_INLINE KVirtualAddress GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, u32 type_id, size_t guard_size) { + return this->GetRandomAlignedRegion(size + 2 * guard_size, alignment, type_id) + guard_size; + } + public: + /* Iterator accessors. */ + iterator begin() { + return this->tree.begin(); + } + + const_iterator begin() const { + return this->tree.begin(); + } + + iterator end() { + return this->tree.end(); + } + + const_iterator end() const { + return this->tree.end(); + } + + const_iterator cbegin() const { + return this->begin(); + } + + const_iterator cend() const { + return this->end(); + } + + iterator iterator_to(reference ref) { + return this->tree.iterator_to(ref); + } + + const_iterator iterator_to(const_reference ref) const { + return this->tree.iterator_to(ref); + } + + /* Content management. */ + bool empty() const { + return this->tree.empty(); + } + + reference back() { + return this->tree.back(); + } + + const_reference back() const { + return this->tree.back(); + } + + reference front() { + return this->tree.front(); + } + + const_reference front() const { + return this->tree.front(); + } + + /* GCC over-eagerly inlines this operation. */ + NOINLINE iterator insert(reference ref) { + return this->tree.insert(ref); + } + + NOINLINE iterator erase(iterator it) { + return this->tree.erase(it); + } + + iterator find(const_reference ref) const { + return this->tree.find(ref); + } + + iterator nfind(const_reference ref) const { + return this->tree.nfind(ref); + } + }; + + class KMemoryRegionAllocator { + NON_COPYABLE(KMemoryRegionAllocator); + NON_MOVEABLE(KMemoryRegionAllocator); + public: + static constexpr size_t MaxMemoryRegions = 1000; + friend class KMemoryLayout; + private: + KMemoryRegion region_heap[MaxMemoryRegions]; + size_t num_regions; + private: + constexpr ALWAYS_INLINE KMemoryRegionAllocator() : region_heap(), num_regions() { /* ... 
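(Note: a minimal sketch of how the tree above is typically populated and queried; the constants are illustrative, and real setup happens in the board-specific init code declared at the end of this header:

    KMemoryRegionTree tree;
    tree.Insert(0x80000000, 0x40000000, KMemoryRegionType_Dram);   // 1 GB of DRAM, hypothetical
    const auto extents = tree.GetDerivedRegionExtents(KMemoryRegionType_Dram);
    // extents.GetAddress() == 0x80000000, extents.GetSize() == 0x40000000

In the implementation, Insert carves the requested range out of whatever region currently contains it, drawing any new nodes from the KMemoryRegionAllocator below.)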
+ public: + ALWAYS_INLINE KMemoryRegion *Allocate() { + /* Ensure we stay within the bounds of our heap. */ + MESOSPHERE_INIT_ABORT_UNLESS(this->num_regions < MaxMemoryRegions); + + return &this->region_heap[this->num_regions++]; + } + + template<typename... Args> + ALWAYS_INLINE KMemoryRegion *Create(Args&&... args) { + KMemoryRegion *region = this->Allocate(); + new (region) KMemoryRegion(std::forward<Args>(args)...); + return region; + } + }; + + class KMemoryLayout { + private: + static /* constinit */ inline uintptr_t s_linear_phys_to_virt_diff; + static /* constinit */ inline uintptr_t s_linear_virt_to_phys_diff; + static /* constinit */ inline KMemoryRegionAllocator s_region_allocator; + static /* constinit */ inline KMemoryRegionTree s_virtual_tree; + static /* constinit */ inline KMemoryRegionTree s_physical_tree; + static /* constinit */ inline KMemoryRegionTree s_virtual_linear_tree; + static /* constinit */ inline KMemoryRegionTree s_physical_linear_tree; + private: + static ALWAYS_INLINE auto GetVirtualLinearExtents(const KMemoryRegionTree::DerivedRegionExtents physical) { + return KMemoryRegion(GetInteger(GetLinearVirtualAddress(physical.GetAddress())), physical.GetSize(), 0, KMemoryRegionType_None); + } + public: + static ALWAYS_INLINE KMemoryRegionAllocator &GetMemoryRegionAllocator() { return s_region_allocator; } + static ALWAYS_INLINE KMemoryRegionTree &GetVirtualMemoryRegionTree() { return s_virtual_tree; } + static ALWAYS_INLINE KMemoryRegionTree &GetPhysicalMemoryRegionTree() { return s_physical_tree; } + static ALWAYS_INLINE KMemoryRegionTree &GetVirtualLinearMemoryRegionTree() { return s_virtual_linear_tree; } + static ALWAYS_INLINE KMemoryRegionTree &GetPhysicalLinearMemoryRegionTree() { return s_physical_linear_tree; } + + static ALWAYS_INLINE KMemoryRegionTree::iterator GetEnd(KVirtualAddress) { + return GetVirtualLinearMemoryRegionTree().end(); + } + + static ALWAYS_INLINE KMemoryRegionTree::iterator GetEnd(KPhysicalAddress) { + return GetPhysicalMemoryRegionTree().end(); + } + + static NOINLINE KMemoryRegionTree::iterator FindContainingRegion(KVirtualAddress address) { + return GetVirtualMemoryRegionTree().FindContainingRegion(GetInteger(address)); + } + + static NOINLINE KMemoryRegionTree::iterator FindContainingRegion(KPhysicalAddress address) { + return GetPhysicalMemoryRegionTree().FindContainingRegion(GetInteger(address)); + } + + static ALWAYS_INLINE KVirtualAddress GetLinearVirtualAddress(KPhysicalAddress address) { + return GetInteger(address) + s_linear_phys_to_virt_diff; + } + + static ALWAYS_INLINE KPhysicalAddress GetLinearPhysicalAddress(KVirtualAddress address) { + return GetInteger(address) + s_linear_virt_to_phys_diff; + } + + static NOINLINE KVirtualAddress GetMainStackTopAddress(s32 core_id) { + return GetVirtualMemoryRegionTree().FindFirstRegionByTypeAttr(KMemoryRegionType_KernelMiscMainStack, static_cast<u32>(core_id))->GetEndAddress(); + } + + static NOINLINE KVirtualAddress GetIdleStackTopAddress(s32 core_id) { + return GetVirtualMemoryRegionTree().FindFirstRegionByTypeAttr(KMemoryRegionType_KernelMiscIdleStack, static_cast<u32>(core_id))->GetEndAddress(); + } + + static NOINLINE KVirtualAddress GetExceptionStackTopAddress(s32 core_id) { + return GetVirtualMemoryRegionTree().FindFirstRegionByTypeAttr(KMemoryRegionType_KernelMiscExceptionStack, static_cast<u32>(core_id))->GetEndAddress(); + } + + static NOINLINE KVirtualAddress GetSlabRegionAddress() { + return GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_KernelSlab)->GetAddress(); + } + + static
NOINLINE KVirtualAddress GetCoreLocalRegionAddress() { + return GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_CoreLocal)->GetAddress(); + } + + static NOINLINE KVirtualAddress GetInterruptDistributorAddress() { + return GetPhysicalMemoryRegionTree().FindFirstDerivedRegion(KMemoryRegionType_InterruptDistributor)->GetPairAddress(); + } + + static NOINLINE KVirtualAddress GetInterruptCpuInterfaceAddress() { + return GetPhysicalMemoryRegionTree().FindFirstDerivedRegion(KMemoryRegionType_InterruptCpuInterface)->GetPairAddress(); + } + + static NOINLINE KVirtualAddress GetUartAddress() { + return GetPhysicalMemoryRegionTree().FindFirstDerivedRegion(KMemoryRegionType_Uart)->GetPairAddress(); + } + + static NOINLINE KMemoryRegion &GetMemoryControllerRegion() { + return *GetPhysicalMemoryRegionTree().FindFirstDerivedRegion(KMemoryRegionType_MemoryController); + } + + static NOINLINE KMemoryRegion &GetMetadataPoolRegion() { + return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_VirtualDramMetadataPool); + } + + static NOINLINE KMemoryRegion &GetPageTableHeapRegion() { + return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_VirtualKernelPtHeap); + } + + static NOINLINE KMemoryRegion &GetKernelStackRegion() { + return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_KernelStack); + } + + static NOINLINE KMemoryRegion &GetTempRegion() { + return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_KernelTemp); + } + + static NOINLINE KMemoryRegion &GetVirtualLinearRegion(KVirtualAddress address) { + return *GetVirtualLinearMemoryRegionTree().FindContainingRegion(GetInteger(address)); + } + + static NOINLINE bool IsHeapPhysicalAddress(const KMemoryRegion **out, KPhysicalAddress address, const KMemoryRegion *hint = nullptr) { + auto &tree = GetPhysicalLinearMemoryRegionTree(); + KMemoryRegionTree::const_iterator it = tree.end(); + if (hint != nullptr) { + it = tree.iterator_to(*hint); + } + if (it == tree.end() || !it->Contains(GetInteger(address))) { + it = tree.FindContainingRegion(GetInteger(address)); + } + if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_DramNonKernel)) { + if (out) { + *out = std::addressof(*it); + } + return true; + } + return false; + } + + static NOINLINE bool IsHeapPhysicalAddress(const KMemoryRegion **out, KPhysicalAddress address, size_t size, const KMemoryRegion *hint = nullptr) { + auto &tree = GetPhysicalLinearMemoryRegionTree(); + KMemoryRegionTree::const_iterator it = tree.end(); + if (hint != nullptr) { + it = tree.iterator_to(*hint); + } + if (it == tree.end() || !it->Contains(GetInteger(address))) { + it = tree.FindContainingRegion(GetInteger(address)); + } + if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_DramNonKernel)) { + const uintptr_t last_address = GetInteger(address) + size - 1; + do { + if (last_address <= it->GetLastAddress()) { + if (out) { + *out = std::addressof(*it); + } + return true; + } + it++; + } while (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_DramNonKernel)); + } + return false; + } + + static NOINLINE bool IsHeapVirtualAddress(const KMemoryRegion **out, KVirtualAddress address, const KMemoryRegion *hint = nullptr) { + auto &tree = GetVirtualLinearMemoryRegionTree(); + KMemoryRegionTree::const_iterator it = tree.end(); + if (hint != nullptr) { + it = tree.iterator_to(*hint); + } + if (it == tree.end() || !it->Contains(GetInteger(address))) { + it = tree.FindContainingRegion(GetInteger(address)); + } + if 
(it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool)) { + if (out) { + *out = std::addressof(*it); + } + return true; + } + return false; + } + + static NOINLINE bool IsHeapVirtualAddress(const KMemoryRegion **out, KVirtualAddress address, size_t size, const KMemoryRegion *hint = nullptr) { + auto &tree = GetVirtualLinearMemoryRegionTree(); + KMemoryRegionTree::const_iterator it = tree.end(); + if (hint != nullptr) { + it = tree.iterator_to(*hint); + } + if (it == tree.end() || !it->Contains(GetInteger(address))) { + it = tree.FindContainingRegion(GetInteger(address)); + } + if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool)) { + const uintptr_t last_address = GetInteger(address) + size - 1; + do { + if (last_address <= it->GetLastAddress()) { + if (out) { + *out = std::addressof(*it); + } + return true; + } + it++; + } while (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool)); + } + return false; + } + + static NOINLINE std::tuple GetTotalAndKernelMemorySizes() { + size_t total_size = 0, kernel_size = 0; + for (auto it = GetPhysicalMemoryRegionTree().cbegin(); it != GetPhysicalMemoryRegionTree().cend(); it++) { + if (it->IsDerivedFrom(KMemoryRegionType_Dram)) { + total_size += it->GetSize(); + if (!it->IsDerivedFrom(KMemoryRegionType_DramNonKernel)) { + kernel_size += it->GetSize(); + } + } + } + return std::make_tuple(total_size, kernel_size); + } + + static void InitializeLinearMemoryRegionTrees(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start); + + static NOINLINE auto GetKernelRegionExtents() { + return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel); + } + + static NOINLINE auto GetKernelCodeRegionExtents() { + return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelCode); + } + + static NOINLINE auto GetKernelStackRegionExtents() { + return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelStack); + } + + static NOINLINE auto GetKernelMiscRegionExtents() { + return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelMisc); + } + + static NOINLINE auto GetKernelSlabRegionExtents() { + return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelSlab); + } + + static NOINLINE const KMemoryRegion &GetCoreLocalRegion() { + return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_CoreLocal); + } + + static NOINLINE auto GetLinearRegionPhysicalExtents() { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionAttr_LinearMapped); + } + + static NOINLINE auto GetLinearRegionExtents() { + return GetVirtualLinearExtents(GetLinearRegionPhysicalExtents()); + } + + static NOINLINE auto GetCarveoutRegionExtents() { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionAttr_CarveoutProtected); + } + + static NOINLINE auto GetKernelRegionPhysicalExtents() { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernel); + } + + static NOINLINE auto GetKernelCodeRegionPhysicalExtents() { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelCode); + } + + static NOINLINE auto GetKernelSlabRegionPhysicalExtents() { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelSlab); + } + + static NOINLINE auto GetKernelPageTableHeapRegionPhysicalExtents() { + return 
GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelPtHeap); + } + + static NOINLINE auto GetKernelInitPageTableRegionPhysicalExtents() { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelInitPt); + } + + static NOINLINE auto GetKernelPoolPartitionRegionPhysicalExtents() { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramPoolPartition); + } + + static NOINLINE auto GetKernelMetadataPoolRegionPhysicalExtents() { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramMetadataPool); + } + + static NOINLINE auto GetKernelSystemPoolRegionPhysicalExtents() { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramSystemPool); + } + + static NOINLINE auto GetKernelSystemNonSecurePoolRegionPhysicalExtents() { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramSystemNonSecurePool); + } + + static NOINLINE auto GetKernelAppletPoolRegionPhysicalExtents() { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramAppletPool); + } + + static NOINLINE auto GetKernelApplicationPoolRegionPhysicalExtents() { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramApplicationPool); + } + }; + + + namespace init { + + /* These should be generic, regardless of board. */ + void SetupCoreLocalRegionMemoryRegions(KInitialPageTable &page_table, KInitialPageAllocator &page_allocator); + void SetupPoolPartitionMemoryRegions(); + + /* These may be implemented in a board-specific manner. */ + void SetupDevicePhysicalMemoryRegions(); + void SetupDramPhysicalMemoryRegions(); + + } + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp new file mode 100644 index 000000000..7213dc7e7 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include +#include + +namespace ams::kern { + + class KPageGroup; + + class KMemoryManager { + public: + enum Pool { + Pool_Application = 0, + Pool_Applet = 1, + Pool_System = 2, + Pool_SystemNonSecure = 3, + + Pool_Count, + + Pool_Shift = 4, + Pool_Mask = (0xF << Pool_Shift), + }; + + enum Direction { + Direction_FromFront = 0, + Direction_FromBack = 1, + + Direction_Shift = 0, + Direction_Mask = (0xF << Direction_Shift), + }; + + static constexpr size_t MaxManagerCount = 10; + private: + class Impl { + private: + using RefCount = u16; + private: + KPageHeap heap; + RefCount *page_reference_counts; + KVirtualAddress metadata_region; + Pool pool; + Impl *next; + Impl *prev; + public: + constexpr Impl() : heap(), page_reference_counts(), metadata_region(), pool(), next(), prev() { /* ... 
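(Note: pool and direction are packed into a single allocation-option word using the shifts above. Worked example: (Pool_System << Pool_Shift) | (Direction_FromBack << Direction_Shift) == (2 << 4) | (1 << 0) == 0x21. The EncodeOption/DecodeOption helpers at the end of this class implement exactly this packing.)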
*/ } + + size_t Initialize(const KMemoryRegion *region, Pool pool, KVirtualAddress metadata_region, KVirtualAddress metadata_region_end); + + KVirtualAddress AllocateBlock(s32 index) { return this->heap.AllocateBlock(index); } + void Free(KVirtualAddress addr, size_t num_pages) { this->heap.Free(addr, num_pages); } + + void TrackAllocationForOptimizedProcess(KVirtualAddress block, size_t num_pages); + + constexpr size_t GetSize() const { return this->heap.GetSize(); } + constexpr KVirtualAddress GetEndAddress() const { return this->heap.GetEndAddress(); } + + constexpr void SetNext(Impl *n) { this->next = n; } + constexpr void SetPrev(Impl *n) { this->prev = n; } + constexpr Impl *GetNext() const { return this->next; } + constexpr Impl *GetPrev() const { return this->prev; } + + void Open(KLightLock *pool_locks, KVirtualAddress address, size_t num_pages) { + KScopedLightLock lk(pool_locks[this->pool]); + + size_t index = this->heap.GetPageOffset(address); + const size_t end = index + num_pages; + while (index < end) { + const RefCount ref_count = (++this->page_reference_counts[index]); + MESOSPHERE_ABORT_UNLESS(ref_count > 0); + + index++; + } + } + + void Close(KLightLock *pool_locks, KVirtualAddress address, size_t num_pages) { + KScopedLightLock lk(pool_locks[this->pool]); + + size_t index = this->heap.GetPageOffset(address); + const size_t end = index + num_pages; + + size_t free_start = 0; + size_t free_count = 0; + while (index < end) { + MESOSPHERE_ABORT_UNLESS(this->page_reference_counts[index] > 0); + const RefCount ref_count = (--this->page_reference_counts[index]); + + /* Keep track of how many zero refcounts we see in a row, to minimize calls to free. */ + if (ref_count == 0) { + if (free_count > 0) { + free_count++; + } else { + free_start = index; + free_count = 1; + } + } else { + if (free_count > 0) { + this->Free(this->heap.GetAddress() + free_start * PageSize, free_count); + free_count = 0; + } + } + + index++; + } + + if (free_count > 0) { + this->Free(this->heap.GetAddress() + free_start * PageSize, free_count); + } + } + public: + static size_t CalculateMetadataOverheadSize(size_t region_size); + }; + private: + KLightLock pool_locks[Pool_Count]; + Impl *pool_managers_head[Pool_Count]; + Impl *pool_managers_tail[Pool_Count]; + Impl managers[MaxManagerCount]; + size_t num_managers; + u64 optimized_process_ids[Pool_Count]; + bool has_optimized_process[Pool_Count]; + private: + Impl &GetManager(KVirtualAddress address) { + return this->managers[KMemoryLayout::GetVirtualLinearRegion(address).GetAttributes()]; + } + + constexpr Impl *GetFirstManager(Pool pool, Direction dir) { + return dir == Direction_FromBack ? this->pool_managers_tail[pool] : this->pool_managers_head[pool]; + } + + constexpr Impl *GetNextManager(Impl *cur, Direction dir) { + if (dir == Direction_FromBack) { + return cur->GetPrev(); + } else { + return cur->GetNext(); + } + } + public: + constexpr KMemoryManager() + : pool_locks(), pool_managers_head(), pool_managers_tail(), managers(), num_managers(), optimized_process_ids(), has_optimized_process() + { + /* ... */ + } + + NOINLINE void Initialize(KVirtualAddress metadata_region, size_t metadata_region_size); + + NOINLINE KVirtualAddress AllocateContinuous(size_t num_pages, size_t align_pages, u32 option); + NOINLINE Result Allocate(KPageGroup *out, size_t num_pages, u32 option); + + void Open(KVirtualAddress address, size_t num_pages) { + /* Repeatedly open references until we've done so for all pages. 
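For example (sizes illustrative): if a six-page range begins two pages before the end of one manager's heap and continues into the next manager's, the loop clamps cur_pages to (manager.GetEndAddress() - address) / PageSize == 2, opens those two pages under the first manager, then advances address so the next iteration opens the remaining four pages under the following manager.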
*/ + while (num_pages) { + auto &manager = this->GetManager(address); + const size_t cur_pages = std::min(num_pages, (manager.GetEndAddress() - address) / PageSize); + manager.Open(this->pool_locks, address, cur_pages); + num_pages -= cur_pages; + address += cur_pages * PageSize; + } + } + + void Close(KVirtualAddress address, size_t num_pages) { + /* Repeatedly close references until we've done so for all pages. */ + while (num_pages) { + auto &manager = this->GetManager(address); + const size_t cur_pages = std::min(num_pages, (manager.GetEndAddress() - address) / PageSize); + manager.Close(this->pool_locks, address, cur_pages); + num_pages -= cur_pages; + address += cur_pages * PageSize; + } + } + public: + static size_t CalculateMetadataOverheadSize(size_t region_size) { + return Impl::CalculateMetadataOverheadSize(region_size); + } + + static constexpr ALWAYS_INLINE u32 EncodeOption(Pool pool, Direction dir) { + return (pool << Pool_Shift) | (dir << Direction_Shift); + } + + static constexpr ALWAYS_INLINE Pool GetPool(u32 option) { + return static_cast<Pool>((option & Pool_Mask) >> Pool_Shift); + } + + static constexpr ALWAYS_INLINE Direction GetDirection(u32 option) { + return static_cast<Direction>((option & Direction_Mask) >> Direction_Shift); + } + + static constexpr ALWAYS_INLINE std::tuple<Pool, Direction> DecodeOption(u32 option) { + return std::make_tuple(GetPool(option), GetDirection(option)); + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_object_name.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_object_name.hpp new file mode 100644 index 000000000..87e76c8c1 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_object_name.hpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#pragma once +#include +#include +#include +#include + +namespace ams::kern { + + class KObjectName : public KSlabAllocated<KObjectName>, public util::IntrusiveListBaseNode<KObjectName> { + public: + /* TODO: This is a placeholder definition. */ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_buffer.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_buffer.hpp new file mode 100644 index 000000000..980fc6c36 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_buffer.hpp @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
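(Note on the manager lookup above: GetManager() indexes this->managers with the attributes word of the containing virtual-linear region, i.e. each manager's index is evidently stashed in its region's attributes when the pools are configured. The option helpers also round-trip cleanly because Pool_Mask (0xF0) and Direction_Mask (0xF) do not overlap: DecodeOption(EncodeOption(Pool_Applet, Direction_FromFront)) yields std::make_tuple(Pool_Applet, Direction_FromFront).)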
+ */ +#pragma once +#include +#include + +namespace ams::kern { + + class KPageBuffer : public KSlabAllocated<KPageBuffer> { + private: + alignas(PageSize) u8 buffer[PageSize]; + public: + KPageBuffer() { + std::memset(buffer, 0, sizeof(buffer)); + } + + ALWAYS_INLINE KPhysicalAddress GetPhysicalAddress() const { + return KMemoryLayout::GetLinearPhysicalAddress(KVirtualAddress(this)); + } + + static ALWAYS_INLINE KPageBuffer *FromPhysicalAddress(KPhysicalAddress phys_addr) { + const KVirtualAddress virt_addr = KMemoryLayout::GetLinearVirtualAddress(phys_addr); + + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize)); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize)); + + return GetPointer<KPageBuffer>(virt_addr); + } + }; + static_assert(sizeof(KPageBuffer) == PageSize); + static_assert(alignof(KPageBuffer) == PageSize); + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_group.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_group.hpp new file mode 100644 index 000000000..15040b5d8 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_group.hpp @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KBlockInfoManager; + + class KBlockInfo : public util::IntrusiveListBaseNode<KBlockInfo> { + private: + KVirtualAddress address; + size_t num_pages; + public: + constexpr KBlockInfo() : util::IntrusiveListBaseNode<KBlockInfo>(), address(), num_pages() { /* ... 
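(Note: a small usage sketch for KPageBuffer above; phys_addr is hypothetical, page-aligned, and within the linear-mapped range:

    KPageBuffer *pb = KPageBuffer::FromPhysicalAddress(phys_addr);
    MESOSPHERE_ASSERT(pb->GetPhysicalAddress() == phys_addr);      // linear-map round trip

Because the class is exactly one page in size and alignment, a KPageBuffer pointer and a physical page are freely interconvertible.)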
*/ } + + constexpr void Initialize(KVirtualAddress addr, size_t np) { + this->address = addr; + this->num_pages = np; + } + + constexpr KVirtualAddress GetAddress() const { return this->address; } + constexpr size_t GetNumPages() const { return this->num_pages; } + constexpr size_t GetSize() const { return this->GetNumPages() * PageSize; } + constexpr KVirtualAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); } + constexpr KVirtualAddress GetLastAddress() const { return this->GetEndAddress() - 1; } + + constexpr bool IsEquivalentTo(const KBlockInfo &rhs) const { + return this->address == rhs.address && this->num_pages == rhs.num_pages; + } + + constexpr bool operator==(const KBlockInfo &rhs) const { + return this->IsEquivalentTo(rhs); + } + + constexpr bool operator!=(const KBlockInfo &rhs) const { + return !(*this == rhs); + } + + constexpr bool IsStrictlyBefore(KVirtualAddress addr) const { + const KVirtualAddress end = this->GetEndAddress(); + + if (this->address != Null<KVirtualAddress> && end == Null<KVirtualAddress>) { + return false; + } + + return end < addr; + } + + constexpr bool operator<(KVirtualAddress addr) const { + return this->IsStrictlyBefore(addr); + } + + constexpr bool TryConcatenate(KVirtualAddress addr, size_t np) { + if (addr != Null<KVirtualAddress> && addr == this->GetEndAddress()) { + this->num_pages += np; + return true; + } + return false; + } + }; + + class KPageGroup { + public: + using BlockInfoList = util::IntrusiveListBaseTraits<KBlockInfo>::ListType; + using iterator = BlockInfoList::const_iterator; + private: + BlockInfoList block_list; + KBlockInfoManager *manager; + public: + explicit KPageGroup(KBlockInfoManager *m) : block_list(), manager(m) { /* ... */ } + ~KPageGroup() { this->Finalize(); } + + void Finalize(); + + iterator begin() const { return this->block_list.begin(); } + iterator end() const { return this->block_list.end(); } + bool empty() const { return this->block_list.empty(); } + + Result AddBlock(KVirtualAddress addr, size_t num_pages); + void Open() const; + void Close() const; + + size_t GetNumPages() const; + + bool IsEquivalentTo(const KPageGroup &rhs) const; + + bool operator==(const KPageGroup &rhs) const { + return this->IsEquivalentTo(rhs); + } + + bool operator!=(const KPageGroup &rhs) const { + return !(*this == rhs); + } + }; + + class KScopedPageGroup { + private: + const KPageGroup *group; + public: + explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup *gp) : group(gp) { if (this->group) { this->group->Open(); } } + explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup &gp) : KScopedPageGroup(std::addressof(gp)) { /* ... */ } + ALWAYS_INLINE ~KScopedPageGroup() { if (this->group) { this->group->Close(); } } + + ALWAYS_INLINE void CancelClose() { + this->group = nullptr; + } + }; + +} \ No newline at end of file diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp new file mode 100644 index 000000000..54feb2231 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp @@ -0,0 +1,326 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::kern { + + class KPageHeap { + private: + static constexpr inline size_t MemoryBlockPageShifts[] = { 0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E }; + static constexpr size_t NumMemoryBlockPageShifts = util::size(MemoryBlockPageShifts); + public: + static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) { + const size_t target_pages = std::max(num_pages, align_pages); + for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) { + if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) { + return static_cast(i); + } + } + return -1; + } + + static constexpr s32 GetBlockIndex(size_t num_pages) { + for (s32 i = static_cast(NumMemoryBlockPageShifts) - 1; i >= 0; i--) { + if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) { + return i; + } + } + return -1; + } + + static constexpr size_t GetBlockSize(size_t index) { + return size_t(1) << MemoryBlockPageShifts[index]; + } + + static constexpr size_t GetBlockNumPages(size_t index) { + return GetBlockSize(index) / PageSize; + } + private: + class Block { + private: + class Bitmap { + public: + static constexpr size_t MaxDepth = 4; + private: + u64 *bit_storages[MaxDepth]; + size_t num_bits; + size_t used_depths; + public: + constexpr Bitmap() : bit_storages(), num_bits(), used_depths() { /* ... */ } + + constexpr size_t GetNumBits() const { return this->num_bits; } + constexpr s32 GetHighestDepthIndex() const { return static_cast(this->used_depths) - 1; } + + u64 *Initialize(u64 *storage, size_t size) { + /* Initially, everything is un-set. */ + this->num_bits = 0; + + /* Calculate the needed bitmap depth. */ + this->used_depths = static_cast(GetRequiredDepth(size)); + MESOSPHERE_ASSERT(this->used_depths <= MaxDepth); + + /* Set the bitmap pointers. */ + for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) { + this->bit_storages[depth] = storage; + size = util::AlignUp(size, BITSIZEOF(u64)) / BITSIZEOF(u64); + storage += size; + } + + return storage; + } + + ssize_t FindFreeBlock() const { + uintptr_t offset = 0; + s32 depth = 0; + + do { + const u64 v = this->bit_storages[depth][offset]; + if (v == 0) { + /* If depth is bigger than zero, then a previous level indicated a block was free. */ + MESOSPHERE_ASSERT(depth == 0); + return -1; + } + offset = offset * BITSIZEOF(u64) + __builtin_ctzll(v); + ++depth; + } while (depth < static_cast(this->used_depths)); + + return static_cast(offset); + } + + void SetBit(size_t offset) { + this->SetBit(this->GetHighestDepthIndex(), offset); + this->num_bits++; + } + + void ClearBit(size_t offset) { + this->ClearBit(this->GetHighestDepthIndex(), offset); + this->num_bits--; + } + + bool ClearRange(size_t offset, size_t count) { + s32 depth = this->GetHighestDepthIndex(); + u64 *bits = this->bit_storages[depth]; + size_t bit_ind = offset / BITSIZEOF(u64); + if (AMS_LIKELY(count < BITSIZEOF(u64))) { + const size_t shift = offset % BITSIZEOF(u64); + MESOSPHERE_ASSERT(shift + count <= BITSIZEOF(u64)); + /* Check that all the bits are set. */ + const u64 mask = ((u64(1) << count) - 1) << shift; + u64 v = bits[bit_ind]; + if ((v & mask) != mask) { + return false; + } + + /* Clear the bits. 
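(Note: the shifts in MemoryBlockPageShifts correspond to block sizes of 4 KB, 64 KB, 2 MB, 4 MB, 32 MB, 512 MB, and 1 GB. Worked example for the index math, with PageSize == 4 KB: GetAlignedBlockIndex(16, 16) needs max(16, 16) == 16 pages; index 0 holds (1 << 0xC) / PageSize == 1 page and index 1 holds (1 << 0x10) / PageSize == 16 pages, so it returns 1, the 64 KB block.)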
*/ + v &= ~mask; + bits[bit_ind] = v; + if (v == 0) { + this->ClearBit(depth - 1, bit_ind); + } + } else { + MESOSPHERE_ASSERT(offset % BITSIZEOF(u64) == 0); + MESOSPHERE_ASSERT(count % BITSIZEOF(u64) == 0); + /* Check that all the bits are set. */ + size_t remaining = count; + size_t i = 0; + do { + if (bits[bit_ind + i++] != ~u64(0)) { + return false; + } + remaining -= BITSIZEOF(u64); + } while (remaining > 0); + + /* Clear the bits. */ + remaining = count; + i = 0; + do { + bits[bit_ind + i] = 0; + this->ClearBit(depth - 1, bit_ind + i); + i++; + remaining -= BITSIZEOF(u64); + } while (remaining > 0); + } + + this->num_bits -= count; + return true; + } + private: + void SetBit(s32 depth, size_t offset) { + while (depth >= 0) { + size_t ind = offset / BITSIZEOF(u64); + size_t which = offset % BITSIZEOF(u64); + const u64 mask = u64(1) << which; + + u64 *bit = std::addressof(this->bit_storages[depth][ind]); + u64 v = *bit; + MESOSPHERE_ASSERT((v & mask) == 0); + *bit = v | mask; + if (v) { + break; + } + offset = ind; + depth--; + } + } + + void ClearBit(s32 depth, size_t offset) { + while (depth >= 0) { + size_t ind = offset / BITSIZEOF(u64); + size_t which = offset % BITSIZEOF(u64); + const u64 mask = u64(1) << which; + + u64 *bit = std::addressof(this->bit_storages[depth][ind]); + u64 v = *bit; + MESOSPHERE_ASSERT((v & mask) != 0); + v &= ~mask; + *bit = v; + if (v) { + break; + } + offset = ind; + depth--; + } + } + private: + static constexpr s32 GetRequiredDepth(size_t region_size) { + s32 depth = 0; + while (true) { + region_size /= BITSIZEOF(u64); + depth++; + if (region_size == 0) { + return depth; + } + } + } + public: + static constexpr size_t CalculateMetadataOverheadSize(size_t region_size) { + size_t overhead_bits = 0; + for (s32 depth = GetRequiredDepth(region_size) - 1; depth >= 0; depth--) { + region_size = util::AlignUp(region_size, BITSIZEOF(u64)) / BITSIZEOF(u64); + overhead_bits += region_size; + } + return overhead_bits * sizeof(u64); + } + }; + private: + Bitmap bitmap; + KVirtualAddress heap_address; + uintptr_t end_offset; + size_t block_shift; + size_t next_block_shift; + public: + constexpr Block() : bitmap(), heap_address(), end_offset(), block_shift(), next_block_shift() { /* ... */ } + + constexpr size_t GetShift() const { return this->block_shift; } + constexpr size_t GetNextShift() const { return this->next_block_shift; } + constexpr size_t GetSize() const { return u64(1) << this->GetShift(); } + constexpr size_t GetNumPages() const { return this->GetSize() / PageSize; } + constexpr size_t GetNumFreeBlocks() const { return this->bitmap.GetNumBits(); } + constexpr size_t GetNumFreePages() const { return this->GetNumFreeBlocks() * this->GetNumPages(); } + + u64 *Initialize(KVirtualAddress addr, size_t size, size_t bs, size_t nbs, u64 *bit_storage) { + /* Set shifts. */ + this->block_shift = bs; + this->next_block_shift = nbs; + + /* Align up the address. */ + KVirtualAddress end = addr + size; + const size_t align = (this->next_block_shift != 0) ? (u64(1) << this->next_block_shift) : (this->block_shift); + addr = util::AlignDown(GetInteger(addr), align); + end = util::AlignUp(GetInteger(end), align); + + this->heap_address = addr; + this->end_offset = (end - addr) / (u64(1) << this->block_shift); + return this->bitmap.Initialize(bit_storage, this->end_offset); + } + + KVirtualAddress PushBlock(KVirtualAddress address) { + /* Set the bit for the free block. 
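(Note: a worked example of the bitmap metadata math above, for a block covering 4096 entries: GetRequiredDepth(4096) divides by 64 until exhaustion (4096 to 64 to 1 to 0), giving 3 levels; CalculateMetadataOverheadSize then sums 64 + 1 + 1 == 66 u64 words, i.e. 528 bytes of storage for the three-level hierarchy.)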
*/ + size_t offset = (address - this->heap_address) >> this->GetShift(); + this->bitmap.SetBit(offset); + + /* If we have a next shift, try to clear the blocks below this one and return the new address. */ + if (this->GetNextShift()) { + const size_t diff = u64(1) << (this->GetNextShift() - this->GetShift()); + offset = util::AlignDown(offset, diff); + if (this->bitmap.ClearRange(offset, diff)) { + return this->heap_address + (offset << this->GetShift()); + } + } + + /* We couldn't coalesce, or we're already as big as possible. */ + return Null; + } + + KVirtualAddress PopBlock() { + /* Find a free block. */ + ssize_t soffset = this->bitmap.FindFreeBlock(); + if (soffset < 0) { + return Null; + } + const size_t offset = static_cast(soffset); + + /* Update our tracking and return it. */ + this->bitmap.ClearBit(offset); + return this->heap_address + (offset << this->GetShift()); + } + public: + static constexpr size_t CalculateMetadataOverheadSize(size_t region_size, size_t cur_block_shift, size_t next_block_shift) { + const size_t cur_block_size = (u64(1) << cur_block_shift); + const size_t next_block_size = (u64(1) << next_block_shift); + const size_t align = (next_block_shift != 0) ? next_block_size : cur_block_size; + return Bitmap::CalculateMetadataOverheadSize((align * 2 + util::AlignUp(region_size, align)) / cur_block_size); + } + }; + private: + KVirtualAddress heap_address; + size_t heap_size; + size_t used_size; + size_t num_blocks; + Block blocks[NumMemoryBlockPageShifts]; + private: + void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress metadata_address, size_t metadata_size, const size_t *block_shifts, size_t num_block_shifts); + size_t GetNumFreePages() const; + + void FreeBlock(KVirtualAddress block, s32 index); + public: + constexpr KPageHeap() : heap_address(), heap_size(), used_size(), num_blocks(), blocks() { /* ... 
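(Note: to make the PushBlock coalescing concrete, take the 4 KB block (shift 0xC) whose next size is 64 KB (shift 0x10): diff == 1 << (0x10 - 0xC) == 16, so when a freed page completes an aligned group of 16 set bits, ClearRange(offset, 16) strips them and PushBlock returns the 64 KB-aligned address, letting the caller push it into the next larger Block and repeat until coalescing fails.)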
*/ } + + constexpr KVirtualAddress GetAddress() const { return this->heap_address; } + constexpr size_t GetSize() const { return this->heap_size; } + constexpr KVirtualAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); } + constexpr size_t GetPageOffset(KVirtualAddress block) const { return (block - this->GetAddress()) / PageSize; } + + void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress metadata_address, size_t metadata_size) { + return Initialize(heap_address, heap_size, metadata_address, metadata_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts); + } + + void UpdateUsedSize() { + this->used_size = this->heap_size - (this->GetNumFreePages() * PageSize); + } + + KVirtualAddress AllocateBlock(s32 index); + void Free(KVirtualAddress addr, size_t num_pages); + private: + static size_t CalculateMetadataOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts); + public: + static size_t CalculateMetadataOverheadSize(size_t region_size) { + return CalculateMetadataOverheadSize(region_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts); + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp new file mode 100644 index 000000000..aa121f6a0 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp @@ -0,0 +1,311 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include +#include +#include +#include +#include + +namespace ams::kern { + + struct KPageProperties { + KMemoryPermission perm; + bool io; + bool uncached; + bool non_contiguous; + }; + static_assert(std::is_trivial::value); + + class KPageTableBase { + NON_COPYABLE(KPageTableBase); + NON_MOVEABLE(KPageTableBase); + public: + using TraversalEntry = KPageTableImpl::TraversalEntry; + using TraversalContext = KPageTableImpl::TraversalContext; + protected: + enum MemoryFillValue { + MemoryFillValue_Zero = 0, + MemoryFillValue_Stack = 'X', + MemoryFillValue_Ipc = 'Y', + MemoryFillValue_Heap = 'Z', + }; + + enum OperationType { + OperationType_Map = 0, + OperationType_MapGroup = 1, + OperationType_Unmap = 2, + OperationType_ChangePermissions = 3, + OperationType_ChangePermissionsAndRefresh = 4, + /* TODO: perm/attr operations */ + }; + + static constexpr size_t MaxPhysicalMapAlignment = 1_GB; + static constexpr size_t RegionAlignment = 2_MB; + static_assert(RegionAlignment == KernelAslrAlignment); + + struct PageLinkedList { + private: + struct Node { + Node *next; + u8 buffer[PageSize - sizeof(Node *)]; + }; + static_assert(std::is_pod::value); + private: + Node *root; + public: + constexpr PageLinkedList() : root(nullptr) { /* ... 
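(Note: PageLinkedList stores its nodes in the tracked pages themselves; on this 64-bit target the first 8 bytes of each page hold the next pointer and the rest is padding. A minimal sketch, with page_va a hypothetical page-aligned KVirtualAddress:

    PageLinkedList ll;
    ll.Push(page_va);     // the page itself becomes list storage
    auto *n = ll.Pop();   // hands the page back for reuse

Pop reads this->root->next unconditionally, so it assumes a non-empty list; Peek() is the emptiness check.)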
*/ } + + void Push(Node *n) { + MESOSPHERE_ASSERT(util::IsAligned(reinterpret_cast(n), PageSize)); + n->next = this->root; + this->root = n; + } + + void Push(KVirtualAddress addr) { + this->Push(GetPointer(addr)); + } + + Node *Peek() const { return this->root; } + + Node *Pop() { + Node *r = this->root; + this->root = this->root->next; + return r; + } + }; + static_assert(std::is_trivially_destructible::value); + + static constexpr u32 DefaultMemoryIgnoreAttr = KMemoryAttribute_DontCareMask | KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared; + + static constexpr size_t GetAddressSpaceWidth(ams::svc::CreateProcessFlag as_type) { + switch (static_cast(as_type & ams::svc::CreateProcessFlag_AddressSpaceMask)) { + case ams::svc::CreateProcessFlag_AddressSpace64Bit: + return 39; + case ams::svc::CreateProcessFlag_AddressSpace64BitDeprecated: + return 36; + case ams::svc::CreateProcessFlag_AddressSpace32Bit: + case ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias: + return 32; + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + } + private: + class KScopedPageTableUpdater { + private: + KPageTableBase *page_table; + PageLinkedList ll; + public: + ALWAYS_INLINE explicit KScopedPageTableUpdater(KPageTableBase *pt) : page_table(pt), ll() { /* ... */ } + ALWAYS_INLINE explicit KScopedPageTableUpdater(KPageTableBase &pt) : KScopedPageTableUpdater(std::addressof(pt)) { /* ... */ } + ALWAYS_INLINE ~KScopedPageTableUpdater() { this->page_table->FinalizeUpdate(this->GetPageList()); } + + PageLinkedList *GetPageList() { return std::addressof(this->ll); } + }; + private: + KProcessAddress address_space_start; + KProcessAddress address_space_end; + KProcessAddress heap_region_start; + KProcessAddress heap_region_end; + KProcessAddress current_heap_end; + KProcessAddress alias_region_start; + KProcessAddress alias_region_end; + KProcessAddress stack_region_start; + KProcessAddress stack_region_end; + KProcessAddress kernel_map_region_start; + KProcessAddress kernel_map_region_end; + KProcessAddress alias_code_region_start; + KProcessAddress alias_code_region_end; + KProcessAddress code_region_start; + KProcessAddress code_region_end; + size_t max_heap_size; + size_t max_physical_memory_size; + mutable KLightLock general_lock; + mutable KLightLock map_physical_memory_lock; + KPageTableImpl impl; + KMemoryBlockManager memory_block_manager; + u32 allocate_option; + u32 address_space_width; + bool is_kernel; + bool enable_aslr; + KMemoryBlockSlabManager *memory_block_slab_manager; + KBlockInfoManager *block_info_manager; + const KMemoryRegion *cached_physical_linear_region; + const KMemoryRegion *cached_physical_heap_region; + const KMemoryRegion *cached_virtual_heap_region; + MemoryFillValue heap_fill_value; + MemoryFillValue ipc_fill_value; + MemoryFillValue stack_fill_value; + public: + constexpr KPageTableBase() : + address_space_start(), address_space_end(), heap_region_start(), heap_region_end(), current_heap_end(), + alias_region_start(), alias_region_end(), stack_region_start(), stack_region_end(), kernel_map_region_start(), + kernel_map_region_end(), alias_code_region_start(), alias_code_region_end(), code_region_start(), code_region_end(), + max_heap_size(), max_physical_memory_size(), general_lock(), map_physical_memory_lock(), impl(), memory_block_manager(), + allocate_option(), address_space_width(), is_kernel(), enable_aslr(), memory_block_slab_manager(), block_info_manager(), + cached_physical_linear_region(), cached_physical_heap_region(), cached_virtual_heap_region(), + 
heap_fill_value(), ipc_fill_value(), stack_fill_value() + { + /* ... */ + } + + NOINLINE Result InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end); + NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager); + + void Finalize(); + + constexpr bool IsKernel() const { return this->is_kernel; } + constexpr bool IsAslrEnabled() const { return this->enable_aslr; } + + constexpr bool Contains(KProcessAddress addr) const { + return this->address_space_start <= addr && addr <= this->address_space_end - 1; + } + + constexpr bool Contains(KProcessAddress addr, size_t size) const { + return this->address_space_start <= addr && addr < addr + size && addr + size - 1 <= this->address_space_end - 1; + } + + KProcessAddress GetRegionAddress(KMemoryState state) const; + size_t GetRegionSize(KMemoryState state) const; + bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const; + protected: + virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) = 0; + virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) = 0; + virtual void FinalizeUpdate(PageLinkedList *page_list) = 0; + + KPageTableImpl &GetImpl() { return this->impl; } + const KPageTableImpl &GetImpl() const { return this->impl; } + + KBlockInfoManager *GetBlockInfoManager() const { return this->block_info_manager; } + + bool IsLockedByCurrentThread() const { return this->general_lock.IsLockedByCurrentThread(); } + + bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + return KMemoryLayout::IsHeapPhysicalAddress(std::addressof(this->cached_physical_heap_region), phys_addr, this->cached_physical_heap_region); + } + + bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + return KMemoryLayout::IsHeapPhysicalAddress(std::addressof(this->cached_physical_heap_region), phys_addr, size, this->cached_physical_heap_region); + } + + bool IsHeapVirtualAddress(KVirtualAddress virt_addr) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + return KMemoryLayout::IsHeapVirtualAddress(std::addressof(this->cached_virtual_heap_region), virt_addr, this->cached_virtual_heap_region); + } + + bool IsHeapVirtualAddress(KVirtualAddress virt_addr, size_t size) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + return KMemoryLayout::IsHeapVirtualAddress(std::addressof(this->cached_virtual_heap_region), virt_addr, size, this->cached_virtual_heap_region); + } + + bool ContainsPages(KProcessAddress addr, size_t num_pages) const { + return (this->address_space_start <= addr) && (num_pages <= (this->address_space_end - this->address_space_start) / PageSize) && (addr + num_pages * PageSize - 1 <= this->address_space_end - 1); + } + private: + constexpr size_t GetNumGuardPages() const { return this->IsKernel() ? 
1 : 4; } + ALWAYS_INLINE KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const; + + Result CheckMemoryState(const KMemoryInfo &info, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const; + Result CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const; + Result CheckMemoryState(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const { + return this->CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr); + } + + Result QueryInfoImpl(KMemoryInfo *out_info, ams::svc::PageInfo *out_page, KProcessAddress address) const; + Result AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties properties); + Result MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll); + + Result MakePageGroup(KPageGroup &pg, KProcessAddress addr, size_t num_pages); + bool IsValidPageGroup(const KPageGroup &pg, KProcessAddress addr, size_t num_pages); + + NOINLINE Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm); + public: + bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress virt_addr) const { + return this->GetImpl().GetPhysicalAddress(out, virt_addr); + } + + Result SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm); + Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm); + Result SetHeapSize(KProcessAddress *out, size_t size); + Result SetMaxHeapSize(size_t size); + Result QueryInfo(KMemoryInfo *out_info, ams::svc::PageInfo *out_page_info, KProcessAddress addr) const; + Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm); + Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm); + Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm); + + Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { + return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start, region_num_pages, state, perm); + } + + Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) { + return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, state, perm); + } + + Result MapPages(KProcessAddress *out_addr, size_t num_pages, KMemoryState state, KMemoryPermission perm) { + return this->MapPages(out_addr, num_pages, PageSize, Null, false, this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, state, perm); + } + + Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state); + Result 
MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm); + Result MapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state, KMemoryPermission perm); + Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state); + public: + KProcessAddress GetAddressSpaceStart() const { return this->address_space_start; } + KProcessAddress GetHeapRegionStart() const { return this->heap_region_start; } + KProcessAddress GetAliasRegionStart() const { return this->alias_region_start; } + KProcessAddress GetStackRegionStart() const { return this->stack_region_start; } + KProcessAddress GetKernelMapRegionStart() const { return this->kernel_map_region_start; } + + size_t GetAddressSpaceSize() const { return this->address_space_end - this->address_space_start; } + size_t GetHeapRegionSize() const { return this->heap_region_end - this->heap_region_start; } + size_t GetAliasRegionSize() const { return this->alias_region_end - this->alias_region_start; } + size_t GetStackRegionSize() const { return this->stack_region_end - this->stack_region_start; } + size_t GetKernelMapRegionSize() const { return this->kernel_map_region_end - this->kernel_map_region_start; } + public: + static ALWAYS_INLINE KVirtualAddress GetLinearVirtualAddress(KPhysicalAddress addr) { + return KMemoryLayout::GetLinearVirtualAddress(addr); + } + + static ALWAYS_INLINE KPhysicalAddress GetLinearPhysicalAddress(KVirtualAddress addr) { + return KMemoryLayout::GetLinearPhysicalAddress(addr); + } + + static ALWAYS_INLINE KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress addr) { + return GetLinearVirtualAddress(addr); + } + + static ALWAYS_INLINE KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress addr) { + return GetLinearPhysicalAddress(addr); + } + + static ALWAYS_INLINE KVirtualAddress GetPageTableVirtualAddress(KPhysicalAddress addr) { + return GetLinearVirtualAddress(addr); + } + + static ALWAYS_INLINE KPhysicalAddress GetPageTablePhysicalAddress(KVirtualAddress addr) { + return GetLinearPhysicalAddress(addr); + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_manager.hpp new file mode 100644 index 000000000..85af722a3 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_manager.hpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + namespace impl { + + class PageTablePage { + private: + u8 buffer[PageSize]; + }; + static_assert(sizeof(PageTablePage) == PageSize); + + } + + class KPageTableManager : public KDynamicSlabHeap<impl::PageTablePage> { + public: + using RefCount = u16; + static constexpr size_t PageTableSize = sizeof(impl::PageTablePage); + static_assert(PageTableSize == PageSize); + private: + using BaseHeap = KDynamicSlabHeap<impl::PageTablePage>; + private: + RefCount *ref_counts; + public: + static constexpr size_t CalculateReferenceCountSize(size_t size) { + return (size / PageSize) * sizeof(RefCount); + } + public: + constexpr KPageTableManager() : BaseHeap(), ref_counts() { /* ... */ } + private: + void Initialize(RefCount *rc) { + this->ref_counts = rc; + for (size_t i = 0; i < this->GetSize() / PageSize; i++) { + this->ref_counts[i] = 0; + } + } + + constexpr RefCount *GetRefCountPointer(KVirtualAddress addr) const { + return std::addressof(this->ref_counts[(addr - this->GetAddress()) / PageSize]); + } + public: + void Initialize(KDynamicPageManager *next_allocator, RefCount *rc) { + BaseHeap::Initialize(next_allocator); + this->Initialize(rc); + } + + void Initialize(KVirtualAddress memory, size_t sz, RefCount *rc) { + BaseHeap::Initialize(memory, sz); + this->Initialize(rc); + } + + KVirtualAddress Allocate() { + return KVirtualAddress(BaseHeap::Allocate()); + } + + void Free(KVirtualAddress addr) { + BaseHeap::Free(GetPointer<impl::PageTablePage>(addr)); + } + + RefCount GetRefCount(KVirtualAddress addr) const { + MESOSPHERE_ASSERT(this->IsInRange(addr)); + return *this->GetRefCountPointer(addr); + } + + void Open(KVirtualAddress addr, int count) { + MESOSPHERE_ASSERT(this->IsInRange(addr)); + + *this->GetRefCountPointer(addr) += count; + + MESOSPHERE_ABORT_UNLESS(this->GetRefCount(addr) > 0); + } + + bool Close(KVirtualAddress addr, int count) { + MESOSPHERE_ASSERT(this->IsInRange(addr)); + MESOSPHERE_ABORT_UNLESS(this->GetRefCount(addr) >= count); + + *this->GetRefCountPointer(addr) -= count; + return this->GetRefCount(addr) == 0; + } + + constexpr bool IsInPageTableHeap(KVirtualAddress addr) const { + return this->IsInRange(addr); + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_port.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_port.hpp new file mode 100644 index 000000000..40799b14e --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_port.hpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KPort final : public KAutoObjectWithSlabHeapAndContainer<KPort, KAutoObjectWithList> { + MESOSPHERE_AUTOOBJECT_TRAITS(KPort, KAutoObject); + public: + /* TODO: This is a placeholder definition. 
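(Note: a sketch of the Open/Close contract on KPageTableManager above; manager and pt_page are hypothetical names:

    manager.Open(pt_page, 1);                  // a table entry now references this page
    // ... page is in use as a page table ...
    if (manager.Close(pt_page, 1)) {
        manager.Free(pt_page);                 // last reference dropped; plausibly freed by the caller
    }

Close only reports that the count reached zero; whether and when the page is actually freed is left to the page-table code.)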
*/ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_priority_queue.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_priority_queue.hpp new file mode 100644 index 000000000..3b471b4d0 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_priority_queue.hpp @@ -0,0 +1,424 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::kern { + + /* + TODO: C++20 + + template + concept KPriorityQueueAffinityMask = !std::is_reference::value && requires (T &t) { + { t.GetAffinityMask() } -> std::convertible_to; + { t.SetAffinityMask(std::declval()) }; + + { t.GetAffinity(std::declval()) } -> std::same_as; + { t.SetAffinity(std::declval(), std::declval()) }; + { t.SetAll() }; + }; + + template + concept KPriorityQueueMember = !std::is_reference::value && requires (T &t) { + { typename T::QueueEntry() }; + { (typename T::QueueEntry()).Initialize() }; + { (typename T::QueueEntry()).SetPrev(std::addressof(t)) }; + { (typename T::QueueEntry()).SetNext(std::addressof(t)) }; + { (typename T::QueueEntry()).GetNext() } -> std::same_as; + { (typename T::QueueEntry()).GetPrev() } -> std::same_as; + { t.GetPriorityQueueEntry(std::declval()) } -> std::same_as; + + { t.GetAffinityMask() }; + { typename std::remove_cvref::type() } -> KPriorityQueueAffinityMask; + + { t.GetActiveCore() } -> std::convertible_to; + { t.GetPriority() } -> std::convertible_to; + }; + */ + + + template /* TODO C++20: requires KPriorityQueueMember */ + class KPriorityQueue { + public: + using AffinityMaskType = typename std::remove_cv().GetAffinityMask())>::type>::type; + + static_assert(LowestPriority >= 0); + static_assert(HighestPriority >= 0); + static_assert(LowestPriority >= HighestPriority); + static constexpr size_t NumPriority = LowestPriority - HighestPriority + 1; + static constexpr size_t NumCores = _NumCores; + + static constexpr ALWAYS_INLINE bool IsValidCore(s32 core) { + return 0 <= core && core < static_cast(NumCores); + } + + static constexpr ALWAYS_INLINE bool IsValidPriority(s32 priority) { + return HighestPriority <= priority && priority <= LowestPriority + 1; + } + private: + using Entry = typename Member::QueueEntry; + public: + class KPerCoreQueue { + private: + Entry root[NumCores]; + public: + constexpr ALWAYS_INLINE KPerCoreQueue() : root() { + for (size_t i = 0; i < NumCores; i++) { + this->root[i].Initialize(); + } + } + + constexpr ALWAYS_INLINE bool PushBack(s32 core, Member *member) { + /* Get the entry associated with the member. */ + Entry &member_entry = member->GetPriorityQueueEntry(core); + + /* Get the entry associated with the end of the queue. */ + Member *tail = this->root[core].GetPrev(); + Entry &tail_entry = (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core]; + + /* Link the entries. 
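 Note that each per-core root entry is a sentinel: its next/prev fields cache the head/tail members for that core, so pushes and removals are O(1). The result of a push reports whether the list was previously empty, while Remove reports whether it is now empty, which lets the caller maintain the per-core priority bitset.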
*/ + member_entry.SetPrev(tail); + member_entry.SetNext(nullptr); + tail_entry.SetNext(member); + this->root[core].SetPrev(member); + + return (tail == nullptr); + } + + constexpr ALWAYS_INLINE bool PushFront(s32 core, Member *member) { + /* Get the entry associated with the member. */ + Entry &member_entry = member->GetPriorityQueueEntry(core); + + /* Get the entry associated with the front of the queue. */ + Member *head = this->root[core].GetNext(); + Entry &head_entry = (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core]; + + /* Link the entries. */ + member_entry.SetPrev(nullptr); + member_entry.SetNext(head); + head_entry.SetPrev(member); + this->root[core].SetNext(member); + + return (head == nullptr); + } + + constexpr ALWAYS_INLINE bool Remove(s32 core, Member *member) { + /* Get the entry associated with the member. */ + Entry &member_entry = member->GetPriorityQueueEntry(core); + + /* Get the entries associated with next and prev. */ + Member *prev = member_entry.GetPrev(); + Member *next = member_entry.GetNext(); + Entry &prev_entry = (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : this->root[core]; + Entry &next_entry = (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core]; + + /* Unlink. */ + prev_entry.SetNext(next); + next_entry.SetPrev(prev); + + return (this->GetFront(core) == nullptr); + } + + constexpr ALWAYS_INLINE Member *GetFront(s32 core) const { + return this->root[core].GetNext(); + } + }; + + class KPriorityQueueImpl { + private: + KPerCoreQueue queues[NumPriority]; + util::BitSet64 available_priorities[NumCores]; + public: + constexpr ALWAYS_INLINE KPriorityQueueImpl() : queues(), available_priorities() { /* ... */ } + + constexpr ALWAYS_INLINE void PushBack(s32 priority, s32 core, Member *member) { + MESOSPHERE_ASSERT(IsValidCore(core)); + MESOSPHERE_ASSERT(IsValidPriority(priority)); + + if (AMS_LIKELY(priority <= LowestPriority)) { + if (this->queues[priority].PushBack(core, member)) { + this->available_priorities[core].SetBit(priority); + } + } + } + + constexpr ALWAYS_INLINE void PushFront(s32 priority, s32 core, Member *member) { + MESOSPHERE_ASSERT(IsValidCore(core)); + MESOSPHERE_ASSERT(IsValidPriority(priority)); + + if (AMS_LIKELY(priority <= LowestPriority)) { + if (this->queues[priority].PushFront(core, member)) { + this->available_priorities[core].SetBit(priority); + } + } + } + + constexpr ALWAYS_INLINE void Remove(s32 priority, s32 core, Member *member) { + MESOSPHERE_ASSERT(IsValidCore(core)); + MESOSPHERE_ASSERT(IsValidPriority(priority)); + + if (AMS_LIKELY(priority <= LowestPriority)) { + if (this->queues[priority].Remove(core, member)) { + this->available_priorities[core].ClearBit(priority); + } + } + } + + constexpr ALWAYS_INLINE Member *GetFront(s32 core) const { + MESOSPHERE_ASSERT(IsValidCore(core)); + + const s32 priority = this->available_priorities[core].CountLeadingZero(); + if (AMS_LIKELY(priority <= LowestPriority)) { + return this->queues[priority].GetFront(core); + } else { + return nullptr; + } + } + + constexpr ALWAYS_INLINE Member *GetFront(s32 priority, s32 core) const { + MESOSPHERE_ASSERT(IsValidCore(core)); + MESOSPHERE_ASSERT(IsValidPriority(priority)); + + if (AMS_LIKELY(priority <= LowestPriority)) { + return this->queues[priority].GetFront(core); + } else { + return nullptr; + } + } + + constexpr ALWAYS_INLINE Member *GetNext(s32 core, const Member *member) const { + MESOSPHERE_ASSERT(IsValidCore(core)); + + Member *next = member->GetPriorityQueueEntry(core).GetNext(); + 
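/* No in-list successor: fall through to the front of the next priority level that has members available on this core, per the per-core priority bitset. */ +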
if (next == nullptr) { + const s32 priority = this->available_priorities[core].GetNextSet(member->GetPriority()); + if (AMS_LIKELY(priority <= LowestPriority)) { + next = this->queues[priority].GetFront(core); + } + } + return next; + } + + constexpr ALWAYS_INLINE void MoveToFront(s32 priority, s32 core, Member *member) { + MESOSPHERE_ASSERT(IsValidCore(core)); + MESOSPHERE_ASSERT(IsValidPriority(priority)); + + if (AMS_LIKELY(priority <= LowestPriority)) { + this->queues[priority].Remove(core, member); + this->queues[priority].PushFront(core, member); + } + } + + constexpr ALWAYS_INLINE Member *MoveToBack(s32 priority, s32 core, Member *member) { + MESOSPHERE_ASSERT(IsValidCore(core)); + MESOSPHERE_ASSERT(IsValidPriority(priority)); + + if (AMS_LIKELY(priority <= LowestPriority)) { + this->queues[priority].Remove(core, member); + this->queues[priority].PushBack(core, member); + return this->queues[priority].GetFront(core); + } else { + return nullptr; + } + } + }; + private: + KPriorityQueueImpl scheduled_queue; + KPriorityQueueImpl suggested_queue; + private: + constexpr ALWAYS_INLINE void ClearAffinityBit(u64 &affinity, s32 core) { + affinity &= ~(u64(1ul) << core); + } + + constexpr ALWAYS_INLINE s32 GetNextCore(u64 &affinity) { + const s32 core = __builtin_ctzll(static_cast(affinity)); + ClearAffinityBit(affinity, core); + return core; + } + + constexpr ALWAYS_INLINE void PushBack(s32 priority, Member *member) { + MESOSPHERE_ASSERT(IsValidPriority(priority)); + + /* Push onto the scheduled queue for its core, if we can. */ + u64 affinity = member->GetAffinityMask().GetAffinityMask(); + if (const s32 core = member->GetActiveCore(); core >= 0) { + this->scheduled_queue.PushBack(priority, core, member); + ClearAffinityBit(affinity, core); + } + + /* And suggest the thread for all other cores. */ + while (affinity) { + this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); + } + } + + constexpr ALWAYS_INLINE void PushFront(s32 priority, Member *member) { + MESOSPHERE_ASSERT(IsValidPriority(priority)); + + /* Push onto the scheduled queue for its core, if we can. */ + u64 affinity = member->GetAffinityMask().GetAffinityMask(); + if (const s32 core = member->GetActiveCore(); core >= 0) { + this->scheduled_queue.PushFront(priority, core, member); + ClearAffinityBit(affinity, core); + } + + /* And suggest the thread for all other cores. */ + /* Note: Nintendo pushes onto the back of the suggested queue, not the front. */ + while (affinity) { + this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); + } + } + + constexpr ALWAYS_INLINE void Remove(s32 priority, Member *member) { + MESOSPHERE_ASSERT(IsValidPriority(priority)); + + /* Remove from the scheduled queue for its core. */ + u64 affinity = member->GetAffinityMask().GetAffinityMask(); + if (const s32 core = member->GetActiveCore(); core >= 0) { + this->scheduled_queue.Remove(priority, core, member); + ClearAffinityBit(affinity, core); + } + + /* Remove from the suggested queue for all other cores. */ + while (affinity) { + this->suggested_queue.Remove(priority, GetNextCore(affinity), member); + } + } + public: + constexpr ALWAYS_INLINE KPriorityQueue() : scheduled_queue(), suggested_queue() { /* ... */ } + + /* Getters. 
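 Scheduled members are eligible to run on the given core; suggested members are candidates that may be migrated to it. An illustrative consumer (assuming a KThread-like Member type and a queue instance) walks migration candidates as follows: + + for (Member *t = queue.GetSuggestedFront(core); t != nullptr; t = queue.GetSuggestedNext(core, t)) { + // consider pulling t over to this core + }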
*/ + constexpr ALWAYS_INLINE Member *GetScheduledFront(s32 core) const { + return this->scheduled_queue.GetFront(core); + } + + constexpr ALWAYS_INLINE Member *GetScheduledFront(s32 core, s32 priority) const { + return this->scheduled_queue.GetFront(priority, core); + } + + constexpr ALWAYS_INLINE Member *GetSuggestedFront(s32 core) const { + return this->suggested_queue.GetFront(core); + } + + constexpr ALWAYS_INLINE Member *GetSuggestedFront(s32 core, s32 priority) const { + return this->suggested_queue.GetFront(priority, core); + } + + constexpr ALWAYS_INLINE Member *GetScheduledNext(s32 core, const Member *member) const { + return this->scheduled_queue.GetNext(core, member); + } + + constexpr ALWAYS_INLINE Member *GetSuggestedNext(s32 core, const Member *member) const { + return this->suggested_queue.GetNext(core, member); + } + + constexpr ALWAYS_INLINE Member *GetSamePriorityNext(s32 core, const Member *member) const { + return member->GetPriorityQueueEntry(core).GetNext(); + } + + /* Mutators. */ + constexpr ALWAYS_INLINE void PushBack(Member *member) { + this->PushBack(member->GetPriority(), member); + } + + constexpr ALWAYS_INLINE void Remove(Member *member) { + this->Remove(member->GetPriority(), member); + } + + constexpr ALWAYS_INLINE void MoveToScheduledFront(Member *member) { + this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member); + } + + constexpr ALWAYS_INLINE Member *MoveToScheduledBack(Member *member) { + return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(), member); + } + + /* First class fancy operations. */ + constexpr ALWAYS_INLINE void ChangePriority(s32 prev_priority, bool is_running, Member *member) { + MESOSPHERE_ASSERT(IsValidPriority(prev_priority)); + + /* Remove the member from the queues. */ + const s32 new_priority = member->GetPriority(); + this->Remove(prev_priority, member); + + /* And enqueue. If the member is running, we want to keep it running. */ + if (is_running) { + this->PushFront(new_priority, member); + } else { + this->PushBack(new_priority, member); + } + } + + constexpr ALWAYS_INLINE void ChangeAffinityMask(s32 prev_core, const AffinityMaskType &prev_affinity, Member *member) { + /* Get the new information. */ + const s32 priority = member->GetPriority(); + const AffinityMaskType &new_affinity = member->GetAffinityMask(); + const s32 new_core = member->GetActiveCore(); + + /* Remove the member from all queues it was in before. */ + for (s32 core = 0; core < static_cast<s32>(NumCores); core++) { + if (prev_affinity.GetAffinity(core)) { + if (core == prev_core) { + this->scheduled_queue.Remove(priority, core, member); + } else { + this->suggested_queue.Remove(priority, core, member); + } + } + } + + /* And add the member to all queues it should be in now. */ + for (s32 core = 0; core < static_cast<s32>(NumCores); core++) { + if (new_affinity.GetAffinity(core)) { + if (core == new_core) { + this->scheduled_queue.PushBack(priority, core, member); + } else { + this->suggested_queue.PushBack(priority, core, member); + } + } + } + } + + constexpr ALWAYS_INLINE void ChangeCore(s32 prev_core, Member *member, bool to_front = false) { + /* Get the new information. */ + const s32 new_core = member->GetActiveCore(); + const s32 priority = member->GetPriority(); + + /* We don't need to do anything if the core is the same. */ + if (prev_core != new_core) { + /* Remove from the scheduled queue for the previous core.
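 The member was suggested, not scheduled, on every other core in its affinity mask, so a migration must also unsuggest it on the new core and re-suggest it on the previous one; the three steps below do exactly that.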
*/ + if (prev_core >= 0) { + this->scheduled_queue.Remove(priority, prev_core, member); + } + + /* Remove from the suggested queue and add to the scheduled queue for the new core. */ + if (new_core >= 0) { + this->suggested_queue.Remove(priority, new_core, member); + if (to_front) { + this->scheduled_queue.PushFront(priority, new_core, member); + } else { + this->scheduled_queue.PushBack(priority, new_core, member); + } + } + + /* Add to the suggested queue for the previous core. */ + if (prev_core >= 0) { + this->suggested_queue.PushBack(priority, prev_core, member); + } + } + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp new file mode 100644 index 000000000..1b173d86f --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ams::kern { + + class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KSynchronizationObject>, public KWorkerTask { + MESOSPHERE_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject); + public: + enum State { + State_Created = ams::svc::ProcessState_Created, + State_CreatedAttached = ams::svc::ProcessState_CreatedAttached, + State_Running = ams::svc::ProcessState_Running, + State_Crashed = ams::svc::ProcessState_Crashed, + State_RunningAttached = ams::svc::ProcessState_RunningAttached, + State_Terminating = ams::svc::ProcessState_Terminating, + State_Terminated = ams::svc::ProcessState_Terminated, + State_DebugBreak = ams::svc::ProcessState_DebugBreak, + }; + + using ThreadList = util::IntrusiveListMemberTraits<&KThread::process_list_node>::ListType; + private: + using SharedMemoryInfoList = util::IntrusiveListBaseTraits<KSharedMemoryInfo>::ListType; + using TLPTree = util::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType; + using TLPIterator = TLPTree::iterator; + private: + KProcessPageTable page_table{}; + std::atomic used_kernel_memory_size{}; + TLPTree fully_used_tlp_tree{}; + TLPTree partially_used_tlp_tree{}; + s32 ideal_core_id{}; + void *attached_object{}; + KResourceLimit *resource_limit{}; + KVirtualAddress system_resource_address{}; + size_t system_resource_num_pages{}; + size_t memory_release_hint{}; + State state{}; + KLightLock lock{}; + KLightLock list_lock{}; + KConditionVariable cond_var{}; + KAddressArbiter address_arbiter{}; + u64 entropy[4]{}; + bool is_signaled{}; + bool is_initialized{}; + bool is_application{}; + char name[13]{}; + std::atomic num_threads{}; + u16 peak_num_threads{}; + u32 flags{}; + KMemoryManager::Pool memory_pool{}; + s64 schedule_count{}; + KCapabilities capabilities{}; + ams::svc::ProgramId program_id{}; + u64 process_id{}; + s64 creation_time{}; + KProcessAddress code_address{}; + size_t code_size{}; + size_t
main_thread_stack_size{}; + size_t max_process_memory{}; + u32 version{}; + KHandleTable handle_table{}; + KProcessAddress plr_address{}; + KThread *exception_thread{}; + ThreadList thread_list{}; + SharedMemoryInfoList shared_memory_list{}; + bool is_suspended{}; + bool is_jit_debug{}; + ams::svc::DebugEvent jit_debug_event_type{}; + ams::svc::DebugException jit_debug_exception_type{}; + uintptr_t jit_debug_params[4]{}; + u64 jit_debug_thread_id{}; + KWaitObject wait_object{}; + KThread *running_threads[cpu::NumCores]{}; + u64 running_thread_idle_counts[cpu::NumCores]{}; + KThread *pinned_threads[cpu::NumCores]{}; + std::atomic num_created_threads{}; + std::atomic cpu_time{}; + std::atomic num_process_switches{}; + std::atomic num_thread_switches{}; + std::atomic num_fpu_switches{}; + std::atomic num_supervisor_calls{}; + std::atomic num_ipc_messages{}; + std::atomic num_ipc_replies{}; + std::atomic num_ipc_receives{}; + KDynamicPageManager dynamic_page_manager{}; + KMemoryBlockSlabManager memory_block_slab_manager{}; + KBlockInfoManager block_info_manager{}; + KPageTableManager page_table_manager{}; + private: + Result Initialize(const ams::svc::CreateProcessParameter ¶ms); + public: + constexpr KProcess() { /* ... */ } + virtual ~KProcess() { /* ... */ } + + Result Initialize(const ams::svc::CreateProcessParameter ¶ms, const KPageGroup &pg, const u32 *caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool); + + constexpr const char *GetName() const { return this->name; } + + constexpr u64 GetProcessId() const { return this->process_id; } + + constexpr u64 GetCoreMask() const { return this->capabilities.GetCoreMask(); } + constexpr u64 GetPriorityMask() const { return this->capabilities.GetPriorityMask(); } + + constexpr void SetIdealCoreId(s32 core_id) { this->ideal_core_id = core_id; } + + constexpr bool Is64Bit() const { return this->flags & ams::svc::CreateProcessFlag_Is64Bit; } + + constexpr KProcessAddress GetEntryPoint() const { return this->code_address; } + + constexpr bool IsSuspended() const { + return this->is_suspended; + } + + KThread *GetPreemptionStatePinnedThread(s32 core_id) const { + MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast(cpu::NumCores)); + return this->pinned_threads[core_id]; + } + + void CopySvcPermissionsTo(KThread::StackParameters &sp) { + this->capabilities.CopySvcPermissionsTo(sp); + } + + constexpr KResourceLimit *GetResourceLimit() const { return this->resource_limit; } + + bool ReserveResource(ams::svc::LimitableResource which, s64 value); + bool ReserveResource(ams::svc::LimitableResource which, s64 value, s64 timeout); + void ReleaseResource(ams::svc::LimitableResource which, s64 value); + void ReleaseResource(ams::svc::LimitableResource which, s64 value, s64 hint); + + constexpr KProcessPageTable &GetPageTable() { return this->page_table; } + constexpr const KProcessPageTable &GetPageTable() const { return this->page_table; } + + constexpr KHandleTable &GetHandleTable() { return this->handle_table; } + constexpr const KHandleTable &GetHandleTable() const { return this->handle_table; } + + Result CreateThreadLocalRegion(KProcessAddress *out); + void *GetThreadLocalRegionPointer(KProcessAddress addr); + + void AddCpuTime(s64 diff) { this->cpu_time += diff; } + void IncrementScheduledCount() { ++this->schedule_count; } + + void IncrementThreadCount(); + void DecrementThreadCount(); + + void RegisterThread(KThread *thread); + void UnregisterThread(KThread *thread); + + Result Run(s32 priority, size_t stack_size); + + void 
SetPreemptionState(); + + static void Switch(KProcess *cur_process, KProcess *next_process) { + /* Set the current process pointer. */ + SetCurrentProcess(next_process); + + /* Update the current page table. */ + if (next_process) { + next_process->GetPageTable().Activate(next_process->GetProcessId()); + } else { + Kernel::GetKernelPageTable().Activate(); + } + } + public: + /* Overridden parent functions. */ + virtual bool IsInitialized() const override { return this->is_initialized; } + + static void PostDestroy(uintptr_t arg) { /* ... */ } + + virtual void Finalize() override; + + virtual u64 GetId() const override { return this->GetProcessId(); } + + virtual bool IsSignaled() const override { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + return this->is_signaled; + } + + virtual void DoWorkerTask() override; + private: + void ChangeState(State new_state) { + if (this->state != new_state) { + this->state = new_state; + this->is_signaled = true; + this->NotifyAvailable(); + } + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_readable_event.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_readable_event.hpp new file mode 100644 index 000000000..0afc530d1 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_readable_event.hpp @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include + +namespace ams::kern { + + class KEvent; + + class KReadableEvent : public KSynchronizationObject { + MESOSPHERE_AUTOOBJECT_TRAITS(KReadableEvent, KSynchronizationObject); + private: + bool is_signaled; + KEvent *parent_event; + public: + constexpr explicit KReadableEvent() : KSynchronizationObject(), is_signaled(), parent_event() { MESOSPHERE_ASSERT_THIS(); } + virtual ~KReadableEvent() { MESOSPHERE_ASSERT_THIS(); } + + constexpr void Initialize(KEvent *parent) { + MESOSPHERE_ASSERT_THIS(); + this->is_signaled = false; + this->parent_event = parent; + } + + constexpr KEvent *GetParent() const { return this->parent_event; } + + virtual bool IsSignaled() const override; + virtual void Destroy() override; + + virtual Result Signal(); + virtual Result Clear(); + virtual Result Reset(); + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_resource_limit.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_resource_limit.hpp new file mode 100644 index 000000000..bfcfcb4fb --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_resource_limit.hpp @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include +#include +#include + +namespace ams::kern { + + class KResourceLimit final : public KAutoObjectWithSlabHeapAndContainer { + MESOSPHERE_AUTOOBJECT_TRAITS(KResourceLimit, KAutoObject); + private: + s64 limit_values[ams::svc::LimitableResource_Count]; + s64 current_values[ams::svc::LimitableResource_Count]; + s64 current_hints[ams::svc::LimitableResource_Count]; + mutable KLightLock lock; + s32 waiter_count; + KLightConditionVariable cond_var; + public: + constexpr ALWAYS_INLINE KResourceLimit() : limit_values(), current_values(), current_hints(), lock(), waiter_count(), cond_var() { /* ... */ } + virtual ~KResourceLimit() { /* ... */ } + + static ALWAYS_INLINE void PostDestroy(uintptr_t arg) { /* ... */ } + + void Initialize(); + virtual void Finalize() override; + + s64 GetLimitValue(ams::svc::LimitableResource which) const; + s64 GetCurrentValue(ams::svc::LimitableResource which) const; + s64 GetFreeValue(ams::svc::LimitableResource which) const; + + Result SetLimitValue(ams::svc::LimitableResource which, s64 value); + + bool Reserve(ams::svc::LimitableResource which, s64 value); + bool Reserve(ams::svc::LimitableResource which, s64 value, s64 timeout); + void Release(ams::svc::LimitableResource which, s64 value); + void Release(ams::svc::LimitableResource which, s64 value, s64 hint); + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp new file mode 100644 index 000000000..5fb57ad2f --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include +#include +#include + +namespace ams::kern { + + using KSchedulerPriorityQueue = KPriorityQueue; + static_assert(std::is_same::value); + static_assert(KSchedulerPriorityQueue::NumCores == cpu::NumCores); + static_assert(KSchedulerPriorityQueue::NumPriority == BITSIZEOF(u64)); + + class KScopedSchedulerLock; + class KScopedSchedulerLockAndSleep; + + class KScheduler { + NON_COPYABLE(KScheduler); + NON_MOVEABLE(KScheduler); + public: + using LockType = KAbstractSchedulerLock; + + static constexpr s32 HighestCoreMigrationAllowedPriority = 2; + static_assert(ams::svc::LowestThreadPriority >= HighestCoreMigrationAllowedPriority); + static_assert(ams::svc::HighestThreadPriority <= HighestCoreMigrationAllowedPriority); + + struct SchedulingState { + std::atomic needs_scheduling; + bool interrupt_task_thread_runnable; + bool should_count_idle; + u64 idle_count; + KThread *highest_priority_thread; + void *idle_thread_stack; + }; + private: + friend class KScopedSchedulerLock; + friend class KScopedSchedulerLockAndSleep; + static bool s_scheduler_update_needed; + static LockType s_scheduler_lock; + static KSchedulerPriorityQueue s_priority_queue; + private: + SchedulingState state; + bool is_active; + s32 core_id; + KThread *prev_thread; + s64 last_context_switch_time; + KThread *idle_thread; + public: + constexpr KScheduler() + : state(), is_active(false), core_id(0), prev_thread(nullptr), last_context_switch_time(0), idle_thread(nullptr) + { + this->state.needs_scheduling = true; + this->state.interrupt_task_thread_runnable = false; + this->state.should_count_idle = false; + this->state.idle_count = 0; + this->state.idle_thread_stack = nullptr; + this->state.highest_priority_thread = nullptr; + } + + NOINLINE void Initialize(KThread *idle_thread); + NOINLINE void Activate(); + + ALWAYS_INLINE void RequestScheduleOnInterrupt() { + SetSchedulerUpdateNeeded(); + + if (CanSchedule()) { + this->ScheduleOnInterrupt(); + } + } + private: + /* Static private API. */ + static ALWAYS_INLINE bool IsSchedulerUpdateNeeded() { return s_scheduler_update_needed; } + static ALWAYS_INLINE void SetSchedulerUpdateNeeded() { s_scheduler_update_needed = true; } + static ALWAYS_INLINE void ClearSchedulerUpdateNeeded() { s_scheduler_update_needed = false; } + static ALWAYS_INLINE KSchedulerPriorityQueue &GetPriorityQueue() { return s_priority_queue; } + + static NOINLINE u64 UpdateHighestPriorityThreadsImpl(); + public: + /* Static public API. 
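 Scheduling is gated on the current thread's disable-dispatch count: DisableScheduling() raises it, and EnableScheduling() lowers it, only performing the actual reschedule when the count returns to zero. A sketch of the intended pairing (illustrative; this is what the scheduler lock below does on unlock): + + KScheduler::DisableScheduling(); + const u64 cores = KScheduler::UpdateHighestPriorityThreads(); + KScheduler::EnableScheduling(cores); // reschedules this core and pokes the others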
*/ + static ALWAYS_INLINE bool CanSchedule() { return GetCurrentThread().GetDisableDispatchCount() == 0; } + static ALWAYS_INLINE bool IsSchedulerLockedByCurrentThread() { return s_scheduler_lock.IsLockedByCurrentThread(); } + + static NOINLINE void SetInterruptTaskThreadRunnable(); + + static ALWAYS_INLINE void DisableScheduling() { + MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() >= 0); + GetCurrentThread().DisableDispatch(); + } + + static NOINLINE void EnableScheduling(u64 cores_needing_scheduling) { + MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() >= 1); + + if (GetCurrentThread().GetDisableDispatchCount() > 1) { + GetCurrentThread().EnableDispatch(); + } else { + GetCurrentScheduler().RescheduleOtherCores(cores_needing_scheduling); + GetCurrentScheduler().RescheduleCurrentCore(); + } + } + + static ALWAYS_INLINE u64 UpdateHighestPriorityThreads() { + if (IsSchedulerUpdateNeeded()) { + return UpdateHighestPriorityThreadsImpl(); + } else { + return 0; + } + } + + static NOINLINE void OnThreadStateChanged(KThread *thread, KThread::ThreadState old_state); + static NOINLINE void OnThreadPriorityChanged(KThread *thread, s32 old_priority); + static NOINLINE void OnThreadAffinityMaskChanged(KThread *thread, const KAffinityMask &old_affinity, s32 old_core); + + /* TODO: Yield operations */ + static NOINLINE void RotateScheduledQueue(s32 priority, s32 core_id); + private: + /* Instanced private API. */ + void ScheduleImpl(); + void SwitchThread(KThread *next_thread); + + ALWAYS_INLINE void Schedule() { + MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1); + MESOSPHERE_ASSERT(this->core_id == GetCurrentCoreId()); + + this->ScheduleImpl(); + } + + ALWAYS_INLINE void ScheduleOnInterrupt() { + KScopedDisableDispatch dd; + this->Schedule(); + } + + void RescheduleOtherCores(u64 cores_needing_scheduling); + + ALWAYS_INLINE void RescheduleCurrentCore() { + MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1); + { + /* Disable interrupts, and then context switch. */ + KScopedInterruptDisable intr_disable; + ON_SCOPE_EXIT { GetCurrentThread().EnableDispatch(); }; + + if (this->state.needs_scheduling) { + Schedule(); + } + } + } + + NOINLINE u64 UpdateHighestPriorityThread(KThread *thread); + }; + + class KScopedSchedulerLock : KScopedLock { + public: + explicit ALWAYS_INLINE KScopedSchedulerLock() : KScopedLock(KScheduler::s_scheduler_lock) { /* ... */ } + ALWAYS_INLINE ~KScopedSchedulerLock() { /* ... */ } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp new file mode 100644 index 000000000..3be30f901 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include +#include +#include + +namespace ams::kern { + + class KThread; + + /* + TODO: C++20 + + template + concept KSchedulerLockable = !std::is_reference::value && requires { + { T::DisableScheduling() } -> std::same_as; + { T::EnableScheduling(std::declval()) } -> std::same_as; + { T::UpdateHighestPriorityThreads() } -> std::convertible_to; + }; + + */ + + template /* TODO C++20: requires KSchedulerLockable */ + class KAbstractSchedulerLock { + private: + KAlignedSpinLock spin_lock; + s32 lock_count; + KThread *owner_thread; + public: + constexpr ALWAYS_INLINE KAbstractSchedulerLock() : spin_lock(), lock_count(0), owner_thread(nullptr) { MESOSPHERE_ASSERT_THIS(); } + + ALWAYS_INLINE bool IsLockedByCurrentThread() const { + MESOSPHERE_ASSERT_THIS(); + + return this->owner_thread == GetCurrentThreadPointer(); + } + + ALWAYS_INLINE void Lock() { + MESOSPHERE_ASSERT_THIS(); + + if (this->IsLockedByCurrentThread()) { + /* If we already own the lock, we can just increment the count. */ + MESOSPHERE_ASSERT(this->lock_count > 0); + this->lock_count++; + } else { + /* Otherwise, we want to disable scheduling and acquire the spinlock. */ + SchedulerType::DisableScheduling(); + this->spin_lock.Lock(); + + /* For debug, ensure that our state is valid. */ + MESOSPHERE_ASSERT(this->lock_count == 0); + MESOSPHERE_ASSERT(this->owner_thread == nullptr); + + /* Increment count, take ownership. */ + this->lock_count = 1; + this->owner_thread = GetCurrentThreadPointer(); + } + } + + ALWAYS_INLINE void Unlock() { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + MESOSPHERE_ASSERT(this->lock_count > 0); + + /* Release an instance of the lock. */ + if ((--this->lock_count) == 0) { + /* We're no longer going to hold the lock. Take note of what cores need scheduling. */ + const u64 cores_needing_scheduling = SchedulerType::UpdateHighestPriorityThreads(); + + /* Note that we no longer hold the lock, and unlock the spinlock. */ + this->owner_thread = nullptr; + this->spin_lock.Unlock(); + + /* Enable scheduling, and perform a rescheduling operation. */ + SchedulerType::EnableScheduling(cores_needing_scheduling); + } + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_scoped_lock.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_scoped_lock.hpp new file mode 100644 index 000000000..fd6756b2c --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_scoped_lock.hpp @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include + +namespace ams::kern { + + /* + TODO: C++20 + + template + concept KLockable = !std::is_reference::value && requires (T &t) { + { t.Lock() } -> std::same_as; + { t.Unlock() } -> std::same_as; + }; + + */ + + template /* TODO C++20: requires KLockable */ + class KScopedLock { + NON_COPYABLE(KScopedLock); + NON_MOVEABLE(KScopedLock); + private: + T *lock_ptr; + public: + explicit ALWAYS_INLINE KScopedLock(T *l) : lock_ptr(l) { this->lock_ptr->Lock(); } + explicit ALWAYS_INLINE KScopedLock(T &l) : KScopedLock(std::addressof(l)) { /* ... */ } + ALWAYS_INLINE ~KScopedLock() { this->lock_ptr->Unlock(); } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_scoped_resource_reservation.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_scoped_resource_reservation.hpp new file mode 100644 index 000000000..f0979ec63 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_scoped_resource_reservation.hpp @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KScopedResourceReservation { + private: + KResourceLimit *limit; + s64 value; + ams::svc::LimitableResource resource; + bool succeeded; + public: + ALWAYS_INLINE KScopedResourceReservation(KResourceLimit *l, ams::svc::LimitableResource r, s64 v, s64 timeout) : limit(l), value(v), resource(r) { + if (this->limit && this->value) { + this->succeeded = this->limit->Reserve(this->resource, this->value, timeout); + } else { + this->succeeded = true; + } + } + + ALWAYS_INLINE KScopedResourceReservation(KResourceLimit *l, ams::svc::LimitableResource r, s64 v = 1) : limit(l), value(v), resource(r) { + if (this->limit && this->value) { + this->succeeded = this->limit->Reserve(this->resource, this->value); + } else { + this->succeeded = true; + } + } + + ALWAYS_INLINE KScopedResourceReservation(const KProcess *p, ams::svc::LimitableResource r, s64 v, s64 t) : KScopedResourceReservation(p->GetResourceLimit(), r, v, t) { /* ... */ } + ALWAYS_INLINE KScopedResourceReservation(const KProcess *p, ams::svc::LimitableResource r, s64 v = 1) : KScopedResourceReservation(p->GetResourceLimit(), r, v) { /* ... 
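 Typical usage (illustrative): + + KScopedResourceReservation reservation(process, ams::svc::LimitableResource_ThreadCountMax); + R_UNLESS(reservation.Succeeded(), svc::ResultLimitReached()); + // ... create and register the object ... + reservation.Commit(); + + If the scope unwinds without Commit(), the destructor releases the reserved count back to the limit.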
*/ } + + ALWAYS_INLINE ~KScopedResourceReservation() { + if (this->limit && this->value && this->succeeded) { + this->limit->Release(this->resource, this->value); + } + } + + ALWAYS_INLINE void Commit() { + this->limit = nullptr; + } + + ALWAYS_INLINE bool Succeeded() const { + return this->succeeded; + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_scoped_scheduler_lock_and_sleep.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_scoped_scheduler_lock_and_sleep.hpp new file mode 100644 index 000000000..f7f973b11 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_scoped_scheduler_lock_and_sleep.hpp @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include +#include + +namespace ams::kern { + + class KScopedSchedulerLockAndSleep { + private: + s64 timeout_tick; + KThread *thread; + KHardwareTimer *timer; + public: + explicit ALWAYS_INLINE KScopedSchedulerLockAndSleep(KHardwareTimer **out_timer, KThread *t, s64 timeout) : timeout_tick(timeout), thread(t) { + /* Lock the scheduler. */ + KScheduler::s_scheduler_lock.Lock(); + + /* Set our timer only if the absolute time is positive. */ + this->timer = (this->timeout_tick > 0) ? std::addressof(Kernel::GetHardwareTimer()) : nullptr; + + *out_timer = this->timer; + } + + ALWAYS_INLINE ~KScopedSchedulerLockAndSleep() { + /* Register the sleep. */ + if (this->timeout_tick > 0) { + this->timer->RegisterAbsoluteTask(this->thread, this->timeout_tick); + } + + /* Unlock the scheduler. */ + KScheduler::s_scheduler_lock.Unlock(); + } + + ALWAYS_INLINE void CancelSleep() { + this->timeout_tick = 0; + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_session.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_session.hpp new file mode 100644 index 000000000..ccd5bc063 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_session.hpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KSession final : public KAutoObjectWithSlabHeapAndContainer { + MESOSPHERE_AUTOOBJECT_TRAITS(KSession, KAutoObject); + public: + /* TODO: This is a placeholder definition. 
*/ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_session_request.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_session_request.hpp new file mode 100644 index 000000000..95a1d0a2c --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_session_request.hpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KSessionRequest final : public KSlabAllocated, public KAutoObject, public util::IntrusiveListBaseNode { + MESOSPHERE_AUTOOBJECT_TRAITS(KSessionRequest, KAutoObject); + public: + /* TODO: This is a placeholder definition. */ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory.hpp new file mode 100644 index 000000000..796d264e3 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory.hpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KSharedMemory final : public KAutoObjectWithSlabHeapAndContainer { + MESOSPHERE_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject); + public: + /* TODO: This is a placeholder definition. */ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory_info.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory_info.hpp new file mode 100644 index 000000000..519cf5aa1 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory_info.hpp @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include + +namespace ams::kern { + + class KSharedMemory; + + class KSharedMemoryInfo : public KSlabAllocated, public util::IntrusiveListBaseNode { + private: + KSharedMemory *shared_memory; + size_t reference_count; + public: + constexpr KSharedMemoryInfo() : shared_memory(), reference_count() { /* ... */ } + ~KSharedMemoryInfo() { /* ... */ } + + constexpr void Initialize(KSharedMemory *m) { + MESOSPHERE_ASSERT_THIS(); + this->shared_memory = m; + this->reference_count = 0; + } + + constexpr void Open() { + const size_t ref_count = ++this->reference_count; + MESOSPHERE_ASSERT(ref_count > 0); + } + + constexpr bool Close() { + MESOSPHERE_ASSERT(this->reference_count > 0); + return (--this->reference_count) == 0; + } + + constexpr KSharedMemory *GetSharedMemory() const { return this->shared_memory; } + constexpr size_t GetReferenceCount() const { return this->reference_count; } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp new file mode 100644 index 000000000..2d24df367 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include + +namespace ams::kern { + + namespace impl { + + class KSlabHeapImpl { + NON_COPYABLE(KSlabHeapImpl); + NON_MOVEABLE(KSlabHeapImpl); + public: + struct Node { + Node *next; + }; + private: + std::atomic head; + size_t obj_size; + public: + constexpr KSlabHeapImpl() : head(nullptr), obj_size(0) { MESOSPHERE_ASSERT_THIS(); } + + void Initialize(size_t size) { + MESOSPHERE_INIT_ABORT_UNLESS(this->head == nullptr); + this->obj_size = size; + } + + Node *GetHead() const { + return this->head; + } + + size_t GetObjectSize() const { + return this->obj_size; + } + + void *Allocate() { + MESOSPHERE_ASSERT_THIS(); + + Node *ret = this->head.load(); + + do { + if (AMS_UNLIKELY(ret == nullptr)) { + break; + } + } while (!this->head.compare_exchange_weak(ret, ret->next)); + + return ret; + } + + void Free(void *obj) { + MESOSPHERE_ASSERT_THIS(); + + Node *node = reinterpret_cast(obj); + + Node *cur_head = this->head.load(); + do { + node->next = cur_head; + } while (!this->head.compare_exchange_weak(cur_head, node)); + } + }; + + } + + class KSlabHeapBase { + NON_COPYABLE(KSlabHeapBase); + NON_MOVEABLE(KSlabHeapBase); + private: + using Impl = impl::KSlabHeapImpl; + private: + Impl impl; + uintptr_t peak; + uintptr_t start; + uintptr_t end; + private: + ALWAYS_INLINE Impl *GetImpl() { + return std::addressof(this->impl); + } + ALWAYS_INLINE const Impl *GetImpl() const { + return std::addressof(this->impl); + } + public: + constexpr KSlabHeapBase() : impl(), peak(0), start(0), end(0) { MESOSPHERE_ASSERT_THIS(); } + + ALWAYS_INLINE bool Contains(uintptr_t address) const { + return this->start <= address && address < this->end; + } + + void InitializeImpl(size_t obj_size, void *memory, size_t memory_size) { + MESOSPHERE_ASSERT_THIS(); + + /* Ensure we don't initialize a slab using null memory. */ + MESOSPHERE_ABORT_UNLESS(memory != nullptr); + + /* Initialize the base allocator. */ + this->GetImpl()->Initialize(obj_size); + + /* Set our tracking variables. */ + const size_t num_obj = (memory_size / obj_size); + this->start = reinterpret_cast(memory); + this->end = this->start + num_obj * obj_size; + this->peak = this->start; + + /* Free the objects. */ + u8 *cur = reinterpret_cast(this->end); + + for (size_t i = 0; i < num_obj; i++) { + cur -= obj_size; + this->GetImpl()->Free(cur); + } + } + + size_t GetSlabHeapSize() const { + return (this->end - this->start) / this->GetObjectSize(); + } + + size_t GetObjectSize() const { + return this->GetImpl()->GetObjectSize(); + } + + void *AllocateImpl() { + MESOSPHERE_ASSERT_THIS(); + + void *obj = this->GetImpl()->Allocate(); + + /* TODO: under some debug define, track the peak for statistics, as N does? */ + + return obj; + } + + void FreeImpl(void *obj) { + MESOSPHERE_ASSERT_THIS(); + + /* Don't allow freeing an object that wasn't allocated from this heap. */ + MESOSPHERE_ABORT_UNLESS(this->Contains(reinterpret_cast(obj))); + + this->GetImpl()->Free(obj); + } + + size_t GetObjectIndexImpl(const void *obj) const { + return (reinterpret_cast(obj) - this->start) / this->GetObjectSize(); + } + + size_t GetPeakIndex() const { + return this->GetObjectIndexImpl(reinterpret_cast(this->peak)); + } + + uintptr_t GetSlabHeapAddress() const { + return this->start; + } + }; + + template + class KSlabHeap : public KSlabHeapBase { + public: + constexpr KSlabHeap() : KSlabHeapBase() { /* ... 
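 Usage sketch (KExampleObject is a hypothetical type): + + KSlabHeap<KExampleObject> heap; + heap.Initialize(memory, memory_size); // carves the region into fixed-size slots on a lock-free free list + KExampleObject *obj = heap.Allocate(); // pops a slot and placement-constructs T() + heap.Free(obj); // returns the slot; note that the destructor is not invoked here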
*/ } + + void Initialize(void *memory, size_t memory_size) { + this->InitializeImpl(sizeof(T), memory, memory_size); + } + + T *Allocate() { + T *obj = reinterpret_cast<T *>(this->AllocateImpl()); + if (AMS_LIKELY(obj != nullptr)) { + new (obj) T(); + } + return obj; + } + + void Free(T *obj) { + this->FreeImpl(obj); + } + + size_t GetObjectIndex(const T *obj) const { + return this->GetObjectIndexImpl(obj); + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_spin_lock.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_spin_lock.hpp new file mode 100644 index 000000000..ce8fcb3b8 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_spin_lock.hpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#pragma once +#include +#include + +#if defined(ATMOSPHERE_ARCH_ARM64) + + #include + namespace ams::kern { + using ams::kern::arch::arm64::KAlignedSpinLock; + using ams::kern::arch::arm64::KNotAlignedSpinLock; + using ams::kern::arch::arm64::KSpinLock; + } + +#else + + #error "Unknown architecture for KInterruptManager" + +#endif + + +namespace ams::kern { + + using KScopedSpinLock = KScopedLock<KSpinLock>; + using KScopedAlignedSpinLock = KScopedLock<KAlignedSpinLock>; + using KScopedNotAlignedSpinLock = KScopedLock<KNotAlignedSpinLock>; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_synchronization.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_synchronization.hpp new file mode 100644 index 000000000..0ad13042f --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_synchronization.hpp @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KSynchronization { + private: + friend class KSynchronizationObject; + public: + constexpr KSynchronization() { /* ... */ } + + Result Wait(s32 *out_index, KSynchronizationObject **objects, const s32 num_objects, s64 timeout); + private: + void OnAvailable(KSynchronizationObject *object); + void OnAbort(KSynchronizationObject *object, Result abort_reason); + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_synchronization_object.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_synchronization_object.hpp new file mode 100644 index 000000000..9f7934cd9 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_synchronization_object.hpp @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_target_system.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_target_system.hpp
new file mode 100644
index 000000000..2134f830f
--- /dev/null
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_target_system.hpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+#pragma once
+#include
+#include
+
+namespace ams::kern {
+
+    class KTargetSystem {
+        private:
+            friend class KSystemControl;
+        private:
+            static inline bool s_is_debug_mode;
+            static inline bool s_enable_debug_logging;
+            static inline bool s_enable_user_exception_handlers;
+            static inline bool s_enable_debug_memory_fill;
+            static inline bool s_enable_user_pmu_access;
+            static inline bool s_enable_kernel_debugging;
+        private:
+            static ALWAYS_INLINE void SetIsDebugMode(bool en) { s_is_debug_mode = en; }
+            static ALWAYS_INLINE void EnableDebugLogging(bool en) { s_enable_debug_logging = en; }
+            static ALWAYS_INLINE void EnableUserExceptionHandlers(bool en) { s_enable_user_exception_handlers = en; }
+            static ALWAYS_INLINE void EnableDebugMemoryFill(bool en) { s_enable_debug_memory_fill = en; }
+            static ALWAYS_INLINE void EnableUserPmuAccess(bool en) { s_enable_user_pmu_access = en; }
+            static ALWAYS_INLINE void EnableKernelDebugging(bool en) { s_enable_kernel_debugging = en; }
+        public:
+            static ALWAYS_INLINE bool IsDebugMode() { return s_is_debug_mode; }
+            static ALWAYS_INLINE bool IsDebugLoggingEnabled() { return s_enable_debug_logging; }
+            static ALWAYS_INLINE bool IsUserExceptionHandlersEnabled() { return s_enable_user_exception_handlers; }
+            static ALWAYS_INLINE bool IsDebugMemoryFillEnabled() { return s_enable_debug_memory_fill; }
+            static ALWAYS_INLINE bool IsUserPmuAccessEnabled() { return s_enable_user_pmu_access; }
+            static ALWAYS_INLINE bool IsKernelDebuggingEnabled() { return s_enable_kernel_debugging; }
+    };
+
+}
\ No newline at end of file
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp
new file mode 100644
index 000000000..8e3ace54e
--- /dev/null
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp
@@ -0,0 +1,407 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */ +#pragma once +#include +#include +#include +#include +#include +#include +#include + +namespace ams::kern { + + class KThreadQueue; + class KProcess; + class KConditionVariable; + class KAddressArbiter; + + using KThreadFunction = void (*)(uintptr_t); + + class KThread final : public KAutoObjectWithSlabHeapAndContainer, public KTimerTask, public KWorkerTask { + MESOSPHERE_AUTOOBJECT_TRAITS(KThread, KSynchronizationObject); + private: + friend class KProcess; + friend class KConditionVariable; + friend class KAddressArbiter; + public: + static constexpr s32 MainThreadPriority = 1; + static constexpr s32 IdleThreadPriority = 64; + + enum ThreadType : u32 { + ThreadType_Main = 0, + ThreadType_Kernel = 1, + ThreadType_HighPriority = 2, + ThreadType_User = 3, + }; + + enum SuspendType : u32 { + SuspendType_Process = 0, + SuspendType_Thread = 1, + SuspendType_Debug = 2, + SuspendType_Unk3 = 3, + SuspendType_Init = 4, + + SuspendType_Count, + }; + + enum ThreadState : u16 { + ThreadState_Initialized = 0, + ThreadState_Waiting = 1, + ThreadState_Runnable = 2, + ThreadState_Terminated = 3, + + ThreadState_SuspendShift = 4, + ThreadState_Mask = (1 << ThreadState_SuspendShift) - 1, + + ThreadState_ProcessSuspended = (1 << (SuspendType_Process + ThreadState_SuspendShift)), + ThreadState_ThreadSuspended = (1 << (SuspendType_Thread + ThreadState_SuspendShift)), + ThreadState_DebugSuspended = (1 << (SuspendType_Debug + ThreadState_SuspendShift)), + ThreadState_Unk3Suspended = (1 << (SuspendType_Unk3 + ThreadState_SuspendShift)), + ThreadState_InitSuspended = (1 << (SuspendType_Init + ThreadState_SuspendShift)), + + ThreadState_SuspendFlagMask = ((1 << SuspendType_Count) - 1) << ThreadState_SuspendShift, + }; + + enum DpcFlag : u32 { + DpcFlag_Terminating = (1 << 0), + DpcFlag_Terminated = (1 << 1), + }; + + struct StackParameters { + alignas(0x10) u8 svc_permission[0x10]; + std::atomic dpc_flags; + u8 current_svc_id; + bool is_calling_svc; + bool is_in_exception_handler; + bool is_preemption_state_pinned; + s32 disable_count; + KThreadContext *context; + }; + static_assert(alignof(StackParameters) == 0x10); + + struct QueueEntry { + private: + KThread *prev; + KThread *next; + public: + constexpr QueueEntry() : prev(nullptr), next(nullptr) { /* ... */ } + + constexpr void Initialize() { + this->prev = nullptr; + this->next = nullptr; + } + + constexpr KThread *GetPrev() const { return this->prev; } + constexpr KThread *GetNext() const { return this->next; } + constexpr void SetPrev(KThread *t) { this->prev = t; } + constexpr void SetNext(KThread *t) { this->next = t; } + }; + private: + static constexpr size_t PriorityInheritanceCountMax = 10; + union SyncObjectBuffer { + KSynchronizationObject *sync_objects[ams::svc::MaxWaitSynchronizationHandleCount]; + ams::svc::Handle handles[ams::svc::MaxWaitSynchronizationHandleCount * (sizeof(KSynchronizationObject *) / sizeof(ams::svc::Handle))]; + + constexpr SyncObjectBuffer() : sync_objects() { /* ... 
*/ } + }; + static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles)); + private: + static inline std::atomic s_next_thread_id = 0; + private: + alignas(16) KThreadContext thread_context{}; + KAffinityMask affinity_mask{}; + u64 thread_id{}; + std::atomic cpu_time{}; + KSynchronizationObject *synced_object{}; + KLightLock *waiting_lock{}; + uintptr_t condvar_key{}; + uintptr_t entrypoint{}; + KProcessAddress arbiter_key{}; + KProcess *parent{}; + void *kernel_stack_top{}; + u32 *light_ipc_data{}; + KProcessAddress tls_address{}; + void *tls_heap_address{}; + KLightLock activity_pause_lock{}; + SyncObjectBuffer sync_object_buffer{}; + s64 schedule_count{}; + s64 last_scheduled_tick{}; + QueueEntry per_core_priority_queue_entry[cpu::NumCores]{}; + QueueEntry sleeping_queue_entry{}; + KThreadQueue *sleeping_queue{}; + util::IntrusiveListNode waiter_list_node{}; + util::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{}; + util::IntrusiveListNode process_list_node{}; + + using WaiterListTraits = util::IntrusiveListMemberTraitsDeferredAssert<&KThread::waiter_list_node>; + using WaiterList = WaiterListTraits::ListType; + + WaiterList waiter_list{}; + WaiterList paused_waiter_list{}; + KThread *lock_owner{}; + KConditionVariable *cond_var{}; + uintptr_t debug_params[3]{}; + u32 arbiter_value{}; + u32 suspend_request_flags{}; + u32 suspend_allowed_flags{}; + Result wait_result; + Result debug_exception_result; + s32 priority{}; + s32 core_id{}; + s32 base_priority{}; + s32 ideal_core_id{}; + s32 num_kernel_waiters{}; + KAffinityMask original_affinity_mask{}; + s32 original_ideal_core_id{}; + s32 num_core_migration_disables{}; + ThreadState thread_state{}; + std::atomic termination_requested{}; + bool ipc_cancelled{}; + bool wait_cancelled{}; + bool cancellable{}; + bool registered{}; + bool signaled{}; + bool initialized{}; + bool debug_attached{}; + s8 priority_inheritance_count{}; + bool resource_limit_release_hint{}; + public: + constexpr KThread() : wait_result(svc::ResultNoSynchronizationObject()), debug_exception_result(ResultSuccess()) { /* ... */ } + + virtual ~KThread() { /* ... */ } + /* TODO: Is a constexpr KThread() possible? 
*/ + + Result Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type); + + private: + static Result InitializeThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type); + public: + static Result InitializeKernelThread(KThread *thread, KThreadFunction func, uintptr_t arg, s32 prio, s32 core) { + return InitializeThread(thread, func, arg, Null, prio, core, nullptr, ThreadType_Kernel); + } + + static Result InitializeHighPriorityThread(KThread *thread, KThreadFunction func, uintptr_t arg) { + return InitializeThread(thread, func, arg, Null, 0, GetCurrentCoreId(), nullptr, ThreadType_HighPriority); + } + + static Result InitializeUserThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner) { + return InitializeThread(thread, func, arg, user_stack_top, prio, core, owner, ThreadType_User); + } + + static void ResumeThreadsSuspendedForInit(); + private: + StackParameters &GetStackParameters() { + return *(reinterpret_cast(this->kernel_stack_top) - 1); + } + + const StackParameters &GetStackParameters() const { + return *(reinterpret_cast(this->kernel_stack_top) - 1); + } + public: + ALWAYS_INLINE s32 GetDisableDispatchCount() const { + MESOSPHERE_ASSERT_THIS(); + return this->GetStackParameters().disable_count; + } + + ALWAYS_INLINE void DisableDispatch() { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() >= 0); + this->GetStackParameters().disable_count++; + } + + ALWAYS_INLINE void EnableDispatch() { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() > 0); + this->GetStackParameters().disable_count--; + } + + NOINLINE void DisableCoreMigration(); + NOINLINE void EnableCoreMigration(); + + ALWAYS_INLINE void SetInExceptionHandler() { + MESOSPHERE_ASSERT_THIS(); + this->GetStackParameters().is_in_exception_handler = true; + } + + ALWAYS_INLINE void ClearInExceptionHandler() { + MESOSPHERE_ASSERT_THIS(); + this->GetStackParameters().is_in_exception_handler = false; + } + + ALWAYS_INLINE bool IsInExceptionHandler() const { + MESOSPHERE_ASSERT_THIS(); + return this->GetStackParameters().is_in_exception_handler; + } + + ALWAYS_INLINE void RegisterDpc(DpcFlag flag) { + this->GetStackParameters().dpc_flags |= flag; + } + + ALWAYS_INLINE void ClearDpc(DpcFlag flag) { + this->GetStackParameters().dpc_flags &= ~flag; + } + + ALWAYS_INLINE u8 GetDpc() const { + return this->GetStackParameters().dpc_flags; + } + + ALWAYS_INLINE bool HasDpc() const { + MESOSPHERE_ASSERT_THIS(); + return this->GetDpc() != 0;; + } + private: + void Suspend(); + ALWAYS_INLINE void AddWaiterImpl(KThread *thread); + ALWAYS_INLINE void RemoveWaiterImpl(KThread *thread); + ALWAYS_INLINE static void RestorePriority(KThread *thread); + public: + constexpr u64 GetThreadId() const { return this->thread_id; } + + constexpr KThreadContext &GetContext() { return this->thread_context; } + constexpr const KThreadContext &GetContext() const { return this->thread_context; } + constexpr const KAffinityMask &GetAffinityMask() const { return this->affinity_mask; } + constexpr ThreadState GetState() const { return static_cast(this->thread_state & ThreadState_Mask); } + constexpr ThreadState GetRawState() const { return this->thread_state; } + NOINLINE void SetState(ThreadState state); + + NOINLINE 
KThreadContext *GetContextForSchedulerLoop(); + + constexpr uintptr_t GetConditionVariableKey() const { return this->condvar_key; } + + constexpr s32 GetIdealCore() const { return this->ideal_core_id; } + constexpr s32 GetActiveCore() const { return this->core_id; } + constexpr void SetActiveCore(s32 core) { this->core_id = core; } + constexpr s32 GetPriority() const { return this->priority; } + constexpr void SetPriority(s32 prio) { this->priority = prio; } + constexpr s32 GetBasePriority() const { return this->base_priority; } + + constexpr QueueEntry &GetPriorityQueueEntry(s32 core) { return this->per_core_priority_queue_entry[core]; } + constexpr const QueueEntry &GetPriorityQueueEntry(s32 core) const { return this->per_core_priority_queue_entry[core]; } + + constexpr QueueEntry &GetSleepingQueueEntry() { return this->sleeping_queue_entry; } + constexpr const QueueEntry &GetSleepingQueueEntry() const { return this->sleeping_queue_entry; } + constexpr void SetSleepingQueue(KThreadQueue *q) { this->sleeping_queue = q; } + + constexpr KConditionVariable *GetConditionVariable() const { return this->cond_var; } + + constexpr s32 GetNumKernelWaiters() const { return this->num_kernel_waiters; } + + void AddWaiter(KThread *thread); + void RemoveWaiter(KThread *thread); + KThread *RemoveWaiterByKey(s32 *out_num_waiters, KProcessAddress key); + + constexpr KProcessAddress GetAddressKey() const { return this->arbiter_key; } + constexpr void SetAddressKey(KProcessAddress key) { this->arbiter_key = key; } + constexpr void SetLockOwner(KThread *owner) { this->lock_owner = owner; } + constexpr KThread *GetLockOwner() const { return this->lock_owner; } + + constexpr void SetSyncedObject(KSynchronizationObject *obj, Result wait_res) { + this->synced_object = obj; + this->wait_result = wait_res; + } + + bool HasWaiters() const { return !this->waiter_list.empty(); } + + constexpr s64 GetLastScheduledTick() const { return this->last_scheduled_tick; } + constexpr void SetLastScheduledTick(s64 tick) { this->last_scheduled_tick = tick; } + + constexpr KProcess *GetOwnerProcess() const { return this->parent; } + constexpr bool IsUserThread() const { return this->parent != nullptr; } + + constexpr KProcessAddress GetThreadLocalRegionAddress() const { return this->tls_address; } + constexpr void *GetThreadLocalRegionHeapAddress() const { return this->tls_heap_address; } + + constexpr u16 GetUserPreemptionState() const { return *GetPointer(this->tls_address + 0x100); } + constexpr void SetKernelPreemptionState(u16 state) const { *GetPointer(this->tls_address + 0x100 + sizeof(u16)) = state; } + + void AddCpuTime(s64 amount) { + this->cpu_time += amount; + } + + constexpr u32 GetSuspendFlags() const { return this->suspend_allowed_flags & this->suspend_request_flags; } + constexpr bool IsSuspended() const { return this->GetSuspendFlags() != 0; } + void RequestSuspend(SuspendType type); + void Resume(SuspendType type); + void TrySuspend(); + void Continue(); + + void ContinueIfHasKernelWaiters() { + if (this->GetNumKernelWaiters() > 0) { + this->Continue(); + } + } + + void Wakeup(); + + Result SetPriorityToIdle(); + + Result Run(); + void Exit(); + + ALWAYS_INLINE void *GetStackTop() const { return reinterpret_cast(this->kernel_stack_top) - 1; } + ALWAYS_INLINE void *GetKernelStackTop() const { return this->kernel_stack_top; } + + /* TODO: This is kind of a placeholder definition. 
*/ + + ALWAYS_INLINE bool IsTerminationRequested() const { + return this->termination_requested || this->GetRawState() == ThreadState_Terminated; + } + + public: + /* Overridden parent functions. */ + virtual u64 GetId() const override { return this->GetThreadId(); } + + virtual bool IsInitialized() const override { return this->initialized; } + virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast(this->parent) | (this->resource_limit_release_hint ? 1 : 0); } + + static void PostDestroy(uintptr_t arg); + + virtual void Finalize() override; + virtual bool IsSignaled() const override; + virtual void OnTimer() override; + virtual void DoWorkerTask() override; + public: + static constexpr bool IsWaiterListValid() { + return WaiterListTraits::IsValid(); + } + }; + static_assert(alignof(KThread) == 0x10); + static_assert(KThread::IsWaiterListValid()); + + class KScopedDisableDispatch { + public: + explicit ALWAYS_INLINE KScopedDisableDispatch() { + GetCurrentThread().DisableDispatch(); + } + + ALWAYS_INLINE ~KScopedDisableDispatch() { + GetCurrentThread().EnableDispatch(); + } + }; + + class KScopedEnableDispatch { + public: + explicit ALWAYS_INLINE KScopedEnableDispatch() { + GetCurrentThread().EnableDispatch(); + } + + ALWAYS_INLINE ~KScopedEnableDispatch() { + GetCurrentThread().DisableDispatch(); + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread_context.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread_context.hpp new file mode 100644 index 000000000..25df4ae70 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread_context.hpp @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once + +#ifdef ATMOSPHERE_ARCH_ARM64 + #include + + namespace ams::kern { + using ams::kern::arch::arm64::KThreadContext; + } +#else + #error "Unknown architecture for KThreadContext" +#endif diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread_local_page.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread_local_page.hpp new file mode 100644 index 000000000..8670bdb64 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread_local_page.hpp @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */
+#pragma once
+#include
+#include
+#include
+#include
+
+namespace ams::kern {
+
+    class KThread;
+    class KProcess;
+
+    class KThreadLocalPage : public util::IntrusiveRedBlackTreeBaseNode<KThreadLocalPage>, public KSlabAllocated<KThreadLocalPage> {
+        public:
+            static constexpr size_t RegionsPerPage = PageSize / ams::svc::ThreadLocalRegionSize;
+            static_assert(RegionsPerPage > 0);
+        private:
+            KProcessAddress virt_addr;
+            KProcess *owner;
+            bool is_region_free[RegionsPerPage];
+        public:
+            constexpr explicit KThreadLocalPage(KProcessAddress addr) : virt_addr(addr), owner(nullptr), is_region_free() {
+                for (size_t i = 0; i < RegionsPerPage; i++) {
+                    this->is_region_free[i] = true;
+                }
+            }
+
+            constexpr explicit KThreadLocalPage() : KThreadLocalPage(Null<KProcessAddress>) { /* ... */ }
+
+            constexpr ALWAYS_INLINE KProcessAddress GetAddress() const { return this->virt_addr; }
+
+            static constexpr ALWAYS_INLINE int Compare(const KThreadLocalPage &lhs, const KThreadLocalPage &rhs) {
+                const KProcessAddress lval = lhs.GetAddress();
+                const KProcessAddress rval = rhs.GetAddress();
+
+                if (lval < rval) {
+                    return -1;
+                } else if (lval == rval) {
+                    return 0;
+                } else {
+                    return 1;
+                }
+            }
+        private:
+            constexpr ALWAYS_INLINE KProcessAddress GetRegionAddress(size_t i) {
+                return this->GetAddress() + i * ams::svc::ThreadLocalRegionSize;
+            }
+
+            constexpr ALWAYS_INLINE bool Contains(KProcessAddress addr) {
+                return this->GetAddress() <= addr && addr < this->GetAddress() + PageSize;
+            }
+
+            constexpr ALWAYS_INLINE size_t GetRegionIndex(KProcessAddress addr) {
+                MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), ams::svc::ThreadLocalRegionSize));
+                MESOSPHERE_ASSERT(this->Contains(addr));
+                return (addr - this->GetAddress()) / ams::svc::ThreadLocalRegionSize;
+            }
+        public:
+            Result Initialize(KProcess *process);
+            Result Finalize();
+
+            KProcessAddress Reserve();
+            void Release(KProcessAddress addr);
+
+            void *GetPointer() const;
+
+            bool IsAllUsed() const {
+                for (size_t i = 0; i < RegionsPerPage; i++) {
+                    if (this->is_region_free[i]) {
+                        return false;
+                    }
+                }
+                return true;
+            }
+
+            bool IsAllFree() const {
+                for (size_t i = 0; i < RegionsPerPage; i++) {
+                    if (!this->is_region_free[i]) {
+                        return false;
+                    }
+                }
+                return true;
+            }
+
+            bool IsAnyUsed() const {
+                return !this->IsAllFree();
+            }
+
+            bool IsAnyFree() const {
+                return !this->IsAllUsed();
+            }
+    };
+
+}
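
(For illustration: Reserve() and Release() are only declared above; a plausible first-fit sketch against the is_region_free bookkeeping follows. The actual definitions live in the corresponding .cpp and may differ.)

```cpp
// Sketch: first-fit reservation of a thread-local region within this page.
KProcessAddress KThreadLocalPage::Reserve() {
    for (size_t i = 0; i < RegionsPerPage; i++) {
        if (this->is_region_free[i]) {
            this->is_region_free[i] = false;
            return this->GetRegionAddress(i);
        }
    }
    return Null<KProcessAddress>;   /* No region free in this page. */
}

// Sketch: return a region to the free pool.
void KThreadLocalPage::Release(KProcessAddress addr) {
    this->is_region_free[this->GetRegionIndex(addr)] = true;
}
```
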
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread_queue.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread_queue.hpp
new file mode 100644
index 000000000..e56042750
--- /dev/null
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread_queue.hpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+#pragma once
+#include
+#include
+
+namespace ams::kern {
+
+    class KThreadQueue {
+        private:
+            using Entry = KThread::QueueEntry;
+        private:
+            Entry root;
+        public:
+            constexpr ALWAYS_INLINE KThreadQueue() : root() { /* ... */ }
+
+            constexpr ALWAYS_INLINE bool IsEmpty() const { return this->root.GetNext() == nullptr; }
+
+            constexpr ALWAYS_INLINE KThread *GetFront() const { return this->root.GetNext(); }
+            constexpr ALWAYS_INLINE KThread *GetNext(KThread *t) const { return t->GetSleepingQueueEntry().GetNext(); }
+        private:
+            constexpr ALWAYS_INLINE KThread *GetBack() const { return this->root.GetPrev(); }
+
+            constexpr ALWAYS_INLINE void Enqueue(KThread *add) {
+                /* Get the entry associated with the added thread. */
+                Entry &add_entry = add->GetSleepingQueueEntry();
+
+                /* Get the entry associated with the end of the queue. */
+                KThread *tail = this->GetBack();
+                Entry &tail_entry = (tail != nullptr) ? tail->GetSleepingQueueEntry() : this->root;
+
+                /* Link the entries. */
+                add_entry.SetPrev(tail);
+                add_entry.SetNext(nullptr);
+                tail_entry.SetNext(add);
+                this->root.SetPrev(add);
+            }
+
+            constexpr ALWAYS_INLINE void Remove(KThread *remove) {
+                /* Get the entry associated with the thread. */
+                Entry &remove_entry = remove->GetSleepingQueueEntry();
+
+                /* Get the entries associated with next and prev. */
+                KThread *prev = remove_entry.GetPrev();
+                KThread *next = remove_entry.GetNext();
+                Entry &prev_entry = (prev != nullptr) ? prev->GetSleepingQueueEntry() : this->root;
+                Entry &next_entry = (next != nullptr) ? next->GetSleepingQueueEntry() : this->root;
+
+                /* Unlink. */
+                prev_entry.SetNext(next);
+                next_entry.SetPrev(prev);
+            }
+        public:
+            constexpr ALWAYS_INLINE void Dequeue() {
+                /* Get the front of the queue. */
+                KThread *head = this->GetFront();
+                if (head == nullptr) {
+                    return;
+                }
+
+                MESOSPHERE_ASSERT(head->GetState() == KThread::ThreadState_Waiting);
+
+                /* Get the entry for the next head. */
+                KThread *next = GetNext(head);
+                Entry &next_entry = (next != nullptr) ? next->GetSleepingQueueEntry() : this->root;
+
+                /* Link the entries. */
+                this->root.SetNext(next);
+                next_entry.SetPrev(nullptr);
+
+                /* Clear the head's queue. */
+                head->SetSleepingQueue(nullptr);
+            }
+
+            bool SleepThread(KThread *t) {
+                /* Set the thread's queue and mark it as waiting. */
+                t->SetSleepingQueue(this);
+                t->SetState(KThread::ThreadState_Waiting);
+
+                /* Add the thread to the queue. */
+                this->Enqueue(t);
+
+                /* If the thread needs terminating, undo our work. */
+                if (t->IsTerminationRequested()) {
+                    this->WakeupThread(t);
+                    return false;
+                }
+
+                return true;
+            }
+
+            void WakeupThread(KThread *t) {
+                MESOSPHERE_ASSERT(t->GetState() == KThread::ThreadState_Waiting);
+
+                /* Remove the thread from the queue. */
+                this->Remove(t);
+
+                /* Mark the thread as no longer sleeping. */
+                t->SetState(KThread::ThreadState_Runnable);
+                t->SetSleepingQueue(nullptr);
+            }
+
+            KThread *WakeupFrontThread() {
+                KThread *front = this->GetFront();
+                if (front != nullptr) {
+                    MESOSPHERE_ASSERT(front->GetState() == KThread::ThreadState_Waiting);
+
+                    /* Remove the thread from the queue. */
+                    this->Dequeue();
+
+                    /* Mark the thread as no longer sleeping. */
+                    front->SetState(KThread::ThreadState_Runnable);
+                    front->SetSleepingQueue(nullptr);
+                }
+                return front;
+            }
+    };
+
+}
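
(For illustration: SleepThread() parks a thread and backs out if termination was requested, while WakeupFrontThread() gives FIFO wakeup; in the kernel both run with scheduling locked. ToyWaitPoint is a hypothetical wrapper, not a mesosphere type.)

```cpp
// Illustrative wait/signal primitive built on KThreadQueue.
class ToyWaitPoint {
    private:
        KThreadQueue queue;
    public:
        bool Wait(KThread *cur) {
            /* Returns false (thread not parked) if cur is terminating. */
            return this->queue.SleepThread(cur);
        }

        void SignalOne() {
            this->queue.WakeupFrontThread();   /* Wakes the longest waiter, if any. */
        }

        void SignalAll() {
            while (this->queue.WakeupFrontThread() != nullptr) {
                /* ... */
            }
        }
};
```
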
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_timer_task.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_timer_task.hpp
new file mode 100644
index 000000000..dba2e1c16
--- /dev/null
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_timer_task.hpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+#pragma once
+#include
+
+namespace ams::kern {
+
+    class KTimerTask : public util::IntrusiveRedBlackTreeBaseNode<KTimerTask> {
+        private:
+            s64 time;
+        public:
+            static constexpr ALWAYS_INLINE int Compare(const KTimerTask &lhs, const KTimerTask &rhs) {
+                if (lhs.GetTime() < rhs.GetTime()) {
+                    return -1;
+                } else {
+                    return 1;
+                }
+            }
+        public:
+            constexpr ALWAYS_INLINE KTimerTask() : time(0) { /* ... */ }
+
+            constexpr ALWAYS_INLINE void SetTime(s64 t) {
+                this->time = t;
+            }
+
+            constexpr ALWAYS_INLINE s64 GetTime() const {
+                return this->time;
+            }
+
+            virtual void OnTimer() = 0;
+
+    };
+
+}
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_transfer_memory.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_transfer_memory.hpp
new file mode 100644
index 000000000..9dd0d98bc
--- /dev/null
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_transfer_memory.hpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+#pragma once
+#include
+#include
+#include
+
+namespace ams::kern {
+
+    class KTransferMemory final : public KAutoObjectWithSlabHeapAndContainer<KTransferMemory, KAutoObject> {
+        MESOSPHERE_AUTOOBJECT_TRAITS(KTransferMemory, KAutoObject);
+        public:
+            /* TODO: This is a placeholder definition. */
+    };
+
+}
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp
index 97af400ca..d078ff484 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp
@@ -14,7 +14,7 @@
  * along with this program. If not, see .
*/ #pragma once -#include +#include namespace ams::kern { @@ -50,6 +50,10 @@ namespace ams::kern { return this->address - rhs; } + constexpr ALWAYS_INLINE ptrdiff_t operator-(KTypedAddress rhs) const { + return this->address - rhs.address; + } + template constexpr ALWAYS_INLINE KTypedAddress operator+=(I rhs) { static_assert(std::is_integral::value); @@ -81,6 +85,11 @@ namespace ams::kern { return this->address >> shift; } + template + constexpr ALWAYS_INLINE size_t operator/(U size) const { return this->address / size; } + + /* constexpr ALWAYS_INLINE uintptr_t operator%(U align) const { return this->address % align; } */ + /* Comparison operators. */ constexpr ALWAYS_INLINE bool operator==(KTypedAddress rhs) const { return this->address == rhs.address; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_wait_object.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_wait_object.hpp new file mode 100644 index 000000000..ac287c126 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_wait_object.hpp @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KWaitObject : public KTimerTask { + private: + using Entry = KThread::QueueEntry; + private: + Entry root; + bool uses_timer; + public: + constexpr KWaitObject() : root(), uses_timer() { /* ... */ } + + virtual void OnTimer() override; + + /* TODO: Member functions */ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_worker_task.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_worker_task.hpp new file mode 100644 index 000000000..7620adb6a --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_worker_task.hpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::kern { + + class KWorkerTask { + private: + KWorkerTask *next_task; + public: + constexpr ALWAYS_INLINE KWorkerTask() : next_task(nullptr) { /* ... 
*/ } + + constexpr ALWAYS_INLINE KWorkerTask *GetNextTask() const { return this->next_task; } + constexpr ALWAYS_INLINE void SetNextTask(KWorkerTask *task) { this->next_task = task; } + + virtual void DoWorkerTask() = 0; + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_worker_task_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_worker_task_manager.hpp new file mode 100644 index 000000000..6e26d6af9 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_worker_task_manager.hpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KWorkerTaskManager { + public: + static constexpr s32 ExitWorkerPriority = 11; + + enum WorkerType { + WorkerType_Exit, + + WorkerType_Count, + }; + private: + KWorkerTask *head_task; + KWorkerTask *tail_task; + KThread *thread; + WorkerType type; + bool active; + private: + static void ThreadFunction(uintptr_t arg); + void ThreadFunctionImpl(); + + KWorkerTask *GetTask(); + void AddTask(KWorkerTask *task); + public: + constexpr KWorkerTaskManager() : head_task(), tail_task(), thread(), type(WorkerType_Count), active() { /* ... */ } + + NOINLINE void Initialize(WorkerType wt, s32 priority); + static void AddTask(WorkerType type, KWorkerTask *task); + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp b/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp new file mode 100644 index 000000000..5fba52000 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include +#include +#include +#include +#include +#include + +namespace ams::kern { + + class KThread; + class KHardwareTimer; + class KResourceLimit; + class KInterruptManager; + class KInterruptTaskManager; + class KScheduler; + class KMemoryManager; + class KPageTableManager; + class KMemoryBlockSlabManager; + class KBlockInfoManager; + class KSynchronization; + + + +#if defined(ATMOSPHERE_ARCH_ARM64) + + namespace arch::arm64 { + class KSupervisorPageTable; + } + using ams::kern::arch::arm64::KSupervisorPageTable; + +#else + + #error "Unknown architecture for KSupervisorPageTable forward declare" + +#endif + + class Kernel { + public: + enum class State : u8 { + Invalid = 0, + Initializing = 1, + Initialized = 2, + }; + + static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000; + static constexpr size_t SystemMemoryBlockSlabHeapSize = 10000; + static constexpr size_t BlockInfoSlabHeapSize = 4000; + private: + static State s_state; + static KResourceLimit s_system_resource_limit; + static KMemoryManager s_memory_manager; + static KPageTableManager s_page_table_manager; + static KMemoryBlockSlabManager s_app_memory_block_manager; + static KMemoryBlockSlabManager s_sys_memory_block_manager; + static KBlockInfoManager s_block_info_manager; + static KSupervisorPageTable s_supervisor_page_table; + static KSynchronization s_synchronization; + static KWorkerTaskManager s_worker_task_managers[KWorkerTaskManager::WorkerType_Count]; + private: + static ALWAYS_INLINE KCoreLocalContext &GetCoreLocalContext() { + return reinterpret_cast(cpu::GetCoreLocalRegionAddress())->current.context; + } + static ALWAYS_INLINE KCoreLocalContext &GetCoreLocalContext(s32 core_id) { + return reinterpret_cast(cpu::GetCoreLocalRegionAddress())->absolute[core_id].context; + } + public: + static NOINLINE void InitializeCoreLocalRegion(s32 core_id); + static NOINLINE void InitializeMainAndIdleThreads(s32 core_id); + static NOINLINE void InitializeResourceManagers(KVirtualAddress address, size_t size); + static NOINLINE void PrintLayout(); + + static ALWAYS_INLINE State GetState() { return s_state; } + static ALWAYS_INLINE void SetState(State state) { s_state = state; } + + static KThread &GetMainThread(s32 core_id); + static KThread &GetIdleThread(s32 core_id); + + static ALWAYS_INLINE KScheduler &GetScheduler() { + return GetCoreLocalContext().scheduler; + } + + static ALWAYS_INLINE KScheduler &GetScheduler(s32 core_id) { + return GetCoreLocalContext(core_id).scheduler; + } + + static ALWAYS_INLINE KInterruptTaskManager &GetInterruptTaskManager() { + return GetCoreLocalContext().interrupt_task_manager; + } + + static ALWAYS_INLINE KInterruptManager &GetInterruptManager() { + return GetCoreLocalContext().interrupt_manager; + } + + static ALWAYS_INLINE KHardwareTimer &GetHardwareTimer() { + return GetCoreLocalContext().hardware_timer; + } + + static ALWAYS_INLINE KResourceLimit &GetSystemResourceLimit() { + return s_system_resource_limit; + } + + static ALWAYS_INLINE KMemoryManager &GetMemoryManager() { + return s_memory_manager; + } + + static ALWAYS_INLINE KMemoryBlockSlabManager &GetApplicationMemoryBlockManager() { + return s_app_memory_block_manager; + } + + static ALWAYS_INLINE KMemoryBlockSlabManager &GetSystemMemoryBlockManager() { + return s_sys_memory_block_manager; + } + + static ALWAYS_INLINE KBlockInfoManager &GetBlockInfoManager() { + return s_block_info_manager; + } + + static ALWAYS_INLINE KPageTableManager &GetPageTableManager() { + return s_page_table_manager; + 
}
+
+            static ALWAYS_INLINE KSupervisorPageTable &GetKernelPageTable() {
+                return s_supervisor_page_table;
+            }
+
+            static ALWAYS_INLINE KSynchronization &GetSynchronization() {
+                return s_synchronization;
+            }
+
+            static ALWAYS_INLINE KWorkerTaskManager &GetWorkerTaskManager(KWorkerTaskManager::WorkerType type) {
+                MESOSPHERE_ASSERT(type < KWorkerTaskManager::WorkerType_Count);
+                return s_worker_task_managers[type];
+            }
+    };
+
+}
diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_k_system_control.hpp b/libraries/libmesosphere/include/mesosphere/kern_main.hpp
similarity index 80%
rename from libraries/libmesosphere/include/mesosphere/kern_select_k_system_control.hpp
rename to libraries/libmesosphere/include/mesosphere/kern_main.hpp
index 066a317cc..f1935bdde 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_select_k_system_control.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_main.hpp
@@ -14,9 +14,10 @@
  * along with this program. If not, see .
  */
 #pragma once
+#include
 
-#ifdef ATMOSPHERE_BOARD_NINTENDO_SWITCH
-    #include "board/nintendo/switch/kern_k_system_control.hpp"
-#else
-    #error "Unknown board for KSystemControl"
-#endif
+namespace ams::kern {
+
+    NORETURN void HorizonKernelMain(s32 core_id);
+
+}
diff --git a/libraries/libmesosphere/include/mesosphere/kern_panic.hpp b/libraries/libmesosphere/include/mesosphere/kern_panic.hpp
index 16aa4a190..3df06b178 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_panic.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_panic.hpp
@@ -14,40 +14,83 @@
  * along with this program. If not, see .
  */
 #pragma once
-#include
+#include
+#include
 
 namespace ams::kern {
 
-    NORETURN void Panic(const char *file, int line, const char *format, ...);
-    NORETURN void Panic();
+    template<typename... ArgTypes>
+    ALWAYS_INLINE void UnusedImpl(ArgTypes... args) {
+        (static_cast<void>(args), ...);
+    }
+
+    NORETURN NOINLINE void Panic(const char *file, int line, const char *format, ...) __attribute__((format(printf, 3, 4)));
+    NORETURN NOINLINE void Panic();
 
 }
 
+#define MESOSPHERE_UNUSED(...) ::ams::kern::UnusedImpl(__VA_ARGS__)
+
 #ifdef MESOSPHERE_ENABLE_DEBUG_PRINT
-#define MESOSPHERE_PANIC(...) ams::kern::Panic(__FILE__, __LINE__, __VA_ARGS__)
+#define MESOSPHERE_PANIC(...) do { ::ams::kern::Panic(__FILE__, __LINE__, __VA_ARGS__); } while(0)
 #else
-#define MESOSPHERE_PANIC(...) ams::kern::Panic()
+#define MESOSPHERE_PANIC(...) do { MESOSPHERE_UNUSED(__VA_ARGS__); ::ams::kern::Panic(); } while(0)
 #endif
 
 #ifdef MESOSPHERE_ENABLE_ASSERTIONS
-#define MESOSPHERE_ASSERT_IMPL(expr, ...) \
-    ({ \
-        if (AMS_UNLIKELY(!(expr))) { \
-            MESOSPHERE_PANIC(__VA_ARGS__); \
-        } \
+#define MESOSPHERE_ASSERT_IMPL(expr, ...) \
+    ({ \
+        const bool __tmp_meso_assert_val = (expr); \
+        if (AMS_UNLIKELY(!__tmp_meso_assert_val)) { \
+            MESOSPHERE_PANIC(__VA_ARGS__); \
+        } \
     })
 #else
-#define MESOSPHERE_ASSERT_IMPL(expr, ...) do { } while (0)
+#define MESOSPHERE_ASSERT_IMPL(expr, ...) do { static_cast<void>(expr); } while (0)
 #endif
 
-#define MESOSPHERE_ASSERT(expr) MESOSPHERE_ASSERT_IMPL(expr, "Assertion failed: %s", #expr)
-#define MESOSPHERE_R_ASSERT(expr) MESOSPHERE_ASSERT_IMPL(R_SUCCEEDED(expr), "Result assertion failed: %s", #expr)
+#define MESOSPHERE_ASSERT(expr) MESOSPHERE_ASSERT_IMPL(expr, "Assertion failed: %s\n", #expr)
+#define MESOSPHERE_R_ASSERT(expr) MESOSPHERE_ASSERT_IMPL(R_SUCCEEDED(expr), "Result assertion failed: %s\n", #expr)
+#define MESOSPHERE_UNREACHABLE_DEFAULT_CASE() default: MESOSPHERE_PANIC("Unreachable default case entered")
 
-#define MESOSPHERE_ABORT() MESOSPHERE_PANIC("Abort()");
+#ifdef MESOSPHERE_ENABLE_THIS_ASSERT
+#define MESOSPHERE_ASSERT_THIS() MESOSPHERE_ASSERT(this != nullptr)
+#else
+#define MESOSPHERE_ASSERT_THIS()
+#endif
+
+#ifdef MESOSPHERE_BUILD_FOR_AUDITING
+#define MESOSPHERE_AUDIT(expr) MESOSPHERE_ASSERT(expr)
+#else
+#define MESOSPHERE_AUDIT(expr) do { static_cast<void>(expr); } while (0)
+#endif
+
+#define MESOSPHERE_TODO(arg) ({ constexpr const char *__mesosphere_todo = arg; static_cast<void>(__mesosphere_todo); MESOSPHERE_PANIC("TODO (%s): %s\n", __PRETTY_FUNCTION__, __mesosphere_todo); })
+#define MESOSPHERE_UNIMPLEMENTED() MESOSPHERE_PANIC("%s: Unimplemented\n", __PRETTY_FUNCTION__)
+
+#define MESOSPHERE_ABORT() MESOSPHERE_PANIC("Abort()\n")
+#define MESOSPHERE_INIT_ABORT() do { /* ... */ } while (true)
 
 #define MESOSPHERE_ABORT_UNLESS(expr) \
     ({ \
-        if (AMS_UNLIKELY(!(expr))) { \
+        const bool _tmp_meso_assert_val = (expr); \
+        if (AMS_UNLIKELY(!_tmp_meso_assert_val)) { \
             MESOSPHERE_PANIC("Abort(): %s", #expr); \
         } \
     })
+
+#define MESOSPHERE_INIT_ABORT_UNLESS(expr) \
+    ({ \
+        const bool __tmp_meso_assert_val = (expr); \
+        if (AMS_UNLIKELY(!__tmp_meso_assert_val)) { \
+            MESOSPHERE_INIT_ABORT(); \
+        } \
+    })
+
+#define MESOSPHERE_R_ABORT_UNLESS(expr) \
+    ({ \
+        const ::ams::Result _tmp_meso_r_abort_res = static_cast<::ams::Result>((expr)); \
+        if (AMS_UNLIKELY((R_FAILED(_tmp_meso_r_abort_res)))) { \
+            MESOSPHERE_PANIC("Result Abort(): %s 2%03d-%04d\n", #expr, _tmp_meso_r_abort_res.GetModule(), _tmp_meso_r_abort_res.GetDescription()); \
+        } \
+    })
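
(For illustration: how the assertion/abort family above is used. DoSetupStep() is a hypothetical Result-returning helper, not a mesosphere API.)

```cpp
Result DoSetupStep();   /* Hypothetical helper for the example. */

void ExampleChecks(void *ptr, size_t size) {
    MESOSPHERE_ASSERT(size > 0);                /* Reduces to a void cast when assertions are disabled. */
    MESOSPHERE_ABORT_UNLESS(ptr != nullptr);    /* Checked in all builds. */
    MESOSPHERE_R_ABORT_UNLESS(DoSetupStep());   /* Panics with the result's module/description on failure. */
}
```
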
diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_cpu.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_cpu.hpp
index 93bc2434e..276ed1439 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_select_cpu.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_select_cpu.hpp
@@ -14,13 +14,14 @@
  * along with this program. If not, see .
  */
 #pragma once
+#include
 
 #ifdef ATMOSPHERE_ARCH_ARM64
-    #include "arch/arm64/kern_cpu.hpp"
+    #include
 
     namespace ams::kern::cpu {
 
-        using namespace ams::kern::arm64::cpu;
+        using namespace ams::kern::arch::arm64::cpu;
 
     }
 
diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_debug.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_debug.hpp
new file mode 100644
index 000000000..607c43db1
--- /dev/null
+++ b/libraries/libmesosphere/include/mesosphere/kern_select_debug.hpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +#if defined(ATMOSPHERE_ARCH_ARM64) + + #include + namespace ams::kern { + using ams::kern::arch::arm64::KDebug; + } + +#else + + #error "Unknown architecture for KDebug" + +#endif diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_device_page_table.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_device_page_table.hpp new file mode 100644 index 000000000..51e4db302 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_select_device_page_table.hpp @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +#ifdef ATMOSPHERE_BOARD_NINTENDO_NX + #include + + namespace ams::kern { + using ams::kern::board::nintendo::nx::KDevicePageTable; + } + +#else + #error "Unknown board for KDevicePageTable" +#endif diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_hardware_timer.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_hardware_timer.hpp new file mode 100644 index 000000000..e9c04cecc --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_select_hardware_timer.hpp @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +#if defined(ATMOSPHERE_ARCH_ARM64) + + #include + namespace ams::kern { + using ams::kern::arch::arm64::KHardwareTimer; + } + +#else + + #error "Unknown architecture for KHardwareTimer" + +#endif diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_controller.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_controller.hpp new file mode 100644 index 000000000..605fa3fcd --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_controller.hpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include + +#if defined(ATMOSPHERE_ARCH_ARM64) + + #include + namespace ams::kern { + using ams::kern::arch::arm64::KInterruptController; + } + +#else + + #error "Unknown architecture for KInterruptController" + +#endif diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_interrupts.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_manager.hpp similarity index 54% rename from libraries/libmesosphere/include/mesosphere/kern_select_interrupts.hpp rename to libraries/libmesosphere/include/mesosphere/kern_select_interrupt_manager.hpp index 312450b7c..53790958d 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_select_interrupts.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_manager.hpp @@ -14,29 +14,45 @@ * along with this program. If not, see . */ #pragma once -#include -#include "kern_panic.hpp" +#include +#include + +#if defined(ATMOSPHERE_ARCH_ARM64) + + #include + namespace ams::kern { + using ams::kern::arch::arm64::KInterruptManager; + } + +#else + + #error "Unknown architecture for KInterruptManager" + +#endif + namespace ams::kern { - /* TODO: Actually select between architecture-specific interrupt code. */ - /* Enable or disable interrupts for the lifetime of an object. */ class KScopedInterruptDisable { NON_COPYABLE(KScopedInterruptDisable); NON_MOVEABLE(KScopedInterruptDisable); + private: + u32 prev_intr_state; public: - KScopedInterruptDisable(); - ~KScopedInterruptDisable(); + ALWAYS_INLINE KScopedInterruptDisable() : prev_intr_state(KInterruptManager::DisableInterrupts()) { /* ... */ } + ~KScopedInterruptDisable() { KInterruptManager::RestoreInterrupts(prev_intr_state); } }; class KScopedInterruptEnable { NON_COPYABLE(KScopedInterruptEnable); NON_MOVEABLE(KScopedInterruptEnable); + private: + u32 prev_intr_state; public: - KScopedInterruptEnable(); - ~KScopedInterruptEnable(); + ALWAYS_INLINE KScopedInterruptEnable() : prev_intr_state(KInterruptManager::EnableInterrupts()) { /* ... */ } + ~KScopedInterruptEnable() { KInterruptManager::RestoreInterrupts(prev_intr_state); } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_name.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_name.hpp new file mode 100644 index 000000000..8be3a7b4f --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_name.hpp @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include + +#if defined(ATMOSPHERE_ARCH_ARM64) + + #include + namespace ams::kern { + using namespace ams::kern::arch::arm64::interrupt_name; + } + +#else + + #error "Unknown architecture for KInterruptName" + +#endif diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_page_table.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_page_table.hpp new file mode 100644 index 000000000..28210ff38 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_select_page_table.hpp @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +#if defined(ATMOSPHERE_ARCH_ARM64) + + #include + #include + #include + namespace ams::kern { + using ams::kern::arch::arm64::KPageTable; + using ams::kern::arch::arm64::KSupervisorPageTable; + using ams::kern::arch::arm64::KProcessPageTable; + } + +#else + + #error "Unknown architecture for KPageTable" + +#endif diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_page_table_impl.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_page_table_impl.hpp new file mode 100644 index 000000000..4d6858b00 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_select_page_table_impl.hpp @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +#if defined(ATMOSPHERE_ARCH_ARM64) + + #include + namespace ams::kern { + using ams::kern::arch::arm64::KPageTableImpl; + } + +#else + + #error "Unknown architecture for KPageTableImpl" + +#endif diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_system_control.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_system_control.hpp new file mode 100644 index 000000000..5d4890d67 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_select_system_control.hpp @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +#ifdef ATMOSPHERE_BOARD_NINTENDO_NX + #include + + namespace ams::kern { + using ams::kern::board::nintendo::nx::KSystemControl; + } + +#else + #error "Unknown board for KSystemControl" +#endif diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_userspace_memory_access.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_userspace_memory_access.hpp new file mode 100644 index 000000000..121eb456f --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_select_userspace_memory_access.hpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +#ifdef ATMOSPHERE_ARCH_ARM64 + #include + + namespace ams::kern { + + using ams::kern::arch::arm64::UserspaceAccess; + + } + +#else + #error "Unknown architecture for CPU" +#endif + diff --git a/libraries/libmesosphere/include/mesosphere/kern_slab_helpers.hpp b/libraries/libmesosphere/include/mesosphere/kern_slab_helpers.hpp new file mode 100644 index 000000000..b34150cbd --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_slab_helpers.hpp @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include +#include + +namespace ams::kern { + + template + class KSlabAllocated { + private: + static inline KSlabHeap s_slab_heap; + public: + constexpr KSlabAllocated() { /* ... 
*/ } + + size_t GetSlabIndex() const { + return s_slab_heap.GetIndex(static_cast(this)); + } + public: + static void InitializeSlabHeap(void *memory, size_t memory_size) { + s_slab_heap.Initialize(memory, memory_size); + } + + static ALWAYS_INLINE Derived *Allocate() { + return s_slab_heap.Allocate(); + } + + static ALWAYS_INLINE void Free(Derived *obj) { + s_slab_heap.Free(obj); + } + + static size_t GetObjectSize() { return s_slab_heap.GetObjectSize(); } + static size_t GetSlabHeapSize() { return s_slab_heap.GetSlabHeapSize(); } + static size_t GetPeakIndex() { return s_slab_heap.GetPeakIndex(); } + static uintptr_t GetSlabHeapAddress() { return s_slab_heap.GetSlabHeapAddress(); } + }; + + template + class KAutoObjectWithSlabHeapAndContainer : public Base { + static_assert(std::is_base_of::value); + private: + static inline KSlabHeap s_slab_heap; + static inline KAutoObjectWithListContainer s_container; + private: + static ALWAYS_INLINE Derived *Allocate() { + return s_slab_heap.Allocate(); + } + + static ALWAYS_INLINE void Free(Derived *obj) { + s_slab_heap.Free(obj); + } + public: + class ListAccessor : public KAutoObjectWithListContainer::ListAccessor { + public: + ALWAYS_INLINE ListAccessor() : KAutoObjectWithListContainer::ListAccessor(s_container) { /* ... */ } + ALWAYS_INLINE ~ListAccessor() { /* ... */ } + }; + public: + constexpr KAutoObjectWithSlabHeapAndContainer() : Base() { /* ... */ } + virtual ~KAutoObjectWithSlabHeapAndContainer() { /* ... */ } + + virtual void Destroy() override { + const bool is_initialized = this->IsInitialized(); + uintptr_t arg = 0; + if (is_initialized) { + s_container.Unregister(this); + arg = this->GetPostDestroyArgument(); + this->Finalize(); + } + Free(static_cast(this)); + if (is_initialized) { + Derived::PostDestroy(arg); + } + } + + virtual bool IsInitialized() const { return true; } + virtual uintptr_t GetPostDestroyArgument() const { return 0; } + + size_t GetSlabIndex() const { + return s_slab_heap.GetIndex(static_cast(this)); + } + public: + static void InitializeSlabHeap(void *memory, size_t memory_size) { + s_slab_heap.Initialize(memory, memory_size); + s_container.Initialize(); + } + + static Derived *Create() { + Derived *obj = Allocate(); + if (AMS_LIKELY(obj != nullptr)) { + KAutoObject::Create(obj); + } + return obj; + } + + static Result Register(Derived *obj) { + return s_container.Register(obj); + } + + static size_t GetObjectSize() { return s_slab_heap.GetObjectSize(); } + static size_t GetSlabHeapSize() { return s_slab_heap.GetSlabHeapSize(); } + static size_t GetPeakIndex() { return s_slab_heap.GetPeakIndex(); } + static uintptr_t GetSlabHeapAddress() { return s_slab_heap.GetSlabHeapAddress(); } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_svc.hpp b/libraries/libmesosphere/include/mesosphere/kern_svc.hpp new file mode 100644 index 000000000..251fcd02b --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_svc.hpp @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp new file mode 100644 index 000000000..fce28f594 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern::svc { + + namespace impl { + + /* TODO: C++20 + template + concept Pointer = std::is_pointer::value; + + template + concept NonConstPointer = Pointer && !std::is_const::type>::value; + + template + concept ConstPointer = Pointer && std::is_const::type>::value; + + template + concept AlignedNPointer = Pointer && alignof(typename std::remove_pointer::type) >= N && util::IsAligned(sizeof(typename std::remove_pointer::type), N); + + template + concept Aligned8Pointer = AlignedNPointer; + + template + concept Aligned16Pointer = AlignedNPointer && Aligned8; + + template + concept Aligned32Pointer = AlignedNPointer && Aligned16; + + template + concept Aligned64Pointer = AlignedNPointer && Aligned32; + */ + + template + constexpr inline bool IsPointer = std::is_pointer::value; + + template + constexpr inline bool IsConstPointer = IsPointer && std::is_const::type>::value; + + template + constexpr inline bool IsNonConstPointer = IsPointer && !std::is_const::type>::value; + + template + constexpr inline bool IsAlignedNPointer = IsPointer && alignof(typename std::remove_pointer::type) >= N && util::IsAligned(sizeof(typename std::remove_pointer::type), N); + + template /* requires Aligned8Pointer<_T> */ + class KUserPointerImplTraits { + static_assert(IsAlignedNPointer<_T, sizeof(u8)>); + public: + using T = typename std::remove_const::type>::type; + public: + static Result CopyFromUserspace(void *dst, const void *src, size_t size) { + R_UNLESS(UserspaceAccess::CopyMemoryFromUser(dst, src, size), svc::ResultInvalidPointer()); + return ResultSuccess(); + } + + static Result CopyToUserspace(void *dst, const void *src, size_t size) { + R_UNLESS(UserspaceAccess::CopyMemoryToUser(dst, src, size), svc::ResultInvalidPointer()); + return ResultSuccess(); + } + }; + + template /* requires Aligned32Pointer<_T> */ + class KUserPointerImplTraits<_T, typename std::enable_if && !IsAlignedNPointer<_T, sizeof(u64)>>::type> { + static_assert(IsAlignedNPointer<_T, sizeof(u32)>); + public: + using T = typename std::remove_const::type>::type; + public: + static Result CopyFromUserspace(void *dst, const void *src, size_t size) { + R_UNLESS(UserspaceAccess::CopyMemoryFromUserAligned32Bit(dst, src, size), svc::ResultInvalidPointer()); + return ResultSuccess(); + } + + static Result CopyToUserspace(void *dst, const void *src, size_t size) { + 
R_UNLESS(UserspaceAccess::CopyMemoryToUserAligned32Bit(dst, src, size), svc::ResultInvalidPointer()); + return ResultSuccess(); + } + }; + + template /* requires Aligned64Pointer<_T> */ + class KUserPointerImplTraits<_T, typename std::enable_if>::type> { + static_assert(IsAlignedNPointer<_T, sizeof(u64)>); + public: + using T = typename std::remove_const::type>::type; + public: + static Result CopyFromUserspace(void *dst, const void *src, size_t size) { + R_UNLESS(UserspaceAccess::CopyMemoryFromUserAligned64Bit(dst, src, size), svc::ResultInvalidPointer()); + return ResultSuccess(); + } + + static Result CopyToUserspace(void *dst, const void *src, size_t size) { + R_UNLESS(UserspaceAccess::CopyMemoryToUserAligned64Bit(dst, src, size), svc::ResultInvalidPointer()); + return ResultSuccess(); + } + }; + + template /* requires Aligned8Pointer<_T> */ + class KUserPointerImpl : impl::KUserPointerTag { + private: + using Traits = KUserPointerImplTraits<_T>; + protected: + using CT = typename std::remove_pointer<_T>::type; + using T = typename std::remove_const::type; + private: + CT *ptr; + private: + Result CopyToImpl(void *p, size_t size) const { + return Traits::CopyFromUserspace(p, this->ptr, size); + } + + Result CopyFromImpl(const void *p, size_t size) const { + return Traits::CopyToUserspace(this->ptr, p, size); + } + protected: + Result CopyTo(T *p) const { return this->CopyToImpl(p, sizeof(*p)); } + Result CopyFrom(const T *p) const { return this->CopyFromImpl(p, sizeof(*p)); } + + Result CopyArrayElementTo(T *p, size_t index) const { return Traits::CopyFromUserspace(p, this->ptr + index, sizeof(*p)); } + Result CopyArrayElementFrom(const T *p, size_t index) const { return Traits::CopyToUserspace(this->ptr + index, p, sizeof(*p)); } + + Result CopyArrayTo(T *arr, size_t count) const { return this->CopyToImpl(arr, sizeof(*arr) * count); } + Result CopyArrayFrom(const T *arr, size_t count) const { return this->CopyFromImpl(arr, sizeof(*arr) * count); } + + constexpr bool IsNull() const { return this->ptr == nullptr; } + + constexpr CT *GetUnsafePointer() const { return this->ptr; } + }; + + template<> + class KUserPointerImpl : impl::KUserPointerTag { + private: + using Traits = KUserPointerImplTraits; + protected: + using CT = const char; + using T = char; + private: + const char *ptr; + protected: + Result CopyStringTo(char *dst, size_t size) const { + static_assert(sizeof(char) == 1); + R_UNLESS(UserspaceAccess::CopyStringFromUser(dst, this->ptr, size) > 0, svc::ResultInvalidPointer()); + return ResultSuccess(); + } + + Result CopyArrayElementTo(char *dst, size_t index) const { + return Traits::CopyFromUserspace(dst, this->ptr + index, sizeof(*dst)); + } + + constexpr bool IsNull() const { return this->ptr == nullptr; } + + constexpr const char *GetUnsafePointer() const { return this->ptr; } + }; + + } + + template + class KUserPointer; + + template /* requires impl::ConstPointer */ + struct KUserPointer>::type> : public impl::KUserPointerImpl { + public: + static constexpr bool IsInput = true; + public: + using impl::KUserPointerImpl::CopyTo; + using impl::KUserPointerImpl::CopyArrayElementTo; + using impl::KUserPointerImpl::CopyArrayTo; + using impl::KUserPointerImpl::IsNull; + + using impl::KUserPointerImpl::GetUnsafePointer; + }; + + template /* requires impl::NonConstPointer */ + struct KUserPointer>::type> : public impl::KUserPointerImpl { + public: + static constexpr bool IsInput = false; + public: + using impl::KUserPointerImpl::CopyFrom; + using 
impl::KUserPointerImpl::CopyArrayElementFrom; + using impl::KUserPointerImpl::CopyArrayFrom; + using impl::KUserPointerImpl::IsNull; + + using impl::KUserPointerImpl::GetUnsafePointer; + }; + + template<> + struct KUserPointer : public impl::KUserPointerImpl { + public: + static constexpr bool IsInput = true; + public: + using impl::KUserPointerImpl::CopyStringTo; + using impl::KUserPointerImpl::CopyArrayElementTo; + using impl::KUserPointerImpl::IsNull; + + using impl::KUserPointerImpl::GetUnsafePointer; + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp new file mode 100644 index 000000000..dddff3139 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern::svc { + + static constexpr size_t NumSupervisorCalls = 0x80; + + #define AMS_KERN_SVC_DECLARE_ENUM_ID(ID, RETURN_TYPE, NAME, ...) \ + SvcId_##NAME = ID, + + enum SvcId { + AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_DECLARE_ENUM_ID, __invalid) + SvcId_Count = NumSupervisorCalls, + }; + + #undef AMS_KERN_SVC_DECLARE_ENUM_ID + + #define AMS_KERN_SVC_DECLARE_PROTOTYPE_64(ID, RETURN_TYPE, NAME, ...) \ + NOINLINE RETURN_TYPE NAME##64(__VA_ARGS__); + #define AMS_KERN_SVC_DECLARE_PROTOTYPE_64_FROM_32(ID, RETURN_TYPE, NAME, ...) \ + NOINLINE RETURN_TYPE NAME##64From32(__VA_ARGS__); + + AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_DECLARE_PROTOTYPE_64, lp64) + AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_DECLARE_PROTOTYPE_64_FROM_32, ilp32) + + /* TODO: Support _32 ABI */ + + #undef AMS_KERN_SVC_DECLARE_PROTOTYPE_64 + #undef AMS_KERN_SVC_DECLARE_PROTOTYPE_64_FROM_32 + + +} diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_results.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_results.hpp new file mode 100644 index 000000000..7b4c2b93e --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_results.hpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
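
KUserPointer, defined above, never exposes operator* or operator->: SVC handlers can only move data across the user/kernel boundary through the alignment-tiered copy routines, which fail cleanly on bad addresses. A standalone sketch of the pattern, with UserCopy as a hypothetical stand-in for the fault-handling UserspaceAccess copies:

#include <cstddef>
#include <cstring>

/* Stand-in: the real routines run with fault handlers installed and
   return false when the user address is unmapped. */
inline bool UserCopy(void *dst, const void *src, size_t size) {
    std::memcpy(dst, src, size);
    return true;
}

template<typename T>
class UserPointer {
    private:
        T *ptr;
    public:
        constexpr explicit UserPointer(T *p) : ptr(p) { /* ... */ }

        /* user -> kernel */
        bool CopyTo(T *out) const { return UserCopy(out, this->ptr, sizeof(T)); }
        /* kernel -> user */
        bool CopyFrom(const T *in) const { return UserCopy(this->ptr, in, sizeof(T)); }

        constexpr bool IsNull() const { return this->ptr == nullptr; }
};
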
+ */ +#pragma once +#include + +namespace ams::kern::svc { + + /* 7 */ using ::ams::svc::ResultOutOfSessions; + + /* 14 */ using ::ams::svc::ResultInvalidArgument; + + /* 33 */ using ::ams::svc::ResultNotImplemented; + + /* 57 */ using ::ams::svc::ResultNoSynchronizationObject; + + /* 59 */ using ::ams::svc::ResultTerminationRequested; + + /* 70 */ using ::ams::svc::ResultNoEvent; + + /* 101 */ using ::ams::svc::ResultInvalidSize; + /* 102 */ using ::ams::svc::ResultInvalidAddress; + /* 103 */ using ::ams::svc::ResultOutOfResource; + /* 104 */ using ::ams::svc::ResultOutOfMemory; + /* 105 */ using ::ams::svc::ResultOutOfHandles; + /* 106 */ using ::ams::svc::ResultInvalidCurrentMemory; + + /* 108 */ using ::ams::svc::ResultInvalidNewMemoryPermissions; + + /* 110 */ using ::ams::svc::ResultInvalidMemoryRegion; + + /* 112 */ using ::ams::svc::ResultInvalidPriority; + /* 113 */ using ::ams::svc::ResultInvalidCoreId; + /* 114 */ using ::ams::svc::ResultInvalidHandle; + /* 115 */ using ::ams::svc::ResultInvalidPointer; + /* 116 */ using ::ams::svc::ResultInvalidCombination; + /* 117 */ using ::ams::svc::ResultTimedOut; + /* 118 */ using ::ams::svc::ResultCancelled; + /* 119 */ using ::ams::svc::ResultOutOfRange; + /* 120 */ using ::ams::svc::ResultInvalidEnumValue; + /* 121 */ using ::ams::svc::ResultNotFound; + /* 122 */ using ::ams::svc::ResultBusy; + /* 123 */ using ::ams::svc::ResultSessionClosed; + /* 124 */ using ::ams::svc::ResultNotHandled; + /* 125 */ using ::ams::svc::ResultInvalidState; + /* 126 */ using ::ams::svc::ResultReservedUsed; + /* 127 */ using ::ams::svc::ResultNotSupported; + /* 128 */ using ::ams::svc::ResultDebug; + /* 129 */ using ::ams::svc::ResultThreadNotOwned; + + /* 131 */ using ::ams::svc::ResultPortClosed; + /* 132 */ using ::ams::svc::ResultLimitReached; + + /* 258 */ using ::ams::svc::ResultReceiveListBroken; + /* 259 */ using ::ams::svc::ResultOutOfAddressSpace; + /* 260 */ using ::ams::svc::ResultMessageTooLarge; + + /* 520 */ using ::ams::svc::ResultProcessTerminated; + +} diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp new file mode 100644 index 000000000..3119ba8b1 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
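
The enum and prototype generation above is the classic X-macro technique: a single list macro (AMS_SVC_FOREACH_KERN_DEFINITION) is expanded with different per-entry handlers. A reduced, self-contained illustration; the two ids used are the real ExitProcess and SleepThread numbers, but the list is otherwise hypothetical, and the real handlers also receive each call's parameter list via __VA_ARGS__.

#define EXAMPLE_FOREACH_SVC(HANDLER)   \
    HANDLER(0x07, void, ExitProcess)   \
    HANDLER(0x0B, void, SleepThread)

#define DECLARE_ENUM_ID(ID, RETURN_TYPE, NAME) SvcId_##NAME = ID,
enum ExampleSvcId { EXAMPLE_FOREACH_SVC(DECLARE_ENUM_ID) };
#undef DECLARE_ENUM_ID

#define DECLARE_PROTOTYPE_64(ID, RETURN_TYPE, NAME) RETURN_TYPE NAME##64();
EXAMPLE_FOREACH_SVC(DECLARE_PROTOTYPE_64)
#undef DECLARE_PROTOTYPE_64

static_assert(SvcId_ExitProcess == 0x07);
static_assert(SvcId_SleepThread == 0x0B);
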
+ */ +#pragma once +#include +#include + +namespace ams::kern::svc { + + using SvcTableEntry = void (*)(); + + /* TODO: 32-bit ABI */ + + extern const std::array SvcTable64From32; + extern const std::array SvcTable64; + +} diff --git a/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp b/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp index 2574b6db2..733ba1744 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp @@ -15,20 +15,211 @@ */ #include -namespace ams::kern::arm64::cpu { +namespace ams::kern::arch::arm64::cpu { + + /* Declare prototype to be implemented in asm. */ + void SynchronizeAllCoresImpl(s32 *sync_var, s32 num_cores); + namespace { - void FlushEntireDataCacheImpl(int level) { + class KScopedCoreMigrationDisable { + public: + ALWAYS_INLINE KScopedCoreMigrationDisable() { GetCurrentThread().DisableCoreMigration(); } + + ALWAYS_INLINE ~KScopedCoreMigrationDisable() { GetCurrentThread().EnableCoreMigration(); } + }; + + /* Nintendo registers a handler for a SGI on thread termination, but does not handle anything. */ + /* This is sufficient, because post-interrupt scheduling is all they really intend to occur. */ + class KThreadTerminationInterruptHandler : public KInterruptHandler { + public: + constexpr KThreadTerminationInterruptHandler() : KInterruptHandler() { /* ... */ } + + virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override { + return nullptr; + } + }; + + class KPerformanceCounterInterruptHandler : public KInterruptHandler { + private: + static inline KLightLock s_lock; + private: + u64 counter; + s32 which; + bool done; + public: + constexpr KPerformanceCounterInterruptHandler() : KInterruptHandler(), counter(), which(), done() { /* ... */ } + + static KLightLock &GetLock() { return s_lock; } + + void Setup(s32 w) { + this->done = false; + this->which = w; + } + + void Wait() { + while (!this->done) { + __asm__ __volatile__("yield"); + } + } + + u64 GetCounter() const { return this->counter; } + + /* Nintendo misuses this per their own API, but it's functional. */ + virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override { + if (this->which < 0) { + this->counter = cpu::GetCycleCounter(); + } else { + this->counter = cpu::GetPerformanceCounter(this->which); + } + DataMemoryBarrier(); + this->done = true; + return nullptr; + } + }; + + class KCacheHelperInterruptHandler : public KInterruptHandler { + private: + static constexpr s32 ThreadPriority = 8; + public: + enum class Operation { + Idle, + InvalidateInstructionCache, + StoreDataCache, + FlushDataCache, + }; + private: + KLightLock lock; + KLightLock cv_lock; + KLightConditionVariable cv; + std::atomic target_cores; + volatile Operation operation; + private: + static void ThreadFunction(uintptr_t _this) { + reinterpret_cast(_this)->ThreadFunctionImpl(); + } + + void ThreadFunctionImpl() { + const s32 core_id = GetCurrentCoreId(); + while (true) { + /* Wait for a request to come in. */ + { + KScopedLightLock lk(this->cv_lock); + while ((this->target_cores & (1ul << core_id)) == 0) { + this->cv.Wait(std::addressof(this->cv_lock)); + } + } + + /* Process the request. */ + this->ProcessOperation(); + + /* Broadcast, if there's nothing pending. 
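
KCacheHelperInterruptHandler's worker loop above is a condition-variable handshake: each core's helper thread sleeps until its bit appears in target_cores, performs the operation, and the requester is woken once no bits remain. A rough standalone model, using std::mutex and std::condition_variable in place of KLightLock/KLightConditionVariable (where each core clears its bit is simplified here):

#include <condition_variable>
#include <mutex>

std::mutex g_cv_lock;
std::condition_variable g_cv;
unsigned long g_target_cores = 0;   /* bit n set: core n has pending work */

void CacheHelperLoop(int core_id) {
    while (true) {
        std::unique_lock<std::mutex> lk(g_cv_lock);
        g_cv.wait(lk, [&] { return (g_target_cores & (1ul << core_id)) != 0; });

        /* ...perform the requested cache maintenance operation... */

        g_target_cores &= ~(1ul << core_id);
        if (g_target_cores == 0) {
            g_cv.notify_all();   /* wake the requesting core */
        }
    }
}
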
*/ + { + KScopedLightLock lk(this->cv_lock); + if (this->target_cores == 0) { + this->cv.Broadcast(); + } + } + } + } + + void ProcessOperation(); + public: + constexpr KCacheHelperInterruptHandler() : KInterruptHandler(), lock(), cv_lock(), cv(), target_cores(), operation(Operation::Idle) { /* ... */ } + + void Initialize(s32 core_id) { + /* Reserve a thread from the system limit. */ + MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_ThreadCountMax, 1)); + + /* Create a new thread. */ + KThread *new_thread = KThread::Create(); + MESOSPHERE_ABORT_UNLESS(new_thread != nullptr); + MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(new_thread, ThreadFunction, reinterpret_cast(this), ThreadPriority, core_id)); + + /* Register the new thread. */ + KThread::Register(new_thread); + + /* Run the thread. */ + new_thread->Run(); + } + + virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override { + this->ProcessOperation(); + return nullptr; + } + + void RequestOperation(Operation op) { + KScopedLightLock lk(this->lock); + MESOSPHERE_ABORT_UNLESS(this->operation == Operation::Idle); + /* Send and wait for acknowledgement of request. */ + { + KScopedLightLock cv_lk(this->cv_lock); + MESOSPHERE_ABORT_UNLESS(this->target_cores == 0); + + /* Set operation. */ + this->operation = op; + + /* Create core masks for us to use. */ + constexpr u64 AllCoresMask = (1ul << cpu::NumCores) - 1ul; + const u64 other_cores_mask = AllCoresMask & ~(1ul << GetCurrentCoreId()); + + if ((op == Operation::InvalidateInstructionCache) || (Kernel::GetState() == Kernel::State::Initializing)) { + /* For certain operations, we want to send an interrupt. */ + this->target_cores = other_cores_mask; + DataSynchronizationBarrier(); + const u64 target_mask = this->target_cores; + DataSynchronizationBarrier(); + Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_CacheOperation, target_mask); + this->ProcessOperation(); + while (this->target_cores != 0) { + __asm__ __volatile__("yield"); + } + } else { + /* Request all cores. */ + this->target_cores = AllCoresMask; + + /* Use the condvar. */ + this->cv.Broadcast(); + while (this->target_cores != 0) { + this->cv.Wait(std::addressof(this->cv_lock)); + } + } + } + /* Go idle again. */ + this->operation = Operation::Idle; + } + }; + + /* Instances of the interrupt handlers. */ + KThreadTerminationInterruptHandler g_thread_termination_handler; + KCacheHelperInterruptHandler g_cache_operation_handler; + KPerformanceCounterInterruptHandler g_performance_counter_handler[cpu::NumCores]; + + /* Expose this as a global, for asm to use. */ + s32 g_all_core_sync_count; + + template + ALWAYS_INLINE void PerformCacheOperationBySetWayImpl(int level, F f) { /* Used in multiple locations. */ const u64 level_sel_value = static_cast(level << 1); - /* Set selection register. */ - cpu::SetCsselrEl1(level_sel_value); - cpu::InstructionMemoryBarrier(); + u64 ccsidr_value; + if constexpr (Init) { + /* During init, we can just set the selection register directly. */ + cpu::SetCsselrEl1(level_sel_value); + cpu::InstructionMemoryBarrier(); + ccsidr_value = cpu::GetCcsidrEl1(); + } else { + /* After init, we need to care about interrupts. */ + KScopedInterruptDisable di; + cpu::SetCsselrEl1(level_sel_value); + cpu::InstructionMemoryBarrier(); + ccsidr_value = cpu::GetCcsidrEl1(); + } /* Get cache size id info. 
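
The set/way walk being rewritten here composes the operand for the dc cisw / dc csw instructions: cache level in bits [3:1], set index shifted by log2(line size), way index in the top bits. A small constexpr sketch of the encoding; shift values follow the Arm ARM's DC CISW description.

#include <cstdint>

constexpr uint64_t MakeSetWayOperand(int level, int way, int set,
                                     int way_shift, int set_shift) {
    return (static_cast<uint64_t>(way)   << way_shift) |
           (static_cast<uint64_t>(set)   << set_shift) |
           (static_cast<uint64_t>(level) << 1);
}

/* L2 (level index 1), way 0, set 0, for a 4-way cache (way_shift 30)
   with 64-byte lines (set_shift 6). */
static_assert(MakeSetWayOperand(1, 0, 0, 30, 6) == 0x2);
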
*/ - CacheSizeIdAccessor ccsidr_el1; + CacheSizeIdRegisterAccessor ccsidr_el1(ccsidr_value); const int num_sets = ccsidr_el1.GetNumberOfSets(); const int num_ways = ccsidr_el1.GetAssociativity(); const int line_size = ccsidr_el1.GetLineSize(); @@ -40,30 +231,179 @@ namespace ams::kern::arm64::cpu { for (int set = 0; set <= num_sets; set++) { const u64 way_value = static_cast(way) << way_shift; const u64 set_value = static_cast(set) << set_shift; - const u64 cisw_value = way_value | set_value | level_sel_value; - __asm__ __volatile__("dc cisw, %0" ::"r"(cisw_value) : "memory"); + f(way_value | set_value | level_sel_value); } } } - } - - void FlushEntireDataCacheShared() { - CacheLineIdAccessor clidr_el1; - const int levels_of_coherency = clidr_el1.GetLevelsOfCoherency(); - const int levels_of_unification = clidr_el1.GetLevelsOfUnification(); - - for (int level = levels_of_coherency; level >= levels_of_unification; level--) { - FlushEntireDataCacheImpl(level); + ALWAYS_INLINE void FlushDataCacheLineBySetWayImpl(const u64 sw_value) { + __asm__ __volatile__("dc cisw, %[v]" :: [v]"r"(sw_value) : "memory"); } - } - void FlushEntireDataCacheLocal() { - CacheLineIdAccessor clidr_el1; - const int levels_of_unification = clidr_el1.GetLevelsOfUnification(); - - for (int level = levels_of_unification - 1; level >= 0; level--) { - FlushEntireDataCacheImpl(level); + ALWAYS_INLINE void StoreDataCacheLineBySetWayImpl(const u64 sw_value) { + __asm__ __volatile__("dc csw, %[v]" :: [v]"r"(sw_value) : "memory"); } + + template + ALWAYS_INLINE void PerformCacheOperationBySetWayShared(F f) { + CacheLineIdRegisterAccessor clidr_el1; + const int levels_of_coherency = clidr_el1.GetLevelsOfCoherency(); + const int levels_of_unification = clidr_el1.GetLevelsOfUnification(); + + for (int level = levels_of_coherency; level >= levels_of_unification; level--) { + PerformCacheOperationBySetWayImpl(level, f); + } + } + + template + ALWAYS_INLINE void PerformCacheOperationBySetWayLocal(F f) { + CacheLineIdRegisterAccessor clidr_el1; + const int levels_of_unification = clidr_el1.GetLevelsOfUnification(); + + for (int level = levels_of_unification - 1; level >= 0; level--) { + PerformCacheOperationBySetWayImpl(level, f); + } + } + + void KCacheHelperInterruptHandler::ProcessOperation() { + switch (this->operation) { + case Operation::Idle: + break; + case Operation::InvalidateInstructionCache: + InstructionMemoryBarrier(); + break; + case Operation::StoreDataCache: + PerformCacheOperationBySetWayLocal(StoreDataCacheLineBySetWayImpl); + DataSynchronizationBarrier(); + break; + case Operation::FlushDataCache: + PerformCacheOperationBySetWayLocal(FlushDataCacheLineBySetWayImpl); + DataSynchronizationBarrier(); + break; + } + } + + ALWAYS_INLINE void SetEventLocally() { + __asm__ __volatile__("sevl" ::: "memory"); + } + + ALWAYS_INLINE void WaitForEvent() { + __asm__ __volatile__("wfe" ::: "memory"); + } + + ALWAYS_INLINE Result InvalidateDataCacheRange(uintptr_t start, uintptr_t end) { + MESOSPHERE_ASSERT(util::IsAligned(start, DataCacheLineSize)); + MESOSPHERE_ASSERT(util::IsAligned(end, DataCacheLineSize)); + R_UNLESS(UserspaceAccess::InvalidateDataCache(start, end), svc::ResultInvalidCurrentMemory()); + DataSynchronizationBarrier(); + return ResultSuccess(); + } + + ALWAYS_INLINE Result StoreDataCacheRange(uintptr_t start, uintptr_t end) { + MESOSPHERE_ASSERT(util::IsAligned(start, DataCacheLineSize)); + MESOSPHERE_ASSERT(util::IsAligned(end, DataCacheLineSize)); + R_UNLESS(UserspaceAccess::StoreDataCache(start, end), 
svc::ResultInvalidCurrentMemory());
+            DataSynchronizationBarrier();
+            return ResultSuccess();
+        }
+
+        ALWAYS_INLINE Result FlushDataCacheRange(uintptr_t start, uintptr_t end) {
+            MESOSPHERE_ASSERT(util::IsAligned(start, DataCacheLineSize));
+            MESOSPHERE_ASSERT(util::IsAligned(end, DataCacheLineSize));
+            R_UNLESS(UserspaceAccess::FlushDataCache(start, end), svc::ResultInvalidCurrentMemory());
+            DataSynchronizationBarrier();
+            return ResultSuccess();
+        }
+
+        ALWAYS_INLINE Result InvalidateInstructionCacheRange(uintptr_t start, uintptr_t end) {
+            MESOSPHERE_ASSERT(util::IsAligned(start, InstructionCacheLineSize));
+            MESOSPHERE_ASSERT(util::IsAligned(end, InstructionCacheLineSize));
+            R_UNLESS(UserspaceAccess::InvalidateInstructionCache(start, end), svc::ResultInvalidCurrentMemory());
+            EnsureInstructionConsistency();
+            return ResultSuccess();
+        }
+
+    }
-
-}
\ No newline at end of file
+
+    void FlushEntireDataCacheSharedForInit() {
+        return PerformCacheOperationBySetWayShared<true>(FlushDataCacheLineBySetWayImpl);
+    }
+
+    void FlushEntireDataCacheLocalForInit() {
+        return PerformCacheOperationBySetWayLocal<true>(FlushDataCacheLineBySetWayImpl);
+    }
+
+    void FlushEntireDataCache() {
+        return PerformCacheOperationBySetWayShared<false>(FlushDataCacheLineBySetWayImpl);
+    }
+
+    Result InvalidateDataCache(void *addr, size_t size) {
+        KScopedCoreMigrationDisable dm;
+        const uintptr_t start = reinterpret_cast<uintptr_t>(addr);
+        const uintptr_t end = start + size;
+        uintptr_t aligned_start = util::AlignDown(start, DataCacheLineSize);
+        uintptr_t aligned_end = util::AlignUp(end, DataCacheLineSize);
+
+        if (aligned_start != start) {
+            R_TRY(FlushDataCacheRange(aligned_start, aligned_start + DataCacheLineSize));
+            aligned_start += DataCacheLineSize;
+        }
+
+        if (aligned_start < aligned_end && (aligned_end != end)) {
+            aligned_end -= DataCacheLineSize;
+            R_TRY(FlushDataCacheRange(aligned_end, aligned_end + DataCacheLineSize));
+        }
+
+        if (aligned_start < aligned_end) {
+            R_TRY(InvalidateDataCacheRange(aligned_start, aligned_end));
+        }
+
+        return ResultSuccess();
+    }
+
+    Result StoreDataCache(const void *addr, size_t size) {
+        KScopedCoreMigrationDisable dm;
+        const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr),        DataCacheLineSize);
+        const uintptr_t end   = util::AlignUp(  reinterpret_cast<uintptr_t>(addr) + size, DataCacheLineSize);
+
+        return StoreDataCacheRange(start, end);
+    }
+
+    Result FlushDataCache(const void *addr, size_t size) {
+        KScopedCoreMigrationDisable dm;
+        const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr),        DataCacheLineSize);
+        const uintptr_t end   = util::AlignUp(  reinterpret_cast<uintptr_t>(addr) + size, DataCacheLineSize);
+
+        return FlushDataCacheRange(start, end);
+    }
+
+    Result InvalidateInstructionCache(void *addr, size_t size) {
+        KScopedCoreMigrationDisable dm;
+        const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr),        InstructionCacheLineSize);
+        const uintptr_t end   = util::AlignUp(  reinterpret_cast<uintptr_t>(addr) + size, InstructionCacheLineSize);
+
+        R_TRY(InvalidateInstructionCacheRange(start, end));
+
+        /* Request the interrupt helper to invalidate, too. */
+        g_cache_operation_handler.RequestOperation(KCacheHelperInterruptHandler::Operation::InvalidateInstructionCache);
+
+        return ResultSuccess();
+    }
+
+    void InitializeInterruptThreads(s32 core_id) {
+        /* Initialize the cache operation handler. */
+        g_cache_operation_handler.Initialize(core_id);
+
+        /* Bind all handlers to the relevant interrupts.
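
InvalidateDataCache above deliberately refuses to invalidate partial lines: a cache line shared with unrelated data could lose dirty bytes, so the head and tail lines are flushed (clean + invalidate) and only fully covered lines are invalidated outright. Worked numbers, assuming a 64-byte line:

#include <cstdint>

constexpr uint64_t AlignDown(uint64_t v, uint64_t a) { return v & ~(a - 1); }
constexpr uint64_t AlignUp(uint64_t v, uint64_t a)   { return AlignDown(v + a - 1, a); }

/* Invalidating [0x1010, 0x10F0): the lines at 0x1000 and 0x10C0 also hold
   bytes outside the range, so they are flushed, and only [0x1040, 0x10C0)
   is invalidated outright. */
static_assert(AlignDown(0x1010, 64) == 0x1000);   /* head line, flushed */
static_assert(AlignUp(0x10F0, 64)   == 0x1100);   /* tail line at 0x10C0, flushed */
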
*/ + Kernel::GetInterruptManager().BindHandler(std::addressof(g_cache_operation_handler), KInterruptName_CacheOperation, core_id, KInterruptController::PriorityLevel_High, false, false); + Kernel::GetInterruptManager().BindHandler(std::addressof(g_thread_termination_handler), KInterruptName_ThreadTerminate, core_id, KInterruptController::PriorityLevel_Scheduler, false, false); + + if (KTargetSystem::IsUserPmuAccessEnabled()) { SetPmUserEnrEl0(1ul); } + Kernel::GetInterruptManager().BindHandler(std::addressof(g_performance_counter_handler[core_id]), KInterruptName_PerformanceCounter, core_id, KInterruptController::PriorityLevel_Timer, false, false); + } + + void SynchronizeAllCores() { + SynchronizeAllCoresImpl(&g_all_core_sync_count, static_cast(cpu::NumCores)); + } + +} diff --git a/libraries/libmesosphere/source/arch/arm64/kern_cpu_asm.s b/libraries/libmesosphere/source/arch/arm64/kern_cpu_asm.s new file mode 100644 index 000000000..e3f2f8b4b --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/kern_cpu_asm.s @@ -0,0 +1,199 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +/* ams::kern::arch::arm64::cpu::SynchronizeAllCoresImpl(int *sync_var, int num_cores) */ +.section .text._ZN3ams4kern4arch5arm643cpu23SynchronizeAllCoresImplEPii, "ax", %progbits +.global _ZN3ams4kern4arch5arm643cpu23SynchronizeAllCoresImplEPii +.type _ZN3ams4kern4arch5arm643cpu23SynchronizeAllCoresImplEPii, %function +_ZN3ams4kern4arch5arm643cpu23SynchronizeAllCoresImplEPii: + /* Loop until the sync var is less than num cores. */ + sevl +1: + wfe + ldaxr w2, [x0] + cmp w2, w1 + b.gt 1b + + /* Increment the sync var. */ +2: + ldaxr w2, [x0] + add w3, w2, #1 + stlxr w4, w3, [x0] + cbnz w4, 2b + + /* Loop until the sync var matches our ticket. */ + add w3, w2, w1 + sevl +3: + wfe + ldaxr w2, [x0] + cmp w2, w3 + b.ne 3b + + /* Check if the ticket is the last. */ + sub w2, w1, #1 + add w2, w2, w1 + cmp w3, w2 + b.eq 5f + + /* Our ticket is not the last one. Increment. */ +4: + ldaxr w2, [x0] + add w3, w2, #1 + stlxr w4, w3, [x0] + cbnz w4, 4b + ret + + /* Our ticket is the last one. */ +5: + stlr wzr, [x0] + ret + + +/* ams::kern::arch::arm64::cpu::ClearPageToZero(void *) */ +.section .text._ZN3ams4kern4arch5arm643cpu19ClearPageToZeroImplEPv, "ax", %progbits +.global _ZN3ams4kern4arch5arm643cpu19ClearPageToZeroImplEPv +.type _ZN3ams4kern4arch5arm643cpu19ClearPageToZeroImplEPv, %function +_ZN3ams4kern4arch5arm643cpu19ClearPageToZeroImplEPv: + /* Efficiently clear the page using dc zva. 
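
SynchronizeAllCoresImpl above is a ticket barrier: every core atomically takes a ticket, spins until the counter reaches ticket + num_cores, then either releases the next waiter or, if it holds the last ticket, resets the counter for the next round. An equivalent C++ model; the real code spins with wfe/sevl and ldaxr/stlxr rather than std::atomic.

#include <atomic>

std::atomic<int> g_sync{0};

void SynchronizeAllCores(int num_cores) {
    /* Wait for any previous round's release phase to finish. */
    while (g_sync.load(std::memory_order_acquire) > num_cores) { /* spin */ }

    /* Arrival phase: take a ticket. */
    const int ticket = g_sync.fetch_add(1, std::memory_order_acq_rel);

    /* After all cores arrive the counter is num_cores; each release
       increments it by one, so our turn is ticket + num_cores. */
    while (g_sync.load(std::memory_order_acquire) != ticket + num_cores) { /* spin */ }

    if (ticket == num_cores - 1) {
        g_sync.store(0, std::memory_order_release);        /* last: reset */
    } else {
        g_sync.fetch_add(1, std::memory_order_acq_rel);    /* release next */
    }
}
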
*/ + dc zva, x0 + add x8, x0, #0x040 + dc zva, x8 + add x8, x0, #0x080 + dc zva, x8 + add x8, x0, #0x0c0 + dc zva, x8 + add x8, x0, #0x100 + dc zva, x8 + add x8, x0, #0x140 + dc zva, x8 + add x8, x0, #0x180 + dc zva, x8 + add x8, x0, #0x1c0 + dc zva, x8 + add x8, x0, #0x200 + dc zva, x8 + add x8, x0, #0x240 + dc zva, x8 + add x8, x0, #0x280 + dc zva, x8 + add x8, x0, #0x2c0 + dc zva, x8 + add x8, x0, #0x300 + dc zva, x8 + add x8, x0, #0x340 + dc zva, x8 + add x8, x0, #0x380 + dc zva, x8 + add x8, x0, #0x3c0 + dc zva, x8 + add x8, x0, #0x400 + dc zva, x8 + add x8, x0, #0x440 + dc zva, x8 + add x8, x0, #0x480 + dc zva, x8 + add x8, x0, #0x4c0 + dc zva, x8 + add x8, x0, #0x500 + dc zva, x8 + add x8, x0, #0x540 + dc zva, x8 + add x8, x0, #0x580 + dc zva, x8 + add x8, x0, #0x5c0 + dc zva, x8 + add x8, x0, #0x600 + dc zva, x8 + add x8, x0, #0x640 + dc zva, x8 + add x8, x0, #0x680 + dc zva, x8 + add x8, x0, #0x6c0 + dc zva, x8 + add x8, x0, #0x700 + dc zva, x8 + add x8, x0, #0x740 + dc zva, x8 + add x8, x0, #0x780 + dc zva, x8 + add x8, x0, #0x7c0 + dc zva, x8 + add x8, x0, #0x800 + dc zva, x8 + add x8, x0, #0x840 + dc zva, x8 + add x8, x0, #0x880 + dc zva, x8 + add x8, x0, #0x8c0 + dc zva, x8 + add x8, x0, #0x900 + dc zva, x8 + add x8, x0, #0x940 + dc zva, x8 + add x8, x0, #0x980 + dc zva, x8 + add x8, x0, #0x9c0 + dc zva, x8 + add x8, x0, #0xa00 + dc zva, x8 + add x8, x0, #0xa40 + dc zva, x8 + add x8, x0, #0xa80 + dc zva, x8 + add x8, x0, #0xac0 + dc zva, x8 + add x8, x0, #0xb00 + dc zva, x8 + add x8, x0, #0xb40 + dc zva, x8 + add x8, x0, #0xb80 + dc zva, x8 + add x8, x0, #0xbc0 + dc zva, x8 + add x8, x0, #0xc00 + dc zva, x8 + add x8, x0, #0xc40 + dc zva, x8 + add x8, x0, #0xc80 + dc zva, x8 + add x8, x0, #0xcc0 + dc zva, x8 + add x8, x0, #0xd00 + dc zva, x8 + add x8, x0, #0xd40 + dc zva, x8 + add x8, x0, #0xd80 + dc zva, x8 + add x8, x0, #0xdc0 + dc zva, x8 + add x8, x0, #0xe00 + dc zva, x8 + add x8, x0, #0xe40 + dc zva, x8 + add x8, x0, #0xe80 + dc zva, x8 + add x8, x0, #0xec0 + dc zva, x8 + add x8, x0, #0xf00 + dc zva, x8 + add x8, x0, #0xf40 + dc zva, x8 + add x8, x0, #0xf80 + dc zva, x8 + add x8, x0, #0xfc0 + dc zva, x8 + ret \ No newline at end of file diff --git a/libraries/libmesosphere/source/arch/arm64/kern_exception_handlers.cpp b/libraries/libmesosphere/source/arch/arm64/kern_exception_handlers.cpp new file mode 100644 index 000000000..d284e3987 --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/kern_exception_handlers.cpp @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern::arch::arm64 { + + namespace { + + constexpr u32 GetInstructionData(const KExceptionContext *context, u64 esr) { + /* Check for THUMB usermode */ + if ((context->psr & 0x3F) == 0x30) { + u32 insn = *reinterpret_cast(context->pc & ~0x1); + /* Check if the instruction was 32-bit. 
*/ + if ((esr >> 25) & 1) { + insn = (insn << 16) | *reinterpret_cast((context->pc & ~0x1) + sizeof(u16)); + } + return insn; + } else { + /* Not thumb, so just get the instruction. */ + return *reinterpret_cast(context->pc); + } + } + + void HandleUserException(KExceptionContext *context, u64 esr, u64 far, u64 afsr0, u64 afsr1, u32 data) { + KProcess *cur_process = GetCurrentProcessPointer(); + bool should_process_user_exception = KTargetSystem::IsUserExceptionHandlersEnabled(); + + const u64 ec = (esr >> 26) & 0x3F; + switch (ec) { + case 0x0: /* Unknown */ + case 0xE: /* Illegal Execution State */ + case 0x11: /* SVC instruction from Aarch32 */ + case 0x15: /* SVC instruction from Aarch64 */ + case 0x22: /* PC Misalignment */ + case 0x26: /* SP Misalignment */ + case 0x2F: /* SError */ + case 0x30: /* Breakpoint from lower EL */ + case 0x32: /* SoftwareStep from lower EL */ + case 0x34: /* Watchpoint from lower EL */ + case 0x38: /* BKPT instruction */ + case 0x3C: /* BRK instruction */ + break; + default: + { + MESOSPHERE_TODO("Get memory state."); + /* If state is KMemoryState_Code and the user can't read it, set should_process_user_exception = true; */ + } + break; + } + + if (should_process_user_exception) { + MESOSPHERE_TODO("Process the user exception."); + } + + { + MESOSPHERE_TODO("Process for KDebug."); + + MESOSPHERE_TODO("cur_process->GetProgramId()"); + MESOSPHERE_RELEASE_LOG("Exception occurred. %016lx\n", 0ul); + + MESOSPHERE_TODO("if (!svc::ResultNotHandled::Includes(res)) { debug process }."); + } + + MESOSPHERE_TODO("cur_process->Exit();"); + (void)cur_process; + } + + } + + /* NOTE: This function is called from ASM. */ + void FpuContextSwitchHandler() { + KThreadContext::FpuContextSwitchHandler(GetCurrentThreadPointer()); + } + + /* NOTE: This function is called from ASM. */ + void HandleException(KExceptionContext *context) { + MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled()); + + /* Retrieve information about the exception. */ + const u64 esr = cpu::GetEsrEl1(); + const u64 afsr0 = cpu::GetAfsr0El1(); + const u64 afsr1 = cpu::GetAfsr1El1(); + u64 far = 0; + u32 data = 0; + + /* Collect far and data based on the ec. */ + switch ((esr >> 26) & 0x3F) { + case 0x0: /* Unknown */ + case 0xE: /* Illegal Execution State */ + case 0x38: /* BKPT instruction */ + case 0x3C: /* BRK instruction */ + far = context->pc; + data = GetInstructionData(context, esr); + break; + case 0x11: /* SVC instruction from Aarch32 */ + if (context->psr & 0x20) { + /* Thumb mode. */ + context->pc -= 2; + } else { + /* ARM mode. */ + context->pc -= 4; + } + far = context->pc; + break; + case 0x15: /* SVC instruction from Aarch64 */ + context->pc -= 4; + far = context->pc; + break; + case 0x30: /* Breakpoint from lower EL */ + far = context->pc; + break; + default: + far = cpu::GetFarEl1(); + break; + } + + /* Note that we're in an exception handler. */ + GetCurrentThread().SetInExceptionHandler(); + { + const bool is_user_mode = (context->psr & 0xF) == 0; + if (is_user_mode) { + /* Handle any changes needed to the user preemption state. */ + if (GetCurrentThread().GetUserPreemptionState() != 0 && GetCurrentProcess().GetPreemptionStatePinnedThread(GetCurrentCoreId()) == nullptr) { + KScopedSchedulerLock lk; + + /* Note the preemption state in process. */ + GetCurrentProcess().SetPreemptionState(); + + /* Set the kernel preemption state flag. */ + GetCurrentThread().SetKernelPreemptionState(1); + } + + /* Enable interrupts while we process the usermode exception. 
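
All of the dispatch above keys off ESR_EL1: the exception class (EC) sits in bits [31:26], and the IL bit (bit 25) records whether the trapped instruction was 32-bit, which is what GetInstructionData uses to detect a 32-bit Thumb encoding. A two-line decoder:

#include <cstdint>

constexpr uint32_t GetExceptionClass(uint64_t esr)  { return (esr >> 26) & 0x3F; }
constexpr bool     IsInstruction32Bit(uint64_t esr) { return ((esr >> 25) & 1) != 0; }

/* 0x56000000: EC 0x15 (SVC from AArch64) with IL set, per the Arm ARM. */
static_assert(GetExceptionClass(0x56000000ul) == 0x15);
static_assert(IsInstruction32Bit(0x56000000ul));
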
*/ + { + KScopedInterruptEnable ei; + + HandleUserException(context, esr, far, afsr0, afsr1, data); + } + } else { + MESOSPHERE_PANIC("Unhandled Exception in Supervisor Mode\n"); + } + + MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled()); + + /* Handle any DPC requests. */ + while (GetCurrentThread().HasDpc()) { + KDpcManager::HandleDpc(); + } + } + /* Note that we're no longer in an exception handler. */ + GetCurrentThread().ClearInExceptionHandler(); + } + +} diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_hardware_timer.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_hardware_timer.cpp new file mode 100644 index 000000000..5a1630758 --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_hardware_timer.cpp @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern::arch::arm64 { + + namespace impl { + + class KHardwareTimerInterruptTask : public KInterruptTask { + public: + constexpr KHardwareTimerInterruptTask() : KInterruptTask() { /* ... */ } + + virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override { + return this; + } + + virtual void DoTask() override { + Kernel::GetHardwareTimer().DoInterruptTask(); + } + }; + + } + + namespace { + + /* One global hardware timer interrupt task per core. */ + impl::KHardwareTimerInterruptTask g_hardware_timer_interrupt_tasks[cpu::NumCores]; + + ALWAYS_INLINE auto *GetHardwareTimerInterruptTask(s32 core_id) { + return std::addressof(g_hardware_timer_interrupt_tasks[core_id]); + } + + } + + void KHardwareTimer::Initialize(s32 core_id) { + /* Setup the global timer for the core. */ + InitializeGlobalTimer(); + + /* Bind the interrupt task for this core. */ + Kernel::GetInterruptManager().BindHandler(GetHardwareTimerInterruptTask(core_id), KInterruptName_NonSecurePhysicalTimer, core_id, KInterruptController::PriorityLevel_Timer, true, true); + } + + void KHardwareTimer::Finalize() { + /* Stop the hardware timer. */ + StopTimer(); + } + + void KHardwareTimer::DoInterruptTask() { + /* Handle the interrupt. */ + { + KScopedSchedulerLock slk; + KScopedSpinLock lk(this->GetLock()); + + /* Disable the timer interrupt while we handle this. */ + DisableInterrupt(); + if (const s64 next_time = this->DoInterruptTaskImpl(GetTick()); next_time > 0) { + /* We have a next time, so we should set the time to interrupt and turn the interrupt on. */ + SetCompareValue(next_time); + EnableInterrupt(); + } + } + + /* Clear the timer interrupt. 
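
DoInterruptTask above shows the one-shot re-arm pattern used for the hardware timer: the compare interrupt is masked while expired timers are processed, and re-enabled only if another absolute deadline remains. A schematic version; all hooks here are hypothetical stubs standing in for the real register accessors.

#include <cstdint>

/* Hypothetical hardware hooks. */
inline void DisableTimerInterrupt()  { /* mask the compare interrupt   */ }
inline void EnableTimerInterrupt()   { /* unmask the compare interrupt */ }
inline void SetCompareValue(int64_t) { /* program the compare register */ }

/* Stub: pops expired timers, returns the next absolute deadline or 0. */
inline int64_t ProcessExpiredTimers(int64_t) { return 0; }

inline void OnTimerInterrupt(int64_t now) {
    DisableTimerInterrupt();
    if (const int64_t next = ProcessExpiredTimers(now); next > 0) {
        SetCompareValue(next);
        EnableTimerInterrupt();
    }
    /* Otherwise the interrupt stays masked until a new timer is queued. */
}
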
*/ + Kernel::GetInterruptManager().ClearInterrupt(KInterruptName_NonSecurePhysicalTimer, GetCurrentCoreId()); + } + +} diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_controller.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_controller.cpp new file mode 100644 index 000000000..5cce0c31f --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_controller.cpp @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern::arch::arm64 { + + void KInterruptController::SetupInterruptLines(s32 core_id) const { + const size_t ITLines = (core_id == 0) ? 32 * ((this->gicd->typer & 0x1F) + 1) : NumLocalInterrupts; + + for (size_t i = 0; i < ITLines / 32; i++) { + this->gicd->icenabler[i] = 0xFFFFFFFF; + this->gicd->icpendr[i] = 0xFFFFFFFF; + this->gicd->icactiver[i] = 0xFFFFFFFF; + this->gicd->igroupr[i] = 0; + } + + for (size_t i = 0; i < ITLines; i++) { + this->gicd->ipriorityr.bytes[i] = 0xFF; + this->gicd->itargetsr.bytes[i] = 0x00; + } + + for (size_t i = 0; i < ITLines / 16; i++) { + this->gicd->icfgr[i] = 0x00000000; + } + } + + void KInterruptController::Initialize(s32 core_id) { + /* Setup pointers to ARM mmio. */ + this->gicd = GetPointer(KMemoryLayout::GetInterruptDistributorAddress()); + this->gicc = GetPointer(KMemoryLayout::GetInterruptCpuInterfaceAddress()); + + /* Clear CTLRs. */ + this->gicc->ctlr = 0; + if (core_id == 0) { + this->gicd->ctlr = 0; + } + + this->gicc->pmr = 0; + this->gicc->bpr = 7; + + /* Setup all interrupt lines. */ + SetupInterruptLines(core_id); + + /* Set CTLRs. */ + if (core_id == 0) { + this->gicd->ctlr = 1; + } + this->gicc->ctlr = 1; + + /* Set the mask for this core. */ + SetGicMask(core_id); + + /* Set the priority level. */ + SetPriorityLevel(PriorityLevel_Low); + } + + void KInterruptController::Finalize(s32 core_id) { + /* Clear CTLRs. */ + if (core_id == 0) { + this->gicd->ctlr = 0; + } + this->gicc->ctlr = 0; + + /* Set the priority level. */ + SetPriorityLevel(PriorityLevel_High); + + /* Setup all interrupt lines. */ + SetupInterruptLines(core_id); + + this->gicd = nullptr; + this->gicc = nullptr; + } + +} diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_manager.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_manager.cpp new file mode 100644 index 000000000..3b866be8e --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_manager.cpp @@ -0,0 +1,289 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern::arch::arm64 { + + /* Instantiate static members in specific translation unit. */ + KSpinLock KInterruptManager::s_lock; + std::array KInterruptManager::s_global_interrupts; + KInterruptController::GlobalState KInterruptManager::s_global_state; + bool KInterruptManager::s_global_state_saved; + + void KInterruptManager::Initialize(s32 core_id) { + this->interrupt_controller.Initialize(core_id); + } + + void KInterruptManager::Finalize(s32 core_id) { + this->interrupt_controller.Finalize(core_id); + } + + bool KInterruptManager::OnHandleInterrupt() { + /* Get the interrupt id. */ + const u32 raw_irq = this->interrupt_controller.GetIrq(); + const s32 irq = KInterruptController::ConvertRawIrq(raw_irq); + + /* If the IRQ is spurious, we don't need to reschedule. */ + if (irq < 0) { + return false; + } + + KInterruptTask *task = nullptr; + if (KInterruptController::IsLocal(irq)) { + /* Get local interrupt entry. */ + auto &entry = GetLocalInterruptEntry(irq); + if (entry.handler != nullptr) { + /* Set manual clear needed if relevant. */ + if (entry.manually_cleared) { + this->interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low); + entry.needs_clear = true; + } + + /* Set the handler. */ + task = entry.handler->OnInterrupt(irq); + } else { + MESOSPHERE_LOG("Core%d: Unhandled local interrupt %d\n", GetCurrentCoreId(), irq); + } + } else if (KInterruptController::IsGlobal(irq)) { + KScopedSpinLock lk(GetLock()); + + /* Get global interrupt entry. */ + auto &entry = GetGlobalInterruptEntry(irq); + if (entry.handler != nullptr) { + /* Set manual clear needed if relevant. */ + if (entry.manually_cleared) { + this->interrupt_controller.Disable(irq); + entry.needs_clear = true; + } + + /* Set the handler. */ + task = entry.handler->OnInterrupt(irq); + } else { + MESOSPHERE_LOG("Core%d: Unhandled global interrupt %d\n", GetCurrentCoreId(), irq); + } + } else { + MESOSPHERE_LOG("Invalid interrupt %d\n", irq); + } + + /* Acknowledge the interrupt. */ + this->interrupt_controller.EndOfInterrupt(raw_irq); + + /* If we found no task, then we don't need to reschedule. */ + if (task == nullptr) { + return false; + } + + /* If the task isn't the dummy task, we should add it to the queue. */ + if (task != GetDummyInterruptTask()) { + Kernel::GetInterruptTaskManager().EnqueueTask(task); + } + + return true; + } + + void KInterruptManager::HandleInterrupt(bool user_mode) { + /* On interrupt, call OnHandleInterrupt() to determine if we need rescheduling and handle. */ + const bool needs_scheduling = Kernel::GetInterruptManager().OnHandleInterrupt(); + + /* If we need scheduling, */ + if (needs_scheduling) { + /* Handle any changes needed to the user preemption state. */ + if (user_mode && GetCurrentThread().GetUserPreemptionState() != 0 && GetCurrentProcess().GetPreemptionStatePinnedThread(GetCurrentCoreId()) == nullptr) { + KScopedSchedulerLock sl; + + /* Note the preemption state in process. */ + GetCurrentProcess().SetPreemptionState(); + + /* Set the kernel preemption state flag. */ + GetCurrentThread().SetKernelPreemptionState(1);; + + /* Request interrupt scheduling. */ + Kernel::GetScheduler().RequestScheduleOnInterrupt(); + } else { + /* Request interrupt scheduling. 
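
The manually_cleared / needs_clear pair threaded through OnHandleInterrupt and the Clear* methods above implements a simple contract: a manually-cleared interrupt is masked (disabled or deprioritized) when it fires, and stays masked until its owner calls ClearInterrupt. Reduced to its essentials, with the controller calls left as comments:

/* Minimal model of the manual-clear contract (names hypothetical). */
struct InterruptEntry {
    bool manually_cleared;   /* configured at Bind time               */
    bool needs_clear;        /* set when the irq fires, reset on Clear */
};

inline void OnInterruptFired(InterruptEntry &entry) {
    if (entry.manually_cleared) {
        /* controller.Disable(irq) or deprioritize, as in the code above */
        entry.needs_clear = true;
    }
    /* ...dispatch to the bound handler... */
}

inline void ClearInterrupt(InterruptEntry &entry) {
    if (entry.manually_cleared && entry.needs_clear) {
        entry.needs_clear = false;
        /* controller.Enable(irq): the line may fire again */
    }
}
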
*/ + Kernel::GetScheduler().RequestScheduleOnInterrupt(); + } + } + + /* If user mode, check if the thread needs termination. */ + /* If it does, we can take advantage of this to terminate it. */ + if (user_mode) { + KThread *cur_thread = GetCurrentThreadPointer(); + if (cur_thread->IsTerminationRequested()) { + KScopedInterruptEnable ei; + cur_thread->Exit(); + } + } + } + + Result KInterruptManager::BindHandler(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level) { + R_UNLESS(KInterruptController::IsGlobal(irq) || KInterruptController::IsLocal(irq), svc::ResultOutOfRange()); + + KScopedInterruptDisable di; + + if (KInterruptController::IsGlobal(irq)) { + KScopedSpinLock lk(GetLock()); + return this->BindGlobal(handler, irq, core_id, priority, manual_clear, level); + } else { + MESOSPHERE_ASSERT(core_id == GetCurrentCoreId()); + return this->BindLocal(handler, irq, priority, manual_clear); + } + } + + Result KInterruptManager::UnbindHandler(s32 irq, s32 core_id) { + R_UNLESS(KInterruptController::IsGlobal(irq) || KInterruptController::IsLocal(irq), svc::ResultOutOfRange()); + + KScopedInterruptDisable di; + + if (KInterruptController::IsGlobal(irq)) { + KScopedSpinLock lk(GetLock()); + return this->UnbindGlobal(irq); + } else { + MESOSPHERE_ASSERT(core_id == GetCurrentCoreId()); + return this->UnbindLocal(irq); + } + } + + Result KInterruptManager::ClearInterrupt(s32 irq) { + R_UNLESS(KInterruptController::IsGlobal(irq), svc::ResultOutOfRange()); + + KScopedInterruptDisable di; + KScopedSpinLock lk(GetLock()); + return this->ClearGlobal(irq); + } + + Result KInterruptManager::ClearInterrupt(s32 irq, s32 core_id) { + R_UNLESS(KInterruptController::IsGlobal(irq) || KInterruptController::IsLocal(irq), svc::ResultOutOfRange()); + + KScopedInterruptDisable di; + + if (KInterruptController::IsGlobal(irq)) { + KScopedSpinLock lk(GetLock()); + return this->ClearGlobal(irq); + } else { + MESOSPHERE_ASSERT(core_id == GetCurrentCoreId()); + return this->ClearLocal(irq); + } + } + + Result KInterruptManager::BindGlobal(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level) { + /* Ensure the priority level is valid. */ + R_UNLESS(KInterruptController::PriorityLevel_High <= priority, svc::ResultOutOfRange()); + R_UNLESS(priority <= KInterruptController::PriorityLevel_Low, svc::ResultOutOfRange()); + + /* Ensure we aren't already bound. */ + auto &entry = GetGlobalInterruptEntry(irq); + R_UNLESS(entry.handler == nullptr, svc::ResultBusy()); + + /* Set entry fields. */ + entry.needs_clear = false; + entry.manually_cleared = manual_clear; + entry.handler = handler; + + /* Configure the interrupt as level or edge. */ + if (level) { + this->interrupt_controller.SetLevel(irq); + } else { + this->interrupt_controller.SetEdge(irq); + } + + /* Configure the interrupt. */ + this->interrupt_controller.Clear(irq); + this->interrupt_controller.SetTarget(irq, core_id); + this->interrupt_controller.SetPriorityLevel(irq, priority); + this->interrupt_controller.Enable(irq); + + return ResultSuccess(); + } + + Result KInterruptManager::BindLocal(KInterruptHandler *handler, s32 irq, s32 priority, bool manual_clear) { + /* Ensure the priority level is valid. */ + R_UNLESS(KInterruptController::PriorityLevel_High <= priority, svc::ResultOutOfRange()); + R_UNLESS(priority <= KInterruptController::PriorityLevel_Low, svc::ResultOutOfRange()); + + /* Ensure we aren't already bound. 
*/ + auto &entry = this->GetLocalInterruptEntry(irq); + R_UNLESS(entry.handler == nullptr, svc::ResultBusy()); + + /* Set entry fields. */ + entry.needs_clear = false; + entry.manually_cleared = manual_clear; + entry.handler = handler; + entry.priority = static_cast(priority); + + /* Configure the interrupt. */ + this->interrupt_controller.Clear(irq); + this->interrupt_controller.SetPriorityLevel(irq, priority); + this->interrupt_controller.Enable(irq); + + return ResultSuccess(); + } + + Result KInterruptManager::UnbindGlobal(s32 irq) { + for (size_t core_id = 0; core_id < cpu::NumCores; core_id++) { + this->interrupt_controller.ClearTarget(irq, static_cast(core_id)); + } + this->interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low); + this->interrupt_controller.Disable(irq); + + GetGlobalInterruptEntry(irq).handler = nullptr; + + return ResultSuccess(); + } + + Result KInterruptManager::UnbindLocal(s32 irq) { + auto &entry = this->GetLocalInterruptEntry(irq); + R_UNLESS(entry.handler != nullptr, svc::ResultInvalidState()); + + this->interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low); + this->interrupt_controller.Disable(irq); + + entry.handler = nullptr; + + return ResultSuccess(); + } + + Result KInterruptManager::ClearGlobal(s32 irq) { + /* We can't clear an entry with no handler. */ + auto &entry = GetGlobalInterruptEntry(irq); + R_UNLESS(entry.handler != nullptr, svc::ResultInvalidState()); + + /* If auto-cleared, we can succeed immediately. */ + R_UNLESS(entry.manually_cleared, ResultSuccess()); + R_UNLESS(entry.needs_clear, ResultSuccess()); + + /* Clear and enable. */ + entry.needs_clear = false; + this->interrupt_controller.Enable(irq); + return ResultSuccess(); + } + + Result KInterruptManager::ClearLocal(s32 irq) { + /* We can't clear an entry with no handler. */ + auto &entry = this->GetLocalInterruptEntry(irq); + R_UNLESS(entry.handler != nullptr, svc::ResultInvalidState()); + + /* If auto-cleared, we can succeed immediately. */ + R_UNLESS(entry.manually_cleared, ResultSuccess()); + R_UNLESS(entry.needs_clear, ResultSuccess()); + + /* Clear and set priority. */ + entry.needs_clear = false; + this->interrupt_controller.SetPriorityLevel(irq, entry.priority); + return ResultSuccess(); + } + +} diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp new file mode 100644 index 000000000..db1c1e27b --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp @@ -0,0 +1,1082 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#include + +namespace ams::kern::arch::arm64 { + + namespace { + + class AlignedMemoryBlock { + private: + uintptr_t before_start; + uintptr_t before_end; + uintptr_t after_start; + uintptr_t after_end; + size_t current_alignment; + public: + constexpr AlignedMemoryBlock(uintptr_t start, size_t num_pages, size_t alignment) : before_start(0), before_end(0), after_start(0), after_end(0), current_alignment(0) { + MESOSPHERE_ASSERT(util::IsAligned(start, PageSize)); + MESOSPHERE_ASSERT(num_pages > 0); + + /* Find an alignment that allows us to divide into at least two regions.*/ + uintptr_t start_page = start / PageSize; + alignment /= PageSize; + while (util::AlignUp(start_page, alignment) >= util::AlignDown(start_page + num_pages, alignment)) { + alignment = KPageTable::GetSmallerAlignment(alignment * PageSize) / PageSize; + } + + this->before_start = start_page; + this->before_end = util::AlignUp(start_page, alignment); + this->after_start = this->before_end; + this->after_end = start_page + num_pages; + this->current_alignment = alignment; + MESOSPHERE_ASSERT(this->current_alignment > 0); + } + + constexpr void SetAlignment(size_t alignment) { + /* We can only ever decrease the granularity. */ + MESOSPHERE_ASSERT(this->current_alignment >= alignment / PageSize); + this->current_alignment = alignment / PageSize; + } + + constexpr size_t GetAlignment() const { + return this->current_alignment * PageSize; + } + + constexpr void FindBlock(uintptr_t &out, size_t &num_pages) { + if ((this->after_end - this->after_start) >= this->current_alignment) { + /* Select aligned memory from after block. */ + const size_t available_pages = util::AlignDown(this->after_end, this->current_alignment) - this->after_start; + if (num_pages == 0 || available_pages < num_pages) { + num_pages = available_pages; + } + out = this->after_start * PageSize; + this->after_start += num_pages; + } else if ((this->before_end - this->before_start) >= this->current_alignment) { + /* Select aligned memory from before block. */ + const size_t available_pages = this->before_end - util::AlignUp(this->before_start, this->current_alignment); + if (num_pages == 0 || available_pages < num_pages) { + num_pages = available_pages; + } + this->before_end -= num_pages; + out = this->before_end * PageSize; + } else { + /* Neither after or before can get an aligned bit of memory. 
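
The constructor loop above shrinks the requested alignment until [start, start + num_pages) actually contains a non-empty aligned middle, guaranteeing the before/after split is usable. The same search in isolation; halving stands in for KPageTable::GetSmallerAlignment, which really steps down through the supported block sizes.

#include <cstdint>

constexpr uint64_t AlignDown(uint64_t v, uint64_t a) { return v & ~(a - 1); }
constexpr uint64_t AlignUp(uint64_t v, uint64_t a)   { return AlignDown(v + a - 1, a); }

constexpr uint64_t FindUsableAlignment(uint64_t start_page, uint64_t num_pages, uint64_t alignment) {
    while (AlignUp(start_page, alignment) >= AlignDown(start_page + num_pages, alignment)) {
        alignment /= 2;   /* stand-in for GetSmallerAlignment */
    }
    return alignment;
}

/* For pages [3, 11): with alignment 8, AlignUp(3) == AlignDown(11) == 8 and
   the aligned middle is empty; with alignment 4 the middle [4, 8) works. */
static_assert(FindUsableAlignment(3, 8, 8) == 4);
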
*/ + out = 0; + num_pages = 0; + } + } + }; + + constexpr u64 EncodeTtbr(KPhysicalAddress table, u8 asid) { + return (static_cast(asid) << 48) | (static_cast(GetInteger(table))); + } + + class KPageTableAsidManager { + private: + using WordType = u32; + static constexpr u8 ReservedAsids[] = { 0 }; + static constexpr size_t NumReservedAsids = util::size(ReservedAsids); + static constexpr size_t BitsPerWord = BITSIZEOF(WordType); + static constexpr size_t AsidCount = 0x100; + static constexpr size_t NumWords = AsidCount / BitsPerWord; + static constexpr WordType FullWord = ~WordType(0u); + private: + WordType state[NumWords]; + KLightLock lock; + u8 hint; + private: + constexpr bool TestImpl(u8 asid) const { + return this->state[asid / BitsPerWord] & (1u << (asid % BitsPerWord)); + } + constexpr void ReserveImpl(u8 asid) { + MESOSPHERE_ASSERT(!this->TestImpl(asid)); + this->state[asid / BitsPerWord] |= (1u << (asid % BitsPerWord)); + } + + constexpr void ReleaseImpl(u8 asid) { + MESOSPHERE_ASSERT(this->TestImpl(asid)); + this->state[asid / BitsPerWord] &= ~(1u << (asid % BitsPerWord)); + } + + constexpr u8 FindAvailable() const { + for (size_t i = 0; i < util::size(this->state); i++) { + if (this->state[i] == FullWord) { + continue; + } + const WordType clear_bit = (this->state[i] + 1) ^ (this->state[i]); + return BitsPerWord * i + BitsPerWord - 1 - ClearLeadingZero(clear_bit); + } + if (this->state[util::size(this->state)-1] == FullWord) { + MESOSPHERE_PANIC("Unable to reserve ASID"); + } + __builtin_unreachable(); + } + + static constexpr ALWAYS_INLINE WordType ClearLeadingZero(WordType value) { + return __builtin_clzll(value) - (BITSIZEOF(unsigned long long) - BITSIZEOF(WordType)); + } + public: + constexpr KPageTableAsidManager() : state(), lock(), hint() { + for (size_t i = 0; i < NumReservedAsids; i++) { + this->ReserveImpl(ReservedAsids[i]); + } + } + + u8 Reserve() { + KScopedLightLock lk(this->lock); + + if (this->TestImpl(this->hint)) { + this->hint = this->FindAvailable(); + } + + this->ReserveImpl(this->hint); + + return this->hint++; + } + + void Release(u8 asid) { + KScopedLightLock lk(this->lock); + this->ReleaseImpl(asid); + } + }; + + KPageTableAsidManager g_asid_manager; + + } + + void KPageTable::Initialize(s32 core_id) { + /* Nothing actually needed here. */ + } + + Result KPageTable::InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end) { + /* Initialize basic fields. */ + this->asid = 0; + this->manager = std::addressof(Kernel::GetPageTableManager()); + + /* Allocate a page for ttbr. */ + const u64 asid_tag = (static_cast(this->asid) << 48ul); + const KVirtualAddress page = this->manager->Allocate(); + MESOSPHERE_ASSERT(page != Null); + cpu::ClearPageToZero(GetVoidPointer(page)); + this->ttbr = GetInteger(KPageTableBase::GetLinearPhysicalAddress(page)) | asid_tag; + + /* Initialize the base page table. */ + MESOSPHERE_R_ABORT_UNLESS(KPageTableBase::InitializeForKernel(true, table, start, end)); + + return ResultSuccess(); + } + + Result KPageTable::InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager) { + /* Convert the address space type to a width. */ + + /* Get an ASID */ + this->asid = g_asid_manager.Reserve(); + auto asid_guard = SCOPE_GUARD { g_asid_manager.Release(this->asid); }; + + /* Set our manager. 
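
FindAvailable above locates the first free ASID with a bit trick: for a word w, (w + 1) ^ w sets exactly the trailing one-bits of w plus the lowest zero bit, so the highest set bit of that mask is the index of the first free slot. Verified in isolation (GCC/Clang __builtin_clz assumed):

#include <cstdint>

constexpr int FindFirstZeroBit(uint32_t w) {
    const uint32_t mask = (w + 1) ^ w;
    /* highest set bit of mask == lowest zero bit of w */
    return 31 - __builtin_clz(mask);
}

static_assert(FindFirstZeroBit(0b0111) == 3);
static_assert(FindFirstZeroBit(0) == 0);
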
*/ + this->manager = pt_manager; + + /* Allocate a new table, and set our ttbr value. */ + const KVirtualAddress new_table = this->manager->Allocate(); + R_UNLESS(new_table != Null, svc::ResultOutOfResource()); + this->ttbr = EncodeTtbr(GetPageTablePhysicalAddress(new_table), asid); + auto table_guard = SCOPE_GUARD { this->manager->Free(new_table); }; + + /* Initialize our base table. */ + const size_t as_width = GetAddressSpaceWidth(as_type); + const KProcessAddress as_start = 0; + const KProcessAddress as_end = (1ul << as_width); + R_TRY(KPageTableBase::InitializeForProcess(as_type, enable_aslr, from_back, pool, GetVoidPointer(new_table), as_start, as_end, code_address, code_size, mem_block_slab_manager, block_info_manager)); + + /* We succeeded! */ + table_guard.Cancel(); + asid_guard.Cancel(); + + /* Note that we've updated the table (since we created it). */ + this->NoteUpdated(); + return ResultSuccess(); + } + + Result KPageTable::Finalize() { + MESOSPHERE_UNIMPLEMENTED(); + } + + Result KPageTable::Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) { + /* Check validity of parameters. */ + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + MESOSPHERE_ASSERT(num_pages > 0); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize)); + MESOSPHERE_ASSERT(this->ContainsPages(virt_addr, num_pages)); + + if (operation == OperationType_Map) { + MESOSPHERE_ABORT_UNLESS(is_pa_valid); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize)); + } else { + MESOSPHERE_ABORT_UNLESS(!is_pa_valid); + } + + if (operation == OperationType_Unmap) { + return this->Unmap(virt_addr, num_pages, page_list, false, reuse_ll); + } else { + auto entry_template = this->GetEntryTemplate(properties); + + switch (operation) { + case OperationType_Map: + return this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll); + case OperationType_ChangePermissions: + return this->ChangePermissions(virt_addr, num_pages, entry_template, false, page_list, reuse_ll); + case OperationType_ChangePermissionsAndRefresh: + return this->ChangePermissions(virt_addr, num_pages, entry_template, true, page_list, reuse_ll); + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + } + } + + Result KPageTable::Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) { + /* Check validity of parameters. */ + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize)); + MESOSPHERE_ASSERT(num_pages > 0); + MESOSPHERE_ASSERT(num_pages == page_group.GetNumPages()); + + /* Map the page group. 
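+
+        An aside on the ttbr value established in InitializeForProcess above: EncodeTtbr packs the
+        address space identifier into bits [63:48] and the table's physical address into the low
+        bits of TTBR0_EL1. With illustrative numbers, asid = 0x2A and a table at physical address
+        0x80060000 encode to 0x002A000080060000, so a process switch installs the root table and
+        the ASID in a single register write, and TLB entries remain tagged per-ASID.
+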
*/ + auto entry_template = this->GetEntryTemplate(properties); + switch (operation) { + case OperationType_MapGroup: + return this->MapGroup(virt_addr, page_group, num_pages, entry_template, page_list, reuse_ll); + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + } + + Result KPageTable::Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize)); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize)); + + auto &impl = this->GetImpl(); + KVirtualAddress l2_virt = Null; + KVirtualAddress l3_virt = Null; + int l2_open_count = 0; + int l3_open_count = 0; + + /* Iterate, mapping each page. */ + for (size_t i = 0; i < num_pages; i++) { + KPhysicalAddress l3_phys = Null; + bool l2_allocated = false; + + /* If we have no L3 table, we should get or allocate one. */ + if (l3_virt == Null) { + KPhysicalAddress l2_phys = Null; + + /* If we have no L2 table, we should get or allocate one. */ + if (l2_virt == Null) { + if (L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr); !l1_entry->GetTable(l2_phys)) { + /* Allocate table. */ + l2_virt = AllocatePageTable(page_list, reuse_ll); + R_UNLESS(l2_virt != Null, svc::ResultOutOfResource()); + + /* Set the entry. */ + l2_phys = GetPageTablePhysicalAddress(l2_virt); + PteDataSynchronizationBarrier(); + *l1_entry = L1PageTableEntry(l2_phys, this->IsKernel(), true); + PteDataSynchronizationBarrier(); + l2_allocated = true; + } else { + l2_virt = GetPageTableVirtualAddress(l2_phys); + } + } + MESOSPHERE_ASSERT(l2_virt != Null); + + if (L2PageTableEntry *l2_entry = impl.GetL2EntryFromTable(l2_virt, virt_addr); !l2_entry->GetTable(l3_phys)) { + /* Allocate table. */ + l3_virt = AllocatePageTable(page_list, reuse_ll); + if (l3_virt == Null) { + /* Cleanup the L2 entry. */ + if (l2_allocated) { + *impl.GetL1Entry(virt_addr) = InvalidL1PageTableEntry; + this->NoteUpdated(); + FreePageTable(page_list, l2_virt); + } else if (this->GetPageTableManager().IsInPageTableHeap(l2_virt) && l2_open_count > 0) { + this->GetPageTableManager().Open(l2_virt, l2_open_count); + } + return svc::ResultOutOfResource(); + } + + /* Set the entry. */ + l3_phys = GetPageTablePhysicalAddress(l3_virt); + PteDataSynchronizationBarrier(); + *l2_entry = L2PageTableEntry(l3_phys, this->IsKernel(), true); + PteDataSynchronizationBarrier(); + l2_open_count++; + } else { + l3_virt = GetPageTableVirtualAddress(l3_phys); + } + } + MESOSPHERE_ASSERT(l3_virt != Null); + + /* Map the page. */ + *impl.GetL3EntryFromTable(l3_virt, virt_addr) = L3PageTableEntry(phys_addr, entry_template, false); + l3_open_count++; + virt_addr += PageSize; + phys_addr += PageSize; + + /* Account for hitting end of table. */ + if (util::IsAligned(GetInteger(virt_addr), L2BlockSize)) { + if (this->GetPageTableManager().IsInPageTableHeap(l3_virt)) { + this->GetPageTableManager().Open(l3_virt, l3_open_count); + } + l3_virt = Null; + l3_open_count = 0; + + if (util::IsAligned(GetInteger(virt_addr), L1BlockSize)) { + if (this->GetPageTableManager().IsInPageTableHeap(l2_virt) && l2_open_count > 0) { + this->GetPageTableManager().Open(l2_virt, l2_open_count); + } + l2_virt = Null; + l2_open_count = 0; + } + } + } + + /* Perform any remaining opens. 
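+
+        The open counts above implement reference counting on the table pages themselves: each L3
+        entry written takes one reference on its L3 table (and each newly installed L3 table takes
+        one on its parent), batched into a single Open call per table. Unmap below closes the same
+        references, so a table whose count reaches zero is known to hold no valid entries and can
+        be returned to the page table manager.
+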
*/ + if (l2_open_count > 0 && this->GetPageTableManager().IsInPageTableHeap(l2_virt)) { + this->GetPageTableManager().Open(l2_virt, l2_open_count); + } + if (l3_open_count > 0 && this->GetPageTableManager().IsInPageTableHeap(l3_virt)) { + this->GetPageTableManager().Open(l3_virt, l3_open_count); + } + + return ResultSuccess(); + } + + Result KPageTable::Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + auto &impl = this->GetImpl(); + + /* If we're not forcing an unmap, separate pages immediately. */ + if (!force) { + const size_t size = num_pages * PageSize; + R_TRY(this->SeparatePages(virt_addr, std::min(GetInteger(virt_addr) & -GetInteger(virt_addr), size), page_list, reuse_ll)); + if (num_pages > 1) { + const auto end_page = virt_addr + size; + const auto last_page = end_page - PageSize; + + auto merge_guard = SCOPE_GUARD { this->MergePages(virt_addr, page_list); }; + R_TRY(this->SeparatePages(last_page, std::min(GetInteger(end_page) & -GetInteger(end_page), size), page_list, reuse_ll)); + merge_guard.Cancel(); + } + } + + /* Cache initial addresses for use on cleanup. */ + const KProcessAddress orig_virt_addr = virt_addr; + size_t remaining_pages = num_pages; + + /* Ensure that any pages we track close on exit. */ + KPageGroup pages_to_close(this->GetBlockInfoManager()); + KScopedPageGroup spg(pages_to_close); + + /* Begin traversal. */ + TraversalContext context; + TraversalEntry next_entry; + bool next_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr); + + while (remaining_pages > 0) { + /* Handle the case where we're not valid. */ + if (!next_valid) { + MESOSPHERE_ABORT_UNLESS(force); + const size_t cur_size = std::min(next_entry.block_size - (GetInteger(virt_addr) & (next_entry.block_size - 1)), remaining_pages * PageSize); + remaining_pages -= cur_size / PageSize; + virt_addr += cur_size; + continue; + } + + /* Handle the case where the block is bigger than it should be. */ + if (next_entry.block_size > remaining_pages * PageSize) { + MESOSPHERE_ABORT_UNLESS(force); + MESOSPHERE_R_ABORT_UNLESS(this->SeparatePages(virt_addr, remaining_pages * PageSize, page_list, reuse_ll)); + next_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr); + MESOSPHERE_ASSERT(next_valid); + } + + /* Check that our state is coherent. */ + MESOSPHERE_ASSERT((next_entry.block_size / PageSize) <= remaining_pages); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(next_entry.phys_addr), next_entry.block_size)); + + /* Unmap the block. */ + L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr); + switch (next_entry.block_size) { + case L1BlockSize: + { + /* Clear the entry. */ + *l1_entry = InvalidL1PageTableEntry; + } + break; + case L2ContiguousBlockSize: + case L2BlockSize: + { + /* Get the number of L2 blocks. */ + const size_t num_l2_blocks = next_entry.block_size / L2BlockSize; + + /* Get the L2 entry. */ + KPhysicalAddress l2_phys = Null; + MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys)); + const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys); + + /* Clear the entry. */ + for (size_t i = 0; i < num_l2_blocks; i++) { + *impl.GetL2EntryFromTable(l2_virt, virt_addr + L2BlockSize * i) = InvalidL2PageTableEntry; + } + PteDataSynchronizationBarrier(); + + /* Close references to the L2 table. 
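+
+                        A note on the std::min(GetInteger(virt_addr) & -GetInteger(virt_addr), size)
+                        expressions near the top of this function: addr & -addr isolates the lowest
+                        set bit of addr, i.e. the largest power of two the address is aligned to, so
+                        each end of the range is separated out of at most the block that actually
+                        contains it. As an illustrative standalone sketch (AlignmentOf is a
+                        hypothetical helper):
+
+                            // Largest power-of-two alignment of a nonzero address.
+                            constexpr u64 AlignmentOf(u64 addr) { return addr & -addr; }
+                            static_assert(AlignmentOf(0x40006000) == 0x2000);
+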
*/ + if (this->GetPageTableManager().IsInPageTableHeap(l2_virt)) { + if (this->GetPageTableManager().Close(l2_virt, num_l2_blocks)) { + *l1_entry = InvalidL1PageTableEntry; + this->NoteUpdated(); + this->FreePageTable(page_list, l2_virt); + } + } + } + break; + case L3ContiguousBlockSize: + case L3BlockSize: + { + /* Get the number of L3 blocks. */ + const size_t num_l3_blocks = next_entry.block_size / L3BlockSize; + + /* Get the L2 entry. */ + KPhysicalAddress l2_phys = Null; + MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys)); + const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys); + L2PageTableEntry *l2_entry = impl.GetL2EntryFromTable(l2_virt, virt_addr); + + /* Get the L3 entry. */ + KPhysicalAddress l3_phys = Null; + MESOSPHERE_ABORT_UNLESS(l2_entry->GetTable(l3_phys)); + const KVirtualAddress l3_virt = GetPageTableVirtualAddress(l3_phys); + + /* Clear the entry. */ + for (size_t i = 0; i < num_l3_blocks; i++) { + *impl.GetL3EntryFromTable(l3_virt, virt_addr + L3BlockSize * i) = InvalidL3PageTableEntry; + } + PteDataSynchronizationBarrier(); + + /* Close references to the L3 table. */ + if (this->GetPageTableManager().IsInPageTableHeap(l3_virt)) { + if (this->GetPageTableManager().Close(l3_virt, num_l3_blocks)) { + *l2_entry = InvalidL2PageTableEntry; + this->NoteUpdated(); + + /* Close reference to the L2 table. */ + if (this->GetPageTableManager().IsInPageTableHeap(l2_virt)) { + if (this->GetPageTableManager().Close(l2_virt, 1)) { + *l1_entry = InvalidL1PageTableEntry; + this->NoteUpdated(); + this->FreePageTable(page_list, l2_virt); + } + } + + this->FreePageTable(page_list, l3_virt); + } + } + } + break; + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + + /* Close the blocks. */ + if (!force && IsHeapPhysicalAddress(next_entry.phys_addr)) { + const KVirtualAddress block_virt_addr = GetHeapVirtualAddress(next_entry.phys_addr); + const size_t block_num_pages = next_entry.block_size / PageSize; + if (R_FAILED(pages_to_close.AddBlock(block_virt_addr, block_num_pages))) { + this->NoteUpdated(); + Kernel::GetMemoryManager().Close(block_virt_addr, block_num_pages); + } + } + + /* Advance. */ + virt_addr += next_entry.block_size; + remaining_pages -= next_entry.block_size / PageSize; + next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + } + + /* Ensure we remain coherent. */ + if (this->IsKernel() && num_pages == 1) { + this->NoteSingleKernelPageUpdated(orig_virt_addr); + } else { + this->NoteUpdated(); + } + + return ResultSuccess(); + } + + Result KPageTable::MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + /* Cache initial addresses for use on cleanup. */ + const KProcessAddress orig_virt_addr = virt_addr; + const KPhysicalAddress orig_phys_addr = phys_addr; + + size_t remaining_pages = num_pages; + + /* Map the pages, using a guard to ensure we don't leak. */ + { + auto map_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); }; + + if (num_pages < ContiguousPageSize / PageSize) { + R_TRY(this->Map(virt_addr, phys_addr, num_pages, entry_template, L3BlockSize, page_list, reuse_ll)); + remaining_pages -= num_pages; + virt_addr += num_pages * PageSize; + phys_addr += num_pages * PageSize; + } else { + /* Map the fractional part of the pages. 
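+
+                That is: starting at ContiguousPageSize, the loop below doubles the candidate
+                alignment while virt_addr and phys_addr remain congruent modulo it, each pass
+                mapping just enough pages at the previous block size to carry both addresses to
+                the next boundary. Once the largest shared alignment (capped at L1BlockSize) is
+                reached, the second loop maps the bulk at that size and steps back down for the
+                remaining tail.
+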
*/
+                size_t alignment;
+                for (alignment = ContiguousPageSize; (virt_addr & (alignment - 1)) == (phys_addr & (alignment - 1)); alignment = GetLargerAlignment(alignment)) {
+                    /* Check if this would be our last map. */
+                    const size_t pages_to_map = ((alignment - (virt_addr & (alignment - 1))) & (alignment - 1)) / PageSize;
+                    if (pages_to_map + (alignment / PageSize) > remaining_pages) {
+                        break;
+                    }
+
+                    /* Map pages, if we should. */
+                    if (pages_to_map > 0) {
+                        R_TRY(this->Map(virt_addr, phys_addr, pages_to_map, entry_template, GetSmallerAlignment(alignment), page_list, reuse_ll));
+                        remaining_pages -= pages_to_map;
+                        virt_addr += pages_to_map * PageSize;
+                        phys_addr += pages_to_map * PageSize;
+                    }
+
+                    /* Don't go further than L1 block. */
+                    if (alignment == L1BlockSize) {
+                        break;
+                    }
+                }
+
+                while (remaining_pages > 0) {
+                    /* Select the next smallest alignment. */
+                    alignment = GetSmallerAlignment(alignment);
+                    MESOSPHERE_ASSERT((virt_addr & (alignment - 1)) == 0);
+                    MESOSPHERE_ASSERT((phys_addr & (alignment - 1)) == 0);
+
+                    /* Map pages, if we should. */
+                    const size_t pages_to_map = util::AlignDown(remaining_pages, alignment / PageSize);
+                    if (pages_to_map > 0) {
+                        R_TRY(this->Map(virt_addr, phys_addr, pages_to_map, entry_template, alignment, page_list, reuse_ll));
+                        remaining_pages -= pages_to_map;
+                        virt_addr += pages_to_map * PageSize;
+                        phys_addr += pages_to_map * PageSize;
+                    }
+                }
+            }
+
+            /* We successfully mapped, so cancel our guard. */
+            map_guard.Cancel();
+        }
+
+        /* Perform what coalescing we can. */
+        this->MergePages(orig_virt_addr, page_list);
+        if (num_pages > 1) {
+            this->MergePages(orig_virt_addr + (num_pages - 1) * PageSize, page_list);
+        }
+
+        /* Open references to the pages, if we should. */
+        if (IsHeapPhysicalAddress(orig_phys_addr)) {
+            Kernel::GetMemoryManager().Open(GetHeapVirtualAddress(orig_phys_addr), num_pages);
+        }
+
+        return ResultSuccess();
+    }
+
+    Result KPageTable::MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) {
+        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+        /* We want to maintain a new reference to every page in the group. */
+        KScopedPageGroup spg(pg);
+
+        /* Cache initial address for use on cleanup. */
+        const KProcessAddress orig_virt_addr = virt_addr;
+
+        size_t mapped_pages = 0;
+
+        /* Map the pages, using a guard to ensure we don't leak. */
+        {
+            auto map_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); };
+
+            if (num_pages < ContiguousPageSize / PageSize) {
+                for (const auto &block : pg) {
+                    const KPhysicalAddress block_phys_addr = GetLinearPhysicalAddress(block.GetAddress());
+                    const size_t cur_pages = block.GetNumPages();
+                    R_TRY(this->Map(virt_addr, block_phys_addr, cur_pages, entry_template, L3BlockSize, page_list, reuse_ll));
+
+                    virt_addr += cur_pages * PageSize;
+                    mapped_pages += cur_pages;
+                }
+            } else {
+                /* Create a block representing our virtual space. */
+                AlignedMemoryBlock virt_block(GetInteger(virt_addr), num_pages, L1BlockSize);
+                for (const auto &block : pg) {
+                    /* Create a block representing this physical group, synchronize its alignment to our virtual block. 
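+
+                    This synchronization matters because a run can be mapped with a large block
+                    only when its virtual and physical addresses are aligned to that block size
+                    simultaneously, so whichever carver has the weaker alignment bounds both. If,
+                    say, the virtual window is 2MiB-aligned but this physical group is only
+                    64KiB-aligned, both blocks drop to 64KiB granularity and the run is mapped as
+                    contiguous L3 entries rather than L2 blocks.
+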
*/ + const KPhysicalAddress block_phys_addr = GetLinearPhysicalAddress(block.GetAddress()); + size_t cur_pages = block.GetNumPages(); + + AlignedMemoryBlock phys_block(GetInteger(block_phys_addr), cur_pages, virt_block.GetAlignment()); + virt_block.SetAlignment(phys_block.GetAlignment()); + + while (cur_pages > 0) { + /* Find a physical region for us to map at. */ + uintptr_t phys_choice = 0; + size_t phys_pages = 0; + phys_block.FindBlock(phys_choice, phys_pages); + + /* If we didn't find a region, try decreasing our alignment. */ + if (phys_pages == 0) { + const size_t next_alignment = KPageTable::GetSmallerAlignment(phys_block.GetAlignment()); + MESOSPHERE_ASSERT(next_alignment >= PageSize); + phys_block.SetAlignment(next_alignment); + virt_block.SetAlignment(next_alignment); + continue; + } + + /* Begin choosing virtual blocks to map at the region we chose. */ + while (phys_pages > 0) { + /* Find a virtual region for us to map at. */ + uintptr_t virt_choice = 0; + size_t virt_pages = phys_pages; + virt_block.FindBlock(virt_choice, virt_pages); + + /* If we didn't find a region, try decreasing our alignment. */ + if (virt_pages == 0) { + const size_t next_alignment = KPageTable::GetSmallerAlignment(virt_block.GetAlignment()); + MESOSPHERE_ASSERT(next_alignment >= PageSize); + phys_block.SetAlignment(next_alignment); + virt_block.SetAlignment(next_alignment); + continue; + } + + /* Map! */ + R_TRY(this->Map(virt_choice, phys_choice, virt_pages, entry_template, virt_block.GetAlignment(), page_list, reuse_ll)); + + /* Advance. */ + phys_choice += virt_pages * PageSize; + phys_pages -= virt_pages; + cur_pages -= virt_pages; + mapped_pages += virt_pages; + } + } + } + } + + /* We successfully mapped, so cancel our guard. */ + map_guard.Cancel(); + } + MESOSPHERE_ASSERT(mapped_pages == num_pages); + + /* Perform what coalescing we can. */ + this->MergePages(orig_virt_addr, page_list); + if (num_pages > 1) { + this->MergePages(orig_virt_addr + (num_pages - 1) * PageSize, page_list); + } + + /* We succeeded! We want to persist the reference to the pages. */ + spg.CancelClose(); + return ResultSuccess(); + } + + bool KPageTable::MergePages(KProcessAddress virt_addr, PageLinkedList *page_list) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + auto &impl = this->GetImpl(); + bool merged = false; + + /* If there's no L1 table, don't bother. */ + L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr); + if (!l1_entry->IsTable()) { + return merged; + } + + /* Examine and try to merge the L2 table. */ + L2PageTableEntry *l2_entry = impl.GetL2Entry(l1_entry, virt_addr); + if (l2_entry->IsTable()) { + /* We have an L3 entry. */ + L3PageTableEntry *l3_entry = impl.GetL3Entry(l2_entry, virt_addr); + if (!l3_entry->IsBlock() || !l3_entry->IsContiguousAllowed()) { + return merged; + } + + /* If it's not contiguous, try to make it so. */ + if (!l3_entry->IsContiguous()) { + virt_addr = util::AlignDown(GetInteger(virt_addr), L3ContiguousBlockSize); + KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l3_entry->GetBlock()), L3ContiguousBlockSize); + const u64 entry_template = l3_entry->GetEntryTemplate(); + + /* Validate that we can merge. */ + for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) { + if (!impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * i)->Is(entry_template | GetInteger(phys_addr + PageSize * i) | PageTableEntry::Type_L3Block)) { + return merged; + } + } + + /* Merge! 
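+
+                Concretely, the merge succeeds when all 16 L3 entries covering the 64KiB-aligned
+                window map physical pages advancing by PageSize from a 64KiB-aligned base with an
+                identical attribute template, which is what the Is() checks above verified; the
+                contiguous hint then lets the 16 entries share a single TLB entry.
+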
*/
+                for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
+                    impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * i)->SetContiguous(true);
+                }
+
+                /* Note that we updated. */
+                this->NoteUpdated();
+                merged = true;
+            }
+
+            /* We might be able to upgrade a contiguous set of L3 entries into an L2 block. */
+            virt_addr = util::AlignDown(GetInteger(virt_addr), L2BlockSize);
+            KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l3_entry->GetBlock()), L2BlockSize);
+            const u64 entry_template = l3_entry->GetEntryTemplate();
+
+            /* Validate that we can merge. */
+            for (size_t i = 0; i < L2BlockSize / L3ContiguousBlockSize; i++) {
+                if (!impl.GetL3Entry(l2_entry, virt_addr + L3ContiguousBlockSize * i)->Is(entry_template | GetInteger(phys_addr + L3ContiguousBlockSize * i) | PageTableEntry::ContigType_Contiguous)) {
+                    return merged;
+                }
+            }
+
+            /* Merge! */
+            PteDataSynchronizationBarrier();
+            *l2_entry = L2PageTableEntry(phys_addr, entry_template, false);
+
+            /* Note that we updated. */
+            this->NoteUpdated();
+            merged = true;
+
+            /* Free the L3 table. */
+            KVirtualAddress l3_table = util::AlignDown(reinterpret_cast<uintptr_t>(l3_entry), PageSize);
+            if (this->GetPageTableManager().IsInPageTableHeap(l3_table)) {
+                this->GetPageTableManager().Close(l3_table, L2BlockSize / L3BlockSize);
+                this->FreePageTable(page_list, l3_table);
+            }
+        }
+        if (l2_entry->IsBlock()) {
+            /* If it's not contiguous, try to make it so. */
+            if (!l2_entry->IsContiguous()) {
+                virt_addr = util::AlignDown(GetInteger(virt_addr), L2ContiguousBlockSize);
+                KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l2_entry->GetBlock()), L2ContiguousBlockSize);
+                const u64 entry_template = l2_entry->GetEntryTemplate();
+
+                /* Validate that we can merge. */
+                for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
+                    if (!impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * i)->Is(entry_template | GetInteger(phys_addr + L2BlockSize * i) | PageTableEntry::Type_L2Block)) {
+                        return merged;
+                    }
+                }
+
+                /* Merge! */
+                for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
+                    impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * i)->SetContiguous(true);
+                }
+
+                /* Note that we updated. */
+                this->NoteUpdated();
+                merged = true;
+            }
+
+            /* We might be able to upgrade a contiguous set of L2 entries into an L1 block. */
+            virt_addr = util::AlignDown(GetInteger(virt_addr), L1BlockSize);
+            KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l2_entry->GetBlock()), L1BlockSize);
+            const u64 entry_template = l2_entry->GetEntryTemplate();
+
+            /* Validate that we can merge. */
+            for (size_t i = 0; i < L1BlockSize / L2ContiguousBlockSize; i++) {
+                if (!impl.GetL2Entry(l1_entry, virt_addr + L2ContiguousBlockSize * i)->Is(entry_template | GetInteger(phys_addr + L2ContiguousBlockSize * i) | PageTableEntry::ContigType_Contiguous)) {
+                    return merged;
+                }
+            }
+
+            /* Merge! */
+            PteDataSynchronizationBarrier();
+            *l1_entry = L1PageTableEntry(phys_addr, entry_template, false);
+
+            /* Note that we updated. */
+            this->NoteUpdated();
+            merged = true;
+
+            /* Free the L2 table. 
*/ + KVirtualAddress l2_table = util::AlignDown(reinterpret_cast(l2_entry), PageSize); + if (this->GetPageTableManager().IsInPageTableHeap(l2_table)) { + this->GetPageTableManager().Close(l2_table, L1BlockSize / L2BlockSize); + this->FreePageTable(page_list, l2_table); + } + } + + return merged; + } + + Result KPageTable::SeparatePagesImpl(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + auto &impl = this->GetImpl(); + + /* First, try to separate an L1 block into contiguous L2 blocks. */ + L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr); + if (l1_entry->IsBlock()) { + /* If our block size is too big, don't bother. */ + R_UNLESS(block_size < L1BlockSize, ResultSuccess()); + + /* Get the addresses we're working with. */ + const KProcessAddress block_virt_addr = util::AlignDown(GetInteger(virt_addr), L1BlockSize); + const KPhysicalAddress block_phys_addr = l1_entry->GetBlock(); + + /* Allocate a new page for the L2 table. */ + const KVirtualAddress l2_table = this->AllocatePageTable(page_list, reuse_ll); + R_UNLESS(l2_table != Null, svc::ResultOutOfResource()); + const KPhysicalAddress l2_phys = GetPageTablePhysicalAddress(l2_table); + + /* Set the entries in the L2 table. */ + const u64 entry_template = l1_entry->GetEntryTemplate(); + for (size_t i = 0; i < L1BlockSize / L2BlockSize; i++) { + *(impl.GetL2EntryFromTable(l2_table, block_virt_addr + L2BlockSize * i)) = L2PageTableEntry(block_phys_addr + L2BlockSize * i, entry_template, true); + } + + /* Open references to the L2 table. */ + Kernel::GetPageTableManager().Open(l2_table, L1BlockSize / L2BlockSize); + + /* Replace the L1 entry with one to the new table. */ + PteDataSynchronizationBarrier(); + *l1_entry = L1PageTableEntry(l2_phys, this->IsKernel(), true); + this->NoteUpdated(); + } + + /* If we don't have an l1 table, we're done. */ + R_UNLESS(l1_entry->IsTable(), ResultSuccess()); + + /* We want to separate L2 contiguous blocks into L2 blocks, so check that our size permits that. */ + R_UNLESS(block_size < L2ContiguousBlockSize, ResultSuccess()); + + L2PageTableEntry *l2_entry = impl.GetL2Entry(l1_entry, virt_addr); + if (l2_entry->IsBlock()) { + /* If we're contiguous, try to separate. */ + if (l2_entry->IsContiguous()) { + const KProcessAddress block_virt_addr = util::AlignDown(GetInteger(virt_addr), L2ContiguousBlockSize); + + /* Mark the entries as non-contiguous. */ + for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) { + impl.GetL2Entry(l1_entry, block_virt_addr + L2BlockSize * i)->SetContiguous(false); + } + this->NoteUpdated(); + } + + /* We want to separate L2 blocks into L3 contiguous blocks, so check that our size permits that. */ + R_UNLESS(block_size < L2BlockSize, ResultSuccess()); + + /* Get the addresses we're working with. */ + const KProcessAddress block_virt_addr = util::AlignDown(GetInteger(virt_addr), L2BlockSize); + const KPhysicalAddress block_phys_addr = l2_entry->GetBlock(); + + /* Allocate a new page for the L3 table. */ + const KVirtualAddress l3_table = this->AllocatePageTable(page_list, reuse_ll); + R_UNLESS(l3_table != Null, svc::ResultOutOfResource()); + const KPhysicalAddress l3_phys = GetPageTablePhysicalAddress(l3_table); + + /* Set the entries in the L3 table. 
*/
+            const u64 entry_template = l2_entry->GetEntryTemplate();
+            for (size_t i = 0; i < L2BlockSize / L3BlockSize; i++) {
+                *(impl.GetL3EntryFromTable(l3_table, block_virt_addr + L3BlockSize * i)) = L3PageTableEntry(block_phys_addr + L3BlockSize * i, entry_template, true);
+            }
+
+            /* Open references to the L3 table. */
+            Kernel::GetPageTableManager().Open(l3_table, L2BlockSize / L3BlockSize);
+
+            /* Replace the L2 entry with one to the new table. */
+            PteDataSynchronizationBarrier();
+            *l2_entry = L2PageTableEntry(l3_phys, this->IsKernel(), true);
+            this->NoteUpdated();
+        }
+
+        /* If we don't have an L3 table, we're done. */
+        R_UNLESS(l2_entry->IsTable(), ResultSuccess());
+
+        /* We want to separate L3 contiguous blocks into L3 blocks, so check that our size permits that. */
+        R_UNLESS(block_size < L3ContiguousBlockSize, ResultSuccess());
+
+        /* If we're contiguous, try to separate. */
+        L3PageTableEntry *l3_entry = impl.GetL3Entry(l2_entry, virt_addr);
+        if (l3_entry->IsBlock() && l3_entry->IsContiguous()) {
+            const KProcessAddress block_virt_addr = util::AlignDown(GetInteger(virt_addr), L3ContiguousBlockSize);
+
+            /* Mark the entries as non-contiguous. */
+            for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
+                impl.GetL3Entry(l2_entry, block_virt_addr + L3BlockSize * i)->SetContiguous(false);
+            }
+            this->NoteUpdated();
+        }
+
+        /* We're done! */
+        return ResultSuccess();
+    }
+
+    Result KPageTable::SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll) {
+        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+        /* Try to separate pages, re-merging if we fail. */
+        auto guard = SCOPE_GUARD { this->MergePages(virt_addr, page_list); };
+        R_TRY(this->SeparatePagesImpl(virt_addr, block_size, page_list, reuse_ll));
+        guard.Cancel();
+
+        return ResultSuccess();
+    }
+
+    Result KPageTable::ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, bool refresh_mapping, PageLinkedList *page_list, bool reuse_ll) {
+        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+        auto &impl = this->GetImpl();
+
+        /* Separate pages before we change permissions. */
+        const size_t size = num_pages * PageSize;
+        R_TRY(this->SeparatePages(virt_addr, std::min(GetInteger(virt_addr) & -GetInteger(virt_addr), size), page_list, reuse_ll));
+        if (num_pages > 1) {
+            const auto end_page = virt_addr + size;
+            const auto last_page = end_page - PageSize;
+
+            auto merge_guard = SCOPE_GUARD { this->MergePages(virt_addr, page_list); };
+            R_TRY(this->SeparatePages(last_page, std::min(GetInteger(end_page) & -GetInteger(end_page), size), page_list, reuse_ll));
+            merge_guard.Cancel();
+        }
+
+        /* Cache initial addresses for use on cleanup. */
+        const KProcessAddress orig_virt_addr = virt_addr;
+        size_t remaining_pages = num_pages;
+
+        /* Begin traversal. */
+        TraversalContext context;
+        TraversalEntry next_entry;
+        MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr));
+
+        /* Continue changing properties until we've changed them for all pages. */
+        while (remaining_pages > 0) {
+            MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(next_entry.phys_addr), next_entry.block_size));
+            MESOSPHERE_ABORT_UNLESS(next_entry.block_size <= remaining_pages * PageSize);
+
+            L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr);
+            switch (next_entry.block_size) {
+                case L1BlockSize:
+                    {
+                        /* Clear the entry, if we should. 
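+
+                        The refresh_mapping path below follows the architectural break-before-make
+                        sequence: invalidate the descriptor, publish the invalidation through
+                        NoteUpdated's TLB maintenance, and only then write the replacement.
+                        Skipping the intermediate invalid state when changing block attributes
+                        could let stale and new TLB entries for the same address coexist.
+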
*/ + if (refresh_mapping) { + *l1_entry = InvalidL1PageTableEntry; + this->NoteUpdated(); + if (IsHeapPhysicalAddress(next_entry.phys_addr)) { + cpu::FlushDataCache(GetVoidPointer(GetHeapVirtualAddress(next_entry.phys_addr)), L1BlockSize); + } + } + + /* Write the updated entry. */ + *l1_entry = L1PageTableEntry(next_entry.phys_addr, entry_template, false); + } + break; + case L2ContiguousBlockSize: + case L2BlockSize: + { + /* Get the number of L2 blocks. */ + const size_t num_l2_blocks = next_entry.block_size / L2BlockSize; + + /* Get the L2 entry. */ + KPhysicalAddress l2_phys = Null; + MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys)); + const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys); + + /* Clear the entry, if we should. */ + if (refresh_mapping) { + for (size_t i = 0; i < num_l2_blocks; i++) { + *impl.GetL2EntryFromTable(l2_virt, virt_addr + L2BlockSize * i) = InvalidL2PageTableEntry; + } + this->NoteUpdated(); + if (IsHeapPhysicalAddress(next_entry.phys_addr)) { + cpu::FlushDataCache(GetVoidPointer(GetHeapVirtualAddress(next_entry.phys_addr)), next_entry.block_size); + } + } + + /* Write the updated entry. */ + const bool contig = next_entry.block_size == L2ContiguousBlockSize; + for (size_t i = 0; i < num_l2_blocks; i++) { + *impl.GetL2EntryFromTable(l2_virt, virt_addr + L2BlockSize * i) = L2PageTableEntry(next_entry.phys_addr + L2BlockSize * i, entry_template, contig); + } + } + break; + case L3ContiguousBlockSize: + case L3BlockSize: + { + /* Get the number of L3 blocks. */ + const size_t num_l3_blocks = next_entry.block_size / L3BlockSize; + + /* Get the L2 entry. */ + KPhysicalAddress l2_phys = Null; + MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys)); + const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys); + L2PageTableEntry *l2_entry = impl.GetL2EntryFromTable(l2_virt, virt_addr); + + /* Get the L3 entry. */ + KPhysicalAddress l3_phys = Null; + MESOSPHERE_ABORT_UNLESS(l2_entry->GetTable(l3_phys)); + const KVirtualAddress l3_virt = GetPageTableVirtualAddress(l3_phys); + + /* Clear the entry, if we should. */ + if (refresh_mapping) { + for (size_t i = 0; i < num_l3_blocks; i++) { + *impl.GetL3EntryFromTable(l3_virt, virt_addr + L3BlockSize * i) = InvalidL3PageTableEntry; + } + this->NoteUpdated(); + if (IsHeapPhysicalAddress(next_entry.phys_addr)) { + cpu::FlushDataCache(GetVoidPointer(GetHeapVirtualAddress(next_entry.phys_addr)), next_entry.block_size); + } + } + + /* Write the updated entry. */ + const bool contig = next_entry.block_size == L3ContiguousBlockSize; + for (size_t i = 0; i < num_l3_blocks; i++) { + *impl.GetL3EntryFromTable(l3_virt, virt_addr + L3BlockSize * i) = L3PageTableEntry(next_entry.phys_addr + L3BlockSize * i, entry_template, contig); + } + } + break; + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + + /* Advance. */ + virt_addr += next_entry.block_size; + remaining_pages -= next_entry.block_size / PageSize; + if (remaining_pages == 0) { + break; + } + MESOSPHERE_ABORT_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context))); + } + + /* We've succeeded, now perform what coalescing we can. 
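+
+        Only the two ends of the range need attention here: the loop above rewrote interior
+        entries at their existing block sizes, so the only new merge opportunities are where the
+        first and last pages of the range now match the attributes of their neighbors outside it.
+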
*/ + this->MergePages(orig_virt_addr, page_list); + if (num_pages > 1) { + this->MergePages(orig_virt_addr + (num_pages - 1) * PageSize, page_list); + } + + return ResultSuccess(); + } + + void KPageTable::FinalizeUpdate(PageLinkedList *page_list) { + while (page_list->Peek()) { + KVirtualAddress page = KVirtualAddress(page_list->Pop()); + MESOSPHERE_ASSERT(this->GetPageTableManager().IsInPageTableHeap(page)); + MESOSPHERE_ASSERT(this->GetPageTableManager().GetRefCount(page) == 0); + this->GetPageTableManager().Free(page); + } + } + +} diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp new file mode 100644 index 000000000..e232942ee --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern::arch::arm64 { + + void KPageTableImpl::InitializeForKernel(void *tb, KVirtualAddress start, KVirtualAddress end) { + this->table = static_cast(tb); + this->is_kernel = true; + this->num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize; + } + + void KPageTableImpl::InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end) { + this->table = static_cast(tb); + this->is_kernel = false; + this->num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize; + } + + L1PageTableEntry *KPageTableImpl::Finalize() { + return this->table; + } + + bool KPageTableImpl::ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const { + /* Set the L3 entry. */ + out_context->l3_entry = l3_entry; + + if (l3_entry->IsBlock()) { + /* Set the output entry. */ + out_entry->phys_addr = l3_entry->GetBlock() + (virt_addr & (L3BlockSize - 1)); + if (l3_entry->IsContiguous()) { + out_entry->block_size = L3ContiguousBlockSize; + } else { + out_entry->block_size = L3BlockSize; + } + + return true; + } else { + out_entry->phys_addr = Null; + out_entry->block_size = L3BlockSize; + return false; + } + } + + bool KPageTableImpl::ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const { + /* Set the L2 entry. */ + out_context->l2_entry = l2_entry; + + if (l2_entry->IsBlock()) { + /* Set the output entry. */ + out_entry->phys_addr = l2_entry->GetBlock() + (virt_addr & (L2BlockSize - 1)); + if (l2_entry->IsContiguous()) { + out_entry->block_size = L2ContiguousBlockSize; + } else { + out_entry->block_size = L2BlockSize; + } + /* Set the output context. 
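+
+            A usage sketch for the traversal API these extractors feed (mirroring how Unmap and
+            ChangePermissions drive it; not part of this patch):
+
+                TraversalContext context;
+                TraversalEntry   entry;
+                bool valid = impl.BeginTraversal(std::addressof(entry), std::addressof(context), virt_addr);
+                while (valid) {
+                    // entry.phys_addr and entry.block_size describe the current mapping.
+                    valid = impl.ContinueTraversal(std::addressof(entry), std::addressof(context));
+                }
+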
*/ + out_context->l3_entry = nullptr; + return true; + } else if (l2_entry->IsTable()) { + return this->ExtractL3Entry(out_entry, out_context, this->GetL3EntryFromTable(GetPageTableVirtualAddress(l2_entry->GetTable()), virt_addr), virt_addr); + } else { + out_entry->phys_addr = Null; + out_entry->block_size = L2BlockSize; + out_context->l3_entry = nullptr; + return false; + } + } + + bool KPageTableImpl::ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const { + /* Set the L1 entry. */ + out_context->l1_entry = l1_entry; + + if (l1_entry->IsBlock()) { + /* Set the output entry. */ + out_entry->phys_addr = l1_entry->GetBlock() + (virt_addr & (L1BlockSize - 1)); + if (l1_entry->IsContiguous()) { + out_entry->block_size = L1ContiguousBlockSize; + } else { + out_entry->block_size = L1BlockSize; + } + /* Set the output context. */ + out_context->l2_entry = nullptr; + out_context->l3_entry = nullptr; + return true; + } else if (l1_entry->IsTable()) { + return this->ExtractL2Entry(out_entry, out_context, this->GetL2EntryFromTable(GetPageTableVirtualAddress(l1_entry->GetTable()), virt_addr), virt_addr); + } else { + out_entry->phys_addr = Null; + out_entry->block_size = L1BlockSize; + out_context->l2_entry = nullptr; + out_context->l3_entry = nullptr; + return false; + } + } + + bool KPageTableImpl::BeginTraversal(TraversalEntry *out_entry, TraversalContext *out_context, KProcessAddress address) const { + /* Setup invalid defaults. */ + out_entry->phys_addr = Null; + out_entry->block_size = L1BlockSize; + out_context->l1_entry = this->table + this->num_entries; + out_context->l2_entry = nullptr; + out_context->l3_entry = nullptr; + + /* Validate that we can read the actual entry. */ + const size_t l0_index = GetL0Index(address); + const size_t l1_index = GetL1Index(address); + if (this->is_kernel) { + /* Kernel entries must be accessed via TTBR1. */ + if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - this->num_entries)) { + return false; + } + } else { + /* User entries must be accessed with TTBR0. */ + if ((l0_index != 0) || l1_index >= this->num_entries) { + return false; + } + } + + /* Extract the entry. */ + const bool valid = this->ExtractL1Entry(out_entry, out_context, this->GetL1Entry(address), address); + + /* Update the context for next traversal. */ + switch (out_entry->block_size) { + case L1ContiguousBlockSize: + out_context->l1_entry += (L1ContiguousBlockSize / L1BlockSize) - GetContiguousL1Offset(address) / L1BlockSize; + break; + case L1BlockSize: + out_context->l1_entry += 1; + break; + case L2ContiguousBlockSize: + out_context->l1_entry += 1; + out_context->l2_entry += (L2ContiguousBlockSize / L2BlockSize) - GetContiguousL2Offset(address) / L2BlockSize; + break; + case L2BlockSize: + out_context->l1_entry += 1; + out_context->l2_entry += 1; + break; + case L3ContiguousBlockSize: + out_context->l1_entry += 1; + out_context->l2_entry += 1; + out_context->l3_entry += (L3ContiguousBlockSize / L3BlockSize) - GetContiguousL3Offset(address) / L3BlockSize; + break; + case L3BlockSize: + out_context->l1_entry += 1; + out_context->l2_entry += 1; + out_context->l3_entry += 1; + break; + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + + return valid; + } + + bool KPageTableImpl::ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const { + bool valid = false; + + /* Check if we're not at the end of an L3 table. 
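+
+        This works because each table occupies exactly one page (512 eight-byte descriptors) and
+        the context pointers only ever advance: a descriptor pointer becomes page-aligned
+        precisely when it steps past a table's final entry (the last slot at page offset 0xFF8
+        advances to offset 0x000). The IsAligned checks below therefore double as end-of-table
+        tests without tracking an explicit index.
+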
*/ + if (!util::IsAligned(reinterpret_cast(context->l3_entry), PageSize)) { + valid = this->ExtractL3Entry(out_entry, context, context->l3_entry, Null); + + switch (out_entry->block_size) { + case L3ContiguousBlockSize: + context->l3_entry += (L3ContiguousBlockSize / L3BlockSize); + break; + case L3BlockSize: + context->l3_entry += 1; + break; + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + } else if (!util::IsAligned(reinterpret_cast(context->l2_entry), PageSize)) { + /* We're not at the end of an L2 table. */ + valid = this->ExtractL2Entry(out_entry, context, context->l2_entry, Null); + + switch (out_entry->block_size) { + case L2ContiguousBlockSize: + context->l2_entry += (L2ContiguousBlockSize / L2BlockSize); + break; + case L2BlockSize: + context->l2_entry += 1; + break; + case L3ContiguousBlockSize: + context->l2_entry += 1; + context->l3_entry += (L3ContiguousBlockSize / L3BlockSize); + break; + case L3BlockSize: + context->l2_entry += 1; + context->l3_entry += 1; + break; + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + } else { + /* We need to update the l1 entry. */ + const size_t l1_index = context->l1_entry - this->table; + if (l1_index < this->num_entries) { + valid = this->ExtractL1Entry(out_entry, context, context->l1_entry, Null); + } else { + /* Invalid, end traversal. */ + out_entry->phys_addr = Null; + out_entry->block_size = L1BlockSize; + context->l1_entry = this->table + this->num_entries; + context->l2_entry = nullptr; + context->l3_entry = nullptr; + return false; + } + + switch (out_entry->block_size) { + case L1ContiguousBlockSize: + context->l1_entry += (L1ContiguousBlockSize / L1BlockSize); + break; + case L1BlockSize: + context->l1_entry += 1; + break; + case L2ContiguousBlockSize: + context->l1_entry += 1; + context->l2_entry += (L2ContiguousBlockSize / L2BlockSize); + break; + case L2BlockSize: + context->l1_entry += 1; + context->l2_entry += 1; + break; + case L3ContiguousBlockSize: + context->l1_entry += 1; + context->l2_entry += 1; + context->l3_entry += (L3ContiguousBlockSize / L3BlockSize); + break; + case L3BlockSize: + context->l1_entry += 1; + context->l2_entry += 1; + context->l3_entry += 1; + break; + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + } + + return valid; + } + + bool KPageTableImpl::GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const { + /* Validate that we can read the actual entry. */ + const size_t l0_index = GetL0Index(address); + const size_t l1_index = GetL1Index(address); + if (this->is_kernel) { + /* Kernel entries must be accessed via TTBR1. */ + if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - this->num_entries)) { + return false; + } + } else { + /* User entries must be accessed with TTBR0. */ + if ((l0_index != 0) || l1_index >= this->num_entries) { + return false; + } + } + + /* Try to get from l1 table. */ + const L1PageTableEntry *l1_entry = this->GetL1Entry(address); + if (l1_entry->IsBlock()) { + *out = l1_entry->GetBlock() + GetL1Offset(address); + return true; + } else if (!l1_entry->IsTable()) { + return false; + } + + /* Try to get from l2 table. */ + const L2PageTableEntry *l2_entry = this->GetL2Entry(l1_entry, address); + if (l2_entry->IsBlock()) { + *out = l2_entry->GetBlock() + GetL2Offset(address); + return true; + } else if (!l2_entry->IsTable()) { + return false; + } + + /* Try to get from l3 table. 
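+
+        At each level the translation is simply the block base plus the address bits below the
+        block boundary; a virtual address inside a 2MiB L2 block, for instance, resolves to
+        GetBlock() plus the low 21 bits of the address, which is what the GetL2Offset helper
+        extracts.
+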
*/ + const L3PageTableEntry *l3_entry = this->GetL3Entry(l2_entry, address); + if (l3_entry->IsBlock()) { + *out = l3_entry->GetBlock() + GetL3Offset(address); + return true; + } + + return false; + } + +} diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_supervisor_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_supervisor_page_table.cpp new file mode 100644 index 000000000..846766e5d --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_supervisor_page_table.cpp @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern::arch::arm64 { + + void KSupervisorPageTable::Initialize(s32 core_id) { + /* Get the identity mapping ttbr0. */ + this->ttbr0[core_id] = cpu::GetTtbr0El1(); + + /* Set sctlr_el1 */ + cpu::SystemControlRegisterAccessor().SetWxn(true).Store(); + cpu::EnsureInstructionConsistency(); + + /* Invalidate the entire TLB. */ + cpu::InvalidateEntireTlb(); + + /* If core 0, initialize our base page table. */ + if (core_id == 0) { + /* TODO: constexpr defines. */ + const u64 ttbr1 = cpu::GetTtbr1El1() & 0xFFFFFFFFFFFFul; + const u64 kernel_vaddr_start = 0xFFFFFF8000000000ul; + const u64 kernel_vaddr_end = 0xFFFFFFFFFFE00000ul; + void *table = GetVoidPointer(KPageTableBase::GetLinearVirtualAddress(ttbr1)); + this->page_table.InitializeForKernel(table, kernel_vaddr_start, kernel_vaddr_end); + } + } + + void KSupervisorPageTable::Finalize(s32 core_id) { + MESOSPHERE_UNIMPLEMENTED(); + } +} diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context.cpp new file mode 100644 index 000000000..67a18aee3 --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context.cpp @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern::arch::arm64 { + + /* These are implemented elsewhere (asm). */ + void UserModeThreadStarter(); + void SupervisorModeThreadStarter(); + + void OnThreadStart() { + MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled()); + /* Send KDebug event for this thread's creation. */ + { + KScopedInterruptEnable ei; + /* TODO */ + } + + /* Handle any pending dpc. 
*/ + while (GetCurrentThread().HasDpc()) { + KDpcManager::HandleDpc(); + } + + /* Clear our status as in an exception handler */ + GetCurrentThread().ClearInExceptionHandler(); + } + + namespace { + + ALWAYS_INLINE bool IsFpuEnabled() { + return cpu::ArchitecturalFeatureAccessControlRegisterAccessor().IsFpEnabled(); + } + + ALWAYS_INLINE void EnableFpu() { + cpu::ArchitecturalFeatureAccessControlRegisterAccessor().SetFpEnabled(true).Store(); + cpu::InstructionMemoryBarrier(); + } + + uintptr_t SetupStackForUserModeThreadStarter(KVirtualAddress pc, KVirtualAddress k_sp, KVirtualAddress u_sp, uintptr_t arg, bool is_64_bit) { + /* NOTE: Stack layout on entry looks like following: */ + /* SP */ + /* | */ + /* v */ + /* | KExceptionContext (size 0x120) | KThread::StackParameters (size 0x30) | */ + KExceptionContext *ctx = GetPointer(k_sp) - 1; + + /* Clear context. */ + std::memset(ctx, 0, sizeof(*ctx)); + + /* Set PC and argument. */ + ctx->pc = GetInteger(pc); + ctx->x[0] = arg; + + /* Set PSR. */ + if (is_64_bit) { + ctx->psr = 0; + } else { + constexpr u64 PsrArmValue = 0x20; + constexpr u64 PsrThumbValue = 0x00; + ctx->psr = ((pc & 1) == 0 ? PsrArmValue : PsrThumbValue) | (0x10); + } + + /* Set stack pointer. */ + if (is_64_bit) { + ctx->sp = GetInteger(u_sp); + } else { + ctx->x[13] = GetInteger(u_sp); + } + + return reinterpret_cast(ctx); + } + + uintptr_t SetupStackForSupervisorModeThreadStarter(KVirtualAddress pc, KVirtualAddress sp, uintptr_t arg) { + /* NOTE: Stack layout on entry looks like following: */ + /* SP */ + /* | */ + /* v */ + /* | u64 argument | u64 entrypoint | KThread::StackParameters (size 0x30) | */ + static_assert(sizeof(KThread::StackParameters) == 0x30); + + u64 *stack = GetPointer(sp); + *(--stack) = GetInteger(pc); + *(--stack) = arg; + return reinterpret_cast(stack); + } + + } + + Result KThreadContext::Initialize(KVirtualAddress u_pc, KVirtualAddress k_sp, KVirtualAddress u_sp, uintptr_t arg, bool is_user, bool is_64_bit, bool is_main) { + MESOSPHERE_ASSERT(k_sp != Null); + + /* Ensure that the stack pointers are aligned. */ + k_sp = util::AlignDown(GetInteger(k_sp), 16); + u_sp = util::AlignDown(GetInteger(u_sp), 16); + + /* Determine LR and SP. */ + if (is_user) { + /* Usermode thread. */ + this->lr = reinterpret_cast(::ams::kern::arch::arm64::UserModeThreadStarter); + this->sp = SetupStackForUserModeThreadStarter(u_pc, k_sp, u_sp, arg, is_64_bit); + } else { + /* Kernel thread. */ + MESOSPHERE_ASSERT(is_64_bit); + + if (is_main) { + /* Main thread. */ + this->lr = GetInteger(u_pc); + this->sp = GetInteger(k_sp); + } else { + /* Generic Kernel thread. */ + this->lr = reinterpret_cast(::ams::kern::arch::arm64::SupervisorModeThreadStarter); + this->sp = SetupStackForSupervisorModeThreadStarter(u_pc, k_sp, arg); + } + } + + /* Clear callee-saved registers. */ + for (size_t i = 0; i < util::size(this->callee_saved.registers); i++) { + this->callee_saved.registers[i] = 0; + } + + /* Clear FPU state. */ + this->fpcr = 0; + this->fpsr = 0; + this->cpacr = 0; + for (size_t i = 0; i < util::size(this->fpu_registers); i++) { + this->fpu_registers[i] = 0; + } + + /* Lock the context, if we're a main thread. */ + this->locked = is_main; + + return ResultSuccess(); + } + + Result KThreadContext::Finalize() { + /* This doesn't actually do anything. 
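+
+        A reading aid for SetArguments just below: for a user-mode thread, this->sp points at the
+        KExceptionContext built by SetupStackForUserModeThreadStarter, whose leading members are
+        the x registers, so stack[0] and stack[1] alias x0 and x1; for a supervisor-mode thread,
+        sp points at the argument/entrypoint pair pushed above. Either way, the slots are only
+        meaningful before the thread first runs.
+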
*/ + return ResultSuccess(); + } + + void KThreadContext::SetArguments(uintptr_t arg0, uintptr_t arg1) { + u64 *stack = reinterpret_cast(this->sp); + stack[0] = arg0; + stack[1] = arg1; + } + + void KThreadContext::FpuContextSwitchHandler(KThread *thread) { + MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled()); + MESOSPHERE_ASSERT(!IsFpuEnabled()); + + /* Enable the FPU. */ + EnableFpu(); + + /* Restore the FPU registers. */ + KProcess *process = thread->GetOwnerProcess(); + MESOSPHERE_ASSERT(process != nullptr); + if (process->Is64Bit()) { + RestoreFpuRegisters64(thread->GetContext()); + } else { + RestoreFpuRegisters32(thread->GetContext()); + } + } + +} diff --git a/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s b/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s new file mode 100644 index 000000000..d210466c9 --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s @@ -0,0 +1,519 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +/* ams::kern::arch::arm64::UserspaceAccessFunctionAreaBegin() */ +.section .text._ZN3ams4kern4arch5arm6432UserspaceAccessFunctionAreaBeginEv, "ax", %progbits +.global _ZN3ams4kern4arch5arm6432UserspaceAccessFunctionAreaBeginEv +.type _ZN3ams4kern4arch5arm6432UserspaceAccessFunctionAreaBeginEv, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6432UserspaceAccessFunctionAreaBeginEv: +/* NOTE: This is not a real function, and only exists as a label for safety. */ + +/* ================ All Userspace Access Functions after this line. ================ */ + +/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryFromUser(void *dst, const void *src, size_t size) */ +.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess18CopyMemoryFromUserEPvPKvm, "ax", %progbits +.global _ZN3ams4kern4arch5arm6415UserspaceAccess18CopyMemoryFromUserEPvPKvm +.type _ZN3ams4kern4arch5arm6415UserspaceAccess18CopyMemoryFromUserEPvPKvm, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6415UserspaceAccess18CopyMemoryFromUserEPvPKvm: + /* Check if there's anything to copy. */ + cmp x2, #0 + b.eq 2f + + /* Keep track of the last address. */ + add x3, x1, x2 + +1: /* We're copying memory byte-by-byte. */ + ldtrb w2, [x1] + strb w2, [x0], #1 + add x1, x1, #1 + cmp x1, x3 + b.ne 1b + +2: /* We're done. 
*/ + mov x0, #1 + ret + +/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryFromUserAligned32Bit(void *dst, const void *src, size_t size) */ +.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned32BitEPvPKvm, "ax", %progbits +.global _ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned32BitEPvPKvm +.type _ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned32BitEPvPKvm, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned32BitEPvPKvm: + /* Check if there are 0x40 bytes to copy */ + cmp x2, #0x3F + b.ls 1f + ldtr x4, [x1, #0x00] + ldtr x5, [x1, #0x08] + ldtr x6, [x1, #0x10] + ldtr x7, [x1, #0x18] + ldtr x8, [x1, #0x20] + ldtr x9, [x1, #0x28] + ldtr x10, [x1, #0x30] + ldtr x11, [x1, #0x38] + stp x4, x5, [x0, #0x00] + stp x6, x7, [x0, #0x10] + stp x8, x9, [x0, #0x20] + stp x10, x11, [x0, #0x30] + add x0, x0, #0x40 + add x1, x1, #0x40 + sub x2, x2, #0x40 + b _ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned32BitEPvPKvm + +1: /* We have less than 0x40 bytes to copy. */ + cmp x2, #0 + b.eq 2f + ldtr w4, [x1] + str w4, [x0], #4 + add x1, x1, #4 + sub x2, x2, #4 + b 1b + +2: /* We're done. */ + mov x0, #1 + ret + +/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryFromUserAligned64Bit(void *dst, const void *src, size_t size) */ +.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned64BitEPvPKvm, "ax", %progbits +.global _ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned64BitEPvPKvm +.type _ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned64BitEPvPKvm, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned64BitEPvPKvm: + /* Check if there are 0x40 bytes to copy */ + cmp x2, #0x3F + b.ls 1f + ldtr x4, [x1, #0x00] + ldtr x5, [x1, #0x08] + ldtr x6, [x1, #0x10] + ldtr x7, [x1, #0x18] + ldtr x8, [x1, #0x20] + ldtr x9, [x1, #0x28] + ldtr x10, [x1, #0x30] + ldtr x11, [x1, #0x38] + stp x4, x5, [x0, #0x00] + stp x6, x7, [x0, #0x10] + stp x8, x9, [x0, #0x20] + stp x10, x11, [x0, #0x30] + add x0, x0, #0x40 + add x1, x1, #0x40 + sub x2, x2, #0x40 + b _ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned64BitEPvPKvm + +1: /* We have less than 0x40 bytes to copy. */ + cmp x2, #0 + b.eq 2f + ldtr x4, [x1] + str x4, [x0], #8 + add x1, x1, #8 + sub x2, x2, #8 + b 1b + +2: /* We're done. */ + mov x0, #1 + ret + +/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryFromUserSize32Bit(void *dst, const void *src) */ +.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize32BitEPvPKv, "ax", %progbits +.global _ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize32BitEPvPKv +.type _ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize32BitEPvPKv, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize32BitEPvPKv: + /* Just load and store a u32. */ + ldtr w2, [x1] + str w2, [x0] + + /* We're done. */ + mov x0, #1 + ret + +/* ams::kern::arch::arm64::UserspaceAccess::CopyStringFromUser(void *dst, const void *src, size_t size) */ +.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess18CopyStringFromUserEPvPKvm, "ax", %progbits +.global _ZN3ams4kern4arch5arm6415UserspaceAccess18CopyStringFromUserEPvPKvm +.type _ZN3ams4kern4arch5arm6415UserspaceAccess18CopyStringFromUserEPvPKvm, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6415UserspaceAccess18CopyStringFromUserEPvPKvm: + /* Check if there's anything to copy. 
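+
+    Why the unusual mnemonics throughout this file: ldtr/sttr and their byte forms perform the
+    access with EL0 privileges even though the kernel runs at EL1, so every user-supplied address
+    is checked against the user's own page permissions. If such an access faults, the exception
+    handler can recognize a faulting PC within the UserspaceAccessFunctionArea begin/end markers
+    and abort the copy (that handler is outside this diff); plain ldr/str/ldp are reserved for
+    the kernel-side buffer of each routine.
+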
*/ + cmp x2, #0 + b.eq 3f + + /* Keep track of the start address and last address. */ + mov x4, x1 + add x3, x1, x2 + +1: /* We're copying memory byte-by-byte. */ + ldtrb w2, [x1] + strb w2, [x0], #1 + add x1, x1, #1 + + /* If we read a null terminator, we're done. */ + cmp w2, #0 + b.eq 2f + + /* Check if we're done. */ + cmp x1, x3 + b.ne 1b + +2: /* We're done, and we copied some amount of data from the string. */ + sub x0, x1, x4 + ret + +3: /* We're done, and there was no string data. */ + mov x0, #0 + ret + +/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryToUser(void *dst, const void *src, size_t size) */ +.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess16CopyMemoryToUserEPvPKvm, "ax", %progbits +.global _ZN3ams4kern4arch5arm6415UserspaceAccess16CopyMemoryToUserEPvPKvm +.type _ZN3ams4kern4arch5arm6415UserspaceAccess16CopyMemoryToUserEPvPKvm, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6415UserspaceAccess16CopyMemoryToUserEPvPKvm: + /* Check if there's anything to copy. */ + cmp x2, #0 + b.eq 2f + + /* Keep track of the last address. */ + add x3, x1, x2 + +1: /* We're copying memory byte-by-byte. */ + ldrb w2, [x1], #1 + sttrb w2, [x0] + add x0, x0, #1 + cmp x1, x3 + b.ne 1b + +2: /* We're done. */ + mov x0, #1 + ret + +/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryToUserAligned32Bit(void *dst, const void *src, size_t size) */ +.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned32BitEPvPKvm, "ax", %progbits +.global _ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned32BitEPvPKvm +.type _ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned32BitEPvPKvm, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned32BitEPvPKvm: + /* Check if there are 0x40 bytes to copy */ + cmp x2, #0x3F + b.ls 1f + ldp x4, x5, [x1, #0x00] + ldp x6, x7, [x1, #0x10] + ldp x8, x9, [x1, #0x20] + ldp x10, x11, [x1, #0x30] + sttr x4, [x0, #0x00] + sttr x5, [x0, #0x08] + sttr x6, [x0, #0x10] + sttr x7, [x0, #0x18] + sttr x8, [x0, #0x20] + sttr x9, [x0, #0x28] + sttr x10, [x0, #0x30] + sttr x11, [x0, #0x38] + add x0, x0, #0x40 + add x1, x1, #0x40 + sub x2, x2, #0x40 + b _ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned32BitEPvPKvm + +1: /* We have less than 0x40 bytes to copy. */ + cmp x2, #0 + b.eq 2f + ldr w4, [x1], #4 + sttr w4, [x0] + add x0, x0, #4 + sub x2, x2, #4 + b 1b + +2: /* We're done. */ + mov x0, #1 + ret + +/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryToUserAligned64Bit(void *dst, const void *src, size_t size) */ +.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned64BitEPvPKvm, "ax", %progbits +.global _ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned64BitEPvPKvm +.type _ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned64BitEPvPKvm, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned64BitEPvPKvm: + /* Check if there are 0x40 bytes to copy */ + cmp x2, #0x3F + b.ls 1f + ldp x4, x5, [x1, #0x00] + ldp x6, x7, [x1, #0x10] + ldp x8, x9, [x1, #0x20] + ldp x10, x11, [x1, #0x30] + sttr x4, [x0, #0x00] + sttr x5, [x0, #0x08] + sttr x6, [x0, #0x10] + sttr x7, [x0, #0x18] + sttr x8, [x0, #0x20] + sttr x9, [x0, #0x28] + sttr x10, [x0, #0x30] + sttr x11, [x0, #0x38] + add x0, x0, #0x40 + add x1, x1, #0x40 + sub x2, x2, #0x40 + b _ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned64BitEPvPKvm + +1: /* We have less than 0x40 bytes to copy. 
*/
+    cmp     x2, #0
+    b.eq    2f
+    ldr     x4, [x1], #8
+    sttr    x4, [x0]
+    add     x0, x0, #8
+    sub     x2, x2, #8
+    b       1b
+
+2:  /* We're done. */
+    mov     x0, #1
+    ret
+
+/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryToUserSize32Bit(void *dst, const void *src) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess25CopyMemoryToUserSize32BitEPvPKv, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess25CopyMemoryToUserSize32BitEPvPKv
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess25CopyMemoryToUserSize32BitEPvPKv, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess25CopyMemoryToUserSize32BitEPvPKv:
+    /* Just load and store a u32. */
+    ldr     w2, [x1]
+    sttr    w2, [x0]
+
+    /* We're done. */
+    mov     x0, #1
+    ret
+
+/* ams::kern::arch::arm64::UserspaceAccess::CopyStringToUser(void *dst, const void *src, size_t size) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess16CopyStringToUserEPvPKvm, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess16CopyStringToUserEPvPKvm
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess16CopyStringToUserEPvPKvm, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess16CopyStringToUserEPvPKvm:
+    /* Check if there's anything to copy. */
+    cmp     x2, #0
+    b.eq    3f
+
+    /* Keep track of the start address and last address. */
+    mov     x4, x1
+    add     x3, x1, x2
+
+1:  /* We're copying memory byte-by-byte. */
+    ldrb    w2, [x1], #1
+    sttrb   w2, [x0]
+    add     x0, x0, #1
+
+    /* If we read a null terminator, we're done. */
+    cmp     w2, #0
+    b.eq    2f
+
+    /* Check if we're done. */
+    cmp     x1, x3
+    b.ne    1b
+
+2:  /* We're done, and we copied some amount of data from the string. */
+    sub     x0, x1, x4
+    ret
+
+3:  /* We're done, and there was no string data. */
+    mov     x0, #0
+    ret
+
+/* ams::kern::arch::arm64::UserspaceAccess::ClearMemory(void *dst, size_t size) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess11ClearMemoryEPvm, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess11ClearMemoryEPvm
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess11ClearMemoryEPvm, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess11ClearMemoryEPvm:
+    /* Check if there's anything to clear. */
+    cmp     x1, #0
+    b.eq    2f
+
+    /* Keep track of the last address. */
+    add     x2, x0, x1
+
+1:  /* We're clearing memory byte-by-byte. */
+    sttrb   wzr, [x0]
+    add     x0, x0, #1
+    cmp     x0, x2
+    b.ne    1b
+
+2:  /* We're done. */
+    mov     x0, #1
+    ret
+
+/* ams::kern::arch::arm64::UserspaceAccess::ClearMemoryAligned32Bit(void *dst, size_t size) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess23ClearMemoryAligned32BitEPvm, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess23ClearMemoryAligned32BitEPvm
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess23ClearMemoryAligned32BitEPvm, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess23ClearMemoryAligned32BitEPvm:
+    /* Check if there are 0x40 bytes to clear. */
+    cmp     x1, #0x3F
+    b.ls    1f
+    sttr    xzr, [x0, #0x00]
+    sttr    xzr, [x0, #0x08]
+    sttr    xzr, [x0, #0x10]
+    sttr    xzr, [x0, #0x18]
+    sttr    xzr, [x0, #0x20]
+    sttr    xzr, [x0, #0x28]
+    sttr    xzr, [x0, #0x30]
+    sttr    xzr, [x0, #0x38]
+    add     x0, x0, #0x40
+    sub     x1, x1, #0x40
+    b       _ZN3ams4kern4arch5arm6415UserspaceAccess23ClearMemoryAligned32BitEPvm
+
+1:  /* We have less than 0x40 bytes to clear. */
+    cmp     x1, #0
+    b.eq    2f
+    sttr    wzr, [x0]
+    add     x0, x0, #4
+    sub     x1, x1, #4
+    b       1b
+
+2:  /* We're done. 
*/
+    mov     x0, #1
+    ret
+
+/* ams::kern::arch::arm64::UserspaceAccess::ClearMemoryAligned64Bit(void *dst, size_t size) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess23ClearMemoryAligned64BitEPvm, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess23ClearMemoryAligned64BitEPvm
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess23ClearMemoryAligned64BitEPvm, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess23ClearMemoryAligned64BitEPvm:
+    /* Check if there are 0x40 bytes to clear. */
+    cmp     x1, #0x3F
+    b.ls    1f
+    sttr    xzr, [x0, #0x00]
+    sttr    xzr, [x0, #0x08]
+    sttr    xzr, [x0, #0x10]
+    sttr    xzr, [x0, #0x18]
+    sttr    xzr, [x0, #0x20]
+    sttr    xzr, [x0, #0x28]
+    sttr    xzr, [x0, #0x30]
+    sttr    xzr, [x0, #0x38]
+    add     x0, x0, #0x40
+    sub     x1, x1, #0x40
+    b       _ZN3ams4kern4arch5arm6415UserspaceAccess23ClearMemoryAligned64BitEPvm
+
+1:  /* We have less than 0x40 bytes to clear. */
+    cmp     x1, #0
+    b.eq    2f
+    sttr    xzr, [x0]
+    add     x0, x0, #8
+    sub     x1, x1, #8
+    b       1b
+
+2:  /* We're done. */
+    mov     x0, #1
+    ret
+
+/* ams::kern::arch::arm64::UserspaceAccess::ClearMemorySize32Bit(void *dst) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess20ClearMemorySize32BitEPv, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess20ClearMemorySize32BitEPv
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess20ClearMemorySize32BitEPv, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess20ClearMemorySize32BitEPv:
+    /* Just store a zero. */
+    sttr    wzr, [x0]
+
+    /* We're done. */
+    mov     x0, #1
+    ret
+
+/* ams::kern::arch::arm64::UserspaceAccess::StoreDataCache(uintptr_t start, uintptr_t end) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess14StoreDataCacheEmm, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess14StoreDataCacheEmm
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess14StoreDataCacheEmm, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess14StoreDataCacheEmm:
+    /* Check if we have any work to do. */
+    cmp     x1, x0
+    b.eq    2f
+
+1:  /* Loop, storing each cache line. */
+    dc      cvac, x0
+    add     x0, x0, #0x40
+    cmp     x1, x0
+    b.ne    1b
+
+2:  /* We're done! */
+    mov     x0, #1
+    ret
+
+/* ams::kern::arch::arm64::UserspaceAccess::FlushDataCache(uintptr_t start, uintptr_t end) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess14FlushDataCacheEmm, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess14FlushDataCacheEmm
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess14FlushDataCacheEmm, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess14FlushDataCacheEmm:
+    /* Check if we have any work to do. */
+    cmp     x1, x0
+    b.eq    2f
+
+1:  /* Loop, flushing each cache line. */
+    dc      civac, x0
+    add     x0, x0, #0x40
+    cmp     x1, x0
+    b.ne    1b
+
+2:  /* We're done! */
+    mov     x0, #1
+    ret
+
+/* ams::kern::arch::arm64::UserspaceAccess::InvalidateDataCache(uintptr_t start, uintptr_t end) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess19InvalidateDataCacheEmm, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess19InvalidateDataCacheEmm
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess19InvalidateDataCacheEmm, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess19InvalidateDataCacheEmm:
+    /* Check if we have any work to do. */
+    cmp     x1, x0
+    b.eq    2f
+
+1:  /* Loop, invalidating each cache line. */
+    dc      ivac, x0
+    add     x0, x0, #0x40
+    cmp     x1, x0
+    b.ne    1b
+
+2:  /* We're done! 
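*/

The StoreDataCache/FlushDataCache/InvalidateDataCache helpers above walk [start, end) with one maintenance operation per 0x40-byte line, 0x40 being the data cache line size of the Cortex cores this code targets; both bounds must be line-aligned or the `b.ne` termination test would be missed. The same loop shape in C++, with the instruction wrapped in inline assembly:

    #include <cstdint>

    /* dc civac: clean and invalidate by VA to the point of coherency. */
    inline void DataCacheCleanInvalidateLine(uintptr_t va) {
        __asm__ __volatile__("dc civac, %[va]" :: [va]"r"(va) : "memory");
    }

    /* Shape of FlushDataCache above: one operation per 0x40-byte line. */
    inline void FlushDataCacheRange(uintptr_t start, uintptr_t end) {
        for (uintptr_t line = start; line != end; line += 0x40) {
            DataCacheCleanInvalidateLine(line);
        }
    }

/* We're done!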
*/ + mov x0, #1 + ret + +/* ams::kern::arch::arm64::UserspaceAccess::InvalidateInstructionCache(uintptr_t start, uintptr_t end) */ +.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess26InvalidateInstructionCacheEmm, "ax", %progbits +.global _ZN3ams4kern4arch5arm6415UserspaceAccess26InvalidateInstructionCacheEmm +.type _ZN3ams4kern4arch5arm6415UserspaceAccess26InvalidateInstructionCacheEmm, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6415UserspaceAccess26InvalidateInstructionCacheEmm: + /* Check if we have any work to do. */ + cmp x1, x0 + b.eq 2f + +1: /* Loop, invalidating each cache line. */ + ic ivau, x0 + add x0, x0, #0x40 + cmp x1, x0 + b.ne 1b + +2: /* We're done! */ + mov x0, #1 + ret + +/* ================ All Userspace Access Functions before this line. ================ */ + +/* ams::kern::arch::arm64::UserspaceAccessFunctionAreaEnd() */ +.section .text._ZN3ams4kern4arch5arm6430UserspaceAccessFunctionAreaEndEv, "ax", %progbits +.global _ZN3ams4kern4arch5arm6430UserspaceAccessFunctionAreaEndEv +.type _ZN3ams4kern4arch5arm6430UserspaceAccessFunctionAreaEndEv, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6430UserspaceAccessFunctionAreaEndEv: +/* NOTE: This is not a real function, and only exists as a label for safety. */ \ No newline at end of file diff --git a/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_handlers_asm.s b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_handlers_asm.s new file mode 100644 index 000000000..b3106e0b2 --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_handlers_asm.s @@ -0,0 +1,343 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +/* ams::kern::arch::arm64::SvcHandler64() */ +.section .text._ZN3ams4kern4arch5arm6412SvcHandler64Ev, "ax", %progbits +.global _ZN3ams4kern4arch5arm6412SvcHandler64Ev +.type _ZN3ams4kern4arch5arm6412SvcHandler64Ev, %function +_ZN3ams4kern4arch5arm6412SvcHandler64Ev: + /* Create a KExceptionContext for the exception. */ + sub sp, sp, #0x120 + + /* Save registers needed for ReturnFromException */ + stp x9, x10, [sp, #(8 * 9)] + str x11, [sp, #(8 * 11)] + str x18, [sp, #(8 * 18)] + + mrs x8, sp_el0 + mrs x9, elr_el1 + mrs x10, spsr_el1 + mrs x11, tpidr_el0 + + /* Save callee-saved registers. */ + stp x19, x20, [sp, #(8 * 19)] + stp x21, x22, [sp, #(8 * 21)] + stp x23, x24, [sp, #(8 * 23)] + stp x25, x26, [sp, #(8 * 25)] + stp x27, x28, [sp, #(8 * 27)] + + /* Save miscellaneous registers. */ + stp x0, x1, [sp, #(8 * 0)] + stp x2, x3, [sp, #(8 * 2)] + stp x4, x5, [sp, #(8 * 4)] + stp x6, x7, [sp, #(8 * 6)] + stp x29, x30, [sp, #(8 * 29)] + stp x8, x9, [sp, #(8 * 31)] + stp x10, x11, [sp, #(8 * 33)] + + /* Check if the SVC index is out of range. */ + mrs x8, esr_el1 + and x8, x8, #0xFF + cmp x8, #0x80 + b.ge 3f + + /* Check the specific SVC permission bit for allowal. 
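*/

The `sub sp, sp, #0x120` at the top of this handler carves out the KExceptionContext that HandleException() later receives. The frame is 36 u64s, and the offsets used in this file (sp_el0 saved at 8*31, elr_el1 at 8*32, spsr_el1 at 8*33, tpidr_el0 at 8*34) line up with a structure of the following shape; field names here are illustrative rather than quoted from the header:

    #include <cstdint>

    struct KExceptionContext {
        uint64_t x[31];     /* x0..x30   at sp + 8*0 .. 8*30 */
        uint64_t sp;        /* sp_el0    at sp + 8*31 */
        uint64_t pc;        /* elr_el1   at sp + 8*32 */
        uint64_t psr;       /* spsr_el1  at sp + 8*33 */
        uint64_t tpidr;     /* tpidr_el0 at sp + 8*34 */
        uint64_t reserved;  /* pads the frame to 0x120 bytes */
    };
    static_assert(sizeof(KExceptionContext) == 0x120);

/* Check whether this SVC's permission bit is set.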
*/ + mov x9, sp + add x9, x9, x8, lsr#3 + ldrb w9, [x9, #0x120] + and x10, x8, #0x7 + lsr x10, x9, x10 + tst x10, #1 + b.eq 3f + + /* Check if our preemption state allows us to call SVCs. */ + mrs x10, tpidrro_el0 + ldrh w10, [x10, #0x100] + cbz w10, 1f + + /* It might not, so check the stack params to see if we must not allow the SVC. */ + ldrb w10, [sp, #(0x120 + 0x14)] + cbz w10, 3f + +1: /* We can call the SVC. */ + adr x10, _ZN3ams4kern3svc10SvcTable64E + ldr x11, [x10, x8, lsl#3] + cbz x11, 3f + + /* Note that we're calling the SVC. */ + mov w10, #1 + strb w10, [sp, #(0x120 + 0x12)] + strb w8, [sp, #(0x120 + 0x11)] + + /* Invoke the SVC handler. */ + mrs x18, tpidr_el1 + msr daifclr, #2 + blr x11 + msr daifset, #2 + +2: /* We completed the SVC, and we should handle DPC. */ + /* Check the dpc flags. */ + ldrb w8, [sp, #(0x120 + 0x10)] + cbz w8, 4f + + /* We have DPC to do! */ + /* Save registers and call ams::kern::KDpcManager::HandleDpc(). */ + sub sp, sp, #0x40 + stp x0, x1, [sp, #(8 * 0)] + stp x2, x3, [sp, #(8 * 2)] + stp x4, x5, [sp, #(8 * 4)] + stp x6, x7, [sp, #(8 * 6)] + bl _ZN3ams4kern11KDpcManager9HandleDpcEv + ldp x0, x1, [sp, #(8 * 0)] + ldp x2, x3, [sp, #(8 * 2)] + ldp x4, x5, [sp, #(8 * 4)] + ldp x6, x7, [sp, #(8 * 6)] + add sp, sp, #0x40 + b 2b + +3: /* Invalid SVC. */ + /* Setup the context to call into HandleException. */ + stp x0, x1, [sp, #(8 * 0)] + stp x2, x3, [sp, #(8 * 2)] + stp x4, x5, [sp, #(8 * 4)] + stp x6, x7, [sp, #(8 * 6)] + stp xzr, xzr, [sp, #(8 * 8)] + stp xzr, xzr, [sp, #(8 * 10)] + stp xzr, xzr, [sp, #(8 * 12)] + stp xzr, xzr, [sp, #(8 * 14)] + stp xzr, xzr, [sp, #(8 * 16)] + stp xzr, x19, [sp, #(8 * 18)] + stp x20, x21, [sp, #(8 * 20)] + stp x22, x23, [sp, #(8 * 22)] + stp x24, x25, [sp, #(8 * 24)] + stp x26, x27, [sp, #(8 * 26)] + stp x28, x29, [sp, #(8 * 28)] + + /* Call ams::kern::arch::arm64::HandleException(ams::kern::arch::arm64::KExceptionContext *) */ + mrs x18, tpidr_el1 + mov x0, sp + bl _ZN3ams4kern4arch5arm6415HandleExceptionEPNS2_17KExceptionContextE + + /* Restore registers. */ + ldp x30, x8, [sp, #(8 * 30)] + ldp x9, x10, [sp, #(8 * 32)] + ldr x11, [sp, #(8 * 34)] + msr sp_el0, x8 + msr elr_el1, x9 + msr spsr_el1, x10 + msr tpidr_el0, x11 + ldp x0, x1, [sp, #(8 * 0)] + ldp x2, x3, [sp, #(8 * 2)] + ldp x4, x5, [sp, #(8 * 4)] + ldp x6, x7, [sp, #(8 * 6)] + ldp x8, x9, [sp, #(8 * 8)] + ldp x10, x11, [sp, #(8 * 10)] + ldp x12, x13, [sp, #(8 * 12)] + ldp x14, x15, [sp, #(8 * 14)] + ldp x16, x17, [sp, #(8 * 16)] + ldp x18, x19, [sp, #(8 * 18)] + ldp x20, x21, [sp, #(8 * 20)] + ldp x22, x23, [sp, #(8 * 22)] + ldp x24, x25, [sp, #(8 * 24)] + ldp x26, x27, [sp, #(8 * 26)] + ldp x28, x29, [sp, #(8 * 28)] + + /* Return. */ + add sp, sp, #0x120 + eret + +4: /* Return from SVC. */ + /* Clear our in-SVC note. */ + strb wzr, [sp, #(0x120 + 0x12)] + + /* Restore registers. */ + ldp x30, x8, [sp, #(8 * 30)] + ldp x9, x10, [sp, #(8 * 32)] + ldr x11, [sp, #(8 * 34)] + msr sp_el0, x8 + msr elr_el1, x9 + msr spsr_el1, x10 + msr tpidr_el0, x11 + + /* Clear registers. */ + mov x8, xzr + mov x9, xzr + mov x10, xzr + mov x11, xzr + mov x12, xzr + mov x13, xzr + mov x14, xzr + mov x15, xzr + mov x16, xzr + mov x17, xzr + mov x18, xzr + + /* Return. 
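*/

The permission check near the top of this handler reads one byte of the caller's SVC access bitmap, which sits immediately after the exception frame (so at sp + 0x120 + id/8), and tests bit id%8. Equivalent C++ (the function name is illustrative):

    #include <cstdint>

    inline bool IsSvcPermitted(const uint8_t *svc_access_flags, unsigned id) {
        return (svc_access_flags[id / 8] >> (id % 8)) & 1;
    }

If the bit is clear, or the table entry for the id is null, execution falls through to the invalid-SVC path (label 3) and the fault is reported through HandleException() instead.

/* Return.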
*/ + add sp, sp, #0x120 + eret + +/* ams::kern::arch::arm64::SvcHandler32() */ +.section .text._ZN3ams4kern4arch5arm6412SvcHandler32Ev, "ax", %progbits +.global _ZN3ams4kern4arch5arm6412SvcHandler32Ev +.type _ZN3ams4kern4arch5arm6412SvcHandler32Ev, %function +_ZN3ams4kern4arch5arm6412SvcHandler32Ev: + /* Ensure that our registers are 32-bit. */ + mov w0, w0 + mov w1, w1 + mov w2, w2 + mov w3, w3 + mov w4, w4 + mov w5, w5 + mov w6, w6 + mov w7, w7 + + /* Create a KExceptionContext for the exception. */ + sub sp, sp, #0x120 + + /* Save system registers */ + mrs x17, elr_el1 + mrs x20, spsr_el1 + mrs x19, tpidr_el0 + stp x17, x20, [sp, #(8 * 32)] + str x19, [sp, #(8 * 34)] + + /* Save registers. */ + stp x0, x1, [sp, #(8 * 0)] + stp x2, x3, [sp, #(8 * 2)] + stp x4, x5, [sp, #(8 * 4)] + stp x6, x7, [sp, #(8 * 6)] + stp x8, x9, [sp, #(8 * 8)] + stp x10, x11, [sp, #(8 * 10)] + stp x12, x13, [sp, #(8 * 12)] + stp x14, xzr, [sp, #(8 * 14)] + + /* Check if the SVC index is out of range. */ + mrs x16, esr_el1 + and x16, x16, #0xFF + cmp x16, #0x80 + b.ge 3f + + /* Check the specific SVC permission bit for allowal. */ + mov x20, sp + add x20, x20, x16, lsr#3 + ldrb w20, [x20, #0x120] + and x17, x16, #0x7 + lsr x17, x20, x17 + tst x17, #1 + b.eq 3f + + /* Check if our preemption state allows us to call SVCs. */ + mrs x15, tpidrro_el0 + ldrh w15, [x15, #0x100] + cbz w15, 1f + + /* It might not, so check the stack params to see if we must not allow the SVC. */ + ldrb w15, [sp, #(0x120 + 0x14)] + cbz w15, 3f + +1: /* We can call the SVC. */ + adr x15, _ZN3ams4kern3svc16SvcTable64From32E + ldr x19, [x15, x16, lsl#3] + cbz x19, 3f + + /* Note that we're calling the SVC. */ + mov w15, #1 + strb w15, [sp, #(0x120 + 0x12)] + strb w16, [sp, #(0x120 + 0x11)] + + /* Invoke the SVC handler. */ + mrs x18, tpidr_el1 + msr daifclr, #2 + blr x19 + msr daifset, #2 + +2: /* We completed the SVC, and we should handle DPC. */ + /* Check the dpc flags. */ + ldrb w16, [sp, #(0x120 + 0x10)] + cbz w16, 4f + + /* We have DPC to do! */ + /* Save registers and call ams::kern::KDpcManager::HandleDpc(). */ + sub sp, sp, #0x20 + stp w0, w1, [sp, #(4 * 0)] + stp w2, w3, [sp, #(4 * 2)] + stp w4, w5, [sp, #(4 * 4)] + stp w6, w7, [sp, #(4 * 6)] + bl _ZN3ams4kern11KDpcManager9HandleDpcEv + ldp w0, w1, [sp, #(4 * 0)] + ldp w2, w3, [sp, #(4 * 2)] + ldp w4, w5, [sp, #(4 * 4)] + ldp w6, w7, [sp, #(4 * 6)] + add sp, sp, #0x20 + b 2b + +3: /* Invalid SVC. */ + /* Setup the context to call into HandleException. */ + stp x0, x1, [sp, #(8 * 0)] + stp x2, x3, [sp, #(8 * 2)] + stp x4, x5, [sp, #(8 * 4)] + stp x6, x7, [sp, #(8 * 6)] + stp xzr, xzr, [sp, #(8 * 16)] + stp xzr, xzr, [sp, #(8 * 18)] + stp xzr, xzr, [sp, #(8 * 20)] + stp xzr, xzr, [sp, #(8 * 22)] + stp xzr, xzr, [sp, #(8 * 24)] + stp xzr, xzr, [sp, #(8 * 26)] + stp xzr, xzr, [sp, #(8 * 28)] + stp xzr, xzr, [sp, #(8 * 30)] + + /* Call ams::kern::arch::arm64::HandleException(ams::kern::arch::arm64::KExceptionContext *) */ + mrs x18, tpidr_el1 + mov x0, sp + bl _ZN3ams4kern4arch5arm6415HandleExceptionEPNS2_17KExceptionContextE + + /* Restore registers. */ + ldp x17, x20, [sp, #(8 * 32)] + ldr x19, [sp, #(8 * 34)] + msr elr_el1, x17 + msr spsr_el1, x20 + msr tpidr_el0, x19 + ldp x0, x1, [sp, #(8 * 0)] + ldp x2, x3, [sp, #(8 * 2)] + ldp x4, x5, [sp, #(8 * 4)] + ldp x6, x7, [sp, #(8 * 6)] + ldp x8, x9, [sp, #(8 * 8)] + ldp x10, x11, [sp, #(8 * 10)] + ldp x12, x13, [sp, #(8 * 12)] + ldp x14, x15, [sp, #(8 * 14)] + + /* Return. */ + add sp, sp, #0x120 + eret + +4: /* Return from SVC. 
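*/

The eight `mov wN, wN` instructions at the top of SvcHandler32 truncate each argument register to its 32-bit view, which zero-extends into the full 64-bit register, so stale high bits from an AArch32 caller can never reach a 64-bit handler. The moral equivalent in C++:

    #include <cstdint>

    constexpr uint64_t SanitizeAArch32Register(uint64_t reg) {
        return static_cast<uint32_t>(reg);   /* keep only the low 32 bits */
    }
    static_assert(SanitizeAArch32Register(0xDEADBEEF00001234) == 0x1234);

/* Return from SVC.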
*/ + /* Clear our in-SVC note. */ + strb wzr, [sp, #(0x120 + 0x12)] + + /* Restore registers. */ + ldp x8, x9, [sp, #(8 * 8)] + ldp x10, x11, [sp, #(8 * 10)] + ldp x12, x13, [sp, #(8 * 12)] + ldp x14, xzr, [sp, #(8 * 14)] + ldp x17, x20, [sp, #(8 * 32)] + ldr x19, [sp, #(8 * 34)] + msr elr_el1, x17 + msr spsr_el1, x20 + msr tpidr_el0, x19 + + /* Return. */ + add sp, sp, #0x120 + eret diff --git a/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_tables.cpp b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_tables.cpp new file mode 100644 index 000000000..0455290e5 --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_tables.cpp @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifdef MESOSPHERE_USE_STUBBED_SVC_TABLES +#include +#endif + +#include +#include + +namespace ams::kern::svc { + + namespace { + + #ifndef MESOSPHERE_USE_STUBBED_SVC_TABLES + #define DECLARE_SVC_STRUCT(ID, RETURN_TYPE, NAME, ...) \ + class NAME { \ + private: \ + using Impl = ::ams::svc::codegen::KernelSvcWrapper<::ams::kern::svc::NAME##64, ::ams::kern::svc::NAME##64From32>; \ + public: \ + static NOINLINE void Call64() { return Impl::Call64(); } \ + static NOINLINE void Call64From32() { return Impl::Call64From32(); } \ + }; + #else + #define DECLARE_SVC_STRUCT(ID, RETURN_TYPE, NAME, ...) \ + class NAME { \ + public: \ + static NOINLINE void Call64() { MESOSPHERE_PANIC("Stubbed Svc"#NAME"64 was called"); } \ + static NOINLINE void Call64From32() { MESOSPHERE_PANIC("Stubbed Svc"#NAME"64From32 was called"); } \ + }; + #endif + + + /* Set omit-frame-pointer to prevent GCC from emitting MOV X29, SP instructions. */ + #pragma GCC push_options + #pragma GCC optimize ("omit-frame-pointer") + + AMS_SVC_FOREACH_KERN_DEFINITION(DECLARE_SVC_STRUCT, _) + + #pragma GCC pop_options + + } + + const std::array SvcTable64From32 = [] { + std::array table = {}; + + #define AMS_KERN_SVC_SET_TABLE_ENTRY(ID, RETURN_TYPE, NAME, ...) \ + table[ID] = NAME::Call64From32; + AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_SET_TABLE_ENTRY, _) + #undef AMS_KERN_SVC_SET_TABLE_ENTRY + + return table; + }(); + + const std::array SvcTable64 = [] { + std::array table = {}; + + #define AMS_KERN_SVC_SET_TABLE_ENTRY(ID, RETURN_TYPE, NAME, ...) 
\
+            table[ID] = NAME::Call64;
+        AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_SET_TABLE_ENTRY, _)
+        #undef AMS_KERN_SVC_SET_TABLE_ENTRY
+
+        return table;
+    }();
+
+}
diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp
new file mode 100644
index 000000000..874d234f7
--- /dev/null
+++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp
@@ -0,0 +1,407 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+#include "kern_mc_registers.hpp"
+
+namespace ams::kern::board::nintendo::nx {
+
+    namespace {
+
+        /* Definitions. */
+        constexpr size_t PageDirectorySize = KPageTableManager::PageTableSize;
+        constexpr size_t PageTableSize     = KPageTableManager::PageTableSize;
+        static_assert(PageDirectorySize == PageSize);
+
+        constexpr size_t AsidCount = 0x80;
+        constexpr size_t PhysicalAddressBits = 34;
+        constexpr size_t PhysicalAddressMask = (1ul << PhysicalAddressBits) - 1ul;
+        constexpr size_t DeviceVirtualAddressBits = 34;
+        constexpr size_t DeviceVirtualAddressMask = (1ul << DeviceVirtualAddressBits) - 1ul;
+
+        constexpr size_t DevicePageBits = 12;
+        constexpr size_t DevicePageSize = (1ul << DevicePageBits);
+        constexpr size_t DeviceLargePageBits = 22;
+        constexpr size_t DeviceLargePageSize = (1ul << DeviceLargePageBits);
+        static_assert(DevicePageSize == PageSize);
+
+        constexpr size_t DeviceAsidRegisterOffsets[] = {
+            [ams::svc::DeviceName_Afi]        = MC_SMMU_AFI_ASID,
+            [ams::svc::DeviceName_Avpc]       = MC_SMMU_AVPC_ASID,
+            [ams::svc::DeviceName_Dc]         = MC_SMMU_DC_ASID,
+            [ams::svc::DeviceName_Dcb]        = MC_SMMU_DCB_ASID,
+            [ams::svc::DeviceName_Hc]         = MC_SMMU_HC_ASID,
+            [ams::svc::DeviceName_Hda]        = MC_SMMU_HDA_ASID,
+            [ams::svc::DeviceName_Isp2]       = MC_SMMU_ISP2_ASID,
+            [ams::svc::DeviceName_MsencNvenc] = MC_SMMU_MSENC_NVENC_ASID,
+            [ams::svc::DeviceName_Nv]         = MC_SMMU_NV_ASID,
+            [ams::svc::DeviceName_Nv2]        = MC_SMMU_NV2_ASID,
+            [ams::svc::DeviceName_Ppcs]       = MC_SMMU_PPCS_ASID,
+            [ams::svc::DeviceName_Sata]       = MC_SMMU_SATA_ASID,
+            [ams::svc::DeviceName_Vi]         = MC_SMMU_VI_ASID,
+            [ams::svc::DeviceName_Vic]        = MC_SMMU_VIC_ASID,
+            [ams::svc::DeviceName_XusbHost]   = MC_SMMU_XUSB_HOST_ASID,
+            [ams::svc::DeviceName_XusbDev]    = MC_SMMU_XUSB_DEV_ASID,
+            [ams::svc::DeviceName_Tsec]       = MC_SMMU_TSEC_ASID,
+            [ams::svc::DeviceName_Ppcs1]      = MC_SMMU_PPCS1_ASID,
+            [ams::svc::DeviceName_Dc1]        = MC_SMMU_DC1_ASID,
+            [ams::svc::DeviceName_Sdmmc1a]    = MC_SMMU_SDMMC1A_ASID,
+            [ams::svc::DeviceName_Sdmmc2a]    = MC_SMMU_SDMMC2A_ASID,
+            [ams::svc::DeviceName_Sdmmc3a]    = MC_SMMU_SDMMC3A_ASID,
+            [ams::svc::DeviceName_Sdmmc4a]    = MC_SMMU_SDMMC4A_ASID,
+            [ams::svc::DeviceName_Isp2b]      = MC_SMMU_ISP2B_ASID,
+            [ams::svc::DeviceName_Gpu]        = MC_SMMU_GPU_ASID,
+            [ams::svc::DeviceName_Gpub]       = MC_SMMU_GPUB_ASID,
+            [ams::svc::DeviceName_Ppcs2]      = MC_SMMU_PPCS2_ASID,
+            [ams::svc::DeviceName_Nvdec]      = MC_SMMU_NVDEC_ASID,
+            [ams::svc::DeviceName_Ape]        = 
MC_SMMU_APE_ASID, + [ams::svc::DeviceName_Se] = MC_SMMU_SE_ASID, + [ams::svc::DeviceName_Nvjpg] = MC_SMMU_NVJPG_ASID, + [ams::svc::DeviceName_Hc1] = MC_SMMU_HC1_ASID, + [ams::svc::DeviceName_Se1] = MC_SMMU_SE1_ASID, + [ams::svc::DeviceName_Axiap] = MC_SMMU_AXIAP_ASID, + [ams::svc::DeviceName_Etr] = MC_SMMU_ETR_ASID, + [ams::svc::DeviceName_Tsecb] = MC_SMMU_TSECB_ASID, + [ams::svc::DeviceName_Tsec1] = MC_SMMU_TSEC1_ASID, + [ams::svc::DeviceName_Tsecb1] = MC_SMMU_TSECB1_ASID, + [ams::svc::DeviceName_Nvdec1] = MC_SMMU_NVDEC1_ASID, + }; + static_assert(util::size(DeviceAsidRegisterOffsets) == ams::svc::DeviceName_Count); + constexpr bool DeviceAsidRegistersValid = [] { + for (size_t i = 0; i < ams::svc::DeviceName_Count; i++) { + if (DeviceAsidRegisterOffsets[i] == 0 || !util::IsAligned(DeviceAsidRegisterOffsets[i], sizeof(u32))) { + return false; + } + } + return true; + }(); + + static_assert(DeviceAsidRegistersValid); + + constexpr ALWAYS_INLINE int GetDeviceAsidRegisterOffset(ams::svc::DeviceName dev) { + if (dev < ams::svc::DeviceName_Count) { + return DeviceAsidRegisterOffsets[dev]; + } else { + return -1; + } + } + + constexpr ams::svc::DeviceName HsDevices[] = { + ams::svc::DeviceName_Afi, + ams::svc::DeviceName_Dc, + ams::svc::DeviceName_Dcb, + ams::svc::DeviceName_Hda, + ams::svc::DeviceName_Isp2, + ams::svc::DeviceName_Sata, + ams::svc::DeviceName_XusbHost, + ams::svc::DeviceName_XusbDev, + ams::svc::DeviceName_Tsec, + ams::svc::DeviceName_Dc1, + ams::svc::DeviceName_Sdmmc1a, + ams::svc::DeviceName_Sdmmc2a, + ams::svc::DeviceName_Sdmmc3a, + ams::svc::DeviceName_Sdmmc4a, + ams::svc::DeviceName_Isp2b, + ams::svc::DeviceName_Gpu, + ams::svc::DeviceName_Gpub, + ams::svc::DeviceName_Axiap, + ams::svc::DeviceName_Etr, + ams::svc::DeviceName_Tsecb, + ams::svc::DeviceName_Tsec1, + ams::svc::DeviceName_Tsecb1, + }; + constexpr size_t NumHsDevices = util::size(HsDevices); + + constexpr u64 HsDeviceMask = [] { + u64 mask = 0; + for (size_t i = 0; i < NumHsDevices; i++) { + mask |= 1ul << HsDevices[i]; + } + return mask; + }(); + + constexpr ALWAYS_INLINE bool IsHsSupported(ams::svc::DeviceName dv) { + return (HsDeviceMask & (1ul << dv)) != 0; + } + + constexpr ALWAYS_INLINE bool IsValidPhysicalAddress(KPhysicalAddress addr) { + return (static_cast(GetInteger(addr)) & ~PhysicalAddressMask) == 0; + } + + /* Types. */ + class EntryBase { + protected: + enum Bit : u32 { + Bit_Table = 28, + Bit_NonSecure = 29, + Bit_Writeable = 30, + Bit_Readable = 31, + }; + private: + u32 value; + protected: + constexpr ALWAYS_INLINE u32 SelectBit(Bit n) const { + return (this->value & (1u << n)); + } + + constexpr ALWAYS_INLINE bool GetBit(Bit n) const { + return this->SelectBit(n) != 0; + } + + static constexpr ALWAYS_INLINE u32 EncodeBit(Bit n, bool en) { + return en ? (1u << n) : 0; + } + + static constexpr ALWAYS_INLINE u32 EncodeValue(bool r, bool w, bool ns, KPhysicalAddress addr, bool t) { + return EncodeBit(Bit_Readable, r) | EncodeBit(Bit_Writeable, w) | EncodeBit(Bit_NonSecure, ns) | EncodeBit(Bit_Table, t) | static_cast(addr >> DevicePageBits); + } + + ALWAYS_INLINE void SetValue(u32 v) { + /* Prevent re-ordering around entry modifications. 
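*/

The barrier used in SetValue() below is an empty asm statement with a "memory" clobber: a compiler-only fence that forbids the compiler from moving memory accesses across it while emitting no instruction (device-side visibility of the entries is handled separately, by the cache maintenance and PTC/TLB invalidation sequences elsewhere in this file). The standard-C++ spelling of the same idea, offered only for comparison:

    #include <atomic>

    /* Compiler fence: no barrier instruction is emitted. */
    inline void CompilerBarrier() {
        std::atomic_signal_fence(std::memory_order_seq_cst);
    }

/* Prevent re-ordering around entry modifications.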
*/ + __asm__ __volatile__("" ::: "memory"); + this->value = v; + __asm__ __volatile__("" ::: "memory"); + } + public: + static constexpr ALWAYS_INLINE u32 EncodePtbDataValue(KPhysicalAddress addr) { + return EncodeValue(true, true, true, addr, false); + } + public: + constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBit(Bit_NonSecure); } + constexpr ALWAYS_INLINE bool IsWriteable() const { return this->GetBit(Bit_Writeable); } + constexpr ALWAYS_INLINE bool IsReadable() const { return this->GetBit(Bit_Readable); } + constexpr ALWAYS_INLINE bool IsValid() const { return this->IsWriteable() || this->IsReadable(); } + + constexpr ALWAYS_INLINE u32 GetAttributes() const { return this->SelectBit(Bit_NonSecure) | this->SelectBit(Bit_Writeable) | this->SelectBit(Bit_Readable); } + + constexpr ALWAYS_INLINE KPhysicalAddress GetPhysicalAddress() { return (static_cast(this->value) << DevicePageBits) & PhysicalAddressMask; } + + ALWAYS_INLINE void Invalidate() { this->SetValue(0); } + }; + + class PageDirectoryEntry : public EntryBase { + public: + constexpr ALWAYS_INLINE bool IsTable() const { return this->GetBit(Bit_Table); } + + ALWAYS_INLINE void SetTable(bool r, bool w, bool ns, KPhysicalAddress addr) { + MESOSPHERE_ASSERT(IsValidPhysicalAddress(addr)); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), DevicePageSize)); + this->SetValue(EncodeValue(r, w, ns, addr, true)); + } + + ALWAYS_INLINE void SetLargePage(bool r, bool w, bool ns, KPhysicalAddress addr) { + MESOSPHERE_ASSERT(IsValidPhysicalAddress(addr)); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), DeviceLargePageSize)); + this->SetValue(EncodeValue(r, w, ns, addr, false)); + } + }; + + class PageTableEntry : public EntryBase { + public: + ALWAYS_INLINE void SetPage(bool r, bool w, bool ns, KPhysicalAddress addr) { + MESOSPHERE_ASSERT(IsValidPhysicalAddress(addr)); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), DevicePageSize)); + this->SetValue(EncodeValue(r, w, ns, addr, true)); + } + }; + + class KDeviceAsidManager { + private: + using WordType = u32; + static constexpr u8 ReservedAsids[] = { 0, 1, 2, 3 }; + static constexpr size_t NumReservedAsids = util::size(ReservedAsids); + static constexpr size_t BitsPerWord = BITSIZEOF(WordType); + static constexpr size_t NumWords = AsidCount / BitsPerWord; + static constexpr WordType FullWord = ~WordType(0u); + private: + WordType state[NumWords]; + KLightLock lock; + private: + constexpr void ReserveImpl(u8 asid) { + this->state[asid / BitsPerWord] |= (1u << (asid % BitsPerWord)); + } + + constexpr void ReleaseImpl(u8 asid) { + this->state[asid / BitsPerWord] &= ~(1u << (asid % BitsPerWord)); + } + + static constexpr ALWAYS_INLINE WordType ClearLeadingZero(WordType value) { + return __builtin_clzll(value) - (BITSIZEOF(unsigned long long) - BITSIZEOF(WordType)); + } + public: + constexpr KDeviceAsidManager() : state(), lock() { + for (size_t i = 0; i < NumReservedAsids; i++) { + this->ReserveImpl(ReservedAsids[i]); + } + } + + Result Reserve(u8 *out, size_t num_desired) { + KScopedLightLock lk(this->lock); + MESOSPHERE_ASSERT(num_desired > 0); + + size_t num_reserved = 0; + for (size_t i = 0; i < NumWords; i++) { + while (this->state[i] != FullWord) { + const WordType clear_bit = (this->state[i] + 1) ^ (this->state[i]); + this->state[i] |= clear_bit; + out[num_reserved++] = static_cast(BitsPerWord * i + BitsPerWord - 1 - ClearLeadingZero(clear_bit)); + R_UNLESS(num_reserved != num_desired, ResultSuccess()); + } + } + + /* We failed, so free what we reserved. 
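*/

Reserve() above finds the lowest clear bit of each word with the identity (w + 1) ^ w: the increment carries through the trailing ones, so the XOR yields a mask of the bits up to and including the lowest zero, and count-leading-zeros then recovers that position. A standalone rendering, valid only when w has at least one zero bit (which the state[i] != FullWord check guarantees):

    #include <cstdint>

    inline unsigned LowestClearBit(uint32_t w) {
        const uint32_t mask = (w + 1) ^ w;                       /* e.g. 0b0111 -> 0b1111 */
        return 31 - static_cast<unsigned>(__builtin_clz(mask));  /* -> index 3 */
    }

/* We failed, so free what we reserved.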
*/ + for (size_t i = 0; i < num_reserved; i++) { + this->ReleaseImpl(out[i]); + } + return svc::ResultOutOfResource(); + } + + void Release(u8 asid) { + KScopedLightLock lk(this->lock); + this->ReleaseImpl(asid); + } + }; + + /* Globals. */ + KLightLock g_lock; + u8 g_reserved_asid; + KPhysicalAddress g_memory_controller_address; + KPhysicalAddress g_reserved_table_phys_addr; + KDeviceAsidManager g_asid_manager; + + /* Memory controller access functionality. */ + void WriteMcRegister(size_t offset, u32 value) { + KSystemControl::WriteRegisterPrivileged(GetInteger(g_memory_controller_address) + offset, value); + } + + u32 ReadMcRegister(size_t offset) { + return KSystemControl::ReadRegisterPrivileged(GetInteger(g_memory_controller_address) + offset); + } + + void SmmuSynchronizationBarrier() { + ReadMcRegister(MC_SMMU_CONFIG); + } + + void InvalidatePtc() { + WriteMcRegister(MC_SMMU_PTC_FLUSH_0, 0); + } + +/* + void InvalidatePtc(KPhysicalAddress address) { + WriteMcRegister(MC_SMMU_PTC_FLUSH_1, (static_cast(GetInteger(address)) >> 32)); + WriteMcRegister(MC_SMMU_PTC_FLUSH_0, (GetInteger(address) & 0xFFFFFFF0u) | 1u); + } +*/ + + enum TlbFlushVaMatch : u32 { + TlbFlushVaMatch_All = 0, + TlbFlushVaMatch_Section = 2, + TlbFlushVaMatch_Group = 3, + }; + + static constexpr ALWAYS_INLINE u32 EncodeTlbFlushValue(bool match_asid, u8 asid, KDeviceVirtualAddress address, TlbFlushVaMatch match) { + return ((match_asid ? 1u : 0u) << 31) | ((asid & 0x7F) << 24) | (((address & 0xFFC00000u) >> DevicePageBits)) | (match); + } + + void InvalidateTlb() { + return WriteMcRegister(MC_SMMU_TLB_FLUSH, EncodeTlbFlushValue(false, 0, 0, TlbFlushVaMatch_All)); + } + + void InvalidateTlb(u8 asid) { + return WriteMcRegister(MC_SMMU_TLB_FLUSH, EncodeTlbFlushValue(true, asid, 0, TlbFlushVaMatch_All)); + } + +/* + void InvalidateTlbSection(u8 asid, KDeviceVirtualAddress address) { + return WriteMcRegister(MC_SMMU_TLB_FLUSH, EncodeTlbFlushValue(true, asid, address, TlbFlushVaMatch_Section)); + } +*/ + + void SetTable(u8 asid, KPhysicalAddress address) { + /* Write the table address. */ + { + KScopedLightLock lk(g_lock); + + WriteMcRegister(MC_SMMU_PTB_ASID, asid); + WriteMcRegister(MC_SMMU_PTB_DATA, EntryBase::EncodePtbDataValue(address)); + + SmmuSynchronizationBarrier(); + } + + /* Ensure consistency. */ + InvalidatePtc(); + InvalidateTlb(asid); + SmmuSynchronizationBarrier(); + } + + } + + void KDevicePageTable::Initialize() { + /* Set the memory controller register address. */ + g_memory_controller_address = KMemoryLayout::GetMemoryControllerRegion().GetAddress(); + + /* Allocate a page to use as a reserved/no device table. */ + const KVirtualAddress table_virt_addr = Kernel::GetPageTableManager().Allocate(); + MESOSPHERE_ABORT_UNLESS(table_virt_addr != Null); + const KPhysicalAddress table_phys_addr = GetPageTablePhysicalAddress(table_virt_addr); + MESOSPHERE_ASSERT(IsValidPhysicalAddress(table_phys_addr)); + Kernel::GetPageTableManager().Open(table_virt_addr, 1); + + /* Clear the page and save it. */ + /* NOTE: Nintendo does not check the result of StoreDataCache. */ + cpu::ClearPageToZero(GetVoidPointer(table_virt_addr)); + cpu::StoreDataCache(GetVoidPointer(table_virt_addr), PageDirectorySize); + g_reserved_table_phys_addr = table_phys_addr; + + /* Reserve an asid to correspond to no device. */ + MESOSPHERE_R_ABORT_UNLESS(g_asid_manager.Reserve(std::addressof(g_reserved_asid), 1)); + + /* Set all asids to the reserved table. 
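*/

SetTable() above relies on SmmuSynchronizationBarrier(), which is nothing more than a read of MC_SMMU_CONFIG: writes to the memory controller are posted, and a read from the block only completes once the preceding writes have landed, so a dummy read serves as a completion fence. A generic sketch of the idiom with volatile MMIO pointers:

    #include <cstdint>

    inline void WriteAndSynchronize(volatile uint32_t *reg, uint32_t value,
                                    volatile uint32_t *sync_reg) {
        *reg = value;        /* posted write */
        (void)*sync_reg;     /* read back to force completion */
    }

/* Set all asids to the reserved table.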
*/ + static_assert(AsidCount <= std::numeric_limits::max()); + for (size_t i = 0; i < AsidCount; i++) { + SetTable(static_cast(i), g_reserved_table_phys_addr); + } + + /* Set all devices to the reserved asid. */ + for (size_t i = 0; i < ams::svc::DeviceName_Count; i++) { + u32 value = 0x80000000u; + if (IsHsSupported(static_cast(i))) { + for (size_t t = 0; t < TableCount; t++) { + value |= (g_reserved_asid << (BITSIZEOF(u8) * t)); + } + } else { + value |= g_reserved_asid; + } + + WriteMcRegister(GetDeviceAsidRegisterOffset(static_cast(i)), value); + SmmuSynchronizationBarrier(); + } + + /* Ensure consistency. */ + InvalidatePtc(); + InvalidateTlb(); + SmmuSynchronizationBarrier(); + + /* Clear int status. */ + WriteMcRegister(MC_INTSTATUS, ReadMcRegister(MC_INTSTATUS)); + + /* Enable the SMMU */ + WriteMcRegister(MC_SMMU_CONFIG, 1); + SmmuSynchronizationBarrier(); + + /* TODO: Install interrupt handler. */ + } + +} diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.cpp new file mode 100644 index 000000000..3846b1752 --- /dev/null +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include "kern_k_sleep_manager.hpp" +#include "kern_secure_monitor.hpp" + +namespace ams::kern::board::nintendo::nx { + + namespace { + + /* Struct representing registers saved on wake/sleep. */ + class SavedSystemRegisters { + private: + u64 ttbr0_el1; + u64 tcr_el1; + u64 elr_el1; + u64 sp_el0; + u64 spsr_el1; + u64 daif; + u64 cpacr_el1; + u64 vbar_el1; + u64 csselr_el1; + u64 cntp_ctl_el0; + u64 cntp_cval_el0; + u64 cntkctl_el1; + u64 tpidr_el0; + u64 tpidrro_el0; + u64 mdscr_el1; + u64 contextidr_el1; + u64 dbgwcrN_el1[16]; + u64 dbgwvrN_el1[16]; + u64 dbgbcrN_el1[16]; + u64 dbgbvrN_el1[16]; + u64 pmccfiltr_el0; + u64 pmccntr_el0; + u64 pmcntenset_el0; + u64 pmcr_el0; + u64 pmevcntrN_el0[31]; + u64 pmevtyperN_el0[31]; + u64 pmcntenset_el1; + u64 pmovsset_el0; + u64 pmselr_el0; + u64 pmuserenr_el0; + public: + void Save(); + void Restore() const; + }; + + constexpr s32 SleepManagerThreadPriority = 2; + + /* Globals for sleep/wake. */ + u64 g_sleep_target_cores; + KLightLock g_request_lock; + KLightLock g_cv_lock; + KLightConditionVariable g_cv; + KPhysicalAddress g_sleep_buffer_phys_addrs[cpu::NumCores]; + alignas(16) u64 g_sleep_buffers[cpu::NumCores][1_KB / sizeof(u64)]; + SavedSystemRegisters g_sleep_system_registers[cpu::NumCores] = {}; + + } + + void KSleepManager::Initialize() { + /* Create a sleep manager thread for each core. */ + for (size_t core_id = 0; core_id < cpu::NumCores; core_id++) { + /* Reserve a thread from the system limit. */ + MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_ThreadCountMax, 1)); + + /* Create a new thread. 
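*/

SleepSystem() and ProcessRequests() below rendezvous through g_sleep_target_cores: the requester sets target bits and broadcasts, each per-core thread waits for its own bit, clears it when its part is done, and the last to finish wakes the requester. The same protocol in standard C++, as a sketch (the kernel uses KLightLock/KLightConditionVariable rather than the std primitives):

    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    std::mutex              g_lock;
    std::condition_variable g_cond;
    uint64_t                g_pending = 0;

    void RequestAndWait(uint64_t mask) {
        std::unique_lock lk(g_lock);
        g_pending |= mask;                                   /* post the request */
        g_cond.notify_all();
        g_cond.wait(lk, [] { return g_pending == 0; });      /* wait for completion */
    }

    void Worker(uint64_t my_bit) {
        {
            std::unique_lock lk(g_lock);
            g_cond.wait(lk, [&] { return (g_pending & my_bit) != 0; });
        }
        /* ... perform this core's part of the request ... */
        {
            std::scoped_lock lk(g_lock);
            g_pending &= ~my_bit;
            if (g_pending == 0) { g_cond.notify_all(); }
        }
    }

/* Create a new thread.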
*/ + KThread *new_thread = KThread::Create(); + MESOSPHERE_ABORT_UNLESS(new_thread != nullptr); + + /* Launch the new thread. */ + MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(new_thread, KSleepManager::ProcessRequests, reinterpret_cast(g_sleep_buffers[core_id]), SleepManagerThreadPriority, static_cast(core_id))); + + /* Register the new thread. */ + KThread::Register(new_thread); + + /* Run the thread. */ + new_thread->Run(); + } + } + + void KSleepManager::SleepSystem() { + /* Ensure device mappings are not modified during sleep. */ + MESOSPHERE_TODO("KDevicePageTable::Lock();"); + ON_SCOPE_EXIT { MESOSPHERE_TODO("KDevicePageTable::Unlock();"); }; + + /* Request that the system sleep. */ + { + KScopedLightLock lk(g_request_lock); + + /* Signal the manager to sleep on all cores. */ + { + KScopedLightLock lk(g_cv_lock); + MESOSPHERE_ABORT_UNLESS(g_sleep_target_cores == 0); + + g_sleep_target_cores = (1ul << (cpu::NumCores - 1)); + g_cv.Broadcast(); + + while (g_sleep_target_cores != 0) { + g_cv.Wait(std::addressof(g_cv_lock)); + } + } + } + } + + void KSleepManager::ProcessRequests(uintptr_t buffer) { + const s32 core_id = GetCurrentCoreId(); + KPhysicalAddress resume_entry_phys_addr = Null; + + /* Get the physical addresses we'll need. */ + { + MESOSPHERE_ABORT_UNLESS(Kernel::GetKernelPageTable().GetPhysicalAddress(std::addressof(g_sleep_buffer_phys_addrs[core_id]), KProcessAddress(buffer))); + MESOSPHERE_ABORT_UNLESS(Kernel::GetKernelPageTable().GetPhysicalAddress(std::addressof(resume_entry_phys_addr), KProcessAddress(&::ams::kern::board::nintendo::nx::KSleepManager::ResumeEntry))); + + } + const KPhysicalAddress sleep_buffer_phys_addr = g_sleep_buffer_phys_addrs[core_id]; + const u64 target_core_mask = (1ul << core_id); + + /* Loop, processing sleep when requested. */ + while (true) { + /* Wait for a request. */ + { + KScopedLightLock lk(g_cv_lock); + while (!(g_sleep_target_cores & target_core_mask)) { + g_cv.Wait(std::addressof(g_cv_lock)); + } + } + + MESOSPHERE_TODO("Implement Sleep/Wake"); + (void)(g_sleep_system_registers[core_id]); + (void)(sleep_buffer_phys_addr); + + /* Signal request completed. */ + { + KScopedLightLock lk(g_cv_lock); + g_sleep_target_cores &= ~target_core_mask; + if (g_sleep_target_cores == 0) { + g_cv.Broadcast(); + } + } + } + } + +} diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.hpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.hpp new file mode 100644 index 000000000..1cc500c4d --- /dev/null +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.hpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include + +namespace ams::kern::board::nintendo::nx { + + class KSleepManager { + private: + static void CpuSleepHandler(uintptr_t arg, uintptr_t entry); + static void ResumeEntry(uintptr_t arg); + + static void ProcessRequests(uintptr_t buffer); + public: + static void Initialize(); + static void SleepSystem(); + }; + + +} \ No newline at end of file diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager_asm.s b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager_asm.s new file mode 100644 index 000000000..1c4a014cd --- /dev/null +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager_asm.s @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +/* ams::kern::board::nintendo::nx::KSleepManager::ResumeEntry(uintptr_t arg) */ +.section .text._ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm, "ax", %progbits +.global _ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm +.type _ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm, %function +_ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm: + /* TODO: Implement a real function here. */ + brk 1000 \ No newline at end of file diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp new file mode 100644 index 000000000..790022ffc --- /dev/null +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp @@ -0,0 +1,330 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include "kern_secure_monitor.hpp" +#include "kern_k_sleep_manager.hpp" + +namespace ams::kern::board::nintendo::nx { + + namespace { + + /* Global variables for panic. */ + bool g_call_smc_on_panic; + + /* Global variables for secure memory. */ + constexpr size_t SecureAppletReservedMemorySize = 4_MB; + KVirtualAddress g_secure_applet_memory_address; + + + /* Global variables for randomness. */ + /* Nintendo uses std::mt19937_t for randomness. */ + /* To save space (and because mt19337_t isn't secure anyway), */ + /* We will use TinyMT. */ + bool g_initialized_random_generator; + util::TinyMT g_random_generator; + KSpinLock g_random_lock; + + ALWAYS_INLINE size_t GetRealMemorySizeForInit() { + /* TODO: Move this into a header for the MC in general. 
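*/

The constant below is the Tegra X1 memory controller base (0x70019000) plus the MC_EMEM_CFG offset (0x50) defined in kern_mc_registers.hpp later in this change; the register's low 14 bits hold the DRAM size in megabytes, so the shift by 20 converts megabytes to bytes. Restated as self-checking constants:

    #include <cstddef>
    #include <cstdint>

    constexpr uintptr_t MemoryControllerBase = 0x70019000;   /* Tegra X1 MC */
    constexpr uintptr_t EmemCfgOffset        = 0x50;         /* MC_EMEM_CFG */
    static_assert(MemoryControllerBase + EmemCfgOffset == 0x70019050);

    constexpr size_t DramSizeFromEmemCfg(uint32_t config_value) {
        return static_cast<size_t>(config_value & 0x3FFF) << 20;
    }
    static_assert(DramSizeFromEmemCfg(0x1000) == 0x100000000);   /* 4096 MB == 4 GB */

/* 0x70019050 is the MC base (0x70019000) plus the MC_EMEM_CFG offset (0x50).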
*/ + constexpr u32 MemoryControllerConfigurationRegister = 0x70019050; + u32 config_value; + MESOSPHERE_INIT_ABORT_UNLESS(smc::init::ReadWriteRegister(&config_value, MemoryControllerConfigurationRegister, 0, 0)); + return static_cast(config_value & 0x3FFF) << 20; + } + + ALWAYS_INLINE util::BitPack32 GetKernelConfigurationForInit() { + u64 value = 0; + smc::init::GetConfig(&value, 1, smc::ConfigItem::KernelConfiguration); + return util::BitPack32{static_cast(value)}; + } + + ALWAYS_INLINE u32 GetMemoryModeForInit() { + u64 value = 0; + smc::init::GetConfig(&value, 1, smc::ConfigItem::MemoryMode); + return static_cast(value); + } + + ALWAYS_INLINE smc::MemoryArrangement GetMemoryArrangeForInit() { + switch(GetMemoryModeForInit() & 0x3F) { + case 0x01: + default: + return smc::MemoryArrangement_4GB; + case 0x02: + return smc::MemoryArrangement_4GBForAppletDev; + case 0x03: + return smc::MemoryArrangement_4GBForSystemDev; + case 0x11: + return smc::MemoryArrangement_6GB; + case 0x12: + return smc::MemoryArrangement_6GBForAppletDev; + case 0x21: + return smc::MemoryArrangement_8GB; + } + } + + ALWAYS_INLINE u64 GenerateRandomU64ForInit() { + u64 value; + smc::init::GenerateRandomBytes(&value, sizeof(value)); + return value; + } + + ALWAYS_INLINE u64 GenerateRandomU64FromGenerator() { + return g_random_generator.GenerateRandomU64(); + } + + template + ALWAYS_INLINE u64 GenerateUniformRange(u64 min, u64 max, F f) { + /* Handle the case where the difference is too large to represent. */ + if (max == std::numeric_limits::max() && min == std::numeric_limits::min()) { + return f(); + } + + /* Iterate until we get a value in range. */ + const u64 range_size = ((max + 1) - min); + const u64 effective_max = (std::numeric_limits::max() / range_size) * range_size; + while (true) { + if (const u64 rnd = f(); rnd < effective_max) { + return min + (rnd % range_size); + } + } + } + + ALWAYS_INLINE u64 GetConfigU64(smc::ConfigItem which) { + u64 value; + smc::GetConfig(&value, 1, which); + return value; + } + + ALWAYS_INLINE u32 GetConfigU32(smc::ConfigItem which) { + return static_cast(GetConfigU64(which)); + } + + ALWAYS_INLINE bool GetConfigBool(smc::ConfigItem which) { + return GetConfigU64(which) != 0; + } + + bool IsRegisterAccessibleToPrivileged(ams::svc::PhysicalAddress address) { + if (!KMemoryLayout::GetMemoryControllerRegion().Contains(address)) { + return false; + } + /* TODO: Validate specific offsets. */ + return true; + } + + } + + /* Initialization. */ + size_t KSystemControl::Init::GetIntendedMemorySize() { + switch (GetKernelConfigurationForInit().Get()) { + case smc::MemorySize_4GB: + default: /* All invalid modes should go to 4GB. 
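*/

GenerateUniformRange() above removes modulo bias by rejection sampling: draws that land in the final partial copy of the range are retried rather than wrapped. By analogy with a tiny source, mapping an 8-value draw onto a 3-value range accepts only 0..5 (the largest multiple of 3) and retries 6 and 7. A standalone restatement of the loop:

    #include <cstdint>
    #include <limits>

    template <typename F>
    uint64_t UniformRange(uint64_t min, uint64_t max, F generate) {
        /* A full-width range cannot be represented; pass the draw through. */
        if (max == std::numeric_limits<uint64_t>::max() && min == std::numeric_limits<uint64_t>::min()) {
            return generate();
        }

        const uint64_t range_size    = (max + 1) - min;
        const uint64_t effective_max = (std::numeric_limits<uint64_t>::max() / range_size) * range_size;
        while (true) {
            if (const uint64_t rnd = generate(); rnd < effective_max) {
                return min + (rnd % range_size);
            }
        }
    }

/* All invalid modes should go to 4GB.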
*/ + return 4_GB; + case smc::MemorySize_6GB: + return 6_GB; + case smc::MemorySize_8GB: + return 8_GB; + } + } + + KPhysicalAddress KSystemControl::Init::GetKernelPhysicalBaseAddress(uintptr_t base_address) { + const size_t real_dram_size = GetRealMemorySizeForInit(); + const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize(); + if (intended_dram_size * 2 < real_dram_size) { + return base_address; + } else { + return base_address + ((real_dram_size - intended_dram_size) / 2); + } + } + + bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() { + return GetKernelConfigurationForInit().Get(); + } + + size_t KSystemControl::Init::GetApplicationPoolSize() { + switch (GetMemoryArrangeForInit()) { + case smc::MemoryArrangement_4GB: + default: + return 3285_MB; + case smc::MemoryArrangement_4GBForAppletDev: + return 2048_MB; + case smc::MemoryArrangement_4GBForSystemDev: + return 3285_MB; + case smc::MemoryArrangement_6GB: + return 4916_MB; + case smc::MemoryArrangement_6GBForAppletDev: + return 3285_MB; + case smc::MemoryArrangement_8GB: + return 4916_MB; + } + } + + size_t KSystemControl::Init::GetAppletPoolSize() { + switch (GetMemoryArrangeForInit()) { + case smc::MemoryArrangement_4GB: + default: + return 507_MB; + case smc::MemoryArrangement_4GBForAppletDev: + return 1554_MB; + case smc::MemoryArrangement_4GBForSystemDev: + return 448_MB; + case smc::MemoryArrangement_6GB: + return 562_MB; + case smc::MemoryArrangement_6GBForAppletDev: + return 2193_MB; + case smc::MemoryArrangement_8GB: + return 2193_MB; + } + } + + size_t KSystemControl::Init::GetMinimumNonSecureSystemPoolSize() { + /* TODO: Where does this constant actually come from? */ + return 0x29C8000; + } + + void KSystemControl::Init::CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) { + smc::init::CpuOn(core_id, entrypoint, arg); + } + + /* Randomness for Initialization. */ + void KSystemControl::Init::GenerateRandomBytes(void *dst, size_t size) { + MESOSPHERE_INIT_ABORT_UNLESS(size <= 0x38); + smc::init::GenerateRandomBytes(dst, size); + } + + u64 KSystemControl::Init::GenerateRandomRange(u64 min, u64 max) { + return GenerateUniformRange(min, max, GenerateRandomU64ForInit); + } + + /* System Initialization. */ + void KSystemControl::InitializePhase1() { + /* Set IsDebugMode. */ + { + KTargetSystem::SetIsDebugMode(GetConfigBool(smc::ConfigItem::IsDebugMode)); + + /* If debug mode, we want to initialize uart logging. */ + KTargetSystem::EnableDebugLogging(KTargetSystem::IsDebugMode()); + KDebugLog::Initialize(); + } + + /* Set Kernel Configuration. */ + { + const auto kernel_config = util::BitPack32{GetConfigU32(smc::ConfigItem::KernelConfiguration)}; + + KTargetSystem::EnableDebugMemoryFill(kernel_config.Get()); + KTargetSystem::EnableUserExceptionHandlers(kernel_config.Get()); + KTargetSystem::EnableUserPmuAccess(kernel_config.Get()); + + g_call_smc_on_panic = kernel_config.Get(); + } + + /* Set Kernel Debugging. */ + { + /* NOTE: This is used to restrict access to SvcKernelDebug/SvcChangeKernelTraceState. */ + /* Mesosphere may wish to not require this, as we'd ideally keep ProgramVerification enabled for userland. */ + KTargetSystem::EnableKernelDebugging(GetConfigBool(smc::ConfigItem::DisableProgramVerification)); + } + + /* Configure the Kernel Carveout region. */ + { + const auto carveout = KMemoryLayout::GetCarveoutRegionExtents(); + smc::ConfigureCarveout(0, carveout.GetAddress(), carveout.GetSize()); + } + + /* System ResourceLimit initialization. 
*/ + { + /* Construct the resource limit object. */ + KResourceLimit &sys_res_limit = Kernel::GetSystemResourceLimit(); + KAutoObject::Create(std::addressof(sys_res_limit)); + sys_res_limit.Initialize(); + + /* Set the initial limits. */ + const auto [total_memory_size, kernel_memory_size] = KMemoryLayout::GetTotalAndKernelMemorySizes(); + const auto &slab_counts = init::GetSlabResourceCounts(); + MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_PhysicalMemoryMax, total_memory_size)); + MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_ThreadCountMax, slab_counts.num_KThread)); + MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_EventCountMax, slab_counts.num_KEvent)); + MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_TransferMemoryCountMax, slab_counts.num_KTransferMemory)); + MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_SessionCountMax, slab_counts.num_KSession)); + + /* Reserve system memory. */ + MESOSPHERE_ABORT_UNLESS(sys_res_limit.Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, kernel_memory_size)); + } + } + + void KSystemControl::InitializePhase2() { + /* Initialize the sleep manager. */ + KSleepManager::Initialize(); + + /* Reserve secure applet memory. */ + { + MESOSPHERE_ABORT_UNLESS(g_secure_applet_memory_address == Null); + MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, SecureAppletReservedMemorySize)); + + constexpr auto SecureAppletAllocateOption = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront); + g_secure_applet_memory_address = Kernel::GetMemoryManager().AllocateContinuous(SecureAppletReservedMemorySize / PageSize, 1, SecureAppletAllocateOption); + MESOSPHERE_ABORT_UNLESS(g_secure_applet_memory_address != Null); + } + } + + u32 KSystemControl::GetInitialProcessBinaryPool() { + return KMemoryManager::Pool_Application; + } + + /* Privileged Access. */ + void KSystemControl::ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) { + MESOSPHERE_ABORT_UNLESS(util::IsAligned(address, sizeof(u32))); + MESOSPHERE_ABORT_UNLESS(IsRegisterAccessibleToPrivileged(address)); + MESOSPHERE_ABORT_UNLESS(smc::ReadWriteRegister(out, address, mask, value)); + } + + void KSystemControl::ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) { + MESOSPHERE_UNIMPLEMENTED(); + } + + /* Randomness. */ + void KSystemControl::GenerateRandomBytes(void *dst, size_t size) { + MESOSPHERE_INIT_ABORT_UNLESS(size <= 0x38); + smc::GenerateRandomBytes(dst, size); + } + + u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) { + KScopedInterruptDisable intr_disable; + KScopedSpinLock lk(g_random_lock); + + if (AMS_UNLIKELY(!g_initialized_random_generator)) { + u64 seed; + GenerateRandomBytes(&seed, sizeof(seed)); + g_random_generator.Initialize(reinterpret_cast(&seed), sizeof(seed) / sizeof(u32)); + g_initialized_random_generator = true; + } + + return GenerateUniformRange(min, max, GenerateRandomU64FromGenerator); + } + + void KSystemControl::SleepSystem() { + MESOSPHERE_LOG("SleepSystem() was called\n"); + KSleepManager::SleepSystem(); + } + + void KSystemControl::StopSystem() { + if (g_call_smc_on_panic) { + /* Display a panic screen via secure monitor. */ + smc::Panic(0xF00); + } + while (true) { /* ... 
*/ } + } + +} \ No newline at end of file diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_mc_registers.hpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_mc_registers.hpp new file mode 100644 index 000000000..edd925ee2 --- /dev/null +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_mc_registers.hpp @@ -0,0 +1,526 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once + +#define MC_INTSTATUS 0x0 +#define MC_INTMASK 0x4 +#define MC_ERR_STATUS 0x8 +#define MC_ERR_ADR 0xc +#define MC_SMMU_CONFIG 0x10 +#define MC_SMMU_TLB_CONFIG 0x14 +#define MC_SMMU_PTC_CONFIG 0x18 +#define MC_SMMU_PTB_ASID 0x1c +#define MC_SMMU_PTB_DATA 0x20 +#define MC_SMMU_TLB_FLUSH 0x30 +#define MC_SMMU_PTC_FLUSH_0 0x34 +#define MC_SMMU_PTC_FLUSH_1 0x9b8 +#define MC_SMMU_ASID_SECURITY 0x38 +#define MC_SMMU_ASID_SECURITY_1 0x3c +#define MC_SMMU_ASID_SECURITY_2 0x9e0 +#define MC_SMMU_ASID_SECURITY_3 0x9e4 +#define MC_SMMU_ASID_SECURITY_4 0x9e8 +#define MC_SMMU_ASID_SECURITY_5 0x9ec +#define MC_SMMU_ASID_SECURITY_6 0x9f0 +#define MC_SMMU_ASID_SECURITY_7 0x9f4 +#define MC_SMMU_AFI_ASID 0x238 +#define MC_SMMU_AVPC_ASID 0x23c +#define MC_SMMU_DC_ASID 0x240 +#define MC_SMMU_DCB_ASID 0x244 +#define MC_SMMU_HC_ASID 0x250 +#define MC_SMMU_HDA_ASID 0x254 +#define MC_SMMU_ISP2_ASID 0x258 +#define MC_SMMU_MSENC_NVENC_ASID 0x264 +#define MC_SMMU_NV_ASID 0x268 +#define MC_SMMU_NV2_ASID 0x26c +#define MC_SMMU_PPCS_ASID 0x270 +#define MC_SMMU_SATA_ASID 0x274 +#define MC_SMMU_VDE_ASID 0x27c +#define MC_SMMU_VI_ASID 0x280 +#define MC_SMMU_VIC_ASID 0x284 +#define MC_SMMU_XUSB_HOST_ASID 0x288 +#define MC_SMMU_XUSB_DEV_ASID 0x28c +#define MC_SMMU_TSEC_ASID 0x294 +#define MC_SMMU_PPCS1_ASID 0x298 +#define MC_SMMU_DC1_ASID 0xa88 +#define MC_SMMU_SDMMC1A_ASID 0xa94 +#define MC_SMMU_SDMMC2A_ASID 0xa98 +#define MC_SMMU_SDMMC3A_ASID 0xa9c +#define MC_SMMU_SDMMC4A_ASID 0xaa0 +#define MC_SMMU_ISP2B_ASID 0xaa4 +#define MC_SMMU_GPU_ASID 0xaa8 +#define MC_SMMU_GPUB_ASID 0xaac +#define MC_SMMU_PPCS2_ASID 0xab0 +#define MC_SMMU_NVDEC_ASID 0xab4 +#define MC_SMMU_APE_ASID 0xab8 +#define MC_SMMU_SE_ASID 0xabc +#define MC_SMMU_NVJPG_ASID 0xac0 +#define MC_SMMU_HC1_ASID 0xac4 +#define MC_SMMU_SE1_ASID 0xac8 +#define MC_SMMU_AXIAP_ASID 0xacc +#define MC_SMMU_ETR_ASID 0xad0 +#define MC_SMMU_TSECB_ASID 0xad4 +#define MC_SMMU_TSEC1_ASID 0xad8 +#define MC_SMMU_TSECB1_ASID 0xadc +#define MC_SMMU_NVDEC1_ASID 0xae0 +#define MC_SMMU_TRANSLATION_ENABLE_0 0x228 +#define MC_SMMU_TRANSLATION_ENABLE_1 0x22c +#define MC_SMMU_TRANSLATION_ENABLE_2 0x230 +#define MC_SMMU_TRANSLATION_ENABLE_3 0x234 +#define MC_SMMU_TRANSLATION_ENABLE_4 0xb98 +#define MC_PCFIFO_CLIENT_CONFIG0 0xdd0 +#define MC_PCFIFO_CLIENT_CONFIG1 0xdd4 +#define MC_PCFIFO_CLIENT_CONFIG2 0xdd8 +#define MC_PCFIFO_CLIENT_CONFIG3 0xddc +#define MC_PCFIFO_CLIENT_CONFIG4 0xde0 +#define MC_EMEM_CFG 0x50 +#define MC_EMEM_ADR_CFG 0x54 +#define MC_EMEM_ADR_CFG_DEV0 0x58 +#define MC_EMEM_ADR_CFG_DEV1 0x5c 
+#define MC_EMEM_ADR_CFG_CHANNEL_MASK 0x60 +#define MC_EMEM_ADR_CFG_BANK_MASK_0 0x64 +#define MC_EMEM_ADR_CFG_BANK_MASK_1 0x68 +#define MC_EMEM_ADR_CFG_BANK_MASK_2 0x6c +#define MC_SECURITY_CFG0 0x70 +#define MC_SECURITY_CFG1 0x74 +#define MC_SECURITY_CFG3 0x9bc +#define MC_SECURITY_RSV 0x7c +#define MC_EMEM_ARB_CFG 0x90 +#define MC_EMEM_ARB_OUTSTANDING_REQ 0x94 +#define MC_EMEM_ARB_TIMING_RCD 0x98 +#define MC_EMEM_ARB_TIMING_RP 0x9c +#define MC_EMEM_ARB_TIMING_RC 0xa0 +#define MC_EMEM_ARB_TIMING_RAS 0xa4 +#define MC_EMEM_ARB_TIMING_FAW 0xa8 +#define MC_EMEM_ARB_TIMING_RRD 0xac +#define MC_EMEM_ARB_TIMING_RAP2PRE 0xb0 +#define MC_EMEM_ARB_TIMING_WAP2PRE 0xb4 +#define MC_EMEM_ARB_TIMING_R2R 0xb8 +#define MC_EMEM_ARB_TIMING_W2W 0xbc +#define MC_EMEM_ARB_TIMING_R2W 0xc0 +#define MC_EMEM_ARB_TIMING_W2R 0xc4 +#define MC_EMEM_ARB_TIMING_RFCPB 0x6c0 +#define MC_EMEM_ARB_TIMING_CCDMW 0x6c4 +#define MC_EMEM_ARB_REFPB_HP_CTRL 0x6f0 +#define MC_EMEM_ARB_REFPB_BANK_CTRL 0x6f4 +#define MC_EMEM_ARB_DA_TURNS 0xd0 +#define MC_EMEM_ARB_DA_COVERS 0xd4 +#define MC_EMEM_ARB_MISC0 0xd8 +#define MC_EMEM_ARB_MISC1 0xdc +#define MC_EMEM_ARB_MISC2 0xc8 +#define MC_EMEM_ARB_RING1_THROTTLE 0xe0 +#define MC_EMEM_ARB_RING3_THROTTLE 0xe4 +#define MC_EMEM_ARB_NISO_THROTTLE 0x6b0 +#define MC_EMEM_ARB_OVERRIDE 0xe8 +#define MC_EMEM_ARB_RSV 0xec +#define MC_CLKEN_OVERRIDE 0xf4 +#define MC_TIMING_CONTROL_DBG 0xf8 +#define MC_TIMING_CONTROL 0xfc +#define MC_STAT_CONTROL 0x100 +#define MC_STAT_STATUS 0x104 +#define MC_STAT_EMC_CLOCK_LIMIT 0x108 +#define MC_STAT_EMC_CLOCK_LIMIT_MSBS 0x10c +#define MC_STAT_EMC_CLOCKS 0x110 +#define MC_STAT_EMC_CLOCKS_MSBS 0x114 +#define MC_STAT_EMC_FILTER_SET0_ADR_LIMIT_LO 0x118 +#define MC_STAT_EMC_FILTER_SET1_ADR_LIMIT_LO 0x158 +#define MC_STAT_EMC_FILTER_SET0_ADR_LIMIT_HI 0x11c +#define MC_STAT_EMC_FILTER_SET1_ADR_LIMIT_HI 0x15c +#define MC_STAT_EMC_FILTER_SET0_ADR_LIMIT_UPPER 0xa20 +#define MC_STAT_EMC_FILTER_SET1_ADR_LIMIT_UPPER 0xa24 +#define MC_STAT_EMC_FILTER_SET0_VIRTUAL_ADR_LIMIT_LO 0x198 +#define MC_STAT_EMC_FILTER_SET1_VIRTUAL_ADR_LIMIT_LO 0x1a8 +#define MC_STAT_EMC_FILTER_SET0_VIRTUAL_ADR_LIMIT_HI 0x19c +#define MC_STAT_EMC_FILTER_SET1_VIRTUAL_ADR_LIMIT_HI 0x1ac +#define MC_STAT_EMC_FILTER_SET0_VIRTUAL_ADR_LIMIT_UPPER 0xa28 +#define MC_STAT_EMC_FILTER_SET1_VIRTUAL_ADR_LIMIT_UPPER 0xa2c +#define MC_STAT_EMC_FILTER_SET0_ASID 0x1a0 +#define MC_STAT_EMC_FILTER_SET1_ASID 0x1b0 +#define MC_STAT_EMC_FILTER_SET0_SLACK_LIMIT 0x120 +#define MC_STAT_EMC_FILTER_SET1_SLACK_LIMIT 0x160 +#define MC_STAT_EMC_FILTER_SET0_CLIENT_0 0x128 +#define MC_STAT_EMC_FILTER_SET1_CLIENT_0 0x168 +#define MC_STAT_EMC_FILTER_SET0_CLIENT_1 0x12c +#define MC_STAT_EMC_FILTER_SET1_CLIENT_1 0x16c +#define MC_STAT_EMC_FILTER_SET0_CLIENT_2 0x130 +#define MC_STAT_EMC_FILTER_SET1_CLIENT_2 0x170 +#define MC_STAT_EMC_FILTER_SET0_CLIENT_3 0x134 +#define MC_STAT_EMC_FILTER_SET0_CLIENT_4 0xb88 +#define MC_STAT_EMC_FILTER_SET1_CLIENT_3 0x174 +#define MC_STAT_EMC_FILTER_SET1_CLIENT_4 0xb8c +#define MC_STAT_EMC_SET0_COUNT 0x138 +#define MC_STAT_EMC_SET0_COUNT_MSBS 0x13c +#define MC_STAT_EMC_SET1_COUNT 0x178 +#define MC_STAT_EMC_SET1_COUNT_MSBS 0x17c +#define MC_STAT_EMC_SET0_SLACK_ACCUM 0x140 +#define MC_STAT_EMC_SET0_SLACK_ACCUM_MSBS 0x144 +#define MC_STAT_EMC_SET1_SLACK_ACCUM 0x180 +#define MC_STAT_EMC_SET1_SLACK_ACCUM_MSBS 0x184 +#define MC_STAT_EMC_SET0_HISTO_COUNT 0x148 +#define MC_STAT_EMC_SET0_HISTO_COUNT_MSBS 0x14c +#define MC_STAT_EMC_SET1_HISTO_COUNT 0x188 +#define MC_STAT_EMC_SET1_HISTO_COUNT_MSBS 0x18c +#define 
MC_STAT_EMC_SET0_MINIMUM_SLACK_OBSERVED 0x150 +#define MC_STAT_EMC_SET1_MINIMUM_SLACK_OBSERVED 0x190 +#define MC_STAT_EMC_SET0_IDLE_CYCLE_COUNT 0x1b8 +#define MC_STAT_EMC_SET0_IDLE_CYCL_COUNT_MSBS 0x1bc +#define MC_STAT_EMC_SET1_IDLE_CYCLE_COUNT 0x1c8 +#define MC_STAT_EMC_SET1_IDLE_CYCL_COUNT_MSBS 0x1cc +#define MC_STAT_EMC_SET0_IDLE_CYCLE_PARTITION_SELECT 0x1c0 +#define MC_STAT_EMC_SET1_IDLE_CYCLE_PARTITION_SELECT 0x1d0 +#define MC_CLIENT_HOTRESET_CTRL 0x200 +#define MC_CLIENT_HOTRESET_CTRL_1 0x970 +#define MC_CLIENT_HOTRESET_STATUS 0x204 +#define MC_CLIENT_HOTRESET_STATUS_1 0x974 +#define MC_EMEM_ARB_ISOCHRONOUS_0 0x208 +#define MC_EMEM_ARB_ISOCHRONOUS_1 0x20c +#define MC_EMEM_ARB_ISOCHRONOUS_2 0x210 +#define MC_EMEM_ARB_ISOCHRONOUS_3 0x214 +#define MC_EMEM_ARB_ISOCHRONOUS_4 0xb94 +#define MC_EMEM_ARB_HYSTERESIS_0 0x218 +#define MC_EMEM_ARB_HYSTERESIS_1 0x21c +#define MC_EMEM_ARB_HYSTERESIS_2 0x220 +#define MC_EMEM_ARB_HYSTERESIS_3 0x224 +#define MC_EMEM_ARB_HYSTERESIS_4 0xb84 +#define MC_EMEM_ARB_DHYSTERESIS_0 0xbb0 +#define MC_EMEM_ARB_DHYSTERESIS_1 0xbb4 +#define MC_EMEM_ARB_DHYSTERESIS_2 0xbb8 +#define MC_EMEM_ARB_DHYSTERESIS_3 0xbbc +#define MC_EMEM_ARB_DHYSTERESIS_4 0xbc0 +#define MC_EMEM_ARB_DHYST_CTRL 0xbcc +#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_0 0xbd0 +#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_1 0xbd4 +#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_2 0xbd8 +#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_3 0xbdc +#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_4 0xbe0 +#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_5 0xbe4 +#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_6 0xbe8 +#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_7 0xbec +#define MC_RESERVED_RSV 0x3fc +#define MC_DISB_EXTRA_SNAP_LEVELS 0x408 +#define MC_APB_EXTRA_SNAP_LEVELS 0x2a4 +#define MC_AHB_EXTRA_SNAP_LEVELS 0x2a0 +#define MC_USBD_EXTRA_SNAP_LEVELS 0xa18 +#define MC_ISP_EXTRA_SNAP_LEVELS 0xa08 +#define MC_AUD_EXTRA_SNAP_LEVELS 0xa10 +#define MC_MSE_EXTRA_SNAP_LEVELS 0x40c +#define MC_GK2_EXTRA_SNAP_LEVELS 0xa40 +#define MC_A9AVPPC_EXTRA_SNAP_LEVELS 0x414 +#define MC_FTOP_EXTRA_SNAP_LEVELS 0x2bc +#define MC_JPG_EXTRA_SNAP_LEVELS 0xa3c +#define MC_HOST_EXTRA_SNAP_LEVELS 0xa14 +#define MC_SAX_EXTRA_SNAP_LEVELS 0x2c0 +#define MC_DIS_EXTRA_SNAP_LEVELS 0x2ac +#define MC_VICPC_EXTRA_SNAP_LEVELS 0xa1c +#define MC_HDAPC_EXTRA_SNAP_LEVELS 0xa48 +#define MC_AVP_EXTRA_SNAP_LEVELS 0x2a8 +#define MC_USBX_EXTRA_SNAP_LEVELS 0x404 +#define MC_PCX_EXTRA_SNAP_LEVELS 0x2b8 +#define MC_SD_EXTRA_SNAP_LEVELS 0xa04 +#define MC_DFD_EXTRA_SNAP_LEVELS 0xa4c +#define MC_VE_EXTRA_SNAP_LEVELS 0x2d8 +#define MC_GK_EXTRA_SNAP_LEVELS 0xa00 +#define MC_VE2_EXTRA_SNAP_LEVELS 0x410 +#define MC_SDM_EXTRA_SNAP_LEVELS 0xa44 +#define MC_VIDEO_PROTECT_BOM 0x648 +#define MC_VIDEO_PROTECT_SIZE_MB 0x64c +#define MC_VIDEO_PROTECT_BOM_ADR_HI 0x978 +#define MC_VIDEO_PROTECT_REG_CTRL 0x650 +#define MC_ERR_VPR_STATUS 0x654 +#define MC_ERR_VPR_ADR 0x658 +#define MC_VIDEO_PROTECT_VPR_OVERRIDE 0x418 +#define MC_VIDEO_PROTECT_VPR_OVERRIDE1 0x590 +#define MC_IRAM_BOM 0x65c +#define MC_IRAM_TOM 0x660 +#define MC_IRAM_ADR_HI 0x980 +#define MC_IRAM_REG_CTRL 0x964 +#define MC_EMEM_CFG_ACCESS_CTRL 0x664 +#define MC_TZ_SECURITY_CTRL 0x668 +#define MC_EMEM_ARB_OUTSTANDING_REQ_RING3 0x66c +#define MC_EMEM_ARB_OUTSTANDING_REQ_NISO 0x6b4 +#define MC_EMEM_ARB_RING0_THROTTLE_MASK 0x6bc +#define MC_EMEM_ARB_NISO_THROTTLE_MASK 0x6b8 +#define MC_EMEM_ARB_NISO_THROTTLE_MASK_1 0xb80 +#define MC_SEC_CARVEOUT_BOM 0x670 +#define MC_SEC_CARVEOUT_SIZE_MB 0x674 +#define MC_SEC_CARVEOUT_ADR_HI 0x9d4 +#define MC_SEC_CARVEOUT_REG_CTRL 
0x678 +#define MC_ERR_SEC_STATUS 0x67c +#define MC_ERR_SEC_ADR 0x680 +#define MC_PC_IDLE_CLOCK_GATE_CONFIG 0x684 +#define MC_STUTTER_CONTROL 0x688 +#define MC_RESERVED_RSV_1 0x958 +#define MC_DVFS_PIPE_SELECT 0x95c +#define MC_AHB_PTSA_MIN 0x4e0 +#define MC_AUD_PTSA_MIN 0x54c +#define MC_MLL_MPCORER_PTSA_RATE 0x44c +#define MC_RING2_PTSA_RATE 0x440 +#define MC_USBD_PTSA_RATE 0x530 +#define MC_USBX_PTSA_MIN 0x528 +#define MC_USBD_PTSA_MIN 0x534 +#define MC_APB_PTSA_MAX 0x4f0 +#define MC_JPG_PTSA_RATE 0x584 +#define MC_DIS_PTSA_MIN 0x420 +#define MC_AVP_PTSA_MAX 0x4fc +#define MC_AVP_PTSA_RATE 0x4f4 +#define MC_RING1_PTSA_MIN 0x480 +#define MC_DIS_PTSA_MAX 0x424 +#define MC_SD_PTSA_MAX 0x4d8 +#define MC_MSE_PTSA_RATE 0x4c4 +#define MC_VICPC_PTSA_MIN 0x558 +#define MC_PCX_PTSA_MAX 0x4b4 +#define MC_ISP_PTSA_RATE 0x4a0 +#define MC_A9AVPPC_PTSA_MIN 0x48c +#define MC_RING2_PTSA_MAX 0x448 +#define MC_AUD_PTSA_RATE 0x548 +#define MC_HOST_PTSA_MIN 0x51c +#define MC_MLL_MPCORER_PTSA_MAX 0x454 +#define MC_SD_PTSA_MIN 0x4d4 +#define MC_RING1_PTSA_RATE 0x47c +#define MC_JPG_PTSA_MIN 0x588 +#define MC_HDAPC_PTSA_MIN 0x62c +#define MC_AVP_PTSA_MIN 0x4f8 +#define MC_JPG_PTSA_MAX 0x58c +#define MC_VE_PTSA_MAX 0x43c +#define MC_DFD_PTSA_MAX 0x63c +#define MC_VICPC_PTSA_RATE 0x554 +#define MC_GK_PTSA_MAX 0x544 +#define MC_VICPC_PTSA_MAX 0x55c +#define MC_SDM_PTSA_MAX 0x624 +#define MC_SAX_PTSA_RATE 0x4b8 +#define MC_PCX_PTSA_MIN 0x4b0 +#define MC_APB_PTSA_MIN 0x4ec +#define MC_GK2_PTSA_MIN 0x614 +#define MC_PCX_PTSA_RATE 0x4ac +#define MC_RING1_PTSA_MAX 0x484 +#define MC_HDAPC_PTSA_RATE 0x628 +#define MC_MLL_MPCORER_PTSA_MIN 0x450 +#define MC_GK2_PTSA_MAX 0x618 +#define MC_AUD_PTSA_MAX 0x550 +#define MC_GK2_PTSA_RATE 0x610 +#define MC_ISP_PTSA_MAX 0x4a8 +#define MC_DISB_PTSA_RATE 0x428 +#define MC_VE2_PTSA_MAX 0x49c +#define MC_DFD_PTSA_MIN 0x638 +#define MC_FTOP_PTSA_RATE 0x50c +#define MC_A9AVPPC_PTSA_RATE 0x488 +#define MC_VE2_PTSA_MIN 0x498 +#define MC_USBX_PTSA_MAX 0x52c +#define MC_DIS_PTSA_RATE 0x41c +#define MC_USBD_PTSA_MAX 0x538 +#define MC_A9AVPPC_PTSA_MAX 0x490 +#define MC_USBX_PTSA_RATE 0x524 +#define MC_FTOP_PTSA_MAX 0x514 +#define MC_HDAPC_PTSA_MAX 0x630 +#define MC_SD_PTSA_RATE 0x4d0 +#define MC_DFD_PTSA_RATE 0x634 +#define MC_FTOP_PTSA_MIN 0x510 +#define MC_SDM_PTSA_RATE 0x61c +#define MC_AHB_PTSA_RATE 0x4dc +#define MC_SMMU_SMMU_PTSA_MAX 0x460 +#define MC_RING2_PTSA_MIN 0x444 +#define MC_SDM_PTSA_MIN 0x620 +#define MC_APB_PTSA_RATE 0x4e8 +#define MC_MSE_PTSA_MIN 0x4c8 +#define MC_HOST_PTSA_RATE 0x518 +#define MC_VE_PTSA_RATE 0x434 +#define MC_AHB_PTSA_MAX 0x4e4 +#define MC_SAX_PTSA_MIN 0x4bc +#define MC_SMMU_SMMU_PTSA_MIN 0x45c +#define MC_ISP_PTSA_MIN 0x4a4 +#define MC_HOST_PTSA_MAX 0x520 +#define MC_SAX_PTSA_MAX 0x4c0 +#define MC_VE_PTSA_MIN 0x438 +#define MC_GK_PTSA_MIN 0x540 +#define MC_MSE_PTSA_MAX 0x4cc +#define MC_DISB_PTSA_MAX 0x430 +#define MC_DISB_PTSA_MIN 0x42c +#define MC_SMMU_SMMU_PTSA_RATE 0x458 +#define MC_VE2_PTSA_RATE 0x494 +#define MC_GK_PTSA_RATE 0x53c +#define MC_PTSA_GRANT_DECREMENT 0x960 +#define MC_LATENCY_ALLOWANCE_AVPC_0 0x2e4 +#define MC_LATENCY_ALLOWANCE_AXIAP_0 0x3a0 +#define MC_LATENCY_ALLOWANCE_XUSB_1 0x380 +#define MC_LATENCY_ALLOWANCE_ISP2B_0 0x384 +#define MC_LATENCY_ALLOWANCE_SDMMCAA_0 0x3bc +#define MC_LATENCY_ALLOWANCE_SDMMCA_0 0x3b8 +#define MC_LATENCY_ALLOWANCE_ISP2_0 0x370 +#define MC_LATENCY_ALLOWANCE_SE_0 0x3e0 +#define MC_LATENCY_ALLOWANCE_ISP2_1 0x374 +#define MC_LATENCY_ALLOWANCE_DC_0 0x2e8 +#define MC_LATENCY_ALLOWANCE_VIC_0 0x394 +#define 
MC_LATENCY_ALLOWANCE_DCB_1 0x2f8 +#define MC_LATENCY_ALLOWANCE_NVDEC_0 0x3d8 +#define MC_LATENCY_ALLOWANCE_DCB_2 0x2fc +#define MC_LATENCY_ALLOWANCE_TSEC_0 0x390 +#define MC_LATENCY_ALLOWANCE_DC_2 0x2f0 +#define MC_SCALED_LATENCY_ALLOWANCE_DISPLAY0AB 0x694 +#define MC_LATENCY_ALLOWANCE_PPCS_1 0x348 +#define MC_LATENCY_ALLOWANCE_XUSB_0 0x37c +#define MC_LATENCY_ALLOWANCE_PPCS_0 0x344 +#define MC_LATENCY_ALLOWANCE_TSECB_0 0x3f0 +#define MC_LATENCY_ALLOWANCE_AFI_0 0x2e0 +#define MC_SCALED_LATENCY_ALLOWANCE_DISPLAY0B 0x698 +#define MC_LATENCY_ALLOWANCE_DC_1 0x2ec +#define MC_LATENCY_ALLOWANCE_APE_0 0x3dc +#define MC_SCALED_LATENCY_ALLOWANCE_DISPLAY0C 0x6a0 +#define MC_LATENCY_ALLOWANCE_A9AVP_0 0x3a4 +#define MC_LATENCY_ALLOWANCE_GPU2_0 0x3e8 +#define MC_LATENCY_ALLOWANCE_DCB_0 0x2f4 +#define MC_LATENCY_ALLOWANCE_HC_1 0x314 +#define MC_LATENCY_ALLOWANCE_SDMMC_0 0x3c0 +#define MC_LATENCY_ALLOWANCE_NVJPG_0 0x3e4 +#define MC_LATENCY_ALLOWANCE_PTC_0 0x34c +#define MC_LATENCY_ALLOWANCE_ETR_0 0x3ec +#define MC_LATENCY_ALLOWANCE_MPCORE_0 0x320 +#define MC_LATENCY_ALLOWANCE_VI2_0 0x398 +#define MC_SCALED_LATENCY_ALLOWANCE_DISPLAY0BB 0x69c +#define MC_SCALED_LATENCY_ALLOWANCE_DISPLAY0CB 0x6a4 +#define MC_LATENCY_ALLOWANCE_SATA_0 0x350 +#define MC_SCALED_LATENCY_ALLOWANCE_DISPLAY0A 0x690 +#define MC_LATENCY_ALLOWANCE_HC_0 0x310 +#define MC_LATENCY_ALLOWANCE_DC_3 0x3c8 +#define MC_LATENCY_ALLOWANCE_GPU_0 0x3ac +#define MC_LATENCY_ALLOWANCE_SDMMCAB_0 0x3c4 +#define MC_LATENCY_ALLOWANCE_ISP2B_1 0x388 +#define MC_LATENCY_ALLOWANCE_NVENC_0 0x328 +#define MC_LATENCY_ALLOWANCE_HDA_0 0x318 +#define MC_MIN_LENGTH_APE_0 0xb34 +#define MC_MIN_LENGTH_DCB_2 0x8a8 +#define MC_MIN_LENGTH_A9AVP_0 0x950 +#define MC_MIN_LENGTH_TSEC_0 0x93c +#define MC_MIN_LENGTH_DC_1 0x898 +#define MC_MIN_LENGTH_AXIAP_0 0x94c +#define MC_MIN_LENGTH_ISP2B_0 0x930 +#define MC_MIN_LENGTH_VI2_0 0x944 +#define MC_MIN_LENGTH_DCB_0 0x8a0 +#define MC_MIN_LENGTH_DCB_1 0x8a4 +#define MC_MIN_LENGTH_PPCS_1 0x8f4 +#define MC_MIN_LENGTH_NVJPG_0 0xb3c +#define MC_MIN_LENGTH_HDA_0 0x8c4 +#define MC_MIN_LENGTH_NVENC_0 0x8d4 +#define MC_MIN_LENGTH_SDMMC_0 0xb18 +#define MC_MIN_LENGTH_ISP2B_1 0x934 +#define MC_MIN_LENGTH_HC_1 0x8c0 +#define MC_MIN_LENGTH_DC_3 0xb20 +#define MC_MIN_LENGTH_AVPC_0 0x890 +#define MC_MIN_LENGTH_VIC_0 0x940 +#define MC_MIN_LENGTH_ISP2_0 0x91c +#define MC_MIN_LENGTH_HC_0 0x8bc +#define MC_MIN_LENGTH_SE_0 0xb38 +#define MC_MIN_LENGTH_NVDEC_0 0xb30 +#define MC_MIN_LENGTH_SATA_0 0x8fc +#define MC_MIN_LENGTH_DC_0 0x894 +#define MC_MIN_LENGTH_XUSB_1 0x92c +#define MC_MIN_LENGTH_DC_2 0x89c +#define MC_MIN_LENGTH_SDMMCAA_0 0xb14 +#define MC_MIN_LENGTH_GPU_0 0xb04 +#define MC_MIN_LENGTH_ETR_0 0xb44 +#define MC_MIN_LENGTH_AFI_0 0x88c +#define MC_MIN_LENGTH_PPCS_0 0x8f0 +#define MC_MIN_LENGTH_ISP2_1 0x920 +#define MC_MIN_LENGTH_XUSB_0 0x928 +#define MC_MIN_LENGTH_MPCORE_0 0x8cc +#define MC_MIN_LENGTH_TSECB_0 0xb48 +#define MC_MIN_LENGTH_SDMMCA_0 0xb10 +#define MC_MIN_LENGTH_GPU2_0 0xb40 +#define MC_MIN_LENGTH_SDMMCAB_0 0xb1c +#define MC_MIN_LENGTH_PTC_0 0x8f8 +#define MC_EMEM_ARB_OVERRIDE_1 0x968 +#define MC_VIDEO_PROTECT_GPU_OVERRIDE_0 0x984 +#define MC_VIDEO_PROTECT_GPU_OVERRIDE_1 0x988 +#define MC_EMEM_ARB_STATS_0 0x990 +#define MC_EMEM_ARB_STATS_1 0x994 +#define MC_MTS_CARVEOUT_BOM 0x9a0 +#define MC_MTS_CARVEOUT_SIZE_MB 0x9a4 +#define MC_MTS_CARVEOUT_ADR_HI 0x9a8 +#define MC_MTS_CARVEOUT_REG_CTRL 0x9ac +#define MC_ERR_MTS_STATUS 0x9b0 +#define MC_ERR_MTS_ADR 0x9b4 +#define MC_ERR_GENERALIZED_CARVEOUT_STATUS 0xc00 +#define 
MC_ERR_GENERALIZED_CARVEOUT_ADR 0xc04 +#define MC_SECURITY_CARVEOUT5_CLIENT_FORCE_INTERNAL_ACCESS2 0xd74 +#define MC_SECURITY_CARVEOUT4_CFG0 0xcf8 +#define MC_SECURITY_CARVEOUT4_CLIENT_ACCESS2 0xd10 +#define MC_SECURITY_CARVEOUT4_SIZE_128KB 0xd04 +#define MC_SECURITY_CARVEOUT1_CLIENT_ACCESS4 0xc28 +#define MC_SECURITY_CARVEOUT1_CLIENT_FORCE_INTERNAL_ACCESS1 0xc30 +#define MC_SECURITY_CARVEOUT2_CLIENT_FORCE_INTERNAL_ACCESS4 0xc8c +#define MC_SECURITY_CARVEOUT4_CLIENT_FORCE_INTERNAL_ACCESS0 0xd1c +#define MC_SECURITY_CARVEOUT5_CLIENT_FORCE_INTERNAL_ACCESS1 0xd70 +#define MC_SECURITY_CARVEOUT1_CLIENT_FORCE_INTERNAL_ACCESS0 0xc2c +#define MC_SECURITY_CARVEOUT5_CLIENT_FORCE_INTERNAL_ACCESS4 0xd7c +#define MC_SECURITY_CARVEOUT3_SIZE_128KB 0xcb4 +#define MC_SECURITY_CARVEOUT2_CFG0 0xc58 +#define MC_SECURITY_CARVEOUT1_CFG0 0xc08 +#define MC_SECURITY_CARVEOUT2_CLIENT_FORCE_INTERNAL_ACCESS2 0xc84 +#define MC_SECURITY_CARVEOUT2_CLIENT_ACCESS0 0xc68 +#define MC_SECURITY_CARVEOUT3_BOM 0xcac +#define MC_SECURITY_CARVEOUT2_CLIENT_ACCESS2 0xc70 +#define MC_SECURITY_CARVEOUT5_CLIENT_FORCE_INTERNAL_ACCESS3 0xd78 +#define MC_SECURITY_CARVEOUT2_CLIENT_FORCE_INTERNAL_ACCESS0 0xc7c +#define MC_SECURITY_CARVEOUT4_CLIENT_ACCESS4 0xd18 +#define MC_SECURITY_CARVEOUT3_CLIENT_ACCESS1 0xcbc +#define MC_SECURITY_CARVEOUT1_CLIENT_FORCE_INTERNAL_ACCESS3 0xc38 +#define MC_SECURITY_CARVEOUT1_CLIENT_FORCE_INTERNAL_ACCESS2 0xc34 +#define MC_SECURITY_CARVEOUT3_CLIENT_ACCESS2 0xcc0 +#define MC_SECURITY_CARVEOUT5_CLIENT_ACCESS2 0xd60 +#define MC_SECURITY_CARVEOUT3_CFG0 0xca8 +#define MC_SECURITY_CARVEOUT3_CLIENT_ACCESS0 0xcb8 +#define MC_SECURITY_CARVEOUT2_CLIENT_FORCE_INTERNAL_ACCESS3 0xc88 +#define MC_SECURITY_CARVEOUT2_SIZE_128KB 0xc64 +#define MC_SECURITY_CARVEOUT5_BOM_HI 0xd50 +#define MC_SECURITY_CARVEOUT1_SIZE_128KB 0xc14 +#define MC_SECURITY_CARVEOUT4_CLIENT_ACCESS3 0xd14 +#define MC_SECURITY_CARVEOUT1_BOM 0xc0c +#define MC_SECURITY_CARVEOUT4_CLIENT_FORCE_INTERNAL_ACCESS4 0xd2c +#define MC_SECURITY_CARVEOUT5_CLIENT_ACCESS4 0xd68 +#define MC_SECURITY_CARVEOUT3_CLIENT_ACCESS4 0xcc8 +#define MC_SECURITY_CARVEOUT5_CLIENT_ACCESS0 0xd58 +#define MC_SECURITY_CARVEOUT4_CLIENT_FORCE_INTERNAL_ACCESS2 0xd24 +#define MC_SECURITY_CARVEOUT3_CLIENT_ACCESS3 0xcc4 +#define MC_SECURITY_CARVEOUT2_CLIENT_ACCESS4 0xc78 +#define MC_SECURITY_CARVEOUT1_CLIENT_ACCESS1 0xc1c +#define MC_SECURITY_CARVEOUT1_CLIENT_ACCESS0 0xc18 +#define MC_SECURITY_CARVEOUT4_CLIENT_FORCE_INTERNAL_ACCESS3 0xd28 +#define MC_SECURITY_CARVEOUT5_CLIENT_ACCESS1 0xd5c +#define MC_SECURITY_CARVEOUT3_BOM_HI 0xcb0 +#define MC_SECURITY_CARVEOUT3_CLIENT_FORCE_INTERNAL_ACCESS3 0xcd8 +#define MC_SECURITY_CARVEOUT2_BOM_HI 0xc60 +#define MC_SECURITY_CARVEOUT4_BOM_HI 0xd00 +#define MC_SECURITY_CARVEOUT5_CLIENT_ACCESS3 0xd64 +#define MC_SECURITY_CARVEOUT3_CLIENT_FORCE_INTERNAL_ACCESS4 0xcdc +#define MC_SECURITY_CARVEOUT2_CLIENT_FORCE_INTERNAL_ACCESS1 0xc80 +#define MC_SECURITY_CARVEOUT5_SIZE_128KB 0xd54 +#define MC_SECURITY_CARVEOUT4_CLIENT_FORCE_INTERNAL_ACCESS1 0xd20 +#define MC_SECURITY_CARVEOUT3_CLIENT_FORCE_INTERNAL_ACCESS2 0xcd4 +#define MC_SECURITY_CARVEOUT4_CLIENT_ACCESS1 0xd0c +#define MC_SECURITY_CARVEOUT2_CLIENT_ACCESS3 0xc74 +#define MC_SECURITY_CARVEOUT3_CLIENT_FORCE_INTERNAL_ACCESS0 0xccc +#define MC_SECURITY_CARVEOUT4_BOM 0xcfc +#define MC_SECURITY_CARVEOUT5_CFG0 0xd48 +#define MC_SECURITY_CARVEOUT2_BOM 0xc5c +#define MC_SECURITY_CARVEOUT5_BOM 0xd4c +#define MC_SECURITY_CARVEOUT1_CLIENT_ACCESS3 0xc24 +#define MC_SECURITY_CARVEOUT5_CLIENT_FORCE_INTERNAL_ACCESS0 0xd6c 
+#define MC_SECURITY_CARVEOUT3_CLIENT_FORCE_INTERNAL_ACCESS1 0xcd0 +#define MC_SECURITY_CARVEOUT1_BOM_HI 0xc10 +#define MC_SECURITY_CARVEOUT1_CLIENT_ACCESS2 0xc20 +#define MC_SECURITY_CARVEOUT1_CLIENT_FORCE_INTERNAL_ACCESS4 0xc3c +#define MC_SECURITY_CARVEOUT2_CLIENT_ACCESS1 0xc6c +#define MC_SECURITY_CARVEOUT4_CLIENT_ACCESS0 0xd08 +#define MC_ERR_APB_ASID_UPDATE_STATUS 0x9d0 +#define MC_DA_CONFIG0 0x9dc diff --git a/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.cpp similarity index 67% rename from libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.cpp rename to libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.cpp index e60e6bbe1..49f2ac286 100644 --- a/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.cpp +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.cpp @@ -16,7 +16,7 @@ #include #include "kern_secure_monitor.hpp" -namespace ams::kern::smc { +namespace ams::kern::board::nintendo::nx::smc { namespace { @@ -55,7 +55,9 @@ namespace ams::kern::smc { : : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory" ); - /* TODO: Restore X18 */ + + /* Restore the CoreLocalRegion into X18. */ + cpu::SetCoreLocalRegionAddress(cpu::GetTpidrEl1()); } /* Store arguments to output. */ @@ -98,15 +100,23 @@ namespace ams::kern::smc { args.x[7] = x7; } + /* Global lock for generate random bytes. */ + KSpinLock g_generate_random_lock; + } /* SMC functionality needed for init. */ namespace init { + void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) { + SecureMonitorArguments args = { FunctionId_CpuOn, core_id, entrypoint, arg }; + CallPrivilegedSecureMonitorFunctionForInit(args); + } + void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) { SecureMonitorArguments args = { FunctionId_GetConfig, static_cast(config_item) }; CallPrivilegedSecureMonitorFunctionForInit(args); - MESOSPHERE_ABORT_UNLESS((static_cast(args.x[0]) == SmcResult::Success)); + MESOSPHERE_INIT_ABORT_UNLESS((static_cast(args.x[0]) == SmcResult::Success)); for (size_t i = 0; i < num_qwords && i < 7; i++) { out[i] = args.x[1 + i]; } @@ -114,11 +124,11 @@ namespace ams::kern::smc { void GenerateRandomBytes(void *dst, size_t size) { /* Call SmcGenerateRandomBytes() */ - /* TODO: Lock this to ensure only one core calls at once. */ SecureMonitorArguments args = { FunctionId_GenerateRandomBytes, size }; - MESOSPHERE_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.x[0])); + MESOSPHERE_INIT_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.x[0])); + CallPrivilegedSecureMonitorFunctionForInit(args); - MESOSPHERE_ABORT_UNLESS((static_cast(args.x[0]) == SmcResult::Success)); + MESOSPHERE_INIT_ABORT_UNLESS((static_cast(args.x[0]) == SmcResult::Success)); /* Copy output. 
*/ std::memcpy(dst, &args.x[1], size); @@ -133,6 +143,45 @@ namespace ams::kern::smc { } + void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) { + SecureMonitorArguments args = { FunctionId_GetConfig, static_cast(config_item) }; + CallPrivilegedSecureMonitorFunction(args); + MESOSPHERE_ABORT_UNLESS((static_cast(args.x[0]) == SmcResult::Success)); + for (size_t i = 0; i < num_qwords && i < 7; i++) { + out[i] = args.x[1 + i]; + } + } + + bool ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) { + SecureMonitorArguments args = { FunctionId_ReadWriteRegister, address, mask, value }; + CallPrivilegedSecureMonitorFunction(args); + *out = static_cast(args.x[1]); + return static_cast(args.x[0]) == SmcResult::Success; + } + + void ConfigureCarveout(size_t which, uintptr_t address, size_t size) { + SecureMonitorArguments args = { FunctionId_ConfigureCarveout, static_cast(which), static_cast(address), static_cast(size) }; + CallPrivilegedSecureMonitorFunction(args); + MESOSPHERE_ABORT_UNLESS((static_cast(args.x[0]) == SmcResult::Success)); + } + + void GenerateRandomBytes(void *dst, size_t size) { + /* Setup for call. */ + SecureMonitorArguments args = { FunctionId_GenerateRandomBytes, size }; + MESOSPHERE_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.x[0])); + + /* Make call. */ + { + KScopedInterruptDisable intr_disable; + KScopedSpinLock lk(g_generate_random_lock); + CallPrivilegedSecureMonitorFunction(args); + } + MESOSPHERE_ABORT_UNLESS((static_cast(args.x[0]) == SmcResult::Success)); + + /* Copy output. */ + std::memcpy(dst, &args.x[1], size); + } + void NORETURN Panic(u32 color) { SecureMonitorArguments args = { FunctionId_Panic, color }; CallPrivilegedSecureMonitorFunction(args); diff --git a/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.hpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.hpp similarity index 82% rename from libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.hpp rename to libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.hpp index 434dba413..fa78b0ee2 100644 --- a/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.hpp +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.hpp @@ -16,7 +16,7 @@ #pragma once #include -namespace ams::kern::smc { +namespace ams::kern::board::nintendo::nx::smc { /* Types. */ enum MemorySize { @@ -25,6 +25,15 @@ namespace ams::kern::smc { MemorySize_8GB = 2, }; + enum MemoryArrangement { + MemoryArrangement_4GB = 0, + MemoryArrangement_4GBForAppletDev = 1, + MemoryArrangement_4GBForSystemDev = 2, + MemoryArrangement_6GB = 3, + MemoryArrangement_6GBForAppletDev = 4, + MemoryArrangement_8GB = 5, + }; + enum class ConfigItem : u32 { /* Standard config items. */ DisableProgramVerification = 1, @@ -75,10 +84,16 @@ namespace ams::kern::smc { }; /* TODO: Rest of Secure Monitor API. 
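   As a usage sketch for the declarations below (this mirrors the
   GetKernelConfigurationForInit() helper visible in the deleted
   kern_k_system_control.cpp further down, rather than documented API):

       u64 raw = 0;
       smc::GetConfig(&raw, 1, smc::ConfigItem::KernelConfiguration);
       const util::BitPack32 config{static_cast<u32>(raw)};

   Both the init and non-init GetConfig() abort unless the secure monitor
   returns SmcResult::Success, so callers see either valid data or a panic,
   never an unchecked error code.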
*/ + void GenerateRandomBytes(void *dst, size_t size); + void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item); + bool ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value); + void ConfigureCarveout(size_t which, uintptr_t address, size_t size); + void NORETURN Panic(u32 color); namespace init { + void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg); void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item); void GenerateRandomBytes(void *dst, size_t size); bool ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value); diff --git a/libraries/libmesosphere/source/board/nintendo/switch/kern_k_system_control.cpp b/libraries/libmesosphere/source/board/nintendo/switch/kern_k_system_control.cpp deleted file mode 100644 index d28453f4d..000000000 --- a/libraries/libmesosphere/source/board/nintendo/switch/kern_k_system_control.cpp +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2018-2020 Atmosphère-NX - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -#include -#include "kern_secure_monitor.hpp" - -namespace ams::kern { - - namespace { - - ALWAYS_INLINE size_t GetRealMemorySizeForInit() { - /* TODO: Move this into a header for the MC in general. */ - constexpr u32 MemoryControllerConfigurationRegister = 0x70019050; - u32 config_value; - MESOSPHERE_ABORT_UNLESS(smc::init::ReadWriteRegister(&config_value, MemoryControllerConfigurationRegister, 0, 0)); - return static_cast(config_value & 0x3FFF) << 20; - } - - ALWAYS_INLINE util::BitPack32 GetKernelConfigurationForInit() { - u64 value = 0; - smc::init::GetConfig(&value, 1, smc::ConfigItem::KernelConfiguration); - return util::BitPack32{static_cast(value)}; - } - - ALWAYS_INLINE u64 GenerateRandomU64ForInit() { - u64 value; - smc::init::GenerateRandomBytes(&value, sizeof(value)); - return value; - } - - ALWAYS_INLINE size_t GetIntendedMemorySizeForInit() { - switch (GetKernelConfigurationForInit().Get()) { - case smc::MemorySize_4GB: - default: /* All invalid modes should go to 4GB. */ - return 4_GB; - case smc::MemorySize_6GB: - return 6_GB; - case smc::MemorySize_8GB: - return 8_GB; - } - } - - } - - /* Initialization. */ - KPhysicalAddress KSystemControl::Init::GetKernelPhysicalBaseAddress(uintptr_t base_address) { - const size_t real_dram_size = GetRealMemorySizeForInit(); - const size_t intended_dram_size = GetIntendedMemorySizeForInit(); - if (intended_dram_size * 2 < real_dram_size) { - return base_address; - } else { - return base_address + ((real_dram_size - intended_dram_size) / 2); - } - } - - bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() { - return GetKernelConfigurationForInit().Get(); - } - - /* Randomness for Initialization. 
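   Worked example for GetRealMemorySizeForInit() above: the low 14 bits of
   the register at 0x70019050 (MC base 0x70019000 plus MC_EMEM_CFG, 0x50, in
   the new kern_mc_registers.hpp) hold the DRAM size in megabytes, and the
   shift by 20 converts MB to bytes. On a 4 GB unit the field would read
   0x1000 (an assumed sample value):

       (0x1000 & 0x3FFF) << 20 == 0x100000000 == 4_GB

   GetKernelPhysicalBaseAddress() then compares this real size against the
   intended size from KernelConfiguration to center the kernel's carveout
   within the surplus DRAM.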
*/ - void KSystemControl::Init::GenerateRandomBytes(void *dst, size_t size) { - MESOSPHERE_ABORT_UNLESS(size <= 0x38); - smc::init::GenerateRandomBytes(dst, size); - } - - u64 KSystemControl::Init::GenerateRandomRange(u64 min, u64 max) { - const u64 range_size = ((max + 1) - min); - const u64 effective_max = (std::numeric_limits::max() / range_size) * range_size; - while (true) { - if (const u64 rnd = GenerateRandomU64ForInit(); rnd < effective_max) { - return min + (rnd % range_size); - } - } - } - - void KSystemControl::StopSystem() { - /* Display a panic screen via exosphere. */ - smc::Panic(0xF00); - while (true) { /* ... */ } - } - -} \ No newline at end of file diff --git a/libraries/libmesosphere/source/init/kern_init_slab_setup.cpp b/libraries/libmesosphere/source/init/kern_init_slab_setup.cpp new file mode 100644 index 000000000..db644ec1a --- /dev/null +++ b/libraries/libmesosphere/source/init/kern_init_slab_setup.cpp @@ -0,0 +1,206 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern::init { + + #define SLAB_COUNT(CLASS) g_slab_resource_counts.num_##CLASS + + #define FOREACH_SLAB_TYPE(HANDLER, ...) \ + HANDLER(KProcess, (SLAB_COUNT(KProcess)), ## __VA_ARGS__) \ + HANDLER(KThread, (SLAB_COUNT(KThread)), ## __VA_ARGS__) \ + HANDLER(KLinkedListNode, (SLAB_COUNT(KThread) * 17), ## __VA_ARGS__) \ + HANDLER(KEvent, (SLAB_COUNT(KEvent)), ## __VA_ARGS__) \ + HANDLER(KInterruptEvent, (SLAB_COUNT(KInterruptEvent)), ## __VA_ARGS__) \ + HANDLER(KInterruptEventTask, (SLAB_COUNT(KInterruptEvent)), ## __VA_ARGS__) \ + HANDLER(KPort, (SLAB_COUNT(KPort)), ## __VA_ARGS__) \ + HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ## __VA_ARGS__) \ + HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ## __VA_ARGS__) \ + HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ## __VA_ARGS__) \ + HANDLER(KCodeMemory, (SLAB_COUNT(KCodeMemory)), ## __VA_ARGS__) \ + HANDLER(KDeviceAddressSpace, (SLAB_COUNT(KDeviceAddressSpace)), ## __VA_ARGS__) \ + HANDLER(KSession, (SLAB_COUNT(KSession)), ## __VA_ARGS__) \ + HANDLER(KSessionRequest, (SLAB_COUNT(KSession) * 2), ## __VA_ARGS__) \ + HANDLER(KLightSession, (SLAB_COUNT(KLightSession)), ## __VA_ARGS__) \ + HANDLER(KThreadLocalPage, (SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), ## __VA_ARGS__) \ + HANDLER(KObjectName, (SLAB_COUNT(KObjectName)), ## __VA_ARGS__) \ + HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ## __VA_ARGS__) \ + HANDLER(KEventInfo, (SLAB_COUNT(KThread) + SLAB_COUNT(KDebug)), ## __VA_ARGS__) \ + HANDLER(KDebug, (SLAB_COUNT(KDebug)), ## __VA_ARGS__) + + namespace { + + + #define DEFINE_SLAB_TYPE_ENUM_MEMBER(NAME, COUNT, ...) KSlabType_##NAME, + + enum KSlabType : u32 { + FOREACH_SLAB_TYPE(DEFINE_SLAB_TYPE_ENUM_MEMBER) + KSlabType_Count, + }; + + #undef DEFINE_SLAB_TYPE_ENUM_MEMBER + + /* Constexpr counts. 
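   The FOREACH_SLAB_TYPE X-macro above drives every per-type construct in
   this file from a single table. Expanding it with
   DEFINE_SLAB_TYPE_ENUM_MEMBER, for instance, yields (abbreviated):

       enum KSlabType : u32 {
           KSlabType_KProcess,
           KSlabType_KThread,
           KSlabType_KLinkedListNode,
           ...
           KSlabType_KDebug,
           KSlabType_Count,
       };

   so adding a new slab type is exactly one HANDLER(...) row, and the enum,
   the size accounting, and the initialization switch further down all pick
   it up automatically.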
 */
+        constexpr size_t SlabCountKProcess            = 80;
+        constexpr size_t SlabCountKThread             = 800;
+        constexpr size_t SlabCountKEvent              = 700;
+        constexpr size_t SlabCountKInterruptEvent     = 100;
+        constexpr size_t SlabCountKPort               = 256;
+        constexpr size_t SlabCountKSharedMemory       = 80;
+        constexpr size_t SlabCountKTransferMemory     = 200;
+        constexpr size_t SlabCountKCodeMemory         = 10;
+        constexpr size_t SlabCountKDeviceAddressSpace = 300;
+        constexpr size_t SlabCountKSession            = 900;
+        constexpr size_t SlabCountKLightSession       = 100;
+        constexpr size_t SlabCountKObjectName         = 7;
+        constexpr size_t SlabCountKResourceLimit      = 5;
+        constexpr size_t SlabCountKDebug              = cpu::NumCores;
+
+        constexpr size_t SlabCountExtraKThread        = 160;
+
+        /* This is used for gaps between the slab allocators. */
+        constexpr size_t SlabRegionReservedSize = 2_MB;
+
+        /* Global to hold our resource counts. */
+        KSlabResourceCounts g_slab_resource_counts = {
+            .num_KProcess            = SlabCountKProcess,
+            .num_KThread             = SlabCountKThread,
+            .num_KEvent              = SlabCountKEvent,
+            .num_KInterruptEvent     = SlabCountKInterruptEvent,
+            .num_KPort               = SlabCountKPort,
+            .num_KSharedMemory       = SlabCountKSharedMemory,
+            .num_KTransferMemory     = SlabCountKTransferMemory,
+            .num_KCodeMemory         = SlabCountKCodeMemory,
+            .num_KDeviceAddressSpace = SlabCountKDeviceAddressSpace,
+            .num_KSession            = SlabCountKSession,
+            .num_KLightSession       = SlabCountKLightSession,
+            .num_KObjectName         = SlabCountKObjectName,
+            .num_KResourceLimit      = SlabCountKResourceLimit,
+            .num_KDebug              = SlabCountKDebug,
+        };
+
+        template<typename T>
+        NOINLINE KVirtualAddress InitializeSlabHeap(KVirtualAddress address, size_t num_objects) {
+            const size_t size = util::AlignUp(sizeof(T) * num_objects, alignof(void *));
+            KVirtualAddress start = util::AlignUp(GetInteger(address), alignof(T));
+
+            if (size > 0) {
+                MESOSPHERE_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().FindContainingRegion(GetInteger(start) + size - 1)->IsDerivedFrom(KMemoryRegionType_KernelSlab));
+                T::InitializeSlabHeap(GetVoidPointer(start), size);
+            }
+
+            return start + size;
+        }
+
+    }
+
+
+    const KSlabResourceCounts &GetSlabResourceCounts() {
+        return g_slab_resource_counts;
+    }
+
+    void InitializeSlabResourceCounts() {
+        /* Note: Nintendo initializes all fields here, but we initialize all constants at compile-time. */
+        if (KSystemControl::Init::ShouldIncreaseThreadResourceLimit()) {
+            g_slab_resource_counts.num_KThread += SlabCountExtraKThread;
+        }
+    }
+
+    size_t CalculateTotalSlabHeapSize() {
+        size_t size = 0;
+
+        #define ADD_SLAB_SIZE(NAME, COUNT, ...) ({          \
+            size += alignof(NAME);                          \
+            size += util::AlignUp(sizeof(NAME) * (COUNT), alignof(void *)); \
+        });
+
+        /* NOTE: This can't be used right now because we don't have all these types implemented. */
+        /* Once we do, uncomment the following and stop using the hardcoded size. */
+        /* TODO: FOREACH_SLAB_TYPE(ADD_SLAB_SIZE) */
+        size = 0x647000;
+
+        return size;
+    }
+
+    void InitializeKPageBufferSlabHeap() {
+        const auto &counts = GetSlabResourceCounts();
+        const size_t num_pages = counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
+        const size_t slab_size = num_pages * PageSize;
+
+        /* Reserve memory from the system resource limit. */
+        MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, slab_size));
+
+        /* Allocate memory for the slab. 
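   A worked example of the alignment math in InitializeSlabHeap() above,
   using illustrative numbers (sizeof(T) == 0x1D0, alignof(T) == 0x10, three
   objects, incoming address ending in 0x008):

       start = util::AlignUp(address, 0x10)              ends in 0x010
       size  = util::AlignUp(0x1D0 * 3, alignof(void *))
             = util::AlignUp(0x570, 8) = 0x570

   The function returns start + size, so the next slab begins exactly where
   this one ends; only the explicit random gaps inserted by
   InitializeSlabHeaps() below separate consecutive slabs.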
*/ + constexpr auto AllocateOption = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront); + const KVirtualAddress slab_address = Kernel::GetMemoryManager().AllocateContinuous(num_pages, 1, AllocateOption); + MESOSPHERE_ABORT_UNLESS(slab_address != Null); + + /* Open references to the slab. */ + Kernel::GetMemoryManager().Open(slab_address, num_pages); + + /* Initialize the slabheap. */ + KPageBuffer::InitializeSlabHeap(GetVoidPointer(slab_address), slab_size); + } + + void InitializeSlabHeaps() { + /* Get the start of the slab region, since that's where we'll be working. */ + KVirtualAddress address = KMemoryLayout::GetSlabRegionAddress(); + + /* Initialize slab type array to be in sorted order. */ + KSlabType slab_types[KSlabType_Count]; + for (size_t i = 0; i < util::size(slab_types); i++) { slab_types[i] = static_cast(i); } + + /* N shuffles the slab type array with the following simple algorithm. */ + for (size_t i = 0; i < util::size(slab_types); i++) { + const size_t rnd = KSystemControl::GenerateRandomRange(0, util::size(slab_types) - 1); + std::swap(slab_types[i], slab_types[rnd]); + } + + /* Create an array to represent the gaps between the slabs. */ + size_t slab_gaps[util::size(slab_types)]; + for (size_t i = 0; i < util::size(slab_gaps); i++) { + /* Note: This is an off-by-one error from Nintendo's intention, because GenerateRandomRange is inclusive. */ + /* However, Nintendo also has the off-by-one error, and it's "harmless", so we will include it ourselves. */ + slab_gaps[i] = KSystemControl::GenerateRandomRange(0, SlabRegionReservedSize); + } + + /* Sort the array, so that we can treat differences between values as offsets to the starts of slabs. */ + for (size_t i = 1; i < util::size(slab_gaps); i++) { + for (size_t j = i; j > 0 && slab_gaps[j-1] > slab_gaps[j]; j--) { + std::swap(slab_gaps[j], slab_gaps[j-1]); + } + } + + for (size_t i = 0; i < util::size(slab_types); i++) { + /* Add the random gap to the address. */ + address += (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1]; + + #define INITIALIZE_SLAB_HEAP(NAME, COUNT, ...) \ + case KSlabType_##NAME: \ + address = InitializeSlabHeap(address, COUNT); \ + break; + + /* Initialize the slabheap. */ + switch (slab_types[i]) { + /* For each of the slab types, we want to initialize that heap. */ + FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP) + /* If we somehow get an invalid type, abort. */ + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + } + } + +} \ No newline at end of file diff --git a/libraries/libmesosphere/source/kern_debug_log.cpp b/libraries/libmesosphere/source/kern_debug_log.cpp new file mode 100644 index 000000000..45a53f534 --- /dev/null +++ b/libraries/libmesosphere/source/kern_debug_log.cpp @@ -0,0 +1,458 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
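   Stepping back to the slab-gap placement just above: suppose three slab
   types and random draws {0x3000, 0x1000, 0x2000} (illustrative values).
   After the insertion sort the array is {0x1000, 0x2000, 0x3000}, and the
   placement loop consumes the deltas

       i == 0: address += 0x1000
       i == 1: address += 0x2000 - 0x1000 = 0x1000
       i == 2: address += 0x3000 - 0x2000 = 0x1000

   The deltas telescope to the largest draw, so the total random padding is
   bounded by SlabRegionReservedSize (2 MB) no matter how many slab types
   participate.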
+ */ +#include +#include "kern_debug_log_impl.hpp" + +namespace ams::kern { + #pragma GCC push_options + #pragma GCC optimize ("-Os") + + namespace { + + /* Useful definitions for our VSNPrintf implementation. */ + enum FormatSpecifierFlag : u32 { + FormatSpecifierFlag_None = 0, + FormatSpecifierFlag_EmptySign = (1 << 0), + FormatSpecifierFlag_ForceSign = (1 << 1), + FormatSpecifierFlag_Hash = (1 << 2), + FormatSpecifierFlag_LeftJustify = (1 << 3), + FormatSpecifierFlag_ZeroPad = (1 << 4), + FormatSpecifierFlag_Char = (1 << 5), + FormatSpecifierFlag_Short = (1 << 6), + FormatSpecifierFlag_Long = (1 << 7), + FormatSpecifierFlag_LongLong = (1 << 8), + FormatSpecifierFlag_Uppercase = (1 << 9), + FormatSpecifierFlag_HasPrecision = (1 << 10), + }; + + using FormatSpecifierFlagStorage = std::underlying_type::type; + + constexpr ALWAYS_INLINE bool IsDigit(char c) { + return '0' <= c && c <= '9'; + } + + constexpr ALWAYS_INLINE u32 ParseU32(const char *&str) { + u32 value = 0; + do { + value = (value * 10) + static_cast(*(str++) - '0'); + } while (IsDigit(*str)); + return value; + } + + constexpr ALWAYS_INLINE size_t Strnlen(const char *str, size_t max) { + const char *cur = str; + while (*cur && max--) { + cur++; + } + return static_cast(cur - str); + } + + ALWAYS_INLINE void VSNPrintfImpl(char * const dst, const size_t dst_size, const char *format, ::std::va_list vl) { + size_t dst_index = 0; + + auto WriteCharacter = [dst, dst_size, &dst_index](char c) ALWAYS_INLINE_LAMBDA { + if (dst_index < dst_size) { + dst[dst_index++] = c; + } + }; + + /* Loop over every character in the string, looking for format specifiers. */ + while (*format) { + if (const char c = *(format++); c != '%') { + WriteCharacter(c); + continue; + } + + /* We have to parse a format specifier. */ + /* Start by parsing flags. */ + FormatSpecifierFlagStorage flags = FormatSpecifierFlag_None; + auto SetFlag = [&flags](FormatSpecifierFlag f) ALWAYS_INLINE_LAMBDA { flags |= f; }; + auto ClearFlag = [&flags](FormatSpecifierFlag f) ALWAYS_INLINE_LAMBDA { flags &= ~f; }; + auto HasFlag = [&flags](FormatSpecifierFlag f) ALWAYS_INLINE_LAMBDA { return (flags & f) != 0; }; + { + bool parsed_flags = false; + while (!parsed_flags) { + switch (*format) { + case ' ': SetFlag(FormatSpecifierFlag_EmptySign); format++; break; + case '+': SetFlag(FormatSpecifierFlag_ForceSign); format++; break; + case '#': SetFlag(FormatSpecifierFlag_Hash); format++; break; + case '-': SetFlag(FormatSpecifierFlag_LeftJustify); format++; break; + case '0': SetFlag(FormatSpecifierFlag_ZeroPad); format++; break; + default: + parsed_flags = true; + break; + } + } + } + + /* Next, parse width. */ + u32 width = 0; + if (IsDigit(*format)) { + /* Integer width. */ + width = ParseU32(format); + } else if (*format == '*') { + /* Dynamic width. */ + const int _width = va_arg(vl, int); + if (_width >= 0) { + width = static_cast(_width); + } else { + SetFlag(FormatSpecifierFlag_LeftJustify); + width = static_cast(-_width); + } + format++; + } + + /* Next, parse precision if present. */ + u32 precision = 0; + if (*format == '.') { + SetFlag(FormatSpecifierFlag_HasPrecision); + format++; + + if (IsDigit(*format)) { + /* Integer precision. */ + precision = ParseU32(format); + } else if (*format == '*') { + /* Dynamic precision. */ + const int _precision = va_arg(vl, int); + if (_precision > 0) { + precision = static_cast(_precision); + } + format++; + } + } + + /* Parse length. 
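   Walk-through of the parsing above with the illustrative specifier
   "%-12.4s": the flag loop consumes '-' and sets
   FormatSpecifierFlag_LeftJustify, ParseU32 reads 12 into width, '.' sets
   FormatSpecifierFlag_HasPrecision and ParseU32 reads 4 into precision,
   leaving 's' for the conversion switch. The dynamic forms behave the same
   through va_arg, e.g.:

       KDebugLog::Printf("%-*.*s|", 12, 4, "mesosphere");

   prints "meso        |" (four characters, left-justified in a
   twelve-column field).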
*/ + constexpr bool SizeIsLong = sizeof(size_t) == sizeof(long); + constexpr bool IntMaxIsLong = sizeof(intmax_t) == sizeof(long); + constexpr bool PtrDiffIsLong = sizeof(ptrdiff_t) == sizeof(long); + switch (*format) { + case 'z': + SetFlag(SizeIsLong ? FormatSpecifierFlag_Long : FormatSpecifierFlag_LongLong); + format++; + break; + case 'j': + SetFlag(IntMaxIsLong ? FormatSpecifierFlag_Long : FormatSpecifierFlag_LongLong); + format++; + break; + case 't': + SetFlag(PtrDiffIsLong ? FormatSpecifierFlag_Long : FormatSpecifierFlag_LongLong); + format++; + break; + case 'h': + SetFlag(FormatSpecifierFlag_Short); + format++; + if (*format == 'h') { + SetFlag(FormatSpecifierFlag_Char); + format++; + } + break; + case 'l': + SetFlag(FormatSpecifierFlag_Long); + format++; + if (*format == 'l') { + SetFlag(FormatSpecifierFlag_LongLong); + format++; + } + break; + default: + break; + } + + const char specifier = *(format++); + switch (specifier) { + case 'p': + if constexpr (sizeof(uintptr_t) == sizeof(long long)) { + SetFlag(FormatSpecifierFlag_LongLong); + } else { + SetFlag(FormatSpecifierFlag_Long); + } + SetFlag(FormatSpecifierFlag_Hash); + [[fallthrough]]; + case 'd': + case 'i': + case 'u': + case 'b': + case 'o': + case 'x': + case 'X': + { + /* Determine the base to print with. */ + u32 base; + switch (specifier) { + case 'b': + base = 2; + break; + case 'o': + base = 8; + break; + case 'X': + SetFlag(FormatSpecifierFlag_Uppercase); + [[fallthrough]]; + case 'p': + case 'x': + base = 16; + break; + default: + base = 10; + ClearFlag(FormatSpecifierFlag_Hash); + break; + } + + /* Precision implies no zero-padding. */ + if (HasFlag(FormatSpecifierFlag_HasPrecision)) { + ClearFlag(FormatSpecifierFlag_ZeroPad); + } + + /* Unsigned types don't get signs. */ + const bool is_unsigned = base != 10 || specifier == 'u'; + if (is_unsigned) { + ClearFlag(FormatSpecifierFlag_EmptySign); + ClearFlag(FormatSpecifierFlag_ForceSign); + } + + auto PrintInteger = [&](bool negative, uintmax_t value) { + constexpr size_t BufferSize = 64; /* Binary digits for 64-bit numbers may use 64 digits. */ + char buf[BufferSize]; + size_t len = 0; + + /* No hash flag for zero. */ + if (value == 0) { + ClearFlag(FormatSpecifierFlag_Hash); + } + + if (!HasFlag(FormatSpecifierFlag_HasPrecision) || value != 0) { + do { + const char digit = static_cast(value % base); + buf[len++] = (digit < 10) ? ('0' + digit) : ((HasFlag(FormatSpecifierFlag_Uppercase) ? 'A' : 'a') + digit - 10); + value /= base; + } while (value); + } + + /* Determine our prefix length. */ + size_t prefix_len = 0; + const bool has_sign = negative || HasFlag(FormatSpecifierFlag_ForceSign) || HasFlag(FormatSpecifierFlag_EmptySign); + if (has_sign) { + prefix_len++; + } + if (HasFlag(FormatSpecifierFlag_Hash)) { + prefix_len += (base != 8) ? 2 : 1; + } + + /* Determine zero-padding count. */ + size_t num_zeroes = (len < precision) ? precision - len : 0; + if (!HasFlag(FormatSpecifierFlag_LeftJustify) && HasFlag(FormatSpecifierFlag_ZeroPad)) { + num_zeroes = (len + prefix_len < width) ? width - len - prefix_len : 0; + } + + /* Print out left padding. */ + if (!HasFlag(FormatSpecifierFlag_LeftJustify)) { + for (size_t i = len + prefix_len + num_zeroes; i < static_cast(width); i++) { + WriteCharacter(' '); + } + } + + /* Print out sign. */ + if (negative) { + WriteCharacter('-'); + } else if (HasFlag(FormatSpecifierFlag_ForceSign)) { + WriteCharacter('+'); + } else if (HasFlag(FormatSpecifierFlag_EmptySign)) { + WriteCharacter(' '); + } + + /* Print out base prefix. 
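   Walk-through of the digit loop above, with value == 0xff, base == 16, and
   no precision: pass 1 stores 'f' (0xff % 16 == 15) and value becomes 0xf;
   pass 2 stores 'f' again, value becomes 0, and the loop exits. buf now
   holds the digits least-significant first, which is why the emission loop
   below writes buf[len - 1 - i]. With FormatSpecifierFlag_Hash set, the
   prefix logic here then yields "0xff".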
*/ + if (HasFlag(FormatSpecifierFlag_Hash)) { + WriteCharacter('0'); + if (base == 2) { + WriteCharacter('b'); + } else if (base == 16) { + WriteCharacter('x'); + } + } + + /* Print out zeroes. */ + for (size_t i = 0; i < num_zeroes; i++) { + WriteCharacter('0'); + } + + /* Print out digits. */ + for (size_t i = 0; i < len; i++) { + WriteCharacter(buf[len - 1 - i]); + } + + /* Print out right padding. */ + if (HasFlag(FormatSpecifierFlag_LeftJustify)) { + for (size_t i = len + prefix_len + num_zeroes; i < static_cast(width); i++) { + WriteCharacter(' '); + } + } + }; + + /* Output the integer. */ + if (is_unsigned) { + uintmax_t n = 0; + if (HasFlag(FormatSpecifierFlag_LongLong)) { + n = static_cast(va_arg(vl, unsigned long long)); + } else if (HasFlag(FormatSpecifierFlag_Long)) { + n = static_cast(va_arg(vl, unsigned long)); + } else if (HasFlag(FormatSpecifierFlag_Char)) { + n = static_cast(va_arg(vl, unsigned int)); + } else if (HasFlag(FormatSpecifierFlag_Short)) { + n = static_cast(va_arg(vl, unsigned int)); + } else { + n = static_cast(va_arg(vl, unsigned int)); + } + if (specifier == 'p' && n == 0) { + WriteCharacter('('); + WriteCharacter('n'); + WriteCharacter('i'); + WriteCharacter('l'); + WriteCharacter(')'); + } else { + PrintInteger(false, n); + } + } else { + intmax_t n = 0; + if (HasFlag(FormatSpecifierFlag_LongLong)) { + n = static_cast(va_arg(vl, signed long long)); + } else if (HasFlag(FormatSpecifierFlag_Long)) { + n = static_cast(va_arg(vl, signed long)); + } else if (HasFlag(FormatSpecifierFlag_Char)) { + n = static_cast(va_arg(vl, signed int)); + } else if (HasFlag(FormatSpecifierFlag_Short)) { + n = static_cast(va_arg(vl, signed int)); + } else { + n = static_cast(va_arg(vl, signed int)); + } + const bool negative = n < 0; + const uintmax_t u = (negative) ? static_cast(-n) : static_cast(n); + PrintInteger(negative, u); + } + } + break; + case 'c': + { + size_t len = 1; + if (!HasFlag(FormatSpecifierFlag_LeftJustify)) { + while (len++ < width) { + WriteCharacter(' '); + } + } + WriteCharacter(static_cast(va_arg(vl, int))); + if (HasFlag(FormatSpecifierFlag_LeftJustify)) { + while (len++ < width) { + WriteCharacter(' '); + } + } + } + break; + case 's': + { + const char *str = va_arg(vl, char *); + if (str == nullptr) { + str = "(null)"; + } + + size_t len = Strnlen(str, precision > 0 ? precision : std::numeric_limits::max()); + if (HasFlag(FormatSpecifierFlag_HasPrecision)) { + len = (len < precision) ? len : precision; + } + if (!HasFlag(FormatSpecifierFlag_LeftJustify)) { + while (len++ < width) { + WriteCharacter(' '); + } + } + while (*str && (!HasFlag(FormatSpecifierFlag_HasPrecision) || (precision--) != 0)) { + WriteCharacter(*(str++)); + } + if (HasFlag(FormatSpecifierFlag_LeftJustify)) { + while (len++ < width) { + WriteCharacter(' '); + } + } + } + break; + case '%': + default: + WriteCharacter(specifier); + break; + } + } + + /* Ensure null termination. */ + WriteCharacter('\0'); + dst[dst_size - 1] = '\0'; + } + + KSpinLock g_debug_log_lock; + bool g_initialized_impl; + + /* NOTE: Nintendo's print buffer is size 0x100. 
*/ + char g_print_buffer[0x400]; + + void PutString(const char *str) { + if (g_initialized_impl) { + while (*str) { + const char c = *(str++); + if (c == '\n') { + KDebugLogImpl::PutChar('\r'); + } + KDebugLogImpl::PutChar(c); + } + KDebugLogImpl::Flush(); + } + } + + } + + + #pragma GCC pop_options + + void KDebugLog::Initialize() { + if (KTargetSystem::IsDebugLoggingEnabled()) { + KScopedInterruptDisable di; + KScopedSpinLock lk(g_debug_log_lock); + + if (!g_initialized_impl) { + g_initialized_impl = KDebugLogImpl::Initialize(); + } + } + } + + void KDebugLog::Printf(const char *format, ...) { + if (KTargetSystem::IsDebugLoggingEnabled()) { + ::std::va_list vl; + va_start(vl, format); + VPrintf(format, vl); + va_end(vl); + } + } + + void KDebugLog::VPrintf(const char *format, ::std::va_list vl) { + if (KTargetSystem::IsDebugLoggingEnabled()) { + KScopedInterruptDisable di; + KScopedSpinLock lk(g_debug_log_lock); + + VSNPrintf(g_print_buffer, util::size(g_print_buffer), format, vl); + PutString(g_print_buffer); + } + } + + void KDebugLog::VSNPrintf(char *dst, const size_t dst_size, const char *format, ::std::va_list vl) { + VSNPrintfImpl(dst, dst_size, format, vl); + } + +} diff --git a/libraries/libmesosphere/source/kern_debug_log_impl.board.nintendo_nx.cpp b/libraries/libmesosphere/source/kern_debug_log_impl.board.nintendo_nx.cpp new file mode 100644 index 000000000..e90cbc70f --- /dev/null +++ b/libraries/libmesosphere/source/kern_debug_log_impl.board.nintendo_nx.cpp @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include "kern_debug_log_impl.hpp" + +namespace ams::kern { + + namespace { + + enum UartRegister { + UartRegister_THR = 0, + UartRegister_IER = 1, + UartRegister_FCR = 2, + UartRegister_LCR = 3, + + UartRegister_LSR = 5, + + UartRegister_IRSA_CSR = 8, + + UartRegister_DLL = 0, + UartRegister_DLH = 1, + }; + + KVirtualAddress g_uart_address = 0; + + NOINLINE u32 ReadUartRegister(UartRegister which) { + return GetPointer(g_uart_address)[which]; + } + + NOINLINE void WriteUartRegister(UartRegister which, u32 value) { + GetPointer(g_uart_address)[which] = value; + } + + } + + bool KDebugLogImpl::Initialize() { + /* Set the uart register base address. */ + g_uart_address = KMemoryLayout::GetUartAddress(); + + /* Parameters for uart. */ + constexpr u32 BaudRate = 115200; + constexpr u32 Pllp = 408000000; + constexpr u32 Rate = 16 * BaudRate; + constexpr u32 Divisor = (Pllp + Rate / 2) / Rate; + + /* Initialize the UART registers. */ + /* Set Divisor Latch Access bit, to allow access to DLL/DLH */ + WriteUartRegister(UartRegister_LCR, 0x80); + ReadUartRegister(UartRegister_LCR); + + /* Program the divisor into DLL/DLH. */ + WriteUartRegister(UartRegister_DLL, Divisor & 0xFF); + WriteUartRegister(UartRegister_DLH, (Divisor >> 8) & 0xFF); + ReadUartRegister(UartRegister_DLH); + + /* Set word length to 3, clear Divisor Latch Access. 
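   Worked through, the divisor arithmetic above gives: Rate = 16 * 115200 =
   1843200, and Divisor = (408000000 + 921600) / 1843200 = 221; the
   + Rate / 2 term rounds to nearest rather than truncating. The achieved
   rate is 408000000 / (16 * 221), roughly 115385 baud, about 0.16% fast,
   well within tolerance for the 8-bit word length selected by the LCR
   write below.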
*/ + WriteUartRegister(UartRegister_LCR, 0x03); + ReadUartRegister(UartRegister_LCR); + + /* Disable UART interrupts. */ + WriteUartRegister(UartRegister_IER, 0x00); + + /* Configure the FIFO to be enabled and clear receive. */ + WriteUartRegister(UartRegister_FCR, 0x03); + WriteUartRegister(UartRegister_IRSA_CSR, 0x02); + ReadUartRegister(UartRegister_FCR); + + return true; + } + + void KDebugLogImpl::PutChar(char c) { + while (ReadUartRegister(UartRegister_LSR) & 0x100) { + /* While the FIFO is full, yield. */ + __asm__ __volatile__("yield" ::: "memory"); + } + WriteUartRegister(UartRegister_THR, c); + cpu::DataSynchronizationBarrier(); + } + + void KDebugLogImpl::Flush() { + while ((ReadUartRegister(UartRegister_LSR) & 0x40) == 0) { + /* Wait for the TMTY bit to be one (transmit empty). */ + } + } + +} diff --git a/libraries/libmesosphere/source/kern_debug_log_impl.hpp b/libraries/libmesosphere/source/kern_debug_log_impl.hpp new file mode 100644 index 000000000..1a278a03c --- /dev/null +++ b/libraries/libmesosphere/source/kern_debug_log_impl.hpp @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::kern { + + class KDebugLogImpl { + public: + static NOINLINE bool Initialize(); + static NOINLINE void PutChar(char c); + static NOINLINE void Flush(); + }; + +} diff --git a/libraries/libmesosphere/source/kern_initial_process.cpp b/libraries/libmesosphere/source/kern_initial_process.cpp new file mode 100644 index 000000000..598863894 --- /dev/null +++ b/libraries/libmesosphere/source/kern_initial_process.cpp @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#include + +namespace ams::kern { + + namespace { + + struct InitialProcessInfo { + KProcess *process; + size_t stack_size; + s32 priority; + }; + + KVirtualAddress GetInitialProcessBinaryAddress() { + return KMemoryLayout::GetPageTableHeapRegion().GetEndAddress() - InitialProcessBinarySizeMax; + } + + void LoadInitialProcessBinaryHeader(InitialProcessBinaryHeader *header) { + if (header->magic != InitialProcessBinaryMagic) { + *header = *GetPointer(GetInitialProcessBinaryAddress()); + } + + MESOSPHERE_ABORT_UNLESS(header->magic == InitialProcessBinaryMagic); + MESOSPHERE_ABORT_UNLESS(header->num_processes <= init::GetSlabResourceCounts().num_KProcess); + } + + void CreateProcesses(InitialProcessInfo *infos, KVirtualAddress binary_address, const InitialProcessBinaryHeader &header) { + u8 *current = GetPointer(binary_address + sizeof(InitialProcessBinaryHeader)); + const u8 * const end = GetPointer(binary_address + header.size - sizeof(KInitialProcessHeader)); + + const size_t num_processes = header.num_processes; + for (size_t i = 0; i < num_processes; i++) { + /* Validate that we can read the current KIP. */ + MESOSPHERE_ABORT_UNLESS(current <= end); + KInitialProcessReader reader; + MESOSPHERE_ABORT_UNLESS(reader.Attach(current)); + + /* Parse process parameters and reserve memory. */ + ams::svc::CreateProcessParameter params; + MESOSPHERE_R_ABORT_UNLESS(reader.MakeCreateProcessParameter(std::addressof(params), true)); + MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, params.code_num_pages * PageSize)); + + /* Create the process. */ + KProcess *new_process = nullptr; + { + /* Declare page group to use for process memory. */ + KPageGroup pg(std::addressof(Kernel::GetBlockInfoManager())); + + /* Allocate memory for the process. */ + auto &mm = Kernel::GetMemoryManager(); + const auto pool = static_cast(reader.UsesSecureMemory() ? KMemoryManager::Pool_System : KSystemControl::GetInitialProcessBinaryPool()); + MESOSPHERE_R_ABORT_UNLESS(mm.Allocate(std::addressof(pg), params.code_num_pages, KMemoryManager::EncodeOption(pool, KMemoryManager::Direction_FromFront))); + + { + /* Ensure that we do not leak pages. */ + KScopedPageGroup spg(pg); + + /* Map the process's memory into the temporary region. */ + const auto &temp_region = KMemoryLayout::GetTempRegion(); + KProcessAddress temp_address = Null; + MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().MapPageGroup(std::addressof(temp_address), pg, temp_region.GetAddress(), temp_region.GetSize() / PageSize, KMemoryState_Kernel, KMemoryPermission_KernelReadWrite)); + + /* Load the process. */ + MESOSPHERE_R_ABORT_UNLESS(reader.Load(temp_address, params)); + + /* Unmap the temporary mapping. */ + MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().UnmapPageGroup(temp_address, pg, KMemoryState_Kernel)); + + /* Create a KProcess object. */ + new_process = KProcess::Create(); + MESOSPHERE_ABORT_UNLESS(new_process != nullptr); + + /* Initialize the process. */ + MESOSPHERE_R_ABORT_UNLESS(new_process->Initialize(params, pg, reader.GetCapabilities(), reader.GetNumCapabilities(), std::addressof(Kernel::GetSystemResourceLimit()), pool)); + } + } + + /* Set the process's memory permissions. */ + MESOSPHERE_R_ABORT_UNLESS(reader.SetMemoryPermissions(new_process->GetPageTable(), params)); + + /* Register the process. */ + KProcess::Register(new_process); + + /* Set the ideal core id. */ + new_process->SetIdealCoreId(reader.GetIdealCoreId()); + + /* Save the process info. 
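   The KScopedPageGroup guard above ties the page group's lifetime to its
   block. A minimal sketch of the RAII shape, assuming open/close reference
   semantics (illustrative only; the real class lives in the kernel
   headers):

       class ScopedPageGroupSketch {
           private:
               KPageGroup &m_pg;
           public:
               explicit ScopedPageGroupSketch(KPageGroup &pg) : m_pg(pg) { m_pg.Open(); }
               ~ScopedPageGroupSketch() { m_pg.Close(); }
       };

   Dropping the reference on scope exit is what makes the "ensure that we do
   not leak pages" comment hold: the allocated pages are reclaimed without a
   manual cleanup path at each exit from the block.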
*/ + infos[i].process = new_process; + infos[i].stack_size = reader.GetStackSize(); + infos[i].priority = reader.GetPriority(); + + /* Advance the reader. */ + current += reader.GetBinarySize(); + } + } + + KVirtualAddress g_initial_process_binary_address; + InitialProcessBinaryHeader g_initial_process_binary_header; + u64 g_initial_process_id_min = std::numeric_limits::max(); + u64 g_initial_process_id_max = std::numeric_limits::min(); + + } + + u64 GetInitialProcessIdMin() { + return g_initial_process_id_min; + } + + u64 GetInitialProcessIdMax() { + return g_initial_process_id_max; + } + + void CopyInitialProcessBinaryToKernelMemory() { + LoadInitialProcessBinaryHeader(&g_initial_process_binary_header); + + if (g_initial_process_binary_header.num_processes > 0) { + /* Reserve pages for the initial process binary from the system resource limit. */ + auto &mm = Kernel::GetMemoryManager(); + const size_t total_size = util::AlignUp(g_initial_process_binary_header.size, PageSize); + const size_t num_pages = total_size / PageSize; + MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, total_size)); + + /* Allocate memory for the image. */ + const KMemoryManager::Pool pool = static_cast(KSystemControl::GetInitialProcessBinaryPool()); + const auto allocate_option = KMemoryManager::EncodeOption(pool, KMemoryManager::Direction_FromFront); + KVirtualAddress allocated_memory = mm.AllocateContinuous(num_pages, 1, allocate_option); + MESOSPHERE_ABORT_UNLESS(allocated_memory != Null); + mm.Open(allocated_memory, num_pages); + + /* Relocate the image. */ + std::memmove(GetVoidPointer(allocated_memory), GetVoidPointer(GetInitialProcessBinaryAddress()), g_initial_process_binary_header.size); + std::memset(GetVoidPointer(GetInitialProcessBinaryAddress()), 0, g_initial_process_binary_header.size); + g_initial_process_binary_address = allocated_memory; + } + } + + void CreateAndRunInitialProcesses() { + /* Allocate space for the processes. */ + InitialProcessInfo *infos = static_cast(__builtin_alloca(sizeof(InitialProcessInfo) * g_initial_process_binary_header.num_processes)); + + /* Create the processes. */ + CreateProcesses(infos, g_initial_process_binary_address, g_initial_process_binary_header); + + /* Release the memory used by the image. */ + { + const size_t total_size = util::AlignUp(g_initial_process_binary_header.size, PageSize); + const size_t num_pages = total_size / PageSize; + Kernel::GetMemoryManager().Close(g_initial_process_binary_address, num_pages); + Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_PhysicalMemoryMax, total_size); + } + + /* Determine the initial process id range. */ + for (size_t i = 0; i < g_initial_process_binary_header.num_processes; i++) { + const auto pid = infos[i].process->GetId(); + g_initial_process_id_min = std::min(g_initial_process_id_min, pid); + g_initial_process_id_max = std::max(g_initial_process_id_max, pid); + } + + /* Run the processes. 
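   — see the sizing aside below. */

/*
 * Aside: the page accounting above divides the page-aligned image size by
 * PageSize. A standalone restatement of that arithmetic (mirrors util::AlignUp
 * for power-of-two alignments):
 */
#include <cstddef>

constexpr std::size_t ExamplePageSize = 0x1000;

constexpr std::size_t ExampleAlignUp(std::size_t value, std::size_t align) {
    return (value + align - 1) & ~(align - 1);
}

/* A 0x2A001-byte image needs 43 pages: the partial final page still costs a full page. */
static_assert(ExampleAlignUp(0x2A001, ExamplePageSize) / ExamplePageSize == 43);

/* Run the processes.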
*/
+        for (size_t i = 0; i < g_initial_process_binary_header.num_processes; i++) {
+            MESOSPHERE_R_ABORT_UNLESS(infos[i].process->Run(infos[i].priority, infos[i].stack_size));
+        }
+    }
+
+}
diff --git a/libraries/libmesosphere/source/kern_k_address_space_info.cpp b/libraries/libmesosphere/source/kern_k_address_space_info.cpp
new file mode 100644
index 000000000..e8c04ec91
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_k_address_space_info.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    namespace {
+
+        constexpr uintptr_t Invalid = std::numeric_limits<uintptr_t>::max();
+
+        constexpr KAddressSpaceInfo AddressSpaceInfos[] = {
+            { .bit_width = 32, .address = 2_MB,    .size = 1_GB   - 2_MB,   .type = KAddressSpaceInfo::Type_32Bit,      },
+            { .bit_width = 32, .address = 1_GB,    .size = 4_GB   - 1_GB,   .type = KAddressSpaceInfo::Type_Small64Bit, },
+            { .bit_width = 32, .address = Invalid, .size = 1_GB,            .type = KAddressSpaceInfo::Type_Heap,       },
+            { .bit_width = 32, .address = Invalid, .size = 1_GB,            .type = KAddressSpaceInfo::Type_Alias,      },
+            { .bit_width = 36, .address = 128_MB,  .size = 2_GB   - 128_MB, .type = KAddressSpaceInfo::Type_32Bit,      },
+            { .bit_width = 36, .address = 2_GB,    .size = 64_GB  - 2_GB,   .type = KAddressSpaceInfo::Type_Small64Bit, },
+            { .bit_width = 36, .address = Invalid, .size = 6_GB,            .type = KAddressSpaceInfo::Type_Heap,       },
+            { .bit_width = 36, .address = Invalid, .size = 6_GB,            .type = KAddressSpaceInfo::Type_Alias,      },
+            { .bit_width = 39, .address = 128_MB,  .size = 512_GB - 128_MB, .type = KAddressSpaceInfo::Type_Large64Bit, },
+            { .bit_width = 39, .address = Invalid, .size = 64_GB,           .type = KAddressSpaceInfo::Type_32Bit,      },
+            { .bit_width = 39, .address = Invalid, .size = 6_GB,            .type = KAddressSpaceInfo::Type_Heap,       },
+            { .bit_width = 39, .address = Invalid, .size = 64_GB,           .type = KAddressSpaceInfo::Type_Alias,      },
+            { .bit_width = 39, .address = Invalid, .size = 2_GB,            .type = KAddressSpaceInfo::Type_Stack,      },
+        };
+
+        constexpr bool IsAllowedIndexForAddress(size_t index) {
+            return index < util::size(AddressSpaceInfos) && AddressSpaceInfos[index].GetAddress() != Invalid;
+        }
+
+        constexpr size_t AddressSpaceIndices32Bit[KAddressSpaceInfo::Type_Count] = {
+            0, 1, 0, 2, 0, 3,
+        };
+
+        constexpr size_t AddressSpaceIndices36Bit[KAddressSpaceInfo::Type_Count] = {
+            4, 5, 4, 6, 4, 7,
+        };
+
+        constexpr size_t AddressSpaceIndices39Bit[KAddressSpaceInfo::Type_Count] = {
+            9, 8, 8, 10, 12, 11,
+        };
+
+        constexpr bool IsAllowed32BitType(KAddressSpaceInfo::Type type) {
+            return type < KAddressSpaceInfo::Type_Count && type != KAddressSpaceInfo::Type_Large64Bit && type != KAddressSpaceInfo::Type_Stack;
+        }
+
+        constexpr bool IsAllowed36BitType(KAddressSpaceInfo::Type type) {
+            return type < KAddressSpaceInfo::Type_Count && type != KAddressSpaceInfo::Type_Large64Bit && type != KAddressSpaceInfo::Type_Stack;
+        }
+
+        constexpr bool IsAllowed39BitType(KAddressSpaceInfo::Type type) {
+            return type < KAddressSpaceInfo::Type_Count && type != KAddressSpaceInfo::Type_Small64Bit;
+        }
+
+    }
+
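/*
 * Aside: the per-width index tables above add one level of indirection — for a
 * given address-space width, entry [type] names the row of AddressSpaceInfos to
 * use. A reduced standalone model of that lookup (types and values invented
 * for illustration):
 */
#include <cstddef>

namespace example {

    enum Type { Type_A, Type_B, Type_Count };

    struct Row { std::size_t size; };

    constexpr Row Rows[] = { {1}, {2}, {3} };

    /* Entry i is the row index serving type i for this width. */
    constexpr std::size_t IndicesForWidth[Type_Count] = { 2, 0 };

    constexpr std::size_t SizeFor(Type t) { return Rows[IndicesForWidth[t]].size; }
    static_assert(SizeFor(Type_A) == 3 && SizeFor(Type_B) == 1);

}

+    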
uintptr_t KAddressSpaceInfo::GetAddressSpaceStart(size_t width, KAddressSpaceInfo::Type type) { + switch (width) { + case 32: + MESOSPHERE_ABORT_UNLESS(IsAllowed32BitType(type)); + MESOSPHERE_ABORT_UNLESS(IsAllowedIndexForAddress(AddressSpaceIndices32Bit[type])); + return AddressSpaceInfos[AddressSpaceIndices32Bit[type]].GetAddress(); + case 36: + MESOSPHERE_ABORT_UNLESS(IsAllowed36BitType(type)); + MESOSPHERE_ABORT_UNLESS(IsAllowedIndexForAddress(AddressSpaceIndices36Bit[type])); + return AddressSpaceInfos[AddressSpaceIndices36Bit[type]].GetAddress(); + case 39: + MESOSPHERE_ABORT_UNLESS(IsAllowed39BitType(type)); + MESOSPHERE_ABORT_UNLESS(IsAllowedIndexForAddress(AddressSpaceIndices39Bit[type])); + return AddressSpaceInfos[AddressSpaceIndices39Bit[type]].GetAddress(); + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + } + + size_t KAddressSpaceInfo::GetAddressSpaceSize(size_t width, KAddressSpaceInfo::Type type) { + switch (width) { + case 32: + MESOSPHERE_ABORT_UNLESS(IsAllowed32BitType(type)); + return AddressSpaceInfos[AddressSpaceIndices32Bit[type]].GetSize(); + case 36: + MESOSPHERE_ABORT_UNLESS(IsAllowed36BitType(type)); + return AddressSpaceInfos[AddressSpaceIndices36Bit[type]].GetSize(); + case 39: + MESOSPHERE_ABORT_UNLESS(IsAllowed39BitType(type)); + return AddressSpaceInfos[AddressSpaceIndices39Bit[type]].GetSize(); + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + } + +} diff --git a/libraries/libmesosphere/source/kern_k_scoped_interrupt.cpp b/libraries/libmesosphere/source/kern_k_auto_object.cpp similarity index 60% rename from libraries/libmesosphere/source/kern_k_scoped_interrupt.cpp rename to libraries/libmesosphere/source/kern_k_auto_object.cpp index b342684db..5a023d5bb 100644 --- a/libraries/libmesosphere/source/kern_k_scoped_interrupt.cpp +++ b/libraries/libmesosphere/source/kern_k_auto_object.cpp @@ -17,20 +17,9 @@ namespace ams::kern { - WEAK_SYMBOL KScopedInterruptDisable::KScopedInterruptDisable() { - /* TODO: Disable interrupts. */ - } - - WEAK_SYMBOL KScopedInterruptDisable::~KScopedInterruptDisable() { - /* TODO: un-disable interrupts. */ - } - - WEAK_SYMBOL KScopedInterruptEnable::KScopedInterruptEnable() { - /* TODO: Enable interrupts. */ - } - - WEAK_SYMBOL KScopedInterruptEnable::~KScopedInterruptEnable() { - /* TODO: un-enable interrupts. */ + KAutoObject *KAutoObject::Create(KAutoObject *obj) { + obj->ref_count = 1; + return obj; } } diff --git a/libraries/libmesosphere/source/kern_k_auto_object_container.cpp b/libraries/libmesosphere/source/kern_k_auto_object_container.cpp new file mode 100644 index 000000000..aebbc8ec8 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_auto_object_container.cpp @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+
+    Result KAutoObjectWithListContainer::Register(KAutoObjectWithList *obj) {
+        MESOSPHERE_ASSERT_THIS();
+
+        KScopedLightLock lk(this->lock);
+
+        this->object_list.insert(*obj);
+
+        return ResultSuccess();
+    }
+
+    Result KAutoObjectWithListContainer::Unregister(KAutoObjectWithList *obj) {
+        MESOSPHERE_ASSERT_THIS();
+
+        KScopedLightLock lk(this->lock);
+
+        this->object_list.erase(this->object_list.iterator_to(*obj));
+
+        return ResultSuccess();
+    }
+
+    size_t KAutoObjectWithListContainer::GetOwnedCount(KProcess *owner) {
+        MESOSPHERE_ASSERT_THIS();
+
+        KScopedLightLock lk(this->lock);
+
+        size_t count = 0;
+
+        for (auto &obj : this->object_list) {
+            if (obj.GetOwner() == owner) {
+                count++;
+            }
+        }
+
+        return count;
+    }
+
+}
diff --git a/libraries/libmesosphere/source/kern_k_capabilities.cpp b/libraries/libmesosphere/source/kern_k_capabilities.cpp
new file mode 100644
index 000000000..5e9b36356
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_k_capabilities.cpp
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    Result KCapabilities::Initialize(const u32 *caps, s32 num_caps, KProcessPageTable *page_table) {
+        /* We're initializing an initial process. */
+        /* Most fields have already been cleared by our constructor. */
+
+        /* Initial processes may run on all cores. */
+        this->core_mask = (1ul << cpu::NumCores) - 1;
+
+        /* Initial processes may use any user priority they like. */
+        this->priority_mask = ~0xFul;
+
+        /* TODO: Here, Nintendo sets the kernel version to (current kernel version). */
+        /* How should we handle this? Not a MESOSPHERE_TODO because it's not critical. */
+
+        /* Parse the capabilities array. */
+        return this->SetCapabilities(caps, num_caps, page_table);
+    }
+
+    Result KCapabilities::SetCorePriorityCapability(const util::BitPack32 cap) {
+        /* We can't set core/priority if we've already set them. */
+        R_UNLESS(this->core_mask == 0,     svc::ResultInvalidArgument());
+        R_UNLESS(this->priority_mask == 0, svc::ResultInvalidArgument());
+
+        /* Validate the core/priority. */
+        const auto min_core = cap.Get<CorePriority::MinimumCoreId>();
+        const auto max_core = cap.Get<CorePriority::MaximumCoreId>();
+        const auto max_prio = cap.Get<CorePriority::LowestThreadPriority>();
+        const auto min_prio = cap.Get<CorePriority::HighestThreadPriority>();
+
+        R_UNLESS(min_core <= max_core,      svc::ResultInvalidCombination());
+        R_UNLESS(min_prio <= max_prio,      svc::ResultInvalidCombination());
+        R_UNLESS(max_core < cpu::NumCores,  svc::ResultInvalidCoreId());
+
+        MESOSPHERE_ASSERT(max_core < BITSIZEOF(u64));
+        MESOSPHERE_ASSERT(max_prio < BITSIZEOF(u64));
+
+        /* Set core mask. */
+        for (auto core_id = min_core; core_id <= max_core; core_id++) {
+            this->core_mask |= (1ul << core_id);
+        }
+        MESOSPHERE_ASSERT((this->core_mask & ((1ul << cpu::NumCores) - 1)) == this->core_mask);
+
+        /* Set priority mask. 
*/ + for (auto prio = min_prio; prio <= max_prio; prio++) { + this->priority_mask |= (1ul << prio); + } + + /* We must have some core/priority we can use. */ + R_UNLESS(this->core_mask != 0, svc::ResultInvalidArgument()); + R_UNLESS(this->priority_mask != 0, svc::ResultInvalidArgument()); + + return ResultSuccess(); + } + + Result KCapabilities::SetSyscallMaskCapability(const util::BitPack32 cap, u32 &set_svc) { + /* Validate the index. */ + const auto mask = cap.Get(); + const auto index = cap.Get(); + + const u32 index_flag = (1u << index); + R_UNLESS((set_svc & index_flag) == 0, svc::ResultInvalidCombination()); + set_svc |= index_flag; + + /* Set SVCs. */ + for (size_t i = 0; i < SyscallMask::Mask::Count; i++) { + const u32 svc_id = SyscallMask::Mask::Count * index + i; + if (mask & (1u << i)) { + R_UNLESS(this->SetSvcAllowed(svc_id), svc::ResultOutOfRange()); + } + } + + return ResultSuccess(); + } + + Result KCapabilities::MapRange(const util::BitPack32 cap, const util::BitPack32 size_cap, KProcessPageTable *page_table) { + /* Validate reserved bits are unused. */ + R_UNLESS(size_cap.Get() == 0, svc::ResultOutOfRange()); + + /* Get/validate address/size */ + const u64 phys_addr = cap.Get() * PageSize; + const size_t num_pages = size_cap.Get(); + const size_t size = num_pages * PageSize; + R_UNLESS(phys_addr == GetInteger(KPhysicalAddress(phys_addr)), svc::ResultInvalidAddress()); + R_UNLESS(num_pages != 0, svc::ResultInvalidSize()); + R_UNLESS(phys_addr < phys_addr + size, svc::ResultInvalidAddress()); + R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, svc::ResultInvalidAddress()); + + /* Do the mapping. */ + const KMemoryPermission perm = cap.Get() ? KMemoryPermission_UserRead : KMemoryPermission_UserReadWrite; + if (size_cap.Get()) { + return page_table->MapStatic(phys_addr, size, perm); + } else { + return page_table->MapIo(phys_addr, size, perm); + } + } + + Result KCapabilities::MapIoPage(const util::BitPack32 cap, KProcessPageTable *page_table) { + /* Get/validate address/size */ + const u64 phys_addr = cap.Get() * PageSize; + const size_t num_pages = 1; + const size_t size = num_pages * PageSize; + R_UNLESS(phys_addr == GetInteger(KPhysicalAddress(phys_addr)), svc::ResultInvalidAddress()); + R_UNLESS(num_pages != 0, svc::ResultInvalidSize()); + R_UNLESS(phys_addr < phys_addr + size, svc::ResultInvalidAddress()); + R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, svc::ResultInvalidAddress()); + + /* Do the mapping. */ + return page_table->MapIo(phys_addr, size, KMemoryPermission_UserReadWrite); + } + + Result KCapabilities::MapRegion(const util::BitPack32 cap, KProcessPageTable *page_table) { + /* Define the allowed memory regions. */ + constexpr KMemoryRegionType MemoryRegions[] = { + KMemoryRegionType_None, + KMemoryRegionType_KernelTraceBuffer, + KMemoryRegionType_OnMemoryBootImage, + KMemoryRegionType_DTB, + }; + + /* Extract regions/read only. */ + const RegionType types[3] = { cap.Get(), cap.Get(), cap.Get(), }; + const bool ro[3] = { cap.Get(), cap.Get(), cap.Get(), }; + + for (size_t i = 0; i < util::size(types); i++) { + const auto type = types[i]; + const auto perm = ro[i] ? 
KMemoryPermission_UserRead : KMemoryPermission_UserReadWrite;
+            switch (type) {
+                case RegionType::None:
+                    break;
+                case RegionType::KernelTraceBuffer:
+                case RegionType::OnMemoryBootImage:
+                case RegionType::DTB:
+                    R_TRY(page_table->MapRegion(MemoryRegions[static_cast<u32>(type)], perm));
+                    break;
+                default:
+                    return svc::ResultNotFound();
+            }
+        }
+
+        return ResultSuccess();
+    }
+
+    Result KCapabilities::SetInterruptPairCapability(const util::BitPack32 cap) {
+        /* Extract interrupts. */
+        const u32 ids[2] = { cap.Get<InterruptPair::InterruptId0>(), cap.Get<InterruptPair::InterruptId1>(), };
+
+        for (size_t i = 0; i < util::size(ids); i++) {
+            if (ids[i] != PaddingInterruptId) {
+                R_UNLESS(Kernel::GetInterruptManager().IsInterruptDefined(ids[i]), svc::ResultOutOfRange());
+                R_UNLESS(this->SetInterruptAllowed(ids[i]),                        svc::ResultOutOfRange());
+            }
+        }
+
+        return ResultSuccess();
+    }
+
+    Result KCapabilities::SetProgramTypeCapability(const util::BitPack32 cap) {
+        /* Validate. */
+        R_UNLESS(cap.Get<ProgramType::Reserved>() == 0, svc::ResultReservedUsed());
+
+        this->program_type = cap.Get<ProgramType::Type>();
+        return ResultSuccess();
+    }
+
+    Result KCapabilities::SetKernelVersionCapability(const util::BitPack32 cap) {
+        /* Ensure we haven't set our version before. */
+        R_UNLESS(this->intended_kernel_version.Get<KernelVersion::MajorVersion>() == 0, svc::ResultInvalidArgument());
+
+        /* Set, ensure that we set a valid version. */
+        this->intended_kernel_version = cap;
+        R_UNLESS(this->intended_kernel_version.Get<KernelVersion::MajorVersion>() != 0, svc::ResultInvalidArgument());
+
+        return ResultSuccess();
+    }
+
+    Result KCapabilities::SetHandleTableCapability(const util::BitPack32 cap) {
+        /* Validate. */
+        R_UNLESS(cap.Get<HandleTable::Reserved>() == 0, svc::ResultReservedUsed());
+
+        this->handle_table_size = cap.Get<HandleTable::Size>();
+        return ResultSuccess();
+    }
+
+    Result KCapabilities::SetDebugFlagsCapability(const util::BitPack32 cap) {
+        /* Validate. */
+        R_UNLESS(cap.Get<DebugFlags::Reserved>() == 0, svc::ResultReservedUsed());
+
+        this->debug_capabilities.Set<DebugFlags::AllowDebug>(cap.Get<DebugFlags::AllowDebug>());
+        this->debug_capabilities.Set<DebugFlags::ForceDebug>(cap.Get<DebugFlags::ForceDebug>());
+        return ResultSuccess();
+    }
+
+    Result KCapabilities::SetCapability(const util::BitPack32 cap, u32 &set_flags, u32 &set_svc, KProcessPageTable *page_table) {
+        /* Validate this is a capability we can act on. */
+        const auto type = GetCapabilityType(cap);
+        R_UNLESS(type != CapabilityType::Invalid, svc::ResultInvalidArgument());
+        R_UNLESS(type != CapabilityType::Padding, ResultSuccess());
+
+        /* Check that we haven't already processed this capability. */
+        const auto flag = GetCapabilityFlag(type);
+        R_UNLESS(((set_flags & InitializeOnceFlags) & flag) == 0, svc::ResultInvalidCombination());
+        set_flags |= flag;
+
+        /* Process the capability. 
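   — dispatched below; see the aside first. */

/*
 * Aside: the dedup logic above gives every capability type a distinct bit and
 * rejects repeats of once-only types. A standalone sketch of that check:
 */
#include <cstdint>

constexpr std::uint32_t ExampleFlag(unsigned type) { return 1u << type; }

constexpr bool ExampleCheckAndMark(std::uint32_t &seen, std::uint32_t once_mask, unsigned type) {
    const std::uint32_t flag = ExampleFlag(type);
    if ((seen & once_mask & flag) != 0) {
        return false; /* a once-only capability appeared twice */
    }
    seen |= flag;
    return true;
}

/* Process the capability.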
*/ + switch (type) { + case CapabilityType::CorePriority: return this->SetCorePriorityCapability(cap); + case CapabilityType::SyscallMask: return this->SetSyscallMaskCapability(cap, set_svc); + case CapabilityType::MapIoPage: return this->MapIoPage(cap, page_table); + case CapabilityType::MapRegion: return this->MapRegion(cap, page_table); + case CapabilityType::InterruptPair: return this->SetInterruptPairCapability(cap); + case CapabilityType::ProgramType: return this->SetProgramTypeCapability(cap); + case CapabilityType::KernelVersion: return this->SetKernelVersionCapability(cap); + case CapabilityType::HandleTable: return this->SetHandleTableCapability(cap); + case CapabilityType::DebugFlags: return this->SetDebugFlagsCapability(cap); + default: return svc::ResultInvalidArgument(); + } + } + + Result KCapabilities::SetCapabilities(const u32 *caps, s32 num_caps, KProcessPageTable *page_table) { + u32 set_flags = 0, set_svc = 0; + + for (s32 i = 0; i < num_caps; i++) { + const util::BitPack32 cap = { caps[i] }; + if (GetCapabilityType(cap) == CapabilityType::MapRange) { + /* Check that the pair cap exists. */ + R_UNLESS((++i) < num_caps, svc::ResultInvalidCombination()); + + /* Check the pair cap is a map range cap. */ + const util::BitPack32 size_cap = { caps[i] }; + R_UNLESS(GetCapabilityType(size_cap) == CapabilityType::MapRange, svc::ResultInvalidCombination()); + + /* Map the range. */ + R_TRY(this->MapRange(cap, size_cap, page_table)); + } else { + R_TRY(this->SetCapability(cap, set_flags, set_svc, page_table)); + } + } + + return ResultSuccess(); + } + +} diff --git a/libraries/libmesosphere/source/kern_k_class_token.cpp b/libraries/libmesosphere/source/kern_k_class_token.cpp new file mode 100644 index 000000000..2531d9f45 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_class_token.cpp @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + /* Ensure that we generate correct class tokens for all types. */ + + /* Ensure that the absolute token values are correct. 
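   — asserted below; an aside on the encoding first. */

/*
 * Aside: as the asserts below suggest, a token's low byte identifies the base
 * class chain and the high byte is a one-per-final-class discriminator, so an
 * "is-a" check reduces to a mask-and-compare. Reduced sketch (values invented):
 */
#include <cstdint>

constexpr std::uint16_t ExampleBaseToken  = 0b00000001;
constexpr std::uint16_t ExampleFinalToken = (0b00001011 << 8) | ExampleBaseToken;

static_assert((ExampleFinalToken & ExampleBaseToken) == ExampleBaseToken); /* derived-from test */
static_assert((ExampleFinalToken >> 8) == 0b00001011);                     /* unique final id */

/* Ensure that the absolute token values are correct.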
*/ + static_assert(ClassToken == 0b00000000'00000000); + static_assert(ClassToken == 0b00000000'00000001); + static_assert(ClassToken == 0b00000000'00000011); + static_assert(ClassToken == 0b00000111'00000011); + static_assert(ClassToken == 0b00001011'00000001); + static_assert(ClassToken == 0b00010011'00000001); + /* TODO: static_assert(ClassToken == 0b00100011'00000001); */ + /* TODO: static_assert(ClassToken == 0b01000011'00000001); */ + /* TODO: static_assert(ClassToken == 0b10000011'00000001); */ + /* TODO: static_assert(ClassToken == 0b00001101'00000000); */ + static_assert(ClassToken == 0b00010101'00000001); + static_assert(ClassToken == 0b00100101'00000000); + static_assert(ClassToken == 0b01000101'00000000); + static_assert(ClassToken == 0b10000101'00000000); + static_assert(ClassToken == 0b00011001'00000000); + static_assert(ClassToken == 0b00101001'00000000); + static_assert(ClassToken == 0b01001001'00000000); + /* TODO: static_assert(ClassToken == 0b10001001'00000000); */ + /* TODO: static_assert(ClassToken == 0b00110001'00000000); */ + /* TODO: static_assert(ClassToken == 0b01010001'00000000); */ + static_assert(ClassToken == 0b10010001'00000000); + static_assert(ClassToken == 0b01100001'00000000); + static_assert(ClassToken == 0b10100001'00000000); + static_assert(ClassToken == 0b11000001'00000000); + + /* Ensure that the token hierarchy is correct. */ + + /* Base classes */ + static_assert(ClassToken == (0b00000000)); + static_assert(ClassToken == (0b00000001 | ClassToken)); + static_assert(ClassToken == (0b00000010 | ClassToken)); + + /* Final classes */ + static_assert(ClassToken == ((0b00000111 << 8) | ClassToken)); + static_assert(ClassToken == ((0b00001011 << 8) | ClassToken)); + static_assert(ClassToken == ((0b00010011 << 8) | ClassToken)); + /* TODO: static_assert(ClassToken == ((0b00100011 << 8) | ClassToken)); */ + /* TODO: static_assert(ClassToken == ((0b01000011 << 8) | ClassToken)); */ + /* TODO: static_assert(ClassToken == ((0b10000011 << 8) | ClassToken)); */ + /* TODO: static_assert(ClassToken == ((0b00001101 << 8) | ClassToken)); */ + static_assert(ClassToken == ((0b00010101 << 8) | ClassToken)); + static_assert(ClassToken == ((0b00100101 << 8) | ClassToken)); + static_assert(ClassToken == ((0b01000101 << 8) | ClassToken)); + static_assert(ClassToken == ((0b10000101 << 8) | ClassToken)); + static_assert(ClassToken == ((0b00011001 << 8) | ClassToken)); + static_assert(ClassToken == ((0b00101001 << 8) | ClassToken)); + static_assert(ClassToken == ((0b01001001 << 8) | ClassToken)); + /* TODO: static_assert(ClassToken == ((0b10001001 << 8) | ClassToken)); */ + /* TODO: static_assert(ClassToken == ((0b00110001 << 8) | ClassToken)); */ + /* TODO: static_assert(ClassToken == ((0b01010001 << 8) | ClassToken)); */ + static_assert(ClassToken == ((0b10010001 << 8) | ClassToken)); + static_assert(ClassToken == ((0b01100001 << 8) | ClassToken)); + static_assert(ClassToken == ((0b10100001 << 8) | ClassToken)); + static_assert(ClassToken == ((0b11000001 << 8) | ClassToken)); + + /* Ensure that the token hierarchy reflects the class hierarchy. */ + + /* Base classes. 
*/ + static_assert(!std::is_final::value && std::is_base_of::value); + static_assert(!std::is_final::value && std::is_base_of::value); + + /* Final classes */ + static_assert(std::is_final::value && std::is_base_of::value); + static_assert(std::is_final::value && std::is_base_of::value); + static_assert(std::is_final::value && std::is_base_of::value); + /* TODO: static_assert(std::is_final::value && std::is_base_of::value); */ + /* TODO: static_assert(std::is_final::value && std::is_base_of::value); */ + /* TODO: static_assert(std::is_final::value && std::is_base_of::value); */ + /* TODO: static_assert(std::is_final::value && std::is_base_of::value); */ + static_assert(std::is_final::value && std::is_base_of::value); + static_assert(std::is_final::value && std::is_base_of::value); + static_assert(std::is_final::value && std::is_base_of::value); + static_assert(std::is_final::value && std::is_base_of::value); + static_assert(std::is_final::value && std::is_base_of::value); + static_assert(std::is_final::value && std::is_base_of::value); + static_assert(std::is_final::value && std::is_base_of::value); + /* TODO: static_assert(std::is_final::value && std::is_base_of::value); */ + /* TODO: static_assert(std::is_final::value && std::is_base_of::value); */ + /* TODO: static_assert(std::is_final::value && std::is_base_of::value); */ + static_assert(std::is_final::value && std::is_base_of::value); + static_assert(std::is_final::value && std::is_base_of::value); + static_assert(std::is_final::value && std::is_base_of::value); + static_assert(std::is_final::value && std::is_base_of::value); + + +} diff --git a/mesosphere/kernel_ldr/source/kern_k_scoped_interrupt.cpp b/libraries/libmesosphere/source/kern_k_device_address_space.cpp similarity index 53% rename from mesosphere/kernel_ldr/source/kern_k_scoped_interrupt.cpp rename to libraries/libmesosphere/source/kern_k_device_address_space.cpp index 00ef6f718..ffe27b765 100644 --- a/mesosphere/kernel_ldr/source/kern_k_scoped_interrupt.cpp +++ b/libraries/libmesosphere/source/kern_k_device_address_space.cpp @@ -17,20 +17,9 @@ namespace ams::kern { - inline KScopedInterruptDisable::KScopedInterruptDisable() { - /* Intentionally do nothing, KernelLdr doesn't have interrupts set up. */ - } - - inline KScopedInterruptDisable::~KScopedInterruptDisable() { - /* Intentionally do nothing, KernelLdr doesn't have interrupts set up. */ - } - - inline KScopedInterruptEnable::KScopedInterruptEnable() { - /* Intentionally do nothing, KernelLdr doesn't have interrupts set up. */ - } - - inline KScopedInterruptEnable::~KScopedInterruptEnable() { - /* Intentionally do nothing, KernelLdr doesn't have interrupts set up. */ + void KDeviceAddressSpace::Initialize() { + /* This just forwards to the device page table manager. */ + KDevicePageTable::Initialize(); } } diff --git a/libraries/libmesosphere/source/kern_k_dpc_manager.cpp b/libraries/libmesosphere/source/kern_k_dpc_manager.cpp new file mode 100644 index 000000000..bef4397aa --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_dpc_manager.cpp @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    namespace {
+
+        class KDpcTask {
+            private:
+                static inline KLightLock s_lock;
+                static inline KLightConditionVariable s_cond_var;
+                static inline u64 s_core_mask;
+                static inline KDpcTask *s_task;
+            private:
+                static bool HasRequest(s32 core_id) {
+                    return (s_core_mask & (1ull << core_id)) != 0;
+                }
+
+                static void SetRequest(s32 core_id) {
+                    s_core_mask |= (1ull << core_id);
+                }
+
+                static void ClearRequest(s32 core_id) {
+                    s_core_mask &= ~(1ull << core_id);
+                }
+            public:
+                virtual void DoTask() { /* ... */ }
+
+                static void WaitForRequest() {
+                    /* Wait for a request to come in. */
+                    const auto core_id = GetCurrentCoreId();
+                    KScopedLightLock lk(s_lock);
+                    while (!HasRequest(core_id)) {
+                        s_cond_var.Wait(&s_lock, -1ll);
+                    }
+                }
+
+                static bool TimedWaitForRequest(s64 timeout) {
+                    /* Wait for a request to come in. */
+                    const auto core_id = GetCurrentCoreId();
+                    KScopedLightLock lk(s_lock);
+                    while (!HasRequest(core_id)) {
+                        s_cond_var.Wait(&s_lock, timeout);
+                        if (KHardwareTimer::GetTick() >= timeout) {
+                            return false;
+                        }
+                    }
+                    return true;
+                }
+
+                static void HandleRequest() {
+                    /* Perform the request. */
+                    s_task->DoTask();
+
+                    /* Clear the request. */
+                    const auto core_id = GetCurrentCoreId();
+                    KScopedLightLock lk(s_lock);
+                    ClearRequest(core_id);
+                    if (s_core_mask == 0) {
+                        s_cond_var.Broadcast();
+                    }
+                }
+
+        };
+
+        /* Convenience definitions. */
+        constexpr s32 DpcManagerThreadPriority = 3;
+        constexpr s64 DpcManagerTimeout = ams::svc::Tick(TimeSpan::FromMilliSeconds(10));
+
+        /* Globals. */
+        s64 g_preemption_priorities[cpu::NumCores];
+
+        /* Manager thread functions. */
+        void DpcManagerNormalThreadFunction(uintptr_t arg) {
+            while (true) {
+                KDpcTask::WaitForRequest();
+                KDpcTask::HandleRequest();
+            }
+        }
+
+        void DpcManagerPreemptionThreadFunction(uintptr_t arg) {
+            s64 timeout = KHardwareTimer::GetTick() + DpcManagerTimeout;
+            while (true) {
+                if (KDpcTask::TimedWaitForRequest(timeout)) {
+                    KDpcTask::HandleRequest();
+                } else {
+                    /* Rotate the scheduler queue for each core. */
+                    KScopedSchedulerLock lk;
+
+                    for (size_t core_id = 0; core_id < cpu::NumCores; core_id++) {
+                        if (const s32 priority = g_preemption_priorities[core_id]; priority > DpcManagerThreadPriority) {
+                            KScheduler::RotateScheduledQueue(static_cast<s32>(core_id), priority);
+                        }
+                    }
+
+                    /* Update our next timeout. */
+                    timeout = KHardwareTimer::GetTick() + DpcManagerTimeout;
+                }
+            }
+        }
+
+    }
+
+    void KDpcManager::Initialize(s32 core_id, s32 priority) {
+        /* Reserve a thread from the system limit. */
+        MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_ThreadCountMax, 1));
+
+        /* Create a new thread. */
+        KThread *new_thread = KThread::Create();
+        MESOSPHERE_ABORT_UNLESS(new_thread != nullptr);
+
+        /* Launch the new thread. Only the last core's manager runs the preemption loop */
+        /* (which rotates every core's queue); the other cores only run DPC tasks. */
+        g_preemption_priorities[core_id] = priority;
+        if (core_id == cpu::NumCores - 1) {
+            MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(new_thread, DpcManagerPreemptionThreadFunction, 0, DpcManagerThreadPriority, core_id));
+        } else {
+            MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(new_thread, DpcManagerNormalThreadFunction, 0, DpcManagerThreadPriority, core_id));
+        }
+
+        /* Register the new thread. */
+        KThread::Register(new_thread);
+
+        /* Run the thread. 
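   — started below; an aside on the preemption loop first. */

/*
 * Aside: DpcManagerPreemptionThreadFunction above follows a classic
 * absolute-deadline pattern. The same shape in portable C++, with standard
 * primitives standing in for the kernel's lock, condvar, and hardware timer:
 */
#include <chrono>
#include <condition_variable>
#include <mutex>

inline void PreemptionLoopSketch(std::mutex &m, std::condition_variable &cv, bool &request, void (*rotate)()) {
    auto deadline = std::chrono::steady_clock::now() + std::chrono::milliseconds(10);
    for (;;) {
        std::unique_lock<std::mutex> lk(m);
        if (cv.wait_until(lk, deadline, [&] { return request; })) {
            request = false; /* a task came in: handle it */
        } else {
            rotate();        /* deadline passed: rotate the scheduler queues */
            deadline = std::chrono::steady_clock::now() + std::chrono::milliseconds(10);
        }
    }
}

/* Run the thread.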
*/ + new_thread->Run(); + } + + void KDpcManager::HandleDpc() { + /* The only deferred procedure supported by Horizon is thread termination. */ + /* Check if we need to terminate the current thread. */ + KThread *cur_thread = GetCurrentThreadPointer(); + if (cur_thread->IsTerminationRequested()) { + KScopedInterruptEnable ei; + cur_thread->Exit(); + } + } + +} diff --git a/libraries/libmesosphere/source/kern_k_handle_table.cpp b/libraries/libmesosphere/source/kern_k_handle_table.cpp new file mode 100644 index 000000000..ca71003a8 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_handle_table.cpp @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + Result KHandleTable::Finalize() { + MESOSPHERE_ASSERT_THIS(); + + /* Get the table and clear our record of it. */ + Entry *saved_table = nullptr; + u16 saved_table_size = 0; + { + KScopedDisableDispatch dd; + KScopedSpinLock lk(this->lock); + + std::swap(this->table, saved_table); + std::swap(this->table_size, saved_table_size); + } + + /* Close and free all entries. */ + for (size_t i = 0; i < saved_table_size; i++) { + Entry *entry = std::addressof(saved_table[i]); + + if (KAutoObject *obj = entry->GetObject(); obj != nullptr) { + obj->Close(); + this->FreeEntry(entry); + } + } + + return ResultSuccess(); + } + + bool KHandleTable::Remove(ams::svc::Handle handle) { + MESOSPHERE_ASSERT_THIS(); + + /* Don't allow removal of a pseudo-handle. */ + if (ams::svc::IsPseudoHandle(handle)) { + return false; + } + + /* Handles must not have reserved bits set. */ + if (GetHandleBitPack(handle).Get() != 0) { + return false; + } + + /* Find the object and free the entry. */ + KAutoObject *obj = nullptr; + { + KScopedDisableDispatch dd; + KScopedSpinLock lk(this->lock); + + if (Entry *entry = this->FindEntry(handle); entry != nullptr) { + obj = entry->GetObject(); + this->FreeEntry(entry); + } else { + return false; + } + } + + /* Close the object. */ + obj->Close(); + return true; + } + + Result KHandleTable::Add(ams::svc::Handle *out_handle, KAutoObject *obj, u16 type) { + MESOSPHERE_ASSERT_THIS(); + KScopedDisableDispatch dd; + KScopedSpinLock lk(this->lock); + + /* Never exceed our capacity. */ + R_UNLESS(this->count < this->table_size, svc::ResultOutOfHandles()); + + /* Allocate entry, set output handle. */ + { + const auto linear_id = this->AllocateLinearId(); + Entry *entry = this->AllocateEntry(); + entry->SetUsed(obj, linear_id, type); + obj->Open(); + *out_handle = EncodeHandle(this->GetEntryIndex(entry), linear_id); + } + + return ResultSuccess(); + } + + Result KHandleTable::Reserve(ams::svc::Handle *out_handle) { + MESOSPHERE_ASSERT_THIS(); + KScopedDisableDispatch dd; + KScopedSpinLock lk(this->lock); + + /* Never exceed our capacity. 
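   — checked below; an aside on the handle layout first. */

/*
 * Aside: EncodeHandle above pairs a table index with a small nonzero linear id
 * (a generation counter), so a stale handle to a reused slot fails the id
 * comparison. Sketch with assumed 15-bit fields (widths are illustrative):
 */
#include <cstdint>

constexpr std::uint32_t ExampleEncode(std::uint32_t index, std::uint32_t linear_id) {
    return (linear_id << 15) | index;
}
constexpr std::uint32_t ExampleGetIndex(std::uint32_t handle)    { return handle & 0x7FFF; }
constexpr std::uint32_t ExampleGetLinearId(std::uint32_t handle) { return (handle >> 15) & 0x7FFF; }

static_assert(ExampleGetIndex(ExampleEncode(42, 7)) == 42);
static_assert(ExampleGetLinearId(ExampleEncode(42, 7)) == 7);

/* Never exceed our capacity.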
*/ + R_UNLESS(this->count < this->table_size, svc::ResultOutOfHandles()); + + *out_handle = EncodeHandle(this->GetEntryIndex(this->AllocateEntry()), this->AllocateLinearId()); + return ResultSuccess(); + } + + void KHandleTable::Unreserve(ams::svc::Handle handle) { + MESOSPHERE_ASSERT_THIS(); + KScopedDisableDispatch dd; + KScopedSpinLock lk(this->lock); + + /* Unpack the handle. */ + const auto handle_pack = GetHandleBitPack(handle); + const auto index = handle_pack.Get(); + const auto linear_id = handle_pack.Get(); + const auto reserved = handle_pack.Get(); + MESOSPHERE_ASSERT(reserved == 0); + MESOSPHERE_ASSERT(linear_id != 0); + MESOSPHERE_ASSERT(index < this->table_size); + + /* Free the entry. */ + /* NOTE: This code does not check the linear id. */ + Entry *entry = std::addressof(this->table[index]); + MESOSPHERE_ASSERT(entry->GetObject() == nullptr); + + this->FreeEntry(entry); + } + + void KHandleTable::Register(ams::svc::Handle handle, KAutoObject *obj, u16 type) { + MESOSPHERE_ASSERT_THIS(); + KScopedDisableDispatch dd; + KScopedSpinLock lk(this->lock); + + /* Unpack the handle. */ + const auto handle_pack = GetHandleBitPack(handle); + const auto index = handle_pack.Get(); + const auto linear_id = handle_pack.Get(); + const auto reserved = handle_pack.Get(); + MESOSPHERE_ASSERT(reserved == 0); + MESOSPHERE_ASSERT(linear_id != 0); + MESOSPHERE_ASSERT(index < this->table_size); + + /* Set the entry. */ + Entry *entry = std::addressof(this->table[index]); + MESOSPHERE_ASSERT(entry->GetObject() == nullptr); + + entry->SetUsed(obj, linear_id, type); + obj->Open(); + } + +} diff --git a/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp b/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp new file mode 100644 index 000000000..f5278d0e1 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + namespace { + + struct BlzSegmentFlags { + using Offset = util::BitPack16::Field<0, 12, u32>; + using Size = util::BitPack16::Field; + }; + + NOINLINE void BlzUncompress(void *_end) { + /* Parse the footer, endian agnostic. */ + static_assert(sizeof(u32) == 4); + static_assert(sizeof(u16) == 2); + static_assert(sizeof(u8) == 1); + + u8 *end = static_cast(_end); + const u32 total_size = (end[-12] << 0) | (end[-11] << 8) | (end[-10] << 16) | (end[- 9] << 24); + const u32 footer_size = (end[- 8] << 0) | (end[- 7] << 8) | (end[- 6] << 16) | (end[- 5] << 24); + const u32 additional_size = (end[- 4] << 0) | (end[- 3] << 8) | (end[- 2] << 16) | (end[- 1] << 24); + + /* Prepare to decompress. */ + u8 *cmp_start = end - total_size; + u32 cmp_ofs = total_size - footer_size; + u32 out_ofs = total_size + additional_size; + + /* Decompress. */ + while (out_ofs) { + u8 control = cmp_start[--cmp_ofs]; + + /* Each bit in the control byte is a flag indicating compressed or not compressed. 
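   — consumed in the loop below; an aside on the footer parse first. */

/*
 * Aside: the footer parse above assembles each u32 from individual bytes, so
 * the result is correct regardless of host endianness. Standalone equivalent:
 */
#include <cstdint>

constexpr std::uint32_t ExampleReadU32Le(const std::uint8_t *p) {
    return (static_cast<std::uint32_t>(p[0]) <<  0) |
           (static_cast<std::uint32_t>(p[1]) <<  8) |
           (static_cast<std::uint32_t>(p[2]) << 16) |
           (static_cast<std::uint32_t>(p[3]) << 24);
}

/* Each bit in the control byte flags one following unit as compressed or raw.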
*/ + for (size_t i = 0; i < 8 && out_ofs; ++i, control <<= 1) { + if (control & 0x80) { + /* NOTE: Nintendo does not check if it's possible to decompress. */ + /* As such, we will leave the following as a debug assertion, and not a release assertion. */ + MESOSPHERE_AUDIT(cmp_ofs >= sizeof(u16)); + cmp_ofs -= sizeof(u16); + + /* Extract segment bounds. */ + const util::BitPack16 seg_flags{static_cast((cmp_start[cmp_ofs] << 0) | (cmp_start[cmp_ofs + 1] << 8))}; + const u32 seg_ofs = seg_flags.Get() + 3; + const u32 seg_size = std::min(seg_flags.Get() + 3, out_ofs); + MESOSPHERE_AUDIT(out_ofs + seg_ofs <= total_size + additional_size); + + /* Copy the data. */ + out_ofs -= seg_size; + for (size_t j = 0; j < seg_size; j++) { + cmp_start[out_ofs + j] = cmp_start[out_ofs + seg_ofs + j]; + } + } else { + /* NOTE: Nintendo does not check if it's possible to copy. */ + /* As such, we will leave the following as a debug assertion, and not a release assertion. */ + MESOSPHERE_AUDIT(cmp_ofs >= sizeof(u8)); + cmp_start[--out_ofs] = cmp_start[--cmp_ofs]; + } + } + } + } + + } + + Result KInitialProcessReader::MakeCreateProcessParameter(ams::svc::CreateProcessParameter *out, bool enable_aslr) const { + /* Get and validate addresses/sizes. */ + const uintptr_t rx_address = this->kip_header->GetRxAddress(); + const size_t rx_size = this->kip_header->GetRxSize(); + const uintptr_t ro_address = this->kip_header->GetRoAddress(); + const size_t ro_size = this->kip_header->GetRoSize(); + const uintptr_t rw_address = this->kip_header->GetRwAddress(); + const size_t rw_size = this->kip_header->GetRwSize(); + const uintptr_t bss_address = this->kip_header->GetBssAddress(); + const size_t bss_size = this->kip_header->GetBssSize(); + R_UNLESS(util::IsAligned(rx_address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(ro_address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(rw_address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(rx_address <= rx_address + util::AlignUp(rx_size, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(ro_address <= ro_address + util::AlignUp(ro_size, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(rw_address <= rw_address + util::AlignUp(rw_size, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(bss_address <= bss_address + util::AlignUp(bss_size, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(rx_address + util::AlignUp(rx_size, PageSize) <= ro_address, svc::ResultInvalidAddress()); + R_UNLESS(ro_address + util::AlignUp(ro_size, PageSize) <= rw_address, svc::ResultInvalidAddress()); + R_UNLESS(rw_address + rw_size <= bss_address, svc::ResultInvalidAddress()); + + /* Validate the address space. */ + if (this->Is64BitAddressSpace()) { + R_UNLESS(this->Is64Bit(), svc::ResultInvalidCombination()); + } + + using ASType = KAddressSpaceInfo::Type; + + const uintptr_t start_address = rx_address; + const uintptr_t end_address = bss_size > 0 ? bss_address + bss_size : rw_address + rw_size; + const size_t as_width = this->Is64BitAddressSpace() ? 39 : 32; + const ASType as_type = this->Is64BitAddressSpace() ? KAddressSpaceInfo::Type_Large64Bit : KAddressSpaceInfo::Type_32Bit; + const uintptr_t map_start = KAddressSpaceInfo::GetAddressSpaceStart(as_width, as_type); + const size_t map_size = KAddressSpaceInfo::GetAddressSpaceSize(as_width, as_type); + const uintptr_t map_end = map_start + map_size; + MESOSPHERE_ABORT_UNLESS(start_address == 0); + + /* Set fields in parameter. 
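   — an aside on the overflow-safe bounds checks used above. */

/*
 * Aside: the segment validation above writes its bounds checks as
 * "addr <= addr + AlignUp(size, PageSize)" to reject unsigned wraparound.
 * Minimal demonstration:
 */
#include <cstddef>
#include <cstdint>
#include <limits>

constexpr bool ExampleRangeWraps(std::uintptr_t address, std::size_t size) {
    return address + size < address; /* true only if the sum wrapped past zero */
}

static_assert(!ExampleRangeWraps(0x1000, 0x1000));
static_assert(ExampleRangeWraps(std::numeric_limits<std::uintptr_t>::max() - 0x10, 0x20));

/* Set fields in parameter.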
*/ + out->code_address = map_start + start_address; + out->code_num_pages = util::AlignUp(end_address - start_address, PageSize) / PageSize; + out->program_id = this->kip_header->GetProgramId(); + out->version = this->kip_header->GetVersion(); + out->flags = 0; + MESOSPHERE_ABORT_UNLESS((out->code_address / PageSize) + out->code_num_pages <= (map_end / PageSize)); + + /* Copy name field. */ + this->kip_header->GetName(out->name, sizeof(out->name)); + + /* Apply ASLR, if needed. */ + if (enable_aslr) { + const size_t choices = (map_end / KernelAslrAlignment) - (util::AlignUp(out->code_address + out->code_num_pages * PageSize, KernelAslrAlignment) / KernelAslrAlignment); + out->code_address += KSystemControl::GenerateRandomRange(0, choices) * KernelAslrAlignment; + out->flags |= ams::svc::CreateProcessFlag_EnableAslr; + } + + /* Apply other flags. */ + if (this->Is64Bit()) { + out->flags |= ams::svc::CreateProcessFlag_Is64Bit; + } + if (this->Is64BitAddressSpace()) { + out->flags |= ams::svc::CreateProcessFlag_AddressSpace64Bit; + } else { + out->flags |= ams::svc::CreateProcessFlag_AddressSpace32Bit; + } + + return ResultSuccess(); + } + + Result KInitialProcessReader::Load(KProcessAddress address, const ams::svc::CreateProcessParameter ¶ms) const { + /* Clear memory at the address. */ + std::memset(GetVoidPointer(address), 0, params.code_num_pages * PageSize); + + /* Prepare to layout the data. */ + const KProcessAddress rx_address = address + this->kip_header->GetRxAddress(); + const KProcessAddress ro_address = address + this->kip_header->GetRoAddress(); + const KProcessAddress rw_address = address + this->kip_header->GetRwAddress(); + const u8 *rx_binary = reinterpret_cast(this->kip_header + 1); + const u8 *ro_binary = rx_binary + this->kip_header->GetRxCompressedSize(); + const u8 *rw_binary = ro_binary + this->kip_header->GetRoCompressedSize(); + + /* Copy text. */ + if (util::AlignUp(this->kip_header->GetRxSize(), PageSize)) { + std::memcpy(GetVoidPointer(rx_address), rx_binary, this->kip_header->GetRxCompressedSize()); + if (this->kip_header->IsRxCompressed()) { + BlzUncompress(GetVoidPointer(rx_address + this->kip_header->GetRxCompressedSize())); + } + } + + /* Copy rodata. */ + if (util::AlignUp(this->kip_header->GetRoSize(), PageSize)) { + std::memcpy(GetVoidPointer(ro_address), ro_binary, this->kip_header->GetRoCompressedSize()); + if (this->kip_header->IsRoCompressed()) { + BlzUncompress(GetVoidPointer(ro_address + this->kip_header->GetRoCompressedSize())); + } + } + + /* Copy rwdata. */ + if (util::AlignUp(this->kip_header->GetRwSize(), PageSize)) { + std::memcpy(GetVoidPointer(rw_address), rw_binary, this->kip_header->GetRwCompressedSize()); + if (this->kip_header->IsRwCompressed()) { + BlzUncompress(GetVoidPointer(rw_address + this->kip_header->GetRwCompressedSize())); + } + } + + /* Flush caches. */ + /* NOTE: official kernel does an entire cache flush by set/way here, which is incorrect as other cores are online. */ + /* We will simply flush by virtual address, since that's what ARM says is correct to do. 
*/ + MESOSPHERE_R_ABORT_UNLESS(cpu::FlushDataCache(GetVoidPointer(address), params.code_num_pages * PageSize)); + cpu::InvalidateEntireInstructionCache(); + + return ResultSuccess(); + } + + Result KInitialProcessReader::SetMemoryPermissions(KProcessPageTable &page_table, const ams::svc::CreateProcessParameter ¶ms) const { + const size_t rx_size = this->kip_header->GetRxSize(); + const size_t ro_size = this->kip_header->GetRoSize(); + const size_t rw_size = this->kip_header->GetRwSize(); + const size_t bss_size = this->kip_header->GetBssSize(); + + /* Set R-X pages. */ + if (rx_size) { + const uintptr_t start = this->kip_header->GetRxAddress() + params.code_address; + R_TRY(page_table.SetProcessMemoryPermission(start, util::AlignUp(rx_size, PageSize), ams::svc::MemoryPermission_ReadExecute)); + } + + /* Set R-- pages. */ + if (ro_size) { + const uintptr_t start = this->kip_header->GetRoAddress() + params.code_address; + R_TRY(page_table.SetProcessMemoryPermission(start, util::AlignUp(ro_size, PageSize), ams::svc::MemoryPermission_Read)); + } + + /* Set RW- pages. */ + if (rw_size || bss_size) { + const uintptr_t start = (rw_size ? this->kip_header->GetRwAddress() : this->kip_header->GetBssAddress()) + params.code_address; + const uintptr_t end = (bss_size ? this->kip_header->GetBssAddress() + bss_size : this->kip_header->GetRwAddress() + rw_size) + params.code_address; + R_TRY(page_table.SetProcessMemoryPermission(start, util::AlignUp(end - start, PageSize), ams::svc::MemoryPermission_ReadWrite)); + } + + return ResultSuccess(); + } + +} diff --git a/libraries/libmesosphere/source/kern_k_interrupt_task_manager.cpp b/libraries/libmesosphere/source/kern_k_interrupt_task_manager.cpp new file mode 100644 index 000000000..83722e1b4 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_interrupt_task_manager.cpp @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + void KInterruptTaskManager::TaskQueue::Enqueue(KInterruptTask *task) { + MESOSPHERE_ASSERT(task->GetNextTask() == nullptr); + MESOSPHERE_ASSERT(task != this->head); + MESOSPHERE_ASSERT(task != this->tail); + + /* Insert the task into the queue. */ + if (this->tail != nullptr) { + this->tail->SetNextTask(task); + } else { + this->head = task; + } + + this->tail = task; + } + + void KInterruptTaskManager::TaskQueue::Dequeue() { + MESOSPHERE_ASSERT(this->head != nullptr); + MESOSPHERE_ASSERT(this->tail != nullptr); + + /* Pop the task from the front of the queue. */ + if (this->head == this->tail) { + this->head = nullptr; + this->tail = nullptr; + } else { + this->head = this->head->GetNextTask(); + } + } + + void KInterruptTaskManager::ThreadFunction(uintptr_t arg) { + reinterpret_cast(arg)->ThreadFunctionImpl(); + } + + void KInterruptTaskManager::ThreadFunctionImpl() { + MESOSPHERE_ASSERT_THIS(); + + while (true) { + /* Get a task. 
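   — under interrupt-disable, below; an aside on the queue first. */

/*
 * Aside: TaskQueue above is an intrusive singly-linked list with head/tail
 * pointers — each task carries its own link, so Enqueue/Dequeue are O(1) and
 * allocation-free. Minimal standalone version:
 */
struct ExampleNode {
    ExampleNode *next = nullptr;
};

struct ExampleQueue {
    ExampleNode *head = nullptr;
    ExampleNode *tail = nullptr;

    void Enqueue(ExampleNode *n) {
        if (tail != nullptr) { tail->next = n; } else { head = n; }
        tail = n;
    }

    ExampleNode *Dequeue() {
        ExampleNode *n = head;
        if (head == tail) { head = tail = nullptr; } else { head = head->next; }
        return n;
    }
};

/* Get a task.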
*/ + KInterruptTask *task = nullptr; + { + KScopedInterruptDisable di; + + task = this->task_queue.GetHead(); + if (task == nullptr) { + this->thread->SetState(KThread::ThreadState_Waiting); + continue; + } + + this->task_queue.Dequeue(); + } + + /* Do the task. */ + task->DoTask(); + } + } + + void KInterruptTaskManager::Initialize() { + /* Reserve a thread from the system limit. */ + MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_ThreadCountMax, 1)); + + /* Create and initialize the thread. */ + this->thread = KThread::Create(); + MESOSPHERE_ABORT_UNLESS(this->thread != nullptr); + MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeHighPriorityThread(this->thread, ThreadFunction, reinterpret_cast(this))); + KThread::Register(this->thread); + + /* Run the thread. */ + this->thread->Run(); + } + + void KInterruptTaskManager::EnqueueTask(KInterruptTask *task) { + MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled()); + + /* Enqueue the task and signal the scheduler. */ + this->task_queue.Enqueue(task); + Kernel::GetScheduler().SetInterruptTaskThreadRunnable(); + } + +} diff --git a/libraries/libmesosphere/source/kern_k_light_lock.cpp b/libraries/libmesosphere/source/kern_k_light_lock.cpp new file mode 100644 index 000000000..9f91d67bf --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_light_lock.cpp @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) { + KThread *cur_thread = reinterpret_cast(_cur_thread); + + /* Pend the current thread waiting on the owner thread. */ + { + KScopedSchedulerLock sl; + + /* Ensure we actually have locking to do. */ + if (AMS_UNLIKELY(this->tag.load(std::memory_order_relaxed) != _owner)) { + return; + } + + /* Add the current thread as a waiter on the owner. */ + KThread *owner_thread = reinterpret_cast(_owner & ~1ul); + cur_thread->SetAddressKey(reinterpret_cast(std::addressof(this->tag))); + owner_thread->AddWaiter(cur_thread); + + /* Set thread states. */ + if (AMS_LIKELY(cur_thread->GetState() == KThread::ThreadState_Runnable)) { + cur_thread->SetState(KThread::ThreadState_Waiting); + } + if (owner_thread->IsSuspended()) { + owner_thread->ContinueIfHasKernelWaiters(); + } + } + + /* We're no longer waiting on the lock owner. */ + { + KScopedSchedulerLock sl; + KThread *owner_thread = cur_thread->GetLockOwner(); + if (AMS_UNLIKELY(owner_thread)) { + owner_thread->RemoveWaiter(cur_thread); + } + } + } + + void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) { + KThread *owner_thread = reinterpret_cast(_cur_thread); + + /* Unlock. */ + { + KScopedSchedulerLock sl; + + /* Get the next owner. */ + s32 num_waiters = 0; + KThread *next_owner = owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), reinterpret_cast(std::addressof(this->tag))); + + /* Pass the lock to the next owner. 
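   — by rewriting the tag, below; an aside on the encoding first. */

/*
 * Aside: the lock's tag packs the owner KThread pointer with bit 0 meaning
 * "further waiters exist" (thread objects are sufficiently aligned that the
 * bit is free) — see "_owner & ~1ul" above and "next_tag |= 0x1" below. Sketch:
 */
#include <cstdint>

constexpr std::uintptr_t ExampleMakeTag(std::uintptr_t owner, bool more_waiters) {
    return owner | (more_waiters ? 1u : 0u);
}
constexpr std::uintptr_t ExampleGetOwner(std::uintptr_t tag) { return tag & ~static_cast<std::uintptr_t>(1); }
constexpr bool ExampleHasMoreWaiters(std::uintptr_t tag)     { return (tag & 1) != 0; }

static_assert(ExampleGetOwner(ExampleMakeTag(0x1000, true)) == 0x1000);
static_assert(ExampleHasMoreWaiters(ExampleMakeTag(0x1000, true)));

/* Pass the lock to the next owner.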
*/ + uintptr_t next_tag = 0; + if (next_owner) { + next_tag = reinterpret_cast(next_owner); + if (num_waiters > 1) { + next_tag |= 0x1; + } + + if (AMS_LIKELY(next_owner->GetState() == KThread::ThreadState_Waiting)) { + next_owner->SetState(KThread::ThreadState_Runnable); + } + } + + /* We may have unsuspended in the process of acquiring the lock, so we'll re-suspend now if so. */ + if (owner_thread->IsSuspended()) { + owner_thread->TrySuspend(); + } + + /* Write the new tag value. */ + this->tag.store(next_tag); + } + } + +} diff --git a/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp b/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp new file mode 100644 index 000000000..016247ba9 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp @@ -0,0 +1,291 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + namespace { + + constexpr std::tuple MemoryStateNames[] = { + {KMemoryState_Free , "----- Free -----"}, + {KMemoryState_Io , "Io "}, + {KMemoryState_Static , "Static "}, + {KMemoryState_Code , "Code "}, + {KMemoryState_CodeData , "CodeData "}, + {KMemoryState_Normal , "Normal "}, + {KMemoryState_Shared , "Shared "}, + {KMemoryState_AliasCode , "AliasCode "}, + {KMemoryState_AliasCodeData , "AliasCodeData "}, + {KMemoryState_Ipc , "Ipc "}, + {KMemoryState_Stack , "Stack "}, + {KMemoryState_ThreadLocal , "ThreadLocal "}, + {KMemoryState_Transfered , "Transfered "}, + {KMemoryState_SharedTransfered , "SharedTransfered"}, + {KMemoryState_SharedCode , "SharedCode "}, + {KMemoryState_Inaccessible , "Inaccessible "}, + {KMemoryState_NonSecureIpc , "NonSecureIpc "}, + {KMemoryState_NonDeviceIpc , "NonDeviceIpc "}, + {KMemoryState_Kernel , "Kernel "}, + {KMemoryState_GeneratedCode , "GeneratedCode "}, + {KMemoryState_CodeOut , "CodeOut "}, + }; + + constexpr const char *GetMemoryStateName(KMemoryState state) { + for (size_t i = 0; i < util::size(MemoryStateNames); i++) { + if (std::get<0>(MemoryStateNames[i]) == state) { + return std::get<1>(MemoryStateNames[i]); + } + } + return "Unknown "; + } + + constexpr const char *GetMemoryPermissionString(const KMemoryInfo &info) { + if (info.state == KMemoryState_Free) { + return " "; + } else { + switch (info.perm) { + case KMemoryPermission_UserReadExecute: + return "r-x"; + case KMemoryPermission_UserRead: + return "r--"; + case KMemoryPermission_UserReadWrite: + return "rw-"; + default: + return "---"; + } + } + } + + void DumpMemoryInfo(const KMemoryInfo &info) { + const char *state = GetMemoryStateName(info.state); + const char *perm = GetMemoryPermissionString(info); + const void *start = reinterpret_cast(info.GetAddress()); + const void *end = reinterpret_cast(info.GetLastAddress()); + const size_t kb = info.GetSize() / 1_KB; + + const char l = (info.attribute & KMemoryAttribute_Locked) ? 'L' : '-'; + const char i = (info.attribute & KMemoryAttribute_IpcLocked) ? 
'I' : '-'; + const char d = (info.attribute & KMemoryAttribute_DeviceShared) ? 'D' : '-'; + const char u = (info.attribute & KMemoryAttribute_Uncached) ? 'U' : '-'; + + MESOSPHERE_LOG("%p - %p (%9zu KB) %s %s %c%c%c%c [%d, %d]\n", start, end, kb, perm, state, l, i, d, u, info.ipc_lock_count, info.device_use_count); + } + + } + + Result KMemoryBlockManager::Initialize(KProcessAddress st, KProcessAddress nd, KMemoryBlockSlabManager *slab_manager) { + /* Allocate a block to encapsulate the address space, insert it into the tree. */ + KMemoryBlock *start_block = slab_manager->Allocate(); + R_UNLESS(start_block != nullptr, svc::ResultOutOfResource()); + + /* Set our start and end. */ + this->start_address = st; + this->end_address = nd; + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(this->start_address), PageSize)); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(this->end_address), PageSize)); + + /* Initialize and insert the block. */ + start_block->Initialize(this->start_address, (this->end_address - this->start_address) / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None); + this->memory_block_tree.insert(*start_block); + + return ResultSuccess(); + } + + void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager *slab_manager) { + /* Erase every block until we have none left. */ + auto it = this->memory_block_tree.begin(); + while (it != this->memory_block_tree.end()) { + KMemoryBlock *block = std::addressof(*it); + it = this->memory_block_tree.erase(it); + slab_manager->Free(block); + } + + MESOSPHERE_ASSERT(this->memory_block_tree.empty()); + } + + KProcessAddress KMemoryBlockManager::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const { + if (num_pages > 0) { + const KProcessAddress region_end = region_start + region_num_pages * PageSize; + const KProcessAddress region_last = region_end - 1; + for (const_iterator it = this->FindIterator(region_start); it != this->memory_block_tree.cend(); it++) { + const KMemoryInfo info = it->GetMemoryInfo(); + if (region_last < info.GetAddress()) { + break; + } + if (info.state != KMemoryState_Free) { + continue; + } + + KProcessAddress area = (info.GetAddress() <= GetInteger(region_start)) ? region_start : info.GetAddress(); + area += guard_pages * PageSize; + + const KProcessAddress offset_area = util::AlignDown(GetInteger(area), alignment) + offset; + area = (area <= offset_area) ? offset_area : offset_area + alignment; + + const KProcessAddress area_end = area + num_pages * PageSize + guard_pages * PageSize; + const KProcessAddress area_last = area_end - 1; + + if (info.GetAddress() <= GetInteger(area) && area < area_last && area_last <= region_last && GetInteger(area_last) <= info.GetLastAddress()) { + return area; + } + } + } + + return Null; + } + + void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr) { + /* Ensure for auditing that we never end up with an invalid tree. 
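   — via the auditor below; an aside on FindFreeArea's alignment math first. */

/*
 * Aside: FindFreeArea above computes the first aligned-plus-offset candidate at
 * or above a start address: align down, add the offset, and bump by one
 * alignment unit if that landed below. Standalone form (power-of-two alignment
 * assumed):
 */
#include <cstdint>

constexpr std::uintptr_t ExampleAlignDown(std::uintptr_t v, std::uintptr_t a) { return v & ~(a - 1); }

constexpr std::uintptr_t ExampleFirstCandidate(std::uintptr_t area, std::uintptr_t alignment, std::uintptr_t offset) {
    const std::uintptr_t candidate = ExampleAlignDown(area, alignment) + offset;
    return (area <= candidate) ? candidate : candidate + alignment;
}

static_assert(ExampleFirstCandidate(0x1200, 0x1000, 0x0)   == 0x2000);
static_assert(ExampleFirstCandidate(0x1000, 0x1000, 0x200) == 0x1200);

/* Ensure for auditing that we never end up with an invalid tree.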
+
+    void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr) {
+        /* Ensure for auditing that we never end up with an invalid tree. */
+        KScopedMemoryBlockManagerAuditor auditor(this);
+        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));
+        MESOSPHERE_ASSERT((attr & (KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared)) == 0);
+
+        KProcessAddress cur_address = address;
+        size_t remaining_pages = num_pages;
+        iterator it = this->FindIterator(address);
+
+        while (remaining_pages > 0) {
+            const size_t remaining_size = remaining_pages * PageSize;
+            KMemoryInfo cur_info = it->GetMemoryInfo();
+            if (it->HasProperties(state, perm, attr)) {
+                /* If we already have the right properties, just advance. */
+                if (cur_address + remaining_size < cur_info.GetEndAddress()) {
+                    remaining_pages = 0;
+                    cur_address += remaining_size;
+                } else {
+                    remaining_pages = (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
+                    cur_address = cur_info.GetEndAddress();
+                }
+            } else {
+                /* If we need to, create a new block before and insert it. */
+                if (cur_info.GetAddress() != GetInteger(cur_address)) {
+                    KMemoryBlock *new_block = allocator->Allocate();
+
+                    it->Split(new_block, cur_address);
+                    it = this->memory_block_tree.insert(*new_block);
+                    it++;
+
+                    cur_info    = it->GetMemoryInfo();
+                    cur_address = cur_info.GetAddress();
+                }
+
+                /* If we need to, create a new block after and insert it. */
+                if (cur_info.GetSize() > remaining_size) {
+                    KMemoryBlock *new_block = allocator->Allocate();
+
+                    it->Split(new_block, cur_address + remaining_size);
+                    it = this->memory_block_tree.insert(*new_block);
+
+                    cur_info = it->GetMemoryInfo();
+                }
+
+                /* Update block state. */
+                it->Update(state, perm, attr);
+                cur_address     += cur_info.GetSize();
+                remaining_pages -= cur_info.GetNumPages();
+            }
+        }
+
+        /* Find the iterator now that we've updated. */
+        it = this->FindIterator(address);
+        if (address != this->start_address) {
+            it--;
+        }
+
+        /* Coalesce blocks that we can. */
+        while (true) {
+            iterator prev = it++;
+            if (it == this->memory_block_tree.end()) {
+                break;
+            }
+
+            if (prev->HasSameProperties(*it)) {
+                KMemoryBlock *block = std::addressof(*it);
+                const size_t pages = it->GetNumPages();
+                this->memory_block_tree.erase(it);
+                allocator->Free(block);
+                prev->Add(pages);
+                it = prev;
+            }
+
+            if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) {
+                break;
+            }
+        }
+    }
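Update's split-then-coalesce sequence is easier to follow on a toy structure. The sketch below models the block tree with a std::map keyed by base address (hypothetical types, assumed semantics; the real tree is intrusive and slab-allocated):

    #include <cstdint>
    #include <cstdio>
    #include <map>

    // Toy model of the block tree: base address -> (num_pages, state).
    struct Block { size_t num_pages; int state; };
    using Tree = std::map<uintptr_t, Block>;
    constexpr size_t PageSize = 0x1000;

    // Split the block containing `addr` so a block boundary falls exactly at
    // `addr`, mirroring KMemoryBlock::Split plus tree insertion. Assumes `addr`
    // lies inside an existing block.
    void SplitAt(Tree &tree, uintptr_t addr) {
        auto it = std::prev(tree.upper_bound(addr));
        const uintptr_t base = it->first;
        if (base == addr) { return; }
        const size_t left_pages = (addr - base) / PageSize;
        Block right{it->second.num_pages - left_pages, it->second.state};
        it->second.num_pages = left_pages;
        tree.emplace(addr, right);
    }

    // Merge adjacent blocks with identical properties, mirroring the coalesce loop.
    void Coalesce(Tree &tree) {
        if (tree.size() < 2) { return; }
        for (auto it = tree.begin(); std::next(it) != tree.end();) {
            auto next = std::next(it);
            if (it->first + it->second.num_pages * PageSize == next->first &&
                it->second.state == next->second.state) {
                it->second.num_pages += next->second.num_pages;  // absorb the neighbor
                tree.erase(next);                                // and keep trying from here
            } else {
                it = next;
            }
        }
    }

    int main() {
        Tree tree{{0x0, {16, 0}}};   // one free block of 16 pages
        SplitAt(tree, 0x4000);       // carve boundaries at page 4...
        SplitAt(tree, 0x8000);       // ...and page 8
        tree.at(0x4000).state = 1;   // "Update" pages 4..8 to a new state
        Coalesce(tree);              // nothing merges: properties differ
        tree.at(0x4000).state = 0;   // revert the middle window
        Coalesce(tree);              // now everything folds back into one block
        for (const auto &[base, b] : tree) {
            std::printf("%#lx: %zu pages, state %d\n",
                        static_cast<unsigned long>(base), b.num_pages, b.state);
        }
    }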
+
+    /* Debug. */
+    bool KMemoryBlockManager::CheckState() const {
+        /* If we fail, we should dump blocks. */
+        auto dump_guard = SCOPE_GUARD { this->DumpBlocks(); };
+
+        /* Loop over every block, ensuring that we are sorted and coalesced. */
+        auto it   = this->memory_block_tree.cbegin();
+        auto prev = it++;
+        while (it != this->memory_block_tree.cend()) {
+            const KMemoryInfo prev_info = prev->GetMemoryInfo();
+            const KMemoryInfo cur_info  = it->GetMemoryInfo();
+
+            /* Sequential blocks with same properties should be coalesced. */
+            if (prev->HasSameProperties(*it)) {
+                return false;
+            }
+
+            /* Sequential blocks should be sequential. */
+            if (prev_info.GetEndAddress() != cur_info.GetAddress()) {
+                return false;
+            }
+
+            /* If the block is ipc locked, it must have a count. */
+            if ((cur_info.attribute & KMemoryAttribute_IpcLocked) != 0 && cur_info.ipc_lock_count == 0) {
+                return false;
+            }
+
+            /* If the block is device shared, it must have a count. */
+            if ((cur_info.attribute & KMemoryAttribute_DeviceShared) != 0 && cur_info.device_use_count == 0) {
+                return false;
+            }
+
+            /* Advance the iterator. */
+            prev = it++;
+        }
+
+        /* Our loop will miss checking the last block, potentially, so check it. */
+        if (prev != this->memory_block_tree.cend()) {
+            const KMemoryInfo prev_info = prev->GetMemoryInfo();
+            /* If the block is ipc locked, it must have a count. */
+            if ((prev_info.attribute & KMemoryAttribute_IpcLocked) != 0 && prev_info.ipc_lock_count == 0) {
+                return false;
+            }
+
+            /* If the block is device shared, it must have a count. */
+            if ((prev_info.attribute & KMemoryAttribute_DeviceShared) != 0 && prev_info.device_use_count == 0) {
+                return false;
+            }
+        }
+
+        /* We're valid, so no need to print. */
+        dump_guard.Cancel();
+        return true;
+    }
+
+    void KMemoryBlockManager::DumpBlocks() const {
+        /* Dump each block. */
+        for (const auto &block : this->memory_block_tree) {
+            DumpMemoryInfo(block.GetMemoryInfo());
+        }
+    }
+}
diff --git a/libraries/libmesosphere/source/kern_k_memory_layout.board.nintendo_nx.cpp b/libraries/libmesosphere/source/kern_k_memory_layout.board.nintendo_nx.cpp
new file mode 100644
index 000000000..d36c45cbc
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_k_memory_layout.board.nintendo_nx.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    namespace {
+
+        constexpr uintptr_t DramPhysicalAddress = 0x80000000;
+        constexpr size_t ReservedEarlyDramSize  = 0x60000;
+
+        ALWAYS_INLINE bool SetupUartPhysicalMemoryRegion() {
+            #if   defined(MESOSPHERE_DEBUG_LOG_USE_UART_A)
+                return KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x70006000, 0x40, KMemoryRegionType_Uart | KMemoryRegionAttr_ShouldKernelMap);
+            #elif defined(MESOSPHERE_DEBUG_LOG_USE_UART_B)
+                return KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x70006040, 0x40, KMemoryRegionType_Uart | KMemoryRegionAttr_ShouldKernelMap);
+            #elif defined(MESOSPHERE_DEBUG_LOG_USE_UART_C)
+                return KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x70006200, 0x100, KMemoryRegionType_Uart | KMemoryRegionAttr_ShouldKernelMap);
+            #elif defined(MESOSPHERE_DEBUG_LOG_USE_UART_D)
+                return KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x70006300, 0x100, KMemoryRegionType_Uart | KMemoryRegionAttr_ShouldKernelMap);
+            #else
+                #error "Unknown Debug UART device!"
+            #endif
+        }
+
+    }
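SetupUartPhysicalMemoryRegion selects the UART MMIO window at compile time via preprocessor branches, and the device regions registered in the init function that follows are one explicit Insert call per range. The same data could be expressed as a table; a sketch of that alternative design (illustrative struct and names only, addresses copied from the patch):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical mirror of a physical MMIO region entry.
    struct MmioRegion {
        uintptr_t   address;
        size_t      size;
        const char *what;   // stands in for the type/attribute flags
    };

    // Table-driven equivalent of the repeated Insert calls below.
    constexpr MmioRegion Regions[] = {
        {0x70019000, 0x1000, "MemoryController"},
        {0x7001C000, 0x1000, "MemoryController0"},
        {0x7001D000, 0x1000, "MemoryController1"},
        {0x50041000, 0x1000, "InterruptDistributor"},
        {0x50042000, 0x1000, "InterruptCpuInterface"},
    };

    int main() {
        for (const auto &r : Regions) {
            // In the kernel this would call KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(...).
            std::printf("insert %#010lx +%#lx (%s)\n",
                        static_cast<unsigned long>(r.address),
                        static_cast<unsigned long>(r.size), r.what);
        }
    }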
+
+    namespace init {
+
+        void SetupDevicePhysicalMemoryRegions() {
+            /* TODO: Give these constexpr defines somewhere? */
+            MESOSPHERE_INIT_ABORT_UNLESS(SetupUartPhysicalMemoryRegion());
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x70019000, 0x1000, KMemoryRegionType_MemoryController | KMemoryRegionAttr_NoUserMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x7001C000, 0x1000, KMemoryRegionType_MemoryController0 | KMemoryRegionAttr_NoUserMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x7001D000, 0x1000, KMemoryRegionType_MemoryController1 | KMemoryRegionAttr_NoUserMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x7000E000, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x7000E400, 0xC00, KMemoryRegionType_PowerManagementController | KMemoryRegionAttr_NoUserMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x50040000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x50041000, 0x1000, KMemoryRegionType_InterruptDistributor | KMemoryRegionAttr_ShouldKernelMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x50042000, 0x1000, KMemoryRegionType_InterruptCpuInterface | KMemoryRegionAttr_ShouldKernelMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x50043000, 0x1D000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x6000F000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x6001DC00, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
+        }
+
+        void SetupDramPhysicalMemoryRegions() {
+            const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
+            const KPhysicalAddress physical_memory_base_address = KSystemControl::Init::GetKernelPhysicalBaseAddress(DramPhysicalAddress);
+
+            /* Insert blocks into the tree. */
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(physical_memory_base_address), intended_memory_size, KMemoryRegionType_Dram));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(physical_memory_base_address), ReservedEarlyDramSize, KMemoryRegionType_DramReservedEarly));
+        }
+
+    }
+
+}
\ No newline at end of file
diff --git a/libraries/libmesosphere/source/kern_k_memory_layout.cpp b/libraries/libmesosphere/source/kern_k_memory_layout.cpp
new file mode 100644
index 000000000..240ec475d
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_k_memory_layout.cpp
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    bool KMemoryRegionTree::Insert(uintptr_t address, size_t size, u32 type_id, u32 new_attr, u32 old_attr) {
+        /* Locate the memory region that contains the address. */
+        auto it = this->FindContainingRegion(address);
+
+        /* We require that the old attr is correct. */
+        if (it->GetAttributes() != old_attr) {
+            return false;
+        }
+
+        /* We further require that the region can be split from the old region. */
+        const uintptr_t inserted_region_end  = address + size;
+        const uintptr_t inserted_region_last = inserted_region_end - 1;
+        if (it->GetLastAddress() < inserted_region_last) {
+            return false;
+        }
+
+        /* Further, we require that the type id is a valid transformation. */
+        if (!it->CanDerive(type_id)) {
+            return false;
+        }
+
+        /* Cache information from the region before we remove it. */
+        KMemoryRegion *cur_region = std::addressof(*it);
+        const uintptr_t old_address = it->GetAddress();
+        const size_t    old_size    = it->GetSize();
+        const uintptr_t old_end     = old_address + old_size;
+        const uintptr_t old_last    = old_end - 1;
+        const uintptr_t old_pair    = it->GetPairAddress();
+        const u32       old_type    = it->GetType();
+
+        /* Erase the existing region from the tree. */
+        this->erase(it);
+
+        /* If we need to insert a region before the region, do so. */
+        if (old_address != address) {
+            new (cur_region) KMemoryRegion(old_address, address - old_address, old_pair, old_attr, old_type);
+            this->insert(*cur_region);
+            cur_region = KMemoryLayout::GetMemoryRegionAllocator().Allocate();
+        }
+
+        /* Insert a new region. */
+        const uintptr_t new_pair = (old_pair != std::numeric_limits<uintptr_t>::max()) ? old_pair + (address - old_address) : old_pair;
+        new (cur_region) KMemoryRegion(address, size, new_pair, new_attr, type_id);
+        this->insert(*cur_region);
+
+        /* If we need to insert a region after the region, do so. */
+        if (old_last != inserted_region_last) {
+            const uintptr_t after_pair = (old_pair != std::numeric_limits<uintptr_t>::max()) ? old_pair + (inserted_region_end - old_address) : old_pair;
+            this->insert(*KMemoryLayout::GetMemoryRegionAllocator().Create(inserted_region_end, old_end - inserted_region_end, after_pair, old_attr, old_type));
+        }
+
+        return true;
+    }
+
+    KVirtualAddress KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id) {
+        /* We want to find the total extents of the type id. */
+        const auto extents = this->GetDerivedRegionExtents(type_id);
+
+        /* Ensure that our alignment is correct. */
+        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(extents.GetAddress(), alignment));
+
+        const uintptr_t first_address = extents.GetAddress();
+        const uintptr_t last_address  = extents.GetLastAddress();
+
+        while (true) {
+            const uintptr_t candidate = util::AlignDown(KSystemControl::Init::GenerateRandomRange(first_address, last_address), alignment);
+
+            /* Ensure that the candidate doesn't overflow with the size. */
+            if (!(candidate < candidate + size)) {
+                continue;
+            }
+
+            const uintptr_t candidate_last = candidate + size - 1;
+
+            /* Ensure that the candidate fits within the region. */
+            if (candidate_last > last_address) {
+                continue;
+            }
+
+            /* Locate the candidate region, and ensure it fits. */
+            const KMemoryRegion *candidate_region = std::addressof(*this->FindContainingRegion(candidate));
+            if (candidate_last > candidate_region->GetLastAddress()) {
+                continue;
+            }
+
+            /* Ensure that the region has the correct type id. */
+            if (candidate_region->GetType() != type_id)
+                continue;
+
+            return candidate;
+        }
+    }
+
+    void KMemoryLayout::InitializeLinearMemoryRegionTrees(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start) {
+        /* Set static differences. */
+        s_linear_phys_to_virt_diff = GetInteger(linear_virtual_start) - GetInteger(aligned_linear_phys_start);
+        s_linear_virt_to_phys_diff = GetInteger(aligned_linear_phys_start) - GetInteger(linear_virtual_start);
+
+        /* Initialize linear trees. */
+        for (auto &region : GetPhysicalMemoryRegionTree()) {
+            if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
+                continue;
+            }
+            GetPhysicalLinearMemoryRegionTree().insert(*GetMemoryRegionAllocator().Create(region.GetAddress(), region.GetSize(), region.GetAttributes(), region.GetType()));
+        }
+
+        for (auto &region : GetVirtualMemoryRegionTree()) {
+            if (!region.IsDerivedFrom(KMemoryRegionType_Dram)) {
+                continue;
+            }
+            GetVirtualLinearMemoryRegionTree().insert(*GetMemoryRegionAllocator().Create(region.GetAddress(), region.GetSize(), region.GetAttributes(), region.GetType()));
+        }
+    }
+
+    namespace init {
+
+        namespace {
+
+            constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
+
+            constexpr size_t CarveoutAlignment = 0x20000;
+            constexpr size_t CarveoutSizeMax   = 512_MB - CarveoutAlignment;
+
+            constexpr size_t CoreLocalRegionAlign          = PageSize;
+            constexpr size_t CoreLocalRegionSize           = PageSize * (1 + cpu::NumCores);
+            constexpr size_t CoreLocalRegionSizeWithGuards = CoreLocalRegionSize + 2 * PageSize;
+            constexpr size_t CoreLocalRegionBoundsAlign    = 1_GB;
+            /* TODO: static_assert(CoreLocalRegionSize == sizeof(KCoreLocalRegion)); */
+
+            KVirtualAddress GetCoreLocalRegionVirtualAddress() {
+                while (true) {
+                    const uintptr_t candidate_start = GetInteger(KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegion(CoreLocalRegionSizeWithGuards, CoreLocalRegionAlign, KMemoryRegionType_None));
+                    const uintptr_t candidate_end   = candidate_start + CoreLocalRegionSizeWithGuards;
+                    const uintptr_t candidate_last  = candidate_end - 1;
+
+                    const KMemoryRegion *containing_region = std::addressof(*KMemoryLayout::GetVirtualMemoryRegionTree().FindContainingRegion(candidate_start));
+
+                    if (candidate_last > containing_region->GetLastAddress()) {
+                        continue;
+                    }
+
+                    if (containing_region->GetType() != KMemoryRegionType_None) {
+                        continue;
+                    }
+
+                    if (util::AlignDown(candidate_start, CoreLocalRegionBoundsAlign) != util::AlignDown(candidate_last, CoreLocalRegionBoundsAlign)) {
+                        continue;
+                    }
+
+                    if (containing_region->GetAddress() > util::AlignDown(candidate_start, CoreLocalRegionBoundsAlign)) {
+                        continue;
+                    }
+
+                    if (util::AlignUp(candidate_last, CoreLocalRegionBoundsAlign) - 1 > containing_region->GetLastAddress()) {
+                        continue;
+                    }
+
+                    return candidate_start + PageSize;
+                }
+            }
+
+            void InsertPoolPartitionRegionIntoBothTrees(size_t start, size_t size, KMemoryRegionType phys_type, KMemoryRegionType virt_type, u32 &cur_attr) {
+                const u32 attr = cur_attr++;
+                MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(start, size, phys_type, attr));
+                MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(KMemoryLayout::GetPhysicalMemoryRegionTree().FindFirstRegionByTypeAttr(phys_type, attr)->GetPairAddress(), size, virt_type, attr));
+            }
+
+        }
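GetRandomAlignedRegion and GetCoreLocalRegionVirtualAddress above both use rejection sampling: draw a random aligned candidate, discard it if any constraint fails, and retry. A minimal sketch of the pattern, with std::mt19937_64 standing in for KSystemControl's generator (hypothetical function name; not part of the patch):

    #include <cstdint>
    #include <cstdio>
    #include <random>

    constexpr uintptr_t AlignDown(uintptr_t v, size_t a) { return v & ~(a - 1); }

    // Draw aligned candidates in [first, last] until one fits entirely inside
    // the range; mirrors the retry-loop structure of GetRandomAlignedRegion.
    uintptr_t PickRandomAligned(std::mt19937_64 &rng, uintptr_t first, uintptr_t last,
                                size_t size, size_t alignment) {
        std::uniform_int_distribution<uintptr_t> dist(first, last);
        while (true) {
            const uintptr_t candidate = AlignDown(dist(rng), alignment);
            if (candidate + size < candidate) { continue; }   // overflow check, as in the patch
            if (candidate < first) { continue; }              // aligned below the range
            if (candidate + size - 1 > last) { continue; }    // tail exceeds the range
            return candidate;
        }
    }

    int main() {
        std::mt19937_64 rng(0);
        const uintptr_t addr = PickRandomAligned(rng, 0x80000000, 0xBFFFFFFF, 0x20000, 0x20000);
        std::printf("picked %#lx\n", static_cast<unsigned long>(addr));
    }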
+
+        void SetupCoreLocalRegionMemoryRegions(KInitialPageTable &page_table, KInitialPageAllocator &page_allocator) {
+            const KVirtualAddress core_local_virt_start = GetCoreLocalRegionVirtualAddress();
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(core_local_virt_start), CoreLocalRegionSize, KMemoryRegionType_CoreLocal));
+
+            /* Allocate a page for each core. */
+            KPhysicalAddress core_local_region_start_phys[cpu::NumCores] = {};
+            for (size_t i = 0; i < cpu::NumCores; i++) {
+                core_local_region_start_phys[i] = page_allocator.Allocate();
+            }
+
+            /* Allocate an l1 page table for each core. */
+            KPhysicalAddress core_l1_ttbr1_phys[cpu::NumCores] = {};
+            core_l1_ttbr1_phys[0] = util::AlignDown(cpu::GetTtbr1El1(), PageSize);
+            for (size_t i = 1; i < cpu::NumCores; i++) {
+                core_l1_ttbr1_phys[i] = page_allocator.Allocate();
+                std::memcpy(reinterpret_cast<void *>(GetInteger(core_l1_ttbr1_phys[i])), reinterpret_cast<void *>(GetInteger(core_l1_ttbr1_phys[0])), PageSize);
+            }
+
+            /* Use the l1 page table for each core to map the core local region for each core. */
+            for (size_t i = 0; i < cpu::NumCores; i++) {
+                KInitialPageTable temp_pt(core_l1_ttbr1_phys[i], KInitialPageTable::NoClear{});
+                temp_pt.Map(core_local_virt_start, PageSize, core_local_region_start_phys[i], KernelRwDataAttribute, page_allocator);
+                for (size_t j = 0; j < cpu::NumCores; j++) {
+                    temp_pt.Map(core_local_virt_start + (j + 1) * PageSize, PageSize, core_local_region_start_phys[j], KernelRwDataAttribute, page_allocator);
+                }
+
+                /* Setup the InitArguments. */
+                SetInitArguments(static_cast<s32>(i), core_local_region_start_phys[i], GetInteger(core_l1_ttbr1_phys[i]));
+            }
+
+            /* Ensure the InitArguments are flushed to cache. */
+            StoreInitArguments();
+        }
+
+        void SetupPoolPartitionMemoryRegions() {
+            /* Start by identifying the extents of the DRAM memory region. */
+            const auto dram_extents = KMemoryLayout::GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Dram);
+
+            /* Get Application and Applet pool sizes. */
+            const size_t application_pool_size       = KSystemControl::Init::GetApplicationPoolSize();
+            const size_t applet_pool_size            = KSystemControl::Init::GetAppletPoolSize();
+            const size_t unsafe_system_pool_min_size = KSystemControl::Init::GetMinimumNonSecureSystemPoolSize();
+
+            /* Find the start of the kernel DRAM region. */
+            const uintptr_t kernel_dram_start = KMemoryLayout::GetPhysicalMemoryRegionTree().FindFirstDerivedRegion(KMemoryRegionType_DramKernel)->GetAddress();
+            MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(kernel_dram_start, CarveoutAlignment));
+
+            /* Find the start of the pool partitions region. */
+            const uintptr_t pool_partitions_start = KMemoryLayout::GetPhysicalMemoryRegionTree().FindFirstRegionByTypeAttr(KMemoryRegionType_DramPoolPartition)->GetAddress();
+
+            /* Decide on starting addresses for our pools. */
+            const uintptr_t application_pool_start   = dram_extents.GetEndAddress() - application_pool_size;
+            const uintptr_t applet_pool_start        = application_pool_start - applet_pool_size;
+            const uintptr_t unsafe_system_pool_start = std::min(kernel_dram_start + CarveoutSizeMax, util::AlignDown(applet_pool_start - unsafe_system_pool_min_size, CarveoutAlignment));
+            const size_t    unsafe_system_pool_size  = applet_pool_start - unsafe_system_pool_start;
+
+            /* We want to arrange application pool depending on where the middle of dram is. */
+            const uintptr_t dram_midpoint = (dram_extents.GetAddress() + dram_extents.GetEndAddress()) / 2;
+            u32 cur_pool_attr = 0;
+            size_t total_overhead_size = 0;
+            if (dram_extents.GetEndAddress() <= dram_midpoint || dram_midpoint <= application_pool_start) {
+                InsertPoolPartitionRegionIntoBothTrees(application_pool_start, application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
+                total_overhead_size += KMemoryManager::CalculateMetadataOverheadSize(application_pool_size);
+            } else {
+                const size_t first_application_pool_size  = dram_midpoint - application_pool_start;
+                const size_t second_application_pool_size = application_pool_start + application_pool_size - dram_midpoint;
+                InsertPoolPartitionRegionIntoBothTrees(application_pool_start, first_application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
+                InsertPoolPartitionRegionIntoBothTrees(dram_midpoint, second_application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
+                total_overhead_size += KMemoryManager::CalculateMetadataOverheadSize(first_application_pool_size);
+                total_overhead_size += KMemoryManager::CalculateMetadataOverheadSize(second_application_pool_size);
+            }
+
+            /* Insert the applet pool. */
+            InsertPoolPartitionRegionIntoBothTrees(applet_pool_start, applet_pool_size, KMemoryRegionType_DramAppletPool, KMemoryRegionType_VirtualDramAppletPool, cur_pool_attr);
+            total_overhead_size += KMemoryManager::CalculateMetadataOverheadSize(applet_pool_size);
+
+            /* Insert the nonsecure system pool. */
+            InsertPoolPartitionRegionIntoBothTrees(unsafe_system_pool_start, unsafe_system_pool_size, KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool, cur_pool_attr);
+            total_overhead_size += KMemoryManager::CalculateMetadataOverheadSize(unsafe_system_pool_size);
+
+            /* Insert the metadata pool. */
+            total_overhead_size += KMemoryManager::CalculateMetadataOverheadSize((unsafe_system_pool_start - pool_partitions_start) - total_overhead_size);
+            const uintptr_t metadata_pool_start = unsafe_system_pool_start - total_overhead_size;
+            const size_t    metadata_pool_size  = total_overhead_size;
+            u32 metadata_pool_attr = 0;
+            InsertPoolPartitionRegionIntoBothTrees(metadata_pool_start, metadata_pool_size, KMemoryRegionType_DramMetadataPool, KMemoryRegionType_VirtualDramMetadataPool, metadata_pool_attr);
+
+            /* Insert the system pool. */
+            const uintptr_t system_pool_size = metadata_pool_start - pool_partitions_start;
+            InsertPoolPartitionRegionIntoBothTrees(pool_partitions_start, system_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
+
+        }
+
+    }
+
+}
diff --git a/libraries/libmesosphere/source/kern_k_memory_manager.cpp b/libraries/libmesosphere/source/kern_k_memory_manager.cpp
new file mode 100644
index 000000000..b633a6eb7
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_k_memory_manager.cpp
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    namespace {
+
+        constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
+            switch (type) {
+                case KMemoryRegionType_VirtualDramApplicationPool:     return KMemoryManager::Pool_Application;
+                case KMemoryRegionType_VirtualDramAppletPool:          return KMemoryManager::Pool_Applet;
+                case KMemoryRegionType_VirtualDramSystemPool:          return KMemoryManager::Pool_System;
+                case KMemoryRegionType_VirtualDramSystemNonSecurePool: return KMemoryManager::Pool_SystemNonSecure;
+                MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+            }
+        }
+
+    }
+
+    void KMemoryManager::Initialize(KVirtualAddress metadata_region, size_t metadata_region_size) {
+        /* Clear the metadata region to zero. */
+        const KVirtualAddress metadata_region_end = metadata_region + metadata_region_size;
+        std::memset(GetVoidPointer(metadata_region), 0, metadata_region_size);
+
+        /* Traverse the virtual memory layout tree, initializing each manager as appropriate. */
+        while (true) {
+            /* Locate the region that should initialize the current manager. */
+            const KMemoryRegion *region = nullptr;
+            for (const auto &it : KMemoryLayout::GetVirtualMemoryRegionTree()) {
+                /* We only care about regions that we need to create managers for. */
+                if (!it.IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool)) {
+                    continue;
+                }
+
+                /* We want to initialize the managers in order. */
+                if (it.GetAttributes() != this->num_managers) {
+                    continue;
+                }
+
+                region = std::addressof(it);
+                break;
+            }
+
+            /* If we didn't find a region, then we're done initializing managers. */
+            if (region == nullptr) {
+                break;
+            }
+
+            /* Ensure that the region is correct. */
+            MESOSPHERE_ASSERT(region->GetAddress() != Null<decltype(region->GetAddress())>);
+            MESOSPHERE_ASSERT(region->GetSize() > 0);
+            MESOSPHERE_ASSERT(region->GetEndAddress() >= region->GetAddress());
+            MESOSPHERE_ASSERT(region->IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool));
+            MESOSPHERE_ASSERT(region->GetAttributes() == this->num_managers);
+
+            /* Initialize a new manager for the region. */
+            const Pool pool = GetPoolFromMemoryRegionType(region->GetType());
+            Impl *manager = std::addressof(this->managers[this->num_managers++]);
+            MESOSPHERE_ABORT_UNLESS(this->num_managers <= util::size(this->managers));
+
+            const size_t cur_size = manager->Initialize(region, pool, metadata_region, metadata_region_end);
+            metadata_region += cur_size;
+            MESOSPHERE_ABORT_UNLESS(metadata_region <= metadata_region_end);
+
+            /* Insert the manager into the pool list. */
+            if (this->pool_managers_tail[pool] == nullptr) {
+                this->pool_managers_head[pool] = manager;
+            } else {
+                this->pool_managers_tail[pool]->SetNext(manager);
+                manager->SetPrev(this->pool_managers_tail[pool]);
+            }
+            this->pool_managers_tail[pool] = manager;
+        }
+    }
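Initialize threads each new Impl onto a per-pool doubly linked list through head/tail pointers and SetNext/SetPrev. A freestanding sketch of that tail-append (hypothetical Manager type; assumed semantics):

    #include <cstdio>

    // Hypothetical stand-in for KMemoryManager::Impl's intrusive links.
    struct Manager {
        Manager *prev = nullptr;
        Manager *next = nullptr;
        int id = 0;
    };

    // Mirrors the tail-append in KMemoryManager::Initialize: the first manager
    // for a pool becomes the head, later ones chain onto the current tail.
    void AppendToPool(Manager *&head, Manager *&tail, Manager *m) {
        if (tail == nullptr) {
            head = m;
        } else {
            tail->next = m;
            m->prev = tail;
        }
        tail = m;
    }

    int main() {
        Manager a{nullptr, nullptr, 0}, b{nullptr, nullptr, 1}, c{nullptr, nullptr, 2};
        Manager *head = nullptr, *tail = nullptr;
        AppendToPool(head, tail, &a);
        AppendToPool(head, tail, &b);
        AppendToPool(head, tail, &c);
        for (Manager *m = head; m != nullptr; m = m->next) {
            std::printf("manager %d\n", m->id);
        }
    }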
+
+    KVirtualAddress KMemoryManager::AllocateContinuous(size_t num_pages, size_t align_pages, u32 option) {
+        /* Early return if we're allocating no pages. */
+        if (num_pages == 0) {
+            return Null<KVirtualAddress>;
+        }
+
+        /* Lock the pool that we're allocating from. */
+        const auto [pool, dir] = DecodeOption(option);
+        KScopedLightLock lk(this->pool_locks[pool]);
+
+        /* Choose a heap based on our page size request. */
+        const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages);
+
+        /* Loop, trying to iterate from each block. */
+        Impl *chosen_manager = nullptr;
+        KVirtualAddress allocated_block = Null<KVirtualAddress>;
+        for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr; chosen_manager = this->GetNextManager(chosen_manager, dir)) {
+            allocated_block = chosen_manager->AllocateBlock(heap_index);
+            if (allocated_block != Null<KVirtualAddress>) {
+                break;
+            }
+        }
+
+        /* If we failed to allocate, quit now. */
+        if (allocated_block == Null<KVirtualAddress>) {
+            return Null<KVirtualAddress>;
+        }
+
+        /* If we allocated more than we need, free some. */
+        const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index);
+        if (allocated_pages > num_pages) {
+            chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
+        }
+
+        /* Maintain the optimized memory bitmap, if we should. */
+        if (this->has_optimized_process[pool]) {
+            chosen_manager->TrackAllocationForOptimizedProcess(allocated_block, num_pages);
+        }
+
+        return allocated_block;
+    }
+
+    Result KMemoryManager::Allocate(KPageGroup *out, size_t num_pages, u32 option) {
+        MESOSPHERE_ASSERT(out != nullptr);
+        MESOSPHERE_ASSERT(out->GetNumPages() == 0);
+
+        /* Early return if we're allocating no pages. */
+        if (num_pages == 0) {
+            return ResultSuccess();
+        }
+
+        /* Lock the pool that we're allocating from. */
+        const auto [pool, dir] = DecodeOption(option);
+        KScopedLightLock lk(this->pool_locks[pool]);
+
+        /* Choose a heap based on our page size request. */
+        const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
+        R_UNLESS(0 <= heap_index, svc::ResultOutOfMemory());
+
+        /* Ensure that we don't leave anything un-freed. */
+        auto group_guard = SCOPE_GUARD {
+            for (const auto &it : *out) {
+                auto &manager = this->GetManager(it.GetAddress());
+                const size_t num_pages = std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
+                manager.Free(it.GetAddress(), num_pages);
+            }
+            out->Finalize();
+        };
+
+        /* Keep allocating until we've allocated all our pages. */
+        for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) {
+            const size_t pages_per_alloc = KPageHeap::GetBlockNumPages(index);
+            for (Impl *cur_manager = this->GetFirstManager(pool, dir); cur_manager != nullptr; cur_manager = this->GetNextManager(cur_manager, dir)) {
+                while (num_pages >= pages_per_alloc) {
+                    /* Allocate a block. */
+                    KVirtualAddress allocated_block = cur_manager->AllocateBlock(index);
+                    if (allocated_block == Null<KVirtualAddress>) {
+                        break;
+                    }
+
+                    /* Safely add it to our group. */
+                    {
+                        auto block_guard = SCOPE_GUARD { cur_manager->Free(allocated_block, pages_per_alloc); };
+                        R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
+                        block_guard.Cancel();
+                    }
+
+                    /* Maintain the optimized memory bitmap, if we should. */
+                    if (this->has_optimized_process[pool]) {
+                        cur_manager->TrackAllocationForOptimizedProcess(allocated_block, pages_per_alloc);
+                    }
+
+                    num_pages -= pages_per_alloc;
+                }
+            }
+        }
+
+        /* Only succeed if we allocated as many pages as we wanted. */
+        MESOSPHERE_ASSERT(num_pages >= 0);
+        R_UNLESS(num_pages == 0, svc::ResultOutOfMemory());
+
+        /* We succeeded! */
+        group_guard.Cancel();
+        return ResultSuccess();
+    }
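Allocate's error handling hinges on SCOPE_GUARD blocks that roll back partial work unless Cancel() is called on the success path. A minimal RAII equivalent of that pattern (a sketch; not the kernel's actual SCOPE_GUARD implementation):

    #include <cstdio>
    #include <utility>

    // Minimal scope guard: runs `f` at scope exit unless Cancel() was called.
    template<typename F>
    class ScopeGuard {
        public:
            explicit ScopeGuard(F f) : f_(std::move(f)) {}
            ~ScopeGuard() { if (active_) { f_(); } }
            void Cancel() { active_ = false; }
        private:
            F f_;
            bool active_ = true;
    };

    bool AllocateBlock(int i) { return i < 2; }  // pretend the third allocation fails
    void FreeAll() { std::printf("rolling back partial allocation\n"); }

    int main() {
        ScopeGuard guard(FreeAll);   // class template argument deduction (C++17)
        for (int i = 0; i < 3; i++) {
            if (!AllocateBlock(i)) {
                return 1;            // early exit: the guard frees what was allocated
            }
        }
        guard.Cancel();              // success: keep the allocation
        return 0;
    }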
+
+    size_t KMemoryManager::Impl::Initialize(const KMemoryRegion *region, Pool p, KVirtualAddress metadata, KVirtualAddress metadata_end) {
+        /* Calculate metadata sizes. */
+        const size_t ref_count_size      = (region->GetSize() / PageSize) * sizeof(u16);
+        const size_t optimize_map_size   = (util::AlignUp((region->GetSize() / PageSize), BITSIZEOF(u64)) / BITSIZEOF(u64)) * sizeof(u64);
+        const size_t manager_size        = util::AlignUp(optimize_map_size + ref_count_size, PageSize);
+        const size_t page_heap_size      = KPageHeap::CalculateMetadataOverheadSize(region->GetSize());
+        const size_t total_metadata_size = manager_size + page_heap_size;
+        MESOSPHERE_ABORT_UNLESS(manager_size <= total_metadata_size);
+        MESOSPHERE_ABORT_UNLESS(metadata + total_metadata_size <= metadata_end);
+        MESOSPHERE_ABORT_UNLESS(util::IsAligned(total_metadata_size, PageSize));
+
+        /* Setup region. */
+        this->pool = p;
+        this->metadata_region = metadata;
+        this->page_reference_counts = GetPointer<u16>(metadata + optimize_map_size);
+        MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(this->metadata_region), PageSize));
+
+        /* Initialize the manager's KPageHeap. */
+        this->heap.Initialize(region->GetAddress(), region->GetSize(), metadata + manager_size, page_heap_size);
+
+        /* Free the memory to the heap. */
+        this->heap.Free(region->GetAddress(), region->GetSize() / PageSize);
+
+        /* Update the heap's used size. */
+        this->heap.UpdateUsedSize();
+
+        return total_metadata_size;
+    }
+
+    void KMemoryManager::Impl::TrackAllocationForOptimizedProcess(KVirtualAddress block, size_t num_pages) {
+        size_t offset = this->heap.GetPageOffset(block);
+        const size_t last = offset + num_pages - 1;
+        u64 *optimize_map = GetPointer<u64>(this->metadata_region);
+        while (offset <= last) {
+            optimize_map[offset / BITSIZEOF(u64)] &= ~(u64(1) << (offset % BITSIZEOF(u64)));
+            offset++;
+        }
+    }
+
+    size_t KMemoryManager::Impl::CalculateMetadataOverheadSize(size_t region_size) {
+        const size_t ref_count_size    = (region_size / PageSize) * sizeof(u16);
+        const size_t optimize_map_size = (util::AlignUp((region_size / PageSize), BITSIZEOF(u64)) / BITSIZEOF(u64)) * sizeof(u64);
+        const size_t manager_meta_size = util::AlignUp(optimize_map_size + ref_count_size, PageSize);
+        const size_t page_heap_size    = KPageHeap::CalculateMetadataOverheadSize(region_size);
+        return manager_meta_size + page_heap_size;
+    }
+
+}
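TrackAllocationForOptimizedProcess clears one bit per allocated page in the optimize map. The same word/bit arithmetic in isolation (hypothetical buffer size; a more optimized version could clear whole words at a time, the patch keeps it simple):

    #include <cstdint>
    #include <cstdio>

    constexpr size_t BitsPerWord = 64;

    // Clear bits [first, last] in a u64 bitmap, one bit per page, mirroring the
    // optimized-process map update above.
    void ClearBitRange(uint64_t *map, size_t first, size_t last) {
        for (size_t offset = first; offset <= last; offset++) {
            map[offset / BitsPerWord] &= ~(uint64_t(1) << (offset % BitsPerWord));
        }
    }

    int main() {
        uint64_t map[2] = {~0ull, ~0ull};   // 128 pages, all marked
        ClearBitRange(map, 60, 67);         // an allocation spanning a word boundary
        std::printf("%016llx %016llx\n",
                    static_cast<unsigned long long>(map[1]),
                    static_cast<unsigned long long>(map[0]));
    }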
diff --git a/libraries/libmesosphere/source/kern_k_page_group.cpp b/libraries/libmesosphere/source/kern_k_page_group.cpp
new file mode 100644
index 000000000..3e81eb593
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_k_page_group.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    void KPageGroup::Finalize() {
+        auto it = this->block_list.begin();
+        while (it != this->block_list.end()) {
+            KBlockInfo *info = std::addressof(*it);
+            it = this->block_list.erase(it);
+            this->manager->Free(info);
+        }
+    }
+
+    size_t KPageGroup::GetNumPages() const {
+        size_t num_pages = 0;
+
+        for (const auto &it : *this) {
+            num_pages += it.GetNumPages();
+        }
+
+        return num_pages;
+    }
+
+    Result KPageGroup::AddBlock(KVirtualAddress addr, size_t num_pages) {
+        /* Succeed immediately if we're adding no pages. */
+        R_UNLESS(num_pages != 0, ResultSuccess());
+
+        /* Check for overflow. */
+        MESOSPHERE_ASSERT(addr < addr + num_pages * PageSize);
+
+        /* Try to just append to the last block. */
+        if (!this->block_list.empty()) {
+            auto it = --(this->block_list.end());
+            R_UNLESS(!it->TryConcatenate(addr, num_pages), ResultSuccess());
+        }
+
+        /* Allocate a new block. */
+        KBlockInfo *new_block = this->manager->Allocate();
+        R_UNLESS(new_block != nullptr, svc::ResultOutOfResource());
+
+        /* Initialize the block. */
+        new_block->Initialize(addr, num_pages);
+        this->block_list.push_back(*new_block);
+
+        return ResultSuccess();
+    }
+
+    void KPageGroup::Open() const {
+        auto &mm = Kernel::GetMemoryManager();
+
+        for (const auto &it : *this) {
+            mm.Open(it.GetAddress(), it.GetNumPages());
+        }
+    }
+
+    void KPageGroup::Close() const {
+        auto &mm = Kernel::GetMemoryManager();
+
+        for (const auto &it : *this) {
+            mm.Close(it.GetAddress(), it.GetNumPages());
+        }
+    }
+
+    bool KPageGroup::IsEquivalentTo(const KPageGroup &rhs) const {
+        auto lit  = this->block_list.cbegin();
+        auto rit  = rhs.block_list.cbegin();
+        auto lend = this->block_list.cend();
+        auto rend = rhs.block_list.cend();
+
+        while (lit != lend && rit != rend) {
+            if (*lit != *rit) {
+                return false;
+            }
+
+            ++lit;
+            ++rit;
+        }
+
+        return lit == lend && rit == rend;
+    }
+
+}
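KPageGroup::AddBlock above first tries to extend the most recently added block (TryConcatenate) before spending a KBlockInfo from the slab. The same run-merging behavior on a plain vector (hypothetical Run type; assumed semantics):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    constexpr size_t PageSize = 0x1000;
    struct Run { uintptr_t addr; size_t num_pages; };

    // Append a range, merging with the previous run when virtually contiguous,
    // mirroring KPageGroup::AddBlock's TryConcatenate fast path.
    void AddBlock(std::vector<Run> &runs, uintptr_t addr, size_t num_pages) {
        if (num_pages == 0) { return; }
        if (!runs.empty()) {
            Run &last = runs.back();
            if (last.addr + last.num_pages * PageSize == addr) {
                last.num_pages += num_pages;    // concatenated; no new node needed
                return;
            }
        }
        runs.push_back({addr, num_pages});
    }

    int main() {
        std::vector<Run> runs;
        AddBlock(runs, 0x10000, 4);
        AddBlock(runs, 0x14000, 2);    // contiguous: merges into the first run
        AddBlock(runs, 0x20000, 1);    // gap: starts a new run
        for (const auto &r : runs) {
            std::printf("%#lx x%zu\n", static_cast<unsigned long>(r.addr), r.num_pages);
        }
    }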
diff --git a/libraries/libmesosphere/source/kern_k_page_heap.cpp b/libraries/libmesosphere/source/kern_k_page_heap.cpp
new file mode 100644
index 000000000..3db3c605b
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_k_page_heap.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    void KPageHeap::Initialize(KVirtualAddress address, size_t size, KVirtualAddress metadata_address, size_t metadata_size, const size_t *block_shifts, size_t num_block_shifts) {
+        /* Check our assumptions. */
+        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));
+        MESOSPHERE_ASSERT(util::IsAligned(size, PageSize));
+        MESOSPHERE_ASSERT(0 < num_block_shifts && num_block_shifts <= NumMemoryBlockPageShifts);
+        const KVirtualAddress metadata_end = metadata_address + metadata_size;
+
+        /* Set our members. */
+        this->heap_address = address;
+        this->heap_size    = size;
+        this->num_blocks   = num_block_shifts;
+
+        /* Setup bitmaps. */
+        u64 *cur_bitmap_storage = GetPointer<u64>(metadata_address);
+        for (size_t i = 0; i < num_block_shifts; i++) {
+            const size_t cur_block_shift  = block_shifts[i];
+            const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0;
+            cur_bitmap_storage = this->blocks[i].Initialize(this->heap_address, this->heap_size, cur_block_shift, next_block_shift, cur_bitmap_storage);
+        }
+
+        /* Ensure we didn't overextend our bounds. */
+        MESOSPHERE_ABORT_UNLESS(KVirtualAddress(cur_bitmap_storage) <= metadata_end);
+    }
+
+    size_t KPageHeap::GetNumFreePages() const {
+        size_t num_free = 0;
+
+        for (size_t i = 0; i < this->num_blocks; i++) {
+            num_free += this->blocks[i].GetNumFreePages();
+        }
+
+        return num_free;
+    }
+
+    KVirtualAddress KPageHeap::AllocateBlock(s32 index) {
+        const size_t needed_size = this->blocks[index].GetSize();
+
+        for (s32 i = index; i < static_cast<s32>(this->num_blocks); i++) {
+            if (const KVirtualAddress addr = this->blocks[i].PopBlock(); addr != Null<KVirtualAddress>) {
+                if (const size_t allocated_size = this->blocks[i].GetSize(); allocated_size > needed_size) {
+                    this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
+                }
+                return addr;
+            }
+        }
+
+        return Null<KVirtualAddress>;
+    }
+
+    void KPageHeap::FreeBlock(KVirtualAddress block, s32 index) {
+        do {
+            block = this->blocks[index++].PushBlock(block);
+        } while (block != Null<KVirtualAddress>);
+    }
+
+    void KPageHeap::Free(KVirtualAddress addr, size_t num_pages) {
+        /* Freeing no pages is a no-op. */
+        if (num_pages == 0) {
+            return;
+        }
+
+        /* Find the largest block size that we can free, and free as many as possible. */
+        s32 big_index = static_cast<s32>(this->num_blocks) - 1;
+        const KVirtualAddress start  = addr;
+        const KVirtualAddress end    = addr + num_pages * PageSize;
+        KVirtualAddress before_start = start;
+        KVirtualAddress before_end   = start;
+        KVirtualAddress after_start  = end;
+        KVirtualAddress after_end    = end;
+        while (big_index >= 0) {
+            const size_t block_size = this->blocks[big_index].GetSize();
+            const KVirtualAddress big_start = util::AlignUp(GetInteger(start), block_size);
+            const KVirtualAddress big_end   = util::AlignDown(GetInteger(end), block_size);
+            if (big_start < big_end) {
+                /* Free as many big blocks as we can. */
+                for (auto block = big_start; block < big_end; block += block_size) {
+                    this->FreeBlock(block, big_index);
+                }
+                before_end  = big_start;
+                after_start = big_end;
+                break;
+            }
+            big_index--;
+        }
+        MESOSPHERE_ASSERT(big_index >= 0);
+
+        /* Free space before the big blocks. */
+        for (s32 i = big_index - 1; i >= 0; i--) {
+            const size_t block_size = this->blocks[i].GetSize();
+            while (before_start + block_size <= before_end) {
+                before_end -= block_size;
+                this->FreeBlock(before_end, i);
+            }
+        }
+
+        /* Free space after the big blocks. */
+        for (s32 i = big_index - 1; i >= 0; i--) {
+            const size_t block_size = this->blocks[i].GetSize();
+            while (after_start + block_size <= after_end) {
+                this->FreeBlock(after_start, i);
+                after_start += block_size;
+            }
+        }
+    }
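Free carves the released range into the largest naturally aligned blocks it can, then mops up the unaligned head and tail with progressively smaller block sizes. This sketch computes just the alignment split for a single block size (power-of-two sizes assumed; hypothetical function name):

    #include <cstdint>
    #include <cstdio>

    constexpr uintptr_t AlignUp(uintptr_t v, size_t a)   { return (v + a - 1) & ~(a - 1); }
    constexpr uintptr_t AlignDown(uintptr_t v, size_t a) { return v & ~(a - 1); }

    // For a freed range and one block size, report the aligned middle portion
    // that KPageHeap::Free would release as big blocks, plus head/tail leftovers.
    void SplitForBlockSize(uintptr_t start, uintptr_t end, size_t block_size) {
        const uintptr_t big_start = AlignUp(start, block_size);
        const uintptr_t big_end   = AlignDown(end, block_size);
        if (big_start < big_end) {
            std::printf("head [%#lx, %#lx)\n", (unsigned long)start, (unsigned long)big_start);
            std::printf("big  [%#lx, %#lx) in %#lx chunks\n",
                        (unsigned long)big_start, (unsigned long)big_end, (unsigned long)block_size);
            std::printf("tail [%#lx, %#lx)\n", (unsigned long)big_end, (unsigned long)end);
        } else {
            std::printf("range too small for %#lx blocks\n", (unsigned long)block_size);
        }
    }

    int main() {
        // Free 0x9000..0x45000 with a 0x10000 (16-page) big-block size.
        SplitForBlockSize(0x9000, 0x45000, 0x10000);
    }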
+
+    size_t KPageHeap::CalculateMetadataOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts) {
+        size_t overhead_size = 0;
+        for (size_t i = 0; i < num_block_shifts; i++) {
+            const size_t cur_block_shift  = block_shifts[i];
+            const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0;
+            overhead_size += KPageHeap::Block::CalculateMetadataOverheadSize(region_size, cur_block_shift, next_block_shift);
+        }
+        return util::AlignUp(overhead_size, PageSize);
+    }
+
+}
diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
new file mode 100644
index 000000000..4b4eb1d3d
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
@@ -0,0 +1,1023 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+#include <mesosphere/kern_select_page_table.hpp>
+
+namespace ams::kern {
+
+    Result KPageTableBase::InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end) {
+        /* Initialize our members. */
+        this->address_space_width = (is_64_bit) ? BITSIZEOF(u64) : BITSIZEOF(u32);
+        this->address_space_start = KProcessAddress(GetInteger(start));
+        this->address_space_end   = KProcessAddress(GetInteger(end));
+        this->is_kernel           = true;
+        this->enable_aslr         = true;
+
+        this->heap_region_start        = 0;
+        this->heap_region_end          = 0;
+        this->current_heap_end         = 0;
+        this->alias_region_start       = 0;
+        this->alias_region_end         = 0;
+        this->stack_region_start       = 0;
+        this->stack_region_end         = 0;
+        this->kernel_map_region_start  = 0;
+        this->kernel_map_region_end    = 0;
+        this->alias_code_region_start  = 0;
+        this->alias_code_region_end    = 0;
+        this->code_region_start        = 0;
+        this->code_region_end          = 0;
+        this->max_heap_size            = 0;
+        this->max_physical_memory_size = 0;
+
+        this->memory_block_slab_manager = std::addressof(Kernel::GetSystemMemoryBlockManager());
+        this->block_info_manager        = std::addressof(Kernel::GetBlockInfoManager());
+
+        this->allocate_option  = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront);
+        this->heap_fill_value  = MemoryFillValue_Zero;
+        this->ipc_fill_value   = MemoryFillValue_Zero;
+        this->stack_fill_value = MemoryFillValue_Zero;
+
+        this->cached_physical_linear_region = nullptr;
+        this->cached_physical_heap_region   = nullptr;
+        this->cached_virtual_heap_region    = nullptr;
+
+        /* Initialize our implementation. */
+        this->impl.InitializeForKernel(table, start, end);
+
+        /* Initialize our memory block manager. */
+        return this->memory_block_manager.Initialize(this->address_space_start, this->address_space_end, this->memory_block_slab_manager);
+
+        return ResultSuccess();
+    }
+
+    Result KPageTableBase::InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager) {
+        /* Validate the region. */
+        MESOSPHERE_ABORT_UNLESS(start <= code_address);
+        MESOSPHERE_ABORT_UNLESS(code_address < code_address + code_size);
+        MESOSPHERE_ABORT_UNLESS(code_address + code_size - 1 <= end - 1);
+
+        /* Declare variables to hold our region sizes. */
+
+        /* Define helpers. */
+        auto GetSpaceStart = [&](KAddressSpaceInfo::Type type) ALWAYS_INLINE_LAMBDA {
+            return KAddressSpaceInfo::GetAddressSpaceStart(this->address_space_width, type);
+        };
+        auto GetSpaceSize = [&](KAddressSpaceInfo::Type type) ALWAYS_INLINE_LAMBDA {
+            return KAddressSpaceInfo::GetAddressSpaceSize(this->address_space_width, type);
+        };
+
+        /* Set our width and heap/alias sizes. */
+        this->address_space_width = GetAddressSpaceWidth(as_type);
+        size_t alias_region_size  = GetSpaceSize(KAddressSpaceInfo::Type_Alias);
+        size_t heap_region_size   = GetSpaceSize(KAddressSpaceInfo::Type_Heap);
+
+        /* Adjust heap/alias size if we don't have an alias region. */
+        if ((as_type & ams::svc::CreateProcessFlag_AddressSpaceMask) == ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias) {
+            heap_region_size += alias_region_size;
+            alias_region_size = 0;
+        }
+
+        /* Set code regions and determine remaining sizes. */
+        KProcessAddress process_code_start;
+        KProcessAddress process_code_end;
+        size_t stack_region_size;
+        size_t kernel_map_region_size;
+        if (this->address_space_width == 39) {
+            alias_region_size             = GetSpaceSize(KAddressSpaceInfo::Type_Alias);
+            heap_region_size              = GetSpaceSize(KAddressSpaceInfo::Type_Heap);
+            stack_region_size             = GetSpaceSize(KAddressSpaceInfo::Type_Stack);
+            kernel_map_region_size        = GetSpaceSize(KAddressSpaceInfo::Type_32Bit);
+            this->code_region_start       = GetSpaceStart(KAddressSpaceInfo::Type_Large64Bit);
+            this->code_region_end         = this->code_region_start + GetSpaceSize(KAddressSpaceInfo::Type_Large64Bit);
+            this->alias_code_region_start = this->code_region_start;
+            this->alias_code_region_end   = this->code_region_end;
+            process_code_start            = util::AlignDown(GetInteger(code_address), RegionAlignment);
+            process_code_end              = util::AlignUp(GetInteger(code_address) + code_size, RegionAlignment);
+        } else {
+            stack_region_size             = 0;
+            kernel_map_region_size        = 0;
+            this->code_region_start       = GetSpaceStart(KAddressSpaceInfo::Type_32Bit);
+            this->code_region_end         = this->code_region_start + GetSpaceSize(KAddressSpaceInfo::Type_32Bit);
+            this->stack_region_start      = this->code_region_start;
+            this->alias_code_region_start = this->code_region_start;
+            this->alias_code_region_end   = GetSpaceStart(KAddressSpaceInfo::Type_Small64Bit) + GetSpaceSize(KAddressSpaceInfo::Type_Small64Bit);
+            this->stack_region_end        = this->code_region_end;
+            this->kernel_map_region_start = this->code_region_start;
+            this->kernel_map_region_end   = this->code_region_end;
+            process_code_start            = this->code_region_start;
+            process_code_end              = this->code_region_end;
+        }
+
+        /* Set other basic fields. */
+        this->enable_aslr               = enable_aslr;
+        this->address_space_start       = start;
+        this->address_space_end         = end;
+        this->is_kernel                 = false;
+        this->memory_block_slab_manager = mem_block_slab_manager;
+        this->block_info_manager        = block_info_manager;
+
+        /* Determine the region we can place our undetermineds in. */
+        KProcessAddress alloc_start;
+        size_t alloc_size;
+        if ((GetInteger(process_code_start) - GetInteger(this->code_region_start)) >= (GetInteger(end) - GetInteger(process_code_end))) {
+            alloc_start = this->code_region_start;
+            alloc_size  = GetInteger(process_code_start) - GetInteger(this->code_region_start);
+        } else {
+            alloc_start = process_code_end;
+            alloc_size  = GetInteger(end) - GetInteger(process_code_end);
+        }
+        const size_t needed_size = (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size);
+        R_UNLESS(alloc_size >= needed_size, svc::ResultOutOfMemory());
+
+        const size_t remaining_size = alloc_size - needed_size;
+
+        /* Determine random placements for each region. */
+        size_t alias_rnd = 0, heap_rnd = 0, stack_rnd = 0, kmap_rnd = 0;
+        if (enable_aslr) {
+            alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment;
+            heap_rnd  = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment;
+            stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment;
+            kmap_rnd  = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment;
+        }
+
+        /* Setup heap and alias regions. */
+        this->alias_region_start = alloc_start + alias_rnd;
+        this->alias_region_end   = this->alias_region_start + alias_region_size;
+        this->heap_region_start  = alloc_start + heap_rnd;
+        this->heap_region_end    = this->heap_region_start + heap_region_size;
+
+        if (alias_rnd <= heap_rnd) {
+            this->heap_region_start  += alias_region_size;
+            this->heap_region_end    += alias_region_size;
+        } else {
+            this->alias_region_start += heap_region_size;
+            this->alias_region_end   += heap_region_size;
+        }
+
+        /* Setup stack region. */
+        if (stack_region_size) {
+            this->stack_region_start = alloc_start + stack_rnd;
+            this->stack_region_end   = this->stack_region_start + stack_region_size;
+
+            if (alias_rnd < stack_rnd) {
+                this->stack_region_start += alias_region_size;
+                this->stack_region_end   += alias_region_size;
+            } else {
+                this->alias_region_start += stack_region_size;
+                this->alias_region_end   += stack_region_size;
+            }
+
+            if (heap_rnd < stack_rnd) {
+                this->stack_region_start += heap_region_size;
+                this->stack_region_end   += heap_region_size;
+            } else {
+                this->heap_region_start  += stack_region_size;
+                this->heap_region_end    += stack_region_size;
+            }
+        }
+
+        /* Setup kernel map region. */
+        if (kernel_map_region_size) {
+            this->kernel_map_region_start = alloc_start + kmap_rnd;
+            this->kernel_map_region_end   = this->kernel_map_region_start + kernel_map_region_size;
+
+            if (alias_rnd < kmap_rnd) {
+                this->kernel_map_region_start += alias_region_size;
+                this->kernel_map_region_end   += alias_region_size;
+            } else {
+                this->alias_region_start      += kernel_map_region_size;
+                this->alias_region_end        += kernel_map_region_size;
+            }
+
+            if (heap_rnd < kmap_rnd) {
+                this->kernel_map_region_start += heap_region_size;
+                this->kernel_map_region_end   += heap_region_size;
+            } else {
+                this->heap_region_start       += kernel_map_region_size;
+                this->heap_region_end         += kernel_map_region_size;
+            }
+
+            if (stack_region_size) {
+                if (stack_rnd < kmap_rnd) {
+                    this->kernel_map_region_start += stack_region_size;
+                    this->kernel_map_region_end   += stack_region_size;
+                } else {
+                    this->stack_region_start      += kernel_map_region_size;
+                    this->stack_region_end        += kernel_map_region_size;
+                }
+            }
+        }
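The random-offset shuffle above is subtle: every region draws an independent offset into the leftover space, and whenever two regions' offsets are compared, the one placed later is displaced by the full size of the earlier one, which yields an overlap-free layout ordered by offset. A two-region sketch of that invariant (hypothetical names; the real code repeats this pairwise for alias, heap, stack, and kernel-map):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    struct Region { uintptr_t start; uintptr_t end; };

    // Place two regions of given sizes at random offsets into the slack space,
    // then displace the later one, as InitializeForProcess does for alias/heap.
    void PlaceTwo(uintptr_t base, size_t a_size, size_t b_size,
                  size_t a_rnd, size_t b_rnd, Region &a, Region &b) {
        a = {base + a_rnd, base + a_rnd + a_size};
        b = {base + b_rnd, base + b_rnd + b_size};
        if (a_rnd <= b_rnd) {
            b.start += a_size; b.end += a_size;   // b lands after a
        } else {
            a.start += b_size; a.end += b_size;   // a lands after b
        }
    }

    int main() {
        Region a, b;
        // Offsets drawn from [0, remaining_size]; here 0x3000 and 0x1000.
        PlaceTwo(0x100000, 0x8000, 0x4000, 0x3000, 0x1000, a, b);
        assert(b.end <= a.start || a.end <= b.start);   // no overlap
        std::printf("a: [%#lx, %#lx)  b: [%#lx, %#lx)\n",
                    (unsigned long)a.start, (unsigned long)a.end,
                    (unsigned long)b.start, (unsigned long)b.end);
    }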
+
+        /* Set heap and fill members. */
+        this->current_heap_end         = this->heap_region_start;
+        this->max_heap_size            = 0;
+        this->max_physical_memory_size = 0;
+
+        const bool fill_memory = KTargetSystem::IsDebugMemoryFillEnabled();
+        this->heap_fill_value  = fill_memory ? MemoryFillValue_Heap  : MemoryFillValue_Zero;
+        this->ipc_fill_value   = fill_memory ? MemoryFillValue_Ipc   : MemoryFillValue_Zero;
+        this->stack_fill_value = fill_memory ? MemoryFillValue_Stack : MemoryFillValue_Zero;
+
+        /* Set allocation option. */
+        this->allocate_option = KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction_FromBack : KMemoryManager::Direction_FromFront);
+
+        /* Ensure that our regions are inside our address space. */
+        auto IsInAddressSpace = [&](KProcessAddress addr) ALWAYS_INLINE_LAMBDA { return this->address_space_start <= addr && addr <= this->address_space_end; };
+        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(this->alias_region_start));
+        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(this->alias_region_end));
+        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(this->heap_region_start));
+        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(this->heap_region_end));
+        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(this->stack_region_start));
+        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(this->stack_region_end));
+        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(this->kernel_map_region_start));
+        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(this->kernel_map_region_end));
+
+        /* Ensure that we selected regions that don't overlap. */
+        const KProcessAddress alias_start = this->alias_region_start;
+        const KProcessAddress alias_last  = this->alias_region_end - 1;
+        const KProcessAddress heap_start  = this->heap_region_start;
+        const KProcessAddress heap_last   = this->heap_region_end - 1;
+        const KProcessAddress stack_start = this->stack_region_start;
+        const KProcessAddress stack_last  = this->stack_region_end - 1;
+        const KProcessAddress kmap_start  = this->kernel_map_region_start;
+        const KProcessAddress kmap_last   = this->kernel_map_region_end - 1;
+        MESOSPHERE_ABORT_UNLESS(alias_last < heap_start  || heap_last  < alias_start);
+        MESOSPHERE_ABORT_UNLESS(alias_last < stack_start || stack_last < alias_start);
+        MESOSPHERE_ABORT_UNLESS(alias_last < kmap_start  || kmap_last  < alias_start);
+        MESOSPHERE_ABORT_UNLESS(heap_last  < stack_start || stack_last < heap_start);
+        MESOSPHERE_ABORT_UNLESS(heap_last  < kmap_start  || kmap_last  < heap_start);
+
+        /* Initialize our implementation. */
+        this->impl.InitializeForProcess(table, GetInteger(start), GetInteger(end));
+
+        /* Initialize our memory block manager. */
+        return this->memory_block_manager.Initialize(this->address_space_start, this->address_space_end, this->memory_block_slab_manager);
+
+        return ResultSuccess();
+    }
+
+
+    void KPageTableBase::Finalize() {
+        this->memory_block_manager.Finalize(this->memory_block_slab_manager);
+        MESOSPHERE_TODO("cpu::InvalidateEntireInstructionCache();");
+    }
+
+    KProcessAddress KPageTableBase::GetRegionAddress(KMemoryState state) const {
+        switch (state) {
+            case KMemoryState_Free:
+            case KMemoryState_Kernel:
+                return this->address_space_start;
+            case KMemoryState_Normal:
+                return this->heap_region_start;
+            case KMemoryState_Ipc:
+            case KMemoryState_NonSecureIpc:
+            case KMemoryState_NonDeviceIpc:
+                return this->alias_region_start;
+            case KMemoryState_Stack:
+                return this->stack_region_start;
+            case KMemoryState_Io:
+            case KMemoryState_Static:
+            case KMemoryState_ThreadLocal:
+                return this->kernel_map_region_start;
+            case KMemoryState_Shared:
+            case KMemoryState_AliasCode:
+            case KMemoryState_AliasCodeData:
+            case KMemoryState_Transfered:
+            case KMemoryState_SharedTransfered:
+            case KMemoryState_SharedCode:
+            case KMemoryState_GeneratedCode:
+            case KMemoryState_CodeOut:
+                return this->alias_code_region_start;
+            case KMemoryState_Code:
+            case KMemoryState_CodeData:
+                return this->code_region_start;
+            MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+        }
+    }
+
+    size_t KPageTableBase::GetRegionSize(KMemoryState state) const {
+        switch (state) {
+            case KMemoryState_Free:
+            case KMemoryState_Kernel:
+                return this->address_space_end - this->address_space_start;
+            case KMemoryState_Normal:
+                return this->heap_region_end - this->heap_region_start;
+            case KMemoryState_Ipc:
+            case KMemoryState_NonSecureIpc:
+            case KMemoryState_NonDeviceIpc:
+                return this->alias_region_end - this->alias_region_start;
+            case KMemoryState_Stack:
+                return this->stack_region_end - this->stack_region_start;
+            case KMemoryState_Io:
+            case KMemoryState_Static:
+            case KMemoryState_ThreadLocal:
+                return this->kernel_map_region_end - this->kernel_map_region_start;
+            case KMemoryState_Shared:
+            case KMemoryState_AliasCode:
+            case KMemoryState_AliasCodeData:
+            case KMemoryState_Transfered:
+            case KMemoryState_SharedTransfered:
+            case KMemoryState_SharedCode:
+            case KMemoryState_GeneratedCode:
+            case KMemoryState_CodeOut:
+                return this->alias_code_region_end - this->alias_code_region_start;
+            case KMemoryState_Code:
+            case KMemoryState_CodeData:
+                return this->code_region_end - this->code_region_start;
+            MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+        }
+    }
+
+    bool KPageTableBase::CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
+        const KProcessAddress end  = addr + size;
+        const KProcessAddress last = end - 1;
+
+        const KProcessAddress region_start = this->GetRegionAddress(state);
+        const size_t region_size           = this->GetRegionSize(state);
+
+        const bool is_in_region = region_start <= addr && addr < end && last <= region_start + region_size - 1;
+        const bool is_in_heap   = !(end <= this->heap_region_start || this->heap_region_end <= addr);
+        const bool is_in_alias  = !(end <= this->alias_region_start || this->alias_region_end <= addr);
+        switch (state) {
+            case KMemoryState_Free:
+            case KMemoryState_Kernel:
+                return is_in_region;
+            case KMemoryState_Io:
+            case KMemoryState_Static:
+            case KMemoryState_Code:
+            case KMemoryState_CodeData:
+            case KMemoryState_Shared:
+            case KMemoryState_AliasCode:
+            case KMemoryState_AliasCodeData:
+            case KMemoryState_Stack:
+            case KMemoryState_ThreadLocal:
+            case KMemoryState_Transfered:
+            case KMemoryState_SharedTransfered:
+            case KMemoryState_SharedCode:
+            case KMemoryState_GeneratedCode:
+            case KMemoryState_CodeOut:
+                return is_in_region && !is_in_heap && !is_in_alias;
+            case KMemoryState_Normal:
+                MESOSPHERE_ASSERT(is_in_heap);
+                return is_in_region && !is_in_alias;
+            case KMemoryState_Ipc:
+            case KMemoryState_NonSecureIpc:
+            case KMemoryState_NonDeviceIpc:
+                MESOSPHERE_ASSERT(is_in_alias);
+                return is_in_region && !is_in_heap;
+            default:
+                return false;
+        }
+    }
+
+    Result KPageTableBase::CheckMemoryState(const KMemoryInfo &info, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const {
+        /* Validate the states match expectation. */
+        R_UNLESS((info.state     & state_mask) == state, svc::ResultInvalidCurrentMemory());
+        R_UNLESS((info.perm      & perm_mask)  == perm,  svc::ResultInvalidCurrentMemory());
+        R_UNLESS((info.attribute & attr_mask)  == attr,  svc::ResultInvalidCurrentMemory());
+
+        return ResultSuccess();
+    }
+
+    Result KPageTableBase::CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr) const {
+        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+        /* Get information about the first block. */
+        const KProcessAddress last_addr = addr + size - 1;
+        KMemoryBlockManager::const_iterator it = this->memory_block_manager.FindIterator(addr);
+        KMemoryInfo info = it->GetMemoryInfo();
+
+        /* Validate all blocks in the range have correct state. */
+        const KMemoryState      first_state = info.state;
+        const KMemoryPermission first_perm  = info.perm;
+        const KMemoryAttribute  first_attr  = info.attribute;
+        while (true) {
+            /* Validate the current block. */
+            R_UNLESS(info.state == first_state,                                    svc::ResultInvalidCurrentMemory());
+            R_UNLESS(info.perm  == first_perm,                                     svc::ResultInvalidCurrentMemory());
+            R_UNLESS((info.attribute | ignore_attr) == (first_attr | ignore_attr), svc::ResultInvalidCurrentMemory());
+
+            /* Validate against the provided masks. */
+            R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
+
+            /* Break once we're done. */
+            if (last_addr <= info.GetLastAddress()) {
+                break;
+            }
+
+            /* Advance our iterator. */
+            it++;
+            MESOSPHERE_ASSERT(it != this->memory_block_manager.cend());
+            info = it->GetMemoryInfo();
+        }
+
+        /* Write output state. */
+        if (out_state) {
+            *out_state = first_state;
+        }
+        if (out_perm) {
+            *out_perm = first_perm;
+        }
+        if (out_attr) {
+            *out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr);
+        }
+        return ResultSuccess();
+    }
+
+    Result KPageTableBase::QueryInfoImpl(KMemoryInfo *out_info, ams::svc::PageInfo *out_page, KProcessAddress address) const {
+        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+        MESOSPHERE_ASSERT(out_info != nullptr);
+        MESOSPHERE_ASSERT(out_page != nullptr);
+
+        const KMemoryBlock *block = this->memory_block_manager.FindBlock(address);
+        R_UNLESS(block != nullptr, svc::ResultInvalidCurrentMemory());
+
+        *out_info = block->GetMemoryInfo();
+        out_page->flags = 0;
+        return ResultSuccess();
+    }
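CheckMemoryState walks every block covering [addr, addr+size) and requires each to match the first block's state, permission, and attributes (modulo ignore_attr). The same walk over a toy sorted interval list (hypothetical types; assumes the range is fully covered, as the kernel's tree guarantees):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Interval { uintptr_t last; uint32_t state; };  // sorted, contiguous blocks

    // Return true if every interval overlapping [addr, addr+size-1] has the
    // same state as the first one, as in KPageTableBase::CheckMemoryState.
    bool CheckUniform(const std::vector<Interval> &blocks, uintptr_t addr, size_t size) {
        const uintptr_t last_addr = addr + size - 1;
        size_t i = 0;
        while (blocks[i].last < addr) { i++; }   // find the first covering block
        const uint32_t first_state = blocks[i].state;
        while (true) {
            if (blocks[i].state != first_state) { return false; }
            if (last_addr <= blocks[i].last) { return true; }
            i++;                                  // advance, as with the tree iterator
        }
    }

    int main() {
        // Three contiguous blocks: [0,0xFFF], [0x1000,0x4FFF], [0x5000,0x8FFF].
        std::vector<Interval> blocks{{0xFFF, 1}, {0x4FFF, 2}, {0x8FFF, 2}};
        std::printf("%d\n", CheckUniform(blocks, 0x1000, 0x8000));  // 1: both state 2
        std::printf("%d\n", CheckUniform(blocks, 0x0,    0x2000));  // 0: states differ
    }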
+
+    KProcessAddress KPageTableBase::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const {
+        KProcessAddress address = Null<KProcessAddress>;
+
+        if (num_pages <= region_num_pages) {
+            if (this->IsAslrEnabled()) {
+                /* Try to directly find a free area up to 8 times. */
+                for (size_t i = 0; i < 8; i++) {
+                    const size_t random_offset = KSystemControl::GenerateRandomRange(0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) * alignment;
+                    const KProcessAddress candidate = util::AlignDown(GetInteger(region_start + random_offset), alignment) + offset;
+
+                    KMemoryInfo info;
+                    ams::svc::PageInfo page_info;
+                    MESOSPHERE_R_ABORT_UNLESS(this->QueryInfoImpl(&info, &page_info, candidate));
+
+                    if (info.state != KMemoryState_Free) { continue; }
+                    if (!(region_start <= candidate)) { continue; }
+                    if (!(info.GetAddress() + guard_pages * PageSize <= GetInteger(candidate))) { continue; }
+                    if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= info.GetLastAddress())) { continue; }
+                    if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= region_start + region_num_pages * PageSize - 1)) { continue; }
+
+                    address = candidate;
+                    break;
+                }
+                /* Fall back to finding the first free area with a random offset. */
+                if (address == Null<KProcessAddress>) {
+                    /* NOTE: Nintendo does not account for guard pages here. */
+                    /* This may theoretically cause an offset to be chosen that cannot be mapped. */
+                    /* TODO: Should we account for guard pages? */
+                    const size_t offset_pages = KSystemControl::GenerateRandomRange(0, region_num_pages - num_pages);
+                    address = this->memory_block_manager.FindFreeArea(region_start + offset_pages * PageSize, region_num_pages - offset_pages, num_pages, alignment, offset, guard_pages);
+                }
+            }
+            /* Find the first free area. */
+            if (address == Null<KProcessAddress>) {
+                address = this->memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, alignment, offset, guard_pages);
+            }
+        }
+
+        return address;
+    }
+
+    Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties properties) {
+        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+        /* Create a page group to hold the pages we allocate. */
+        KPageGroup pg(this->block_info_manager);
+
+        /* Allocate the pages. */
+        R_TRY(Kernel::GetMemoryManager().Allocate(std::addressof(pg), num_pages, this->allocate_option));
+
+        /* Ensure that the page group is open while we work with it. */
+        KScopedPageGroup spg(pg);
+
+        /* Clear all pages. */
+        for (const auto &it : pg) {
+            std::memset(GetVoidPointer(it.GetAddress()), this->heap_fill_value, it.GetSize());
+        }
+
+        /* Map the pages. */
+        return this->Operate(page_list, address, num_pages, pg, properties, OperationType_MapGroup, false);
+    }
+
+    Result KPageTableBase::MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll) {
+        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+        /* Note the current address, so that we can iterate. */
+        const KProcessAddress start_address = address;
+        KProcessAddress cur_address = address;
+
+        /* Ensure that we clean up on failure. */
+        auto mapping_guard = SCOPE_GUARD {
+            MESOSPHERE_ABORT_UNLESS(!reuse_ll);
+            if (cur_address != start_address) {
+                const KPageProperties unmap_properties = {};
+                MESOSPHERE_R_ABORT_UNLESS(this->Operate(page_list, start_address, (cur_address - start_address) / PageSize, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, true));
+            }
+        };
+
+        /* Iterate, mapping all pages in the group. */
+        for (const auto &block : pg) {
+            /* We only allow mapping pages in the heap, and we require we're mapping non-empty blocks. */
+            MESOSPHERE_ABORT_UNLESS(block.GetAddress() < block.GetLastAddress());
+            MESOSPHERE_ABORT_UNLESS(IsHeapVirtualAddress(block.GetAddress(), block.GetSize()));
+
+            /* Map and advance. */
+            R_TRY(this->Operate(page_list, cur_address, block.GetNumPages(), GetHeapPhysicalAddress(block.GetAddress()), true, properties, OperationType_Map, reuse_ll));
+            cur_address += block.GetSize();
+        }
+
+        /* We succeeded! */
+        mapping_guard.Cancel();
+        return ResultSuccess();
+    }
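
MapPageGroupImpl leans on the SCOPE_GUARD/Cancel() idiom: if a later Operate fails partway through the group, the guard unmaps exactly the prefix that was already mapped. A minimal sketch of that idiom with a hand-rolled guard type (illustrative only; the kernel's SCOPE_GUARD macro is its own machinery):

    #include <utility>

    // Illustrative stand-in for the SCOPE_GUARD/Cancel() pattern used above.
    template<typename F>
    class ScopeGuard {
        private:
            F f;
            bool active;
        public:
            explicit ScopeGuard(F f) : f(std::move(f)), active(true) { /* ... */ }
            ~ScopeGuard() { if (active) { f(); } }
            void Cancel() { active = false; }
            ScopeGuard(const ScopeGuard &) = delete;
    };

    // Hypothetical map loop: on failure, the guard rolls back partial work.
    bool MapAll(int *pages, int count, int fail_at) {
        int mapped = 0;
        ScopeGuard rollback([&] {
            for (int i = 0; i < mapped; i++) { pages[i] = 0; }  /* Undo the mapped prefix. */
        });
        for (; mapped < count; mapped++) {
            if (mapped == fail_at) { return false; }  /* Guard unmaps 0..mapped-1. */
            pages[mapped] = 1;                        /* "Map" one page.           */
        }
        rollback.Cancel();                            /* Success: keep the mappings. */
        return true;
    }
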
+
+    Result KPageTableBase::MakePageGroup(KPageGroup &pg, KProcessAddress addr, size_t num_pages) {
+        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+        const size_t size = num_pages * PageSize;
+
+        /* We're making a new group, not adding to an existing one. */
+        R_UNLESS(pg.empty(), svc::ResultInvalidCurrentMemory());
+
+        auto &impl = this->GetImpl();
+
+        /* Begin traversal. */
+        TraversalContext context;
+        TraversalEntry   next_entry;
+        R_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr), svc::ResultInvalidCurrentMemory());
+
+        /* Prepare tracking variables. */
+        KPhysicalAddress cur_addr = next_entry.phys_addr;
+        size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+        size_t tot_size = cur_size;
+
+        /* Iterate, adding to group as we go. */
+        while (tot_size < size) {
+            R_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)), svc::ResultInvalidCurrentMemory());
+
+            if (next_entry.phys_addr != (cur_addr + cur_size)) {
+                const size_t cur_pages = cur_size / PageSize;
+
+                R_UNLESS(IsHeapPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
+                R_TRY(pg.AddBlock(GetHeapVirtualAddress(cur_addr), cur_pages));
+
+                cur_addr = next_entry.phys_addr;
+                cur_size = next_entry.block_size;
+            } else {
+                cur_size += next_entry.block_size;
+            }
+
+            tot_size += next_entry.block_size;
+        }
+
+        /* Ensure we add the right amount for the last block. */
+        if (tot_size > size) {
+            cur_size -= (tot_size - size);
+        }
+
+        /* Add the last block. */
+        const size_t cur_pages = cur_size / PageSize;
+        R_UNLESS(IsHeapPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
+        R_TRY(pg.AddBlock(GetHeapVirtualAddress(cur_addr), cur_pages));
+
+        return ResultSuccess();
+    }
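
MakePageGroup's traversal amounts to run-length coalescing: adjacent translation entries whose physical addresses abut are merged into one block before being added to the group. The same merge in isolation, over a hypothetical Run type:

    #include <cstdint>
    #include <vector>

    struct Run { uint64_t addr, size; };

    // Coalesce a sequence of physical extents into maximal contiguous runs,
    // the same merging MakePageGroup performs before calling pg.AddBlock().
    std::vector<Run> Coalesce(const std::vector<Run> &entries) {
        std::vector<Run> out;
        for (const auto &e : entries) {
            if (!out.empty() && out.back().addr + out.back().size == e.addr) {
                out.back().size += e.size;   /* Physically contiguous: extend the run. */
            } else {
                out.push_back(e);            /* Discontinuity: start a new run.        */
            }
        }
        return out;
    }
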
+
+    bool KPageTableBase::IsValidPageGroup(const KPageGroup &pg, KProcessAddress addr, size_t num_pages) {
+        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+        const size_t size = num_pages * PageSize;
+
+        /* Empty groups are necessarily invalid. */
+        if (pg.empty()) {
+            return false;
+        }
+
+        auto &impl = this->GetImpl();
+
+        /* We're going to validate that the group we'd expect is the group we see. */
+        auto cur_it = pg.begin();
+        KVirtualAddress cur_block_address = cur_it->GetAddress();
+        size_t cur_block_pages = cur_it->GetNumPages();
+
+        auto UpdateCurrentIterator = [&]() ALWAYS_INLINE_LAMBDA {
+            if (cur_block_pages == 0) {
+                if ((++cur_it) == pg.end()) {
+                    return false;
+                }
+
+                cur_block_address = cur_it->GetAddress();
+                cur_block_pages   = cur_it->GetNumPages();
+            }
+            return true;
+        };
+
+        /* Begin traversal. */
+        TraversalContext context;
+        TraversalEntry   next_entry;
+        if (!impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr)) {
+            return false;
+        }
+
+        /* Prepare tracking variables. */
+        KPhysicalAddress cur_addr = next_entry.phys_addr;
+        size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+        size_t tot_size = cur_size;
+
+        /* Iterate, comparing expected to actual. */
+        while (tot_size < size) {
+            if (!impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context))) {
+                return false;
+            }
+
+            if (next_entry.phys_addr != (cur_addr + cur_size)) {
+                const size_t cur_pages = cur_size / PageSize;
+
+                if (!IsHeapPhysicalAddress(cur_addr)) {
+                    return false;
+                }
+
+                if (!UpdateCurrentIterator()) {
+                    return false;
+                }
+
+                if (cur_block_address != GetHeapVirtualAddress(cur_addr) || cur_block_pages < cur_pages) {
+                    return false;
+                }
+
+                cur_block_address += cur_size;
+                cur_block_pages   -= cur_pages;
+                cur_addr           = next_entry.phys_addr;
+                cur_size           = next_entry.block_size;
+            } else {
+                cur_size += next_entry.block_size;
+            }
+
+            tot_size += next_entry.block_size;
+        }
+
+        /* Ensure we compare the right amount for the last block. */
+        if (tot_size > size) {
+            cur_size -= (tot_size - size);
+        }
+
+        if (!IsHeapPhysicalAddress(cur_addr)) {
+            return false;
+        }
+
+        if (!UpdateCurrentIterator()) {
+            return false;
+        }
+
+        return cur_block_address == GetHeapVirtualAddress(cur_addr) && cur_block_pages == (cur_size / PageSize);
+    }
+
+    Result KPageTableBase::SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission svc_perm) {
+        MESOSPHERE_UNIMPLEMENTED();
+    }
+
+    Result KPageTableBase::SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission svc_perm) {
+        const size_t num_pages = size / PageSize;
+
+        /* Lock the table. */
+        KScopedLightLock lk(this->general_lock);
+
+        /* Verify we can change the memory permission. */
+        KMemoryState old_state;
+        KMemoryPermission old_perm;
+        R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, addr, size, KMemoryState_FlagCode, KMemoryState_FlagCode, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));
+
+        /* Make a new page group for the region. */
+        KPageGroup pg(this->block_info_manager);
+
+        /* Determine new perm/state. */
+        const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
+        KMemoryState new_state = old_state;
+        const bool is_w = (new_perm & KMemoryPermission_UserWrite)   == KMemoryPermission_UserWrite;
+        const bool is_x = (new_perm & KMemoryPermission_UserExecute) == KMemoryPermission_UserExecute;
+        MESOSPHERE_ASSERT(!(is_w && is_x));
+
+        if (is_w) {
+            switch (old_state) {
+                case KMemoryState_Code:      new_state = KMemoryState_CodeData;      break;
+                case KMemoryState_AliasCode: new_state = KMemoryState_AliasCodeData; break;
+                MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+            }
+        }
+
+        /* Create a page group, if we're setting execute permissions. */
+        if (is_x) {
+            R_TRY(this->MakePageGroup(pg, GetInteger(addr), num_pages));
+        }
+
+        /* Create an update allocator. */
+        KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
+        R_TRY(allocator.GetResult());
+
+        /* We're going to perform an update, so create a helper. */
+        KScopedPageTableUpdater updater(this);
+
+        /* Perform mapping operation. */
+        const KPageProperties properties = { new_perm, false, false, false };
+        const auto operation = is_x ? OperationType_ChangePermissionsAndRefresh : OperationType_ChangePermissions;
+        R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null<KPhysicalAddress>, false, properties, operation, false));
+
+        /* Update the blocks. */
+        this->memory_block_manager.Update(&allocator, addr, num_pages, new_state, new_perm, KMemoryAttribute_None);
+
+        /* Ensure cache coherency, if we're setting pages as executable. */
+        if (is_x) {
+            for (const auto &block : pg) {
+                cpu::StoreDataCache(GetVoidPointer(block.GetAddress()), block.GetSize());
+            }
+            cpu::InvalidateEntireInstructionCache();
+        }
+
+        return ResultSuccess();
+    }
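
The is_x path in SetProcessMemoryPermission (completed above) has a fixed ordering: data cache first, then instruction cache, so instruction fetches can only observe fully written-back bytes. A sketch of that ordering with stubbed maintenance primitives (the real ones are the kernel's cpu:: helpers; the AArch64 instructions named in the stubs are what such helpers conventionally issue):

    #include <cstddef>

    void StoreDataCache(const void *addr, size_t size) { /* dc cvau loop + dsb (stub) */ }
    void InvalidateEntireInstructionCache()            { /* ic ialluis + isb (stub)   */ }

    // Order of operations when pages become executable, mirroring the
    // is_x block above.
    void MakeExecutableCoherent(const void *code, size_t size) {
        /* 1. Push the newly written instructions out of the data cache... */
        StoreDataCache(code, size);
        /* 2. ...then drop stale instruction-cache contents, so fetches
              observe the bytes just made visible at the point of unification. */
        InvalidateEntireInstructionCache();
    }
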
+
+    Result KPageTableBase::SetHeapSize(KProcessAddress *out, size_t size) {
+        MESOSPHERE_UNIMPLEMENTED();
+    }
+
+    Result KPageTableBase::SetMaxHeapSize(size_t size) {
+        /* Lock the table. */
+        KScopedLightLock lk(this->general_lock);
+
+        /* Only process page tables are allowed to set heap size. */
+        MESOSPHERE_ASSERT(!this->IsKernel());
+
+        this->max_heap_size = size;
+
+        return ResultSuccess();
+    }
+
+    Result KPageTableBase::QueryInfo(KMemoryInfo *out_info, ams::svc::PageInfo *out_page_info, KProcessAddress addr) const {
+        /* If the address is invalid, create a fake block. */
+        if (!this->Contains(addr, 1)) {
+            *out_info = {
+                .address          = GetInteger(this->address_space_end),
+                .size             = 0 - GetInteger(this->address_space_end),
+                .state            = static_cast<KMemoryState>(ams::svc::MemoryState_Inaccessible),
+                .perm             = KMemoryPermission_None,
+                .attribute        = KMemoryAttribute_None,
+                .original_perm    = KMemoryPermission_None,
+                .ipc_lock_count   = 0,
+                .device_use_count = 0,
+            };
+            out_page_info->flags = 0;
+
+            return ResultSuccess();
+        }
+
+        /* Otherwise, lock the table and query. */
+        KScopedLightLock lk(this->general_lock);
+        return this->QueryInfoImpl(out_info, out_page_info, addr);
+    }
+
+    Result KPageTableBase::MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
+        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
+        MESOSPHERE_ASSERT(util::IsAligned(size, PageSize));
+        MESOSPHERE_ASSERT(size > 0);
+        R_UNLESS(phys_addr < phys_addr + size, svc::ResultInvalidAddress());
+        const size_t num_pages = size / PageSize;
+        const KPhysicalAddress last = phys_addr + size - 1;
+
+        /* Get region extents. */
+        const KProcessAddress region_start = this->GetRegionAddress(KMemoryState_Io);
+        const size_t region_size           = this->GetRegionSize(KMemoryState_Io);
+        const size_t region_num_pages      = region_size / PageSize;
+
+        /* Locate the memory region. */
+        auto region_it    = KMemoryLayout::FindContainingRegion(phys_addr);
+        const auto end_it = KMemoryLayout::GetEnd(phys_addr);
+        R_UNLESS(region_it != end_it, svc::ResultInvalidAddress());
+
+        MESOSPHERE_ASSERT(region_it->Contains(GetInteger(phys_addr)));
+
+        /* Ensure that the region is mappable. */
+        const bool is_rw = perm == KMemoryPermission_UserReadWrite;
+        do {
+            /* Check the region attributes. */
+            R_UNLESS(!region_it->IsDerivedFrom(KMemoryRegionType_Dram),                      svc::ResultInvalidAddress());
+            R_UNLESS(!region_it->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw, svc::ResultInvalidAddress());
+            R_UNLESS(!region_it->HasTypeAttribute(KMemoryRegionAttr_NoUserMap),              svc::ResultInvalidAddress());
+
+            /* Check if we're done. */
+            if (GetInteger(last) <= region_it->GetLastAddress()) {
+                break;
+            }
+
+            /* Advance. */
+            region_it++;
+        } while (region_it != end_it);
+
+        /* Lock the table. */
+        KScopedLightLock lk(this->general_lock);
+
+        /* Select an address to map at. */
+        KProcessAddress addr = Null<KProcessAddress>;
+        const size_t phys_alignment = std::min(std::min(GetInteger(phys_addr) & -GetInteger(phys_addr), size & -size), MaxPhysicalMapAlignment);
+        for (s32 block_type = KPageTable::GetMaxBlockType(); block_type >= 0; block_type--) {
+            const size_t alignment = KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(block_type));
+            if (alignment > phys_alignment) {
+                continue;
+            }
+
+            addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, this->GetNumGuardPages());
+            if (addr != Null<KProcessAddress>) {
+                break;
+            }
+        }
+        R_UNLESS(addr != Null<KProcessAddress>, svc::ResultOutOfMemory());
+
+        /* Check that we can map IO here. */
+        MESOSPHERE_ASSERT(this->CanContain(addr, size, KMemoryState_Io));
+        MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
+
+        /* Create an update allocator. */
+        KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
+        R_TRY(allocator.GetResult());
+
+        /* We're going to perform an update, so create a helper. */
+        KScopedPageTableUpdater updater(this);
+
+        /* Perform mapping operation. */
+        const KPageProperties properties = { perm, true, false, false };
+        R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties, OperationType_Map, false));
+
+        /* Update the blocks. */
+        this->memory_block_manager.Update(&allocator, addr, num_pages, KMemoryState_Io, perm, KMemoryAttribute_None);
+
+        /* We successfully mapped the pages. */
+        return ResultSuccess();
+    }
+
+    Result KPageTableBase::MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
+        MESOSPHERE_UNIMPLEMENTED();
+    }
+
+    Result KPageTableBase::MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
+        MESOSPHERE_UNIMPLEMENTED();
+    }
+
+    Result KPageTableBase::MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
+        MESOSPHERE_ASSERT(util::IsAligned(alignment, PageSize) && alignment >= PageSize);
+
+        /* Ensure this is a valid map request. */
+        R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state), svc::ResultInvalidCurrentMemory());
+        R_UNLESS(num_pages < region_num_pages, svc::ResultOutOfMemory());
+
+        /* Lock the table. */
+        KScopedLightLock lk(this->general_lock);
+
+        /* Find a random address to map at. */
+        KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, this->GetNumGuardPages());
+        R_UNLESS(addr != Null<KProcessAddress>, svc::ResultOutOfMemory());
+        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), alignment));
+        MESOSPHERE_ASSERT(this->CanContain(addr, num_pages * PageSize, state));
+        MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
+
+        /* Create an update allocator. */
+        KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
+        R_TRY(allocator.GetResult());
+
+        /* We're going to perform an update, so create a helper. */
+        KScopedPageTableUpdater updater(this);
+
+        /* Perform mapping operation. */
+        const KPageProperties properties = { perm, false, false, false };
+        if (is_pa_valid) {
+            R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties, OperationType_Map, false));
+        } else {
+            R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, properties));
+        }
+
+        /* Update the blocks. */
+        this->memory_block_manager.Update(&allocator, addr, num_pages, state, perm, KMemoryAttribute_None);
+
+        /* We successfully mapped the pages. */
+        *out_addr = addr;
+        return ResultSuccess();
+    }
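
MapIo's alignment search uses the classic x & -x trick: it isolates the lowest set bit of a value, which is the largest power-of-two alignment that value already satisfies. Applied to both the physical address and the size, it bounds the largest page-table block size worth attempting. A self-contained illustration (names hypothetical):

    #include <cstdint>
    #include <algorithm>

    // x & -x isolates the lowest set bit; written with ~x + 1 to stay unsigned.
    constexpr uint64_t LowestSetBit(uint64_t x) { return x & (~x + 1); }

    // Largest block alignment that both the physical address and the size permit.
    constexpr uint64_t PickMapAlignment(uint64_t phys_addr, uint64_t size, uint64_t max_align) {
        return std::min({ LowestSetBit(phys_addr), LowestSetBit(size), max_align });
    }

    // 0x40002000 is 0x2000-aligned, so no block larger than 0x2000 can be used.
    static_assert(PickMapAlignment(0x40002000, 0x10000, 0x200000) == 0x2000);
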
+
+    Result KPageTableBase::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
+        MESOSPHERE_UNIMPLEMENTED();
+    }
+
+    Result KPageTableBase::MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
+        MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread());
+
+        /* Ensure this is a valid map request. */
+        const size_t num_pages = pg.GetNumPages();
+        R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state), svc::ResultInvalidCurrentMemory());
+        R_UNLESS(num_pages < region_num_pages, svc::ResultOutOfMemory());
+
+        /* Lock the table. */
+        KScopedLightLock lk(this->general_lock);
+
+        /* Find a random address to map at. */
+        KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize, 0, this->GetNumGuardPages());
+        R_UNLESS(addr != Null<KProcessAddress>, svc::ResultOutOfMemory());
+        MESOSPHERE_ASSERT(this->CanContain(addr, num_pages * PageSize, state));
+        MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
+
+        /* Create an update allocator. */
+        KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
+        R_TRY(allocator.GetResult());
+
+        /* We're going to perform an update, so create a helper. */
+        KScopedPageTableUpdater updater(this);
+
+        /* Perform mapping operation. */
+        const KPageProperties properties = { perm, state == KMemoryState_Io, false, false };
+        R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+
+        /* Update the blocks. */
+        this->memory_block_manager.Update(&allocator, addr, num_pages, state, perm, KMemoryAttribute_None);
+
+        /* We successfully mapped the pages. */
+        *out_addr = addr;
+        return ResultSuccess();
+    }
+
+    Result KPageTableBase::MapPageGroup(KProcessAddress addr, const KPageGroup &pg, KMemoryState state, KMemoryPermission perm) {
+        MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread());
+
+        /* Ensure this is a valid map request. */
+        const size_t num_pages = pg.GetNumPages();
+        const size_t size = num_pages * PageSize;
+        R_UNLESS(this->CanContain(addr, size, state), svc::ResultInvalidCurrentMemory());
+
+        /* Lock the table. */
+        KScopedLightLock lk(this->general_lock);
+
+        /* Check if state allows us to map. */
+        R_TRY(this->CheckMemoryState(addr, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
+
+        /* Create an update allocator. */
+        KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
+        R_TRY(allocator.GetResult());
+
+        /* We're going to perform an update, so create a helper. */
+        KScopedPageTableUpdater updater(this);
+
+        /* Perform mapping operation. */
+        const KPageProperties properties = { perm, state == KMemoryState_Io, false, false };
+        R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+
+        /* Update the blocks. */
+        this->memory_block_manager.Update(&allocator, addr, num_pages, state, perm, KMemoryAttribute_None);
+
+        /* We successfully mapped the pages. */
+        return ResultSuccess();
+    }
+
+    Result KPageTableBase::UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
+        MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread());
+
+        /* Ensure this is a valid unmap request. */
+        const size_t num_pages = pg.GetNumPages();
+        const size_t size = num_pages * PageSize;
+        R_UNLESS(this->CanContain(address, size, state), svc::ResultInvalidCurrentMemory());
+
+        /* Lock the table. */
+        KScopedLightLock lk(this->general_lock);
+
+        /* Check if state allows us to unmap. */
+        R_TRY(this->CheckMemoryState(address, size, KMemoryState_All, state, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));
+
+        /* Check that the page group is valid. */
+        R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), svc::ResultInvalidCurrentMemory());
+
+        /* Create an update allocator. */
+        KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
+        R_TRY(allocator.GetResult());
+
+        /* We're going to perform an update, so create a helper. */
+        KScopedPageTableUpdater updater(this);
+
+        /* Perform unmapping operation. */
+        const KPageProperties properties = { KMemoryPermission_None, false, false, false };
+        R_TRY(this->Operate(updater.GetPageList(), address, num_pages, Null<KPhysicalAddress>, false, properties, OperationType_Unmap, false));
+
+        /* Update the blocks. */
+        this->memory_block_manager.Update(&allocator, address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None);
+
+        return ResultSuccess();
+    }
+
+}
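
Every mutating operation in this file repeats one transaction shape: take the lock, validate the range with CheckMemoryState, construct the block-manager allocator and the page-list updater, Operate on the page table, and only then commit the new bookkeeping via memory_block_manager.Update. A condensed outline of that shape with stubbed, hypothetical names (the real steps are the calls above); the point is that every fallible step precedes the commit, so the block manager never disagrees with the page table:

    // Condensed shape of the mutation pattern shared by MapIo/MapPages/
    // MapPageGroup/UnmapPageGroup above.
    struct Table {
        bool CheckState()    { return true; }  /* Range has the expected state.  */
        bool MakeAllocator() { return true; }  /* Blocks for the block manager.  */
        bool Operate()       { return true; }  /* The actual PTE edits.          */
        void UpdateBlocks()  { }               /* Commit new state/perm/attr.    */

        bool DoMapLikeOperation() {
            /* (The table lock is held for the whole sequence in the real code.) */
            if (!CheckState())    { return false; }
            if (!MakeAllocator()) { return false; }
            if (!Operate())       { return false; }
            UpdateBlocks();       /* Only runs once nothing can fail. */
            return true;
        }
    };
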
diff --git a/libraries/libmesosphere/source/kern_k_process.cpp b/libraries/libmesosphere/source/kern_k_process.cpp
new file mode 100644
index 000000000..892e701a4
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_k_process.cpp
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    namespace {
+
+        constexpr u64 InitialProcessIdMin = 1;
+        constexpr u64 InitialProcessIdMax = 0x50;
+        std::atomic<u64> g_initial_process_id = InitialProcessIdMin;
+
+    }
+
+    void KProcess::Finalize() {
+        MESOSPHERE_UNIMPLEMENTED();
+    }
+
+    Result KProcess::Initialize(const ams::svc::CreateProcessParameter &params) {
+        /* TODO: Validate intended kernel version. */
+        /* How should we do this? */
+
+        /* Create and clear the process local region. */
+        R_TRY(this->CreateThreadLocalRegion(std::addressof(this->plr_address)));
+        std::memset(this->GetThreadLocalRegionPointer(this->plr_address), 0, ams::svc::ThreadLocalRegionSize);
+
+        /* Copy in the name from parameters. */
+        static_assert(sizeof(params.name) < sizeof(this->name));
+        std::memcpy(this->name, params.name, sizeof(params.name));
+        this->name[sizeof(params.name)] = 0;
+
+        /* Set misc fields. */
+        this->state                   = State_Created;
+        this->main_thread_stack_size  = 0;
+        this->creation_time           = KHardwareTimer::GetTick();
+        this->used_kernel_memory_size = 0;
+        this->ideal_core_id           = 0;
+        this->flags                   = params.flags;
+        this->version                 = params.version;
+        this->program_id              = params.program_id;
+        this->code_address            = params.code_address;
+        this->code_size               = params.code_num_pages * PageSize;
+        this->is_application          = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
+        this->is_jit_debug            = false;
+
+        /* Set thread fields. */
+        for (size_t i = 0; i < cpu::NumCores; i++) {
+            this->running_threads[i]            = nullptr;
+            this->running_thread_idle_counts[i] = 0;
+            this->pinned_threads[i]             = nullptr;
+        }
+
+        /* Set max memory based on address space type. */
+        switch ((params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask)) {
+            case ams::svc::CreateProcessFlag_AddressSpace32Bit:
+            case ams::svc::CreateProcessFlag_AddressSpace64BitDeprecated:
+            case ams::svc::CreateProcessFlag_AddressSpace64Bit:
+                this->max_process_memory = this->page_table.GetHeapRegionSize();
+                break;
+            case ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias:
+                this->max_process_memory = this->page_table.GetHeapRegionSize() + this->page_table.GetAliasRegionSize();
+                break;
+            MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+        }
+
+        /* Generate random entropy. */
+        KSystemControl::GenerateRandomBytes(this->entropy, sizeof(this->entropy));
+
+        /* Clear remaining fields. */
+        this->num_threads           = 0;
+        this->peak_num_threads      = 0;
+        this->num_created_threads   = 0;
+        this->num_process_switches  = 0;
+        this->num_thread_switches   = 0;
+        this->num_fpu_switches      = 0;
+        this->num_supervisor_calls  = 0;
+        this->num_ipc_messages      = 0;
+
+        this->is_signaled           = false;
+        this->attached_object       = nullptr;
+        this->exception_thread      = nullptr;
+        this->is_suspended          = false;
+        this->memory_release_hint   = 0;
+        this->schedule_count        = 0;
+
+        /* We're initialized! */
+        this->is_initialized = true;
+
+        return ResultSuccess();
+    }
+
+    Result KProcess::Initialize(const ams::svc::CreateProcessParameter &params, const KPageGroup &pg, const u32 *caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool) {
+        MESOSPHERE_ASSERT_THIS();
+        MESOSPHERE_ASSERT(res_limit != nullptr);
+        MESOSPHERE_ABORT_UNLESS((params.code_num_pages * PageSize) / PageSize == params.code_num_pages);
+
+        /* Set members. */
+        this->memory_pool               = pool;
+        this->resource_limit            = res_limit;
+        this->system_resource_address   = Null<KVirtualAddress>;
+        this->system_resource_num_pages = 0;
+
+        /* Setup page table. */
+        /* NOTE: Nintendo passes process ID despite not having set it yet. */
+        /* This goes completely unused, but even so... */
+        {
+            const auto as_type     = static_cast<ams::svc::CreateProcessFlag>(params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask);
+            const bool enable_aslr = (params.flags & ams::svc::CreateProcessFlag_EnableAslr);
+            const bool is_app      = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
+            auto *mem_block_manager  = std::addressof(is_app ? Kernel::GetApplicationMemoryBlockManager() : Kernel::GetSystemMemoryBlockManager());
+            auto *block_info_manager = std::addressof(Kernel::GetBlockInfoManager());
+            auto *pt_manager         = std::addressof(Kernel::GetPageTableManager());
+            R_TRY(this->page_table.Initialize(this->process_id, as_type, enable_aslr, !enable_aslr, pool, params.code_address, params.code_num_pages * PageSize, mem_block_manager, block_info_manager, pt_manager));
+        }
+        auto pt_guard = SCOPE_GUARD { this->page_table.Finalize(); };
+
+        /* Ensure we can insert the code region. */
+        R_UNLESS(this->page_table.CanContain(params.code_address, params.code_num_pages * PageSize, KMemoryState_Code), svc::ResultInvalidMemoryRegion());
+
+        /* Map the code region. */
+        R_TRY(this->page_table.MapPageGroup(params.code_address, pg, KMemoryState_Code, KMemoryPermission_KernelRead));
+
+        /* Initialize capabilities. */
+        R_TRY(this->capabilities.Initialize(caps, num_caps, std::addressof(this->page_table)));
+
+        /* Initialize the process id. */
+        this->process_id = g_initial_process_id++;
+        MESOSPHERE_ABORT_UNLESS(InitialProcessIdMin <= this->process_id);
+        MESOSPHERE_ABORT_UNLESS(this->process_id <= InitialProcessIdMax);
+
+        /* Initialize the rest of the process. */
+        R_TRY(this->Initialize(params));
+
+        /* Open a reference to the resource limit. */
+        this->resource_limit->Open();
+
+        /* We succeeded! */
+        pt_guard.Cancel();
+        return ResultSuccess();
+    }
+
+    void KProcess::DoWorkerTask() {
+        MESOSPHERE_UNIMPLEMENTED();
+    }
+
+    Result KProcess::CreateThreadLocalRegion(KProcessAddress *out) {
+        KThreadLocalPage *tlp = nullptr;
+        KProcessAddress   tlr = Null<KProcessAddress>;
+
+        /* See if we can get a region from a partially used TLP. */
+        {
+            KScopedSchedulerLock sl;
+
+            if (auto it = this->partially_used_tlp_tree.begin(); it != partially_used_tlp_tree.end()) {
+                tlr = it->Reserve();
+                MESOSPHERE_ABORT_UNLESS(tlr != Null<KProcessAddress>);
+
+                if (it->IsAllUsed()) {
+                    tlp = std::addressof(*it);
+                    this->partially_used_tlp_tree.erase(it);
+                    this->fully_used_tlp_tree.insert(*tlp);
+                }
+
+                *out = tlr;
+                return ResultSuccess();
+            }
+        }
+
+        /* Allocate a new page. */
+        tlp = KThreadLocalPage::Allocate();
+        R_UNLESS(tlp != nullptr, svc::ResultOutOfMemory());
+        auto tlp_guard = SCOPE_GUARD { KThreadLocalPage::Free(tlp); };
+
+        /* Initialize the new page. */
+        R_TRY(tlp->Initialize(this));
+
+        /* Reserve a TLR. */
+        tlr = tlp->Reserve();
+        MESOSPHERE_ABORT_UNLESS(tlr != Null<KProcessAddress>);
+
+        /* Insert into our tree. */
+        {
+            KScopedSchedulerLock sl;
+            if (tlp->IsAllUsed()) {
+                this->fully_used_tlp_tree.insert(*tlp);
+            } else {
+                this->partially_used_tlp_tree.insert(*tlp);
+            }
+        }
+
+        /* We succeeded! */
+        tlp_guard.Cancel();
+        *out = tlr;
+        return ResultSuccess();
+    }
+
+    void *KProcess::GetThreadLocalRegionPointer(KProcessAddress addr) {
+        KThreadLocalPage *tlp = nullptr;
+        {
+            KScopedSchedulerLock sl;
+            if (auto it = this->partially_used_tlp_tree.find(KThreadLocalPage(util::AlignDown(GetInteger(addr), PageSize))); it != this->partially_used_tlp_tree.end()) {
+                tlp = std::addressof(*it);
+            } else if (auto it = this->fully_used_tlp_tree.find(KThreadLocalPage(util::AlignDown(GetInteger(addr), PageSize))); it != this->fully_used_tlp_tree.end()) {
+                tlp = std::addressof(*it);
+            } else {
+                return nullptr;
+            }
+        }
+        return static_cast<u8 *>(tlp->GetPointer()) + (GetInteger(addr) & (PageSize - 1));
+    }
+
+    bool KProcess::ReserveResource(ams::svc::LimitableResource which, s64 value) {
+        if (KResourceLimit *rl = this->GetResourceLimit(); rl != nullptr) {
+            return rl->Reserve(which, value);
+        } else {
+            return true;
+        }
+    }
+
+    bool KProcess::ReserveResource(ams::svc::LimitableResource which, s64 value, s64 timeout) {
+        if (KResourceLimit *rl = this->GetResourceLimit(); rl != nullptr) {
+            return rl->Reserve(which, value, timeout);
+        } else {
+            return true;
+        }
+    }
+
+    void KProcess::ReleaseResource(ams::svc::LimitableResource which, s64 value) {
+        if (KResourceLimit *rl = this->GetResourceLimit(); rl != nullptr) {
+            rl->Release(which, value);
+        }
+    }
+
+    void KProcess::ReleaseResource(ams::svc::LimitableResource which, s64 value, s64 hint) {
+        if (KResourceLimit *rl = this->GetResourceLimit(); rl != nullptr) {
+            rl->Release(which, value, hint);
+        }
+    }
+
+    void KProcess::IncrementThreadCount() {
+        MESOSPHERE_ASSERT(this->num_threads >= 0);
+        ++this->num_created_threads;
+
+        if (const auto count = ++this->num_threads; count > this->peak_num_threads) {
+            this->peak_num_threads = count;
+        }
+    }
+
+    void KProcess::DecrementThreadCount() {
+        MESOSPHERE_ASSERT(this->num_threads > 0);
+
+        if (const auto count = --this->num_threads; count == 0) {
+            MESOSPHERE_TODO("this->Terminate();");
+        }
+    }
+
+    void KProcess::RegisterThread(KThread *thread) {
+        KScopedLightLock lk(this->list_lock);
+
+        this->thread_list.push_back(*thread);
+    }
+
+    void KProcess::UnregisterThread(KThread *thread) {
+        KScopedLightLock lk(this->list_lock);
+
+        this->thread_list.erase(this->thread_list.iterator_to(*thread));
+    }
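
CreateThreadLocalRegion keeps thread-local pages in two trees: pages with at least one free 0x200-byte slot, and exhausted pages, migrating a page from the first set to the second the moment its last slot is taken. A minimal userland sketch of that bookkeeping (the bitmask Page is hypothetical; the kernel uses KThreadLocalPage, and 0x200 matches ams::svc::ThreadLocalRegionSize):

    #include <cstdint>
    #include <map>

    struct Page {
        uint32_t used = 0;
        static constexpr int NumSlots = 8;                 /* 0x1000 / 0x200 */
        bool Full() const { return used == (1u << NumSlots) - 1; }
        int  Reserve() {                                   /* Take the first free slot. */
            for (int i = 0; i < NumSlots; i++) {
                if (!(used & (1u << i))) { used |= (1u << i); return i; }
            }
            return -1;                                     /* Unreachable if page is partial. */
        }
    };

    // Pages keyed by base address: partial pages have a free slot, full ones don't.
    uint64_t AllocateTlr(std::map<uint64_t, Page> &partial, std::map<uint64_t, Page> &full) {
        if (auto it = partial.begin(); it != partial.end()) {
            const int slot     = it->second.Reserve();
            const uint64_t tlr = it->first + slot * 0x200;
            if (it->second.Full()) {                       /* Migrate to the full set. */
                full.insert(*it);
                partial.erase(it);
            }
            return tlr;
        }
        return 0;  /* Caller would allocate and insert a fresh page here. */
    }
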
+
+    Result KProcess::Run(s32 priority, size_t stack_size) {
+        MESOSPHERE_ASSERT_THIS();
+
+        /* Lock ourselves, to prevent concurrent access. */
+        KScopedLightLock lk(this->lock);
+
+        /* Validate that we're in a state where we can initialize. */
+        const auto state = this->state;
+        R_UNLESS(state == State_Created || state == State_CreatedAttached, svc::ResultInvalidState());
+
+        /* Place a tentative reservation of a thread for this process. */
+        KScopedResourceReservation thread_reservation(this, ams::svc::LimitableResource_ThreadCountMax);
+        R_UNLESS(thread_reservation.Succeeded(), svc::ResultLimitReached());
+
+        /* Ensure that we haven't already allocated stack. */
+        MESOSPHERE_ABORT_UNLESS(this->main_thread_stack_size == 0);
+
+        /* Ensure that we're allocating a valid stack. */
+        stack_size = util::AlignUp(stack_size, PageSize);
+        R_UNLESS(stack_size + this->code_size <= this->max_process_memory, svc::ResultOutOfMemory());
+        R_UNLESS(stack_size + this->code_size >= this->code_size,          svc::ResultOutOfMemory());
+
+        /* Place a tentative reservation of memory for our new stack. */
+        KScopedResourceReservation mem_reservation(this, ams::svc::LimitableResource_PhysicalMemoryMax);
+        R_UNLESS(mem_reservation.Succeeded(), svc::ResultLimitReached());
+
+        /* Allocate and map our stack. */
+        KProcessAddress stack_top = Null<KProcessAddress>;
+        if (stack_size) {
+            KProcessAddress stack_bottom;
+            R_TRY(this->page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize, KMemoryState_Stack, KMemoryPermission_UserReadWrite));
+
+            stack_top = stack_bottom + stack_size;
+            this->main_thread_stack_size = stack_size;
+        }
+
+        /* Ensure our stack is safe to clean up on exit. */
+        auto stack_guard = SCOPE_GUARD {
+            if (this->main_thread_stack_size) {
+                MESOSPHERE_R_ABORT_UNLESS(this->page_table.UnmapPages(stack_top - this->main_thread_stack_size, this->main_thread_stack_size / PageSize, KMemoryState_Stack));
+                this->main_thread_stack_size = 0;
+            }
+        };
+
+        /* Set our maximum heap size. */
+        R_TRY(this->page_table.SetMaxHeapSize(this->max_process_memory - (this->main_thread_stack_size + this->code_size)));
+
+        /* Initialize our handle table. */
+        R_TRY(this->handle_table.Initialize(this->capabilities.GetHandleTableSize()));
+        auto ht_guard = SCOPE_GUARD { this->handle_table.Finalize(); };
+
+        /* Create a new thread for the process. */
+        KThread *main_thread = KThread::Create();
+        R_UNLESS(main_thread != nullptr, svc::ResultOutOfResource());
+        auto thread_guard = SCOPE_GUARD { main_thread->Close(); };
+
+        /* Initialize the thread. */
+        R_TRY(KThread::InitializeUserThread(main_thread, reinterpret_cast<KThreadFunction>(GetVoidPointer(this->GetEntryPoint())), 0, stack_top, priority, this->ideal_core_id, this));
+
+        /* Register the thread, and commit our reservation. */
+        KThread::Register(main_thread);
+        thread_reservation.Commit();
+
+        /* Add the thread to our handle table. */
+        ams::svc::Handle thread_handle;
+        R_TRY(this->handle_table.Add(std::addressof(thread_handle), main_thread));
+
+        /* Set the thread arguments. */
+        main_thread->GetContext().SetArguments(0, thread_handle);
+
+        /* Update our state. */
+        this->ChangeState((state == State_Created) ? State_Running : State_RunningAttached);
+        auto state_guard = SCOPE_GUARD { this->ChangeState(state); };
+
+        /* Run our thread. */
+        R_TRY(main_thread->Run());
+
+        /* We succeeded! Cancel our guards. */
+        state_guard.Cancel();
+        thread_guard.Cancel();
+        ht_guard.Cancel();
+        stack_guard.Cancel();
+        mem_reservation.Commit();
+
+        /* Note for debug that we're running a new process. */
+        MESOSPHERE_LOG("KProcess::Run() pid=%ld name=%-12s thread=%ld affinity=0x%lx ideal_core=%d active_core=%d\n", this->process_id, this->name, main_thread->GetId(), main_thread->GetAffinityMask().GetAffinityMask(), main_thread->GetIdealCore(), main_thread->GetActiveCore());
+
+        return ResultSuccess();
+    }
+
+    void KProcess::SetPreemptionState() {
+        MESOSPHERE_UNIMPLEMENTED();
+    }
+
+}
diff --git a/libraries/libmesosphere/source/kern_k_readable_event.cpp b/libraries/libmesosphere/source/kern_k_readable_event.cpp
new file mode 100644
index 000000000..6b06dea6a
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_k_readable_event.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    bool KReadableEvent::IsSignaled() const {
+        MESOSPHERE_ASSERT_THIS();
+        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
+
+        return this->is_signaled;
+    }
+
+    void KReadableEvent::Destroy() {
+        MESOSPHERE_ASSERT_THIS();
+        if (this->parent_event) {
+            this->parent_event->Close();
+        }
+    }
+
+    Result KReadableEvent::Signal() {
+        MESOSPHERE_ASSERT_THIS();
+
+        KScopedSchedulerLock lk;
+
+        if (!this->is_signaled) {
+            this->is_signaled = true;
+            this->NotifyAvailable();
+        }
+
+        return ResultSuccess();
+    }
+
+    Result KReadableEvent::Clear() {
+        MESOSPHERE_ASSERT_THIS();
+
+        this->Reset();
+
+        return ResultSuccess();
+    }
+
+    Result KReadableEvent::Reset() {
+        MESOSPHERE_ASSERT_THIS();
+
+        KScopedSchedulerLock lk;
+
+        R_UNLESS(this->is_signaled, svc::ResultInvalidState());
+
+        this->is_signaled = false;
+        return ResultSuccess();
+    }
+
+}
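
KReadableEvent is edge-triggered on the false-to-true transition: Signal() notifies waiters only when the flag actually flips, and Reset() refuses (svc::ResultInvalidState()) when there is nothing to clear. The same state machine in miniature (NotifyWaiters is a stand-in for NotifyAvailable; the scheduler lock is elided):

    #include <functional>

    class Event {
        private:
            bool signaled = false;
            std::function<void()> notify_waiters;
        public:
            explicit Event(std::function<void()> cb) : notify_waiters(std::move(cb)) { }
            void Signal() {
                if (!signaled) {        /* Edge-triggered: no re-notification. */
                    signaled = true;
                    notify_waiters();
                }
            }
            bool Reset() {              /* false mirrors svc::ResultInvalidState(). */
                if (!signaled) { return false; }
                signaled = false;
                return true;
            }
    };
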
diff --git a/libraries/libmesosphere/source/kern_k_resource_limit.cpp b/libraries/libmesosphere/source/kern_k_resource_limit.cpp
new file mode 100644
index 000000000..7261919be
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_k_resource_limit.cpp
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    namespace {
+
+        constexpr s64 DefaultTimeout = ams::svc::Tick(TimeSpan::FromSeconds(10));
+
+    }
+
+    void KResourceLimit::Initialize() {
+        /* This should be unnecessary for us, because our constructor will clear all fields. */
+        /* The following is analogous to what Nintendo's implementation (no constexpr constructor) would do, though. */
+        /*
+            this->waiter_count = 0;
+            for (size_t i = 0; i < util::size(this->limit_values); i++) {
+                this->limit_values[i]   = 0;
+                this->current_values[i] = 0;
+                this->current_hints[i]  = 0;
+            }
+        */
+    }
+
+    void KResourceLimit::Finalize() {
+        /* ... */
+    }
+
+    s64 KResourceLimit::GetLimitValue(ams::svc::LimitableResource which) const {
+        MESOSPHERE_ASSERT_THIS();
+
+        s64 value;
+        {
+            KScopedLightLock lk(this->lock);
+            value = this->limit_values[which];
+            MESOSPHERE_ASSERT(value >= 0);
+            MESOSPHERE_ASSERT(this->current_values[which] <= this->limit_values[which]);
+            MESOSPHERE_ASSERT(this->current_hints[which] <= this->current_values[which]);
+        }
+
+        return value;
+    }
+
+    s64 KResourceLimit::GetCurrentValue(ams::svc::LimitableResource which) const {
+        MESOSPHERE_ASSERT_THIS();
+
+        s64 value;
+        {
+            KScopedLightLock lk(this->lock);
+            value = this->current_values[which];
+            MESOSPHERE_ASSERT(value >= 0);
+            MESOSPHERE_ASSERT(this->current_values[which] <= this->limit_values[which]);
+            MESOSPHERE_ASSERT(this->current_hints[which] <= this->current_values[which]);
+        }
+
+        return value;
+    }
+
+    s64 KResourceLimit::GetFreeValue(ams::svc::LimitableResource which) const {
+        MESOSPHERE_ASSERT_THIS();
+
+        s64 value;
+        {
+            KScopedLightLock lk(this->lock);
+            MESOSPHERE_ASSERT(this->current_values[which] >= 0);
+            MESOSPHERE_ASSERT(this->current_values[which] <= this->limit_values[which]);
+            MESOSPHERE_ASSERT(this->current_hints[which] <= this->current_values[which]);
+            value = this->limit_values[which] - this->current_values[which];
+        }
+
+        return value;
+    }
+
+    Result KResourceLimit::SetLimitValue(ams::svc::LimitableResource which, s64 value) {
+        MESOSPHERE_ASSERT_THIS();
+
+        KScopedLightLock lk(this->lock);
+        R_UNLESS(this->current_values[which] <= value, svc::ResultInvalidState());
+
+        this->limit_values[which] = value;
+
+        return ResultSuccess();
+    }
+
+    bool KResourceLimit::Reserve(ams::svc::LimitableResource which, s64 value) {
+        return this->Reserve(which, value, KHardwareTimer::GetTick() + DefaultTimeout);
+    }
+
+    bool KResourceLimit::Reserve(ams::svc::LimitableResource which, s64 value, s64 timeout) {
+        MESOSPHERE_ASSERT_THIS();
+        MESOSPHERE_ASSERT(value >= 0);
+
+        KScopedLightLock lk(this->lock);
+
+        MESOSPHERE_ASSERT(this->current_hints[which] <= this->current_values[which]);
+        if (this->current_hints[which] >= this->limit_values[which]) {
+            return false;
+        }
+
+        /* Loop until we reserve or run out of time. */
+        while (true) {
+            MESOSPHERE_ASSERT(this->current_values[which] <= this->limit_values[which]);
+            MESOSPHERE_ASSERT(this->current_hints[which] <= this->current_values[which]);
+
+            /* If we would overflow, don't allow to succeed. */
+            if (this->current_values[which] + value <= this->current_values[which]) {
+                break;
+            }
+
+            if (this->current_values[which] + value <= this->limit_values[which]) {
+                this->current_values[which] += value;
+                this->current_hints[which]  += value;
+                return true;
+            }
+
+            if (this->current_hints[which] + value <= this->limit_values[which] && (timeout < 0 || KHardwareTimer::GetTick() < timeout)) {
+                this->waiter_count++;
+                this->cond_var.Wait(&this->lock, timeout);
+                this->waiter_count--;
+            } else {
+                break;
+            }
+        }
+
+        return false;
+    }
+
+    void KResourceLimit::Release(ams::svc::LimitableResource which, s64 value) {
+        this->Release(which, value, value);
+    }
+
+    void KResourceLimit::Release(ams::svc::LimitableResource which, s64 value, s64 hint) {
+        MESOSPHERE_ASSERT_THIS();
+        MESOSPHERE_ASSERT(value >= 0);
+        MESOSPHERE_ASSERT(hint >= 0);
+
+        KScopedLightLock lk(this->lock);
+        MESOSPHERE_ASSERT(this->current_values[which] <= this->limit_values[which]);
+        MESOSPHERE_ASSERT(this->current_hints[which] <= this->current_values[which]);
+        MESOSPHERE_ASSERT(value <= this->current_values[which]);
+        MESOSPHERE_ASSERT(hint <= this->current_hints[which]);
+
+        this->current_values[which] -= value;
+        this->current_hints[which]  -= hint;
+
+        if (this->waiter_count != 0) {
+            this->cond_var.Broadcast();
+        }
+    }
+
+}
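
KResourceLimit::Reserve blocks on a condition variable rather than failing immediately: as long as the hint suggests capacity may free up before the deadline, the caller sleeps until a Release broadcasts. A userland sketch of the same loop using std::condition_variable (hint accounting dropped for brevity; not the kernel's KLightConditionVariable):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    class Limit {
        private:
            std::mutex lock;
            std::condition_variable cv;
            long current = 0, max = 0;
        public:
            explicit Limit(long max) : max(max) { }

            bool Reserve(long value, std::chrono::steady_clock::time_point deadline) {
                std::unique_lock lk(this->lock);
                while (true) {
                    if (this->current + value <= this->max) {
                        this->current += value;        /* Fits: take it. */
                        return true;
                    }
                    /* Otherwise sleep until a Release, or give up at the deadline. */
                    if (cv.wait_until(lk, deadline) == std::cv_status::timeout) {
                        return false;
                    }
                }
            }

            void Release(long value) {
                { std::lock_guard lk(this->lock); this->current -= value; }
                cv.notify_all();   /* Wake blocked reservers, like Broadcast(). */
            }
    };
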
diff --git a/libraries/libmesosphere/source/kern_k_scheduler.cpp b/libraries/libmesosphere/source/kern_k_scheduler.cpp
new file mode 100644
index 000000000..97dcb1165
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_k_scheduler.cpp
@@ -0,0 +1,395 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    bool KScheduler::s_scheduler_update_needed;
+    KScheduler::LockType KScheduler::s_scheduler_lock;
+    KSchedulerPriorityQueue KScheduler::s_priority_queue;
+
+    namespace {
+
+        class KSchedulerInterruptTask : public KInterruptTask {
+            public:
+                constexpr KSchedulerInterruptTask() : KInterruptTask() { /* ... */ }
+
+                virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override {
+                    return GetDummyInterruptTask();
+                }
+
+                virtual void DoTask() override {
+                    MESOSPHERE_PANIC("KSchedulerInterruptTask::DoTask was called!");
+                }
+        };
+
+        ALWAYS_INLINE void IncrementScheduledCount(KThread *thread) {
+            if (KProcess *parent = thread->GetOwnerProcess(); parent != nullptr) {
+                parent->IncrementScheduledCount();
+            }
+        }
+
+        KSchedulerInterruptTask g_scheduler_interrupt_task;
+
+        ALWAYS_INLINE auto *GetSchedulerInterruptTask() {
+            return std::addressof(g_scheduler_interrupt_task);
+        }
+
+    }
+
+    void KScheduler::Initialize(KThread *idle_thread) {
+        /* Set core ID and idle thread. */
+        this->core_id = GetCurrentCoreId();
+        this->idle_thread = idle_thread;
+        this->state.idle_thread_stack = this->idle_thread->GetStackTop();
+
+        /* Insert the main thread into the priority queue. */
+        {
+            KScopedSchedulerLock lk;
+            GetPriorityQueue().PushBack(GetCurrentThreadPointer());
+            SetSchedulerUpdateNeeded();
+        }
+
+        /* Bind interrupt handler. */
+        Kernel::GetInterruptManager().BindHandler(GetSchedulerInterruptTask(), KInterruptName_Scheduler, this->core_id, KInterruptController::PriorityLevel_Scheduler, false, false);
+    }
+
+    void KScheduler::Activate() {
+        MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1);
+
+        this->state.should_count_idle = KTargetSystem::IsDebugMode();
+        this->is_active = true;
+        RescheduleCurrentCore();
+    }
+
+    void KScheduler::RescheduleOtherCores(u64 cores_needing_scheduling) {
+        if (const u64 core_mask = cores_needing_scheduling & ~(1ul << this->core_id); core_mask != 0) {
+            cpu::DataSynchronizationBarrier();
+            Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_Scheduler, core_mask);
+        }
+    }
+
+    u64 KScheduler::UpdateHighestPriorityThread(KThread *highest_thread) {
+        if (KThread *prev_highest_thread = this->state.highest_priority_thread; AMS_LIKELY(prev_highest_thread != highest_thread)) {
+            if (AMS_LIKELY(prev_highest_thread != nullptr)) {
+                IncrementScheduledCount(prev_highest_thread);
+                prev_highest_thread->SetLastScheduledTick(KHardwareTimer::GetTick());
+            }
+            if (this->state.should_count_idle) {
+                if (AMS_LIKELY(highest_thread != nullptr)) {
+                    /* TODO: Set parent process's idle count if it exists. */
+                } else {
+                    this->state.idle_count++;
+                }
+            }
+
+            this->state.highest_priority_thread = highest_thread;
+            this->state.needs_scheduling = true;
+            return (1ul << this->core_id);
+        } else {
+            return 0;
+        }
+    }
+
+    u64 KScheduler::UpdateHighestPriorityThreadsImpl() {
+        MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
+
+        /* Clear that we need to update. */
+        ClearSchedulerUpdateNeeded();
+
+        u64 cores_needing_scheduling = 0, idle_cores = 0;
+        KThread *top_threads[cpu::NumCores];
+        auto &priority_queue = GetPriorityQueue();
+
+        /* We want to go over all cores, finding the highest priority thread and determining if scheduling is needed for that core. */
+        for (size_t core_id = 0; core_id < cpu::NumCores; core_id++) {
+            KThread *top_thread = priority_queue.GetScheduledFront(core_id);
+            if (top_thread != nullptr) {
+                /* If the thread has no waiters, we need to check if the process has a thread pinned by PreemptionState. */
+                if (top_thread->GetNumKernelWaiters() == 0) {
+                    if (KProcess *parent = top_thread->GetOwnerProcess(); parent != nullptr) {
+                        if (KThread *suggested = parent->GetPreemptionStatePinnedThread(core_id); suggested != nullptr && suggested != top_thread) {
+                            /* We prefer our parent's pinned thread if possible. However, we also don't want to schedule un-runnable threads. */
+                            if (suggested->GetRawState() == KThread::ThreadState_Runnable) {
+                                top_thread = suggested;
+                            } else {
+                                top_thread = nullptr;
+                            }
+                        }
+                    }
+                }
+            } else {
+                idle_cores |= (1ul << core_id);
+            }
+
+            top_threads[core_id] = top_thread;
+            cores_needing_scheduling |= Kernel::GetScheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
+        }
+
+        /* Idle cores are bad. We're going to try to migrate threads to each idle core in turn. */
+        while (idle_cores != 0) {
+            s32 core_id = __builtin_ctzll(idle_cores);
+
+            if (KThread *suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
+                s32 migration_candidates[cpu::NumCores];
+                size_t num_candidates = 0;
+
+                /* While we have a suggested thread, try to migrate it! */
+                while (suggested != nullptr) {
+                    /* Check if the suggested thread is the top thread on its core. */
+                    const s32 suggested_core = suggested->GetActiveCore();
+                    if (KThread *top_thread = (suggested_core >= 0) ? top_threads[suggested_core] : nullptr; top_thread != suggested) {
+                        /* Make sure we're not dealing with threads too high priority for migration. */
+                        if (top_thread != nullptr && top_thread->GetPriority() < HighestCoreMigrationAllowedPriority) {
+                            break;
+                        }
+
+                        /* The suggested thread isn't bound to its core, so we can migrate it! */
+                        suggested->SetActiveCore(core_id);
+                        priority_queue.ChangeCore(suggested_core, suggested);
+                        top_threads[core_id] = suggested;
+                        cores_needing_scheduling |= Kernel::GetScheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
+                        break;
+                    }
+
+                    /* Note this core as a candidate for migration. */
+                    MESOSPHERE_ASSERT(num_candidates < cpu::NumCores);
+                    migration_candidates[num_candidates++] = suggested_core;
+                    suggested = priority_queue.GetSuggestedNext(core_id, suggested);
+                }
+
+                /* If suggested is nullptr, we failed to migrate a specific thread. So let's try all our candidate cores' top threads. */
+                if (suggested == nullptr) {
+                    for (size_t i = 0; i < num_candidates; i++) {
+                        /* Check if there's some other thread that can run on the candidate core. */
+                        const s32 candidate_core = migration_candidates[i];
+                        suggested = top_threads[candidate_core];
+                        if (KThread *next_on_candidate_core = priority_queue.GetScheduledNext(candidate_core, suggested); next_on_candidate_core != nullptr) {
+                            /* The candidate core can run some other thread! We'll migrate its current top thread to us. */
+                            top_threads[candidate_core] = next_on_candidate_core;
+                            cores_needing_scheduling |= Kernel::GetScheduler(candidate_core).UpdateHighestPriorityThread(top_threads[candidate_core]);
+
+                            /* Perform the migration. */
+                            suggested->SetActiveCore(core_id);
+                            priority_queue.ChangeCore(candidate_core, suggested);
+                            top_threads[core_id] = suggested;
+                            cores_needing_scheduling |= Kernel::GetScheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
+                            break;
+                        }
+                    }
+                }
+            }
+
+            idle_cores &= ~(1ul << core_id);
+        }
+
+        return cores_needing_scheduling;
+    }
+
+    void KScheduler::SetInterruptTaskThreadRunnable() {
+        MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1);
+
+        KThread *task_thread = Kernel::GetInterruptTaskManager().GetThread();
+        {
+            KScopedSchedulerLock sl;
+            if (AMS_LIKELY(task_thread->GetState() == KThread::ThreadState_Waiting)) {
+                task_thread->SetState(KThread::ThreadState_Runnable);
+            }
+        }
+    }
+
+    void KScheduler::SwitchThread(KThread *next_thread) {
+        KProcess *cur_process = GetCurrentProcessPointer();
+        KThread  *cur_thread  = GetCurrentThreadPointer();
+
+        /* We never want to schedule a null thread, so use the idle thread if we don't have a next. */
+        if (next_thread == nullptr) {
+            next_thread = this->idle_thread;
+        }
+
+        /* If we're not actually switching thread, there's nothing to do. */
+        if (next_thread == cur_thread) {
+            return;
+        }
+
+        /* Next thread is now known not to be nullptr, and must not be dispatchable. */
+        MESOSPHERE_ASSERT(next_thread->GetDisableDispatchCount() == 1);
+
+        /* Update the CPU time tracking variables. */
+        const s64 prev_tick = this->last_context_switch_time;
+        const s64 cur_tick  = KHardwareTimer::GetTick();
+        const s64 tick_diff = cur_tick - prev_tick;
+        cur_thread->AddCpuTime(tick_diff);
+        if (cur_process != nullptr) {
+            cur_process->AddCpuTime(tick_diff);
+        }
+        this->last_context_switch_time = cur_tick;
+
+        /* Update our previous thread. */
+        if (cur_process != nullptr) {
+            /* NOTE: Combining this into AMS_LIKELY(!... && ...) triggers an internal compiler error: Segmentation fault in GCC 9.2.0. */
+            if (AMS_LIKELY(!cur_thread->IsTerminationRequested()) && AMS_LIKELY(cur_thread->GetActiveCore() == this->core_id)) {
+                this->prev_thread = cur_thread;
+            } else {
+                this->prev_thread = nullptr;
+            }
+        } else if (cur_thread == this->idle_thread) {
+            this->prev_thread = nullptr;
+        }
+
+        /* Switch the current process, if we're switching processes. */
+        if (KProcess *next_process = next_thread->GetOwnerProcess(); next_process != cur_process) {
+            KProcess::Switch(cur_process, next_process);
+        }
+
+        /* Set the new thread. */
+        SetCurrentThread(next_thread);
+
+        /* Set the new Thread Local region. */
+        cpu::SwitchThreadLocalRegion(GetInteger(next_thread->GetThreadLocalRegionAddress()));
+    }
+
+    void KScheduler::OnThreadStateChanged(KThread *thread, KThread::ThreadState old_state) {
+        MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
+
+        /* Check if the state has changed, because if it hasn't there's nothing to do. */
+        const KThread::ThreadState cur_state = thread->GetRawState();
+        if (cur_state == old_state) {
+            return;
+        }
+
+        /* Update the priority queues. */
+        if (old_state == KThread::ThreadState_Runnable) {
+            /* If we were previously runnable, then we're not runnable now, and we should remove. */
+            GetPriorityQueue().Remove(thread);
+            IncrementScheduledCount(thread);
+            SetSchedulerUpdateNeeded();
+        } else if (cur_state == KThread::ThreadState_Runnable) {
+            /* If we're now runnable, then we weren't previously, and we should add. */
+            GetPriorityQueue().PushBack(thread);
+            IncrementScheduledCount(thread);
+            SetSchedulerUpdateNeeded();
+        }
+    }
+
+    void KScheduler::OnThreadPriorityChanged(KThread *thread, s32 old_priority) {
+        MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
+
+        /* If the thread is runnable, we want to change its priority in the queue. */
+        if (thread->GetRawState() == KThread::ThreadState_Runnable) {
+            GetPriorityQueue().ChangePriority(old_priority, thread == GetCurrentThreadPointer(), thread);
+            IncrementScheduledCount(thread);
+            SetSchedulerUpdateNeeded();
+        }
+    }
+
+    void KScheduler::OnThreadAffinityMaskChanged(KThread *thread, const KAffinityMask &old_affinity, s32 old_core) {
+        MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
+
+        /* If the thread is runnable, we want to change its affinity in the queue. */
+        if (thread->GetRawState() == KThread::ThreadState_Runnable) {
+            GetPriorityQueue().ChangeAffinityMask(old_core, old_affinity, thread);
+            IncrementScheduledCount(thread);
+            SetSchedulerUpdateNeeded();
+        }
+    }
+
+    void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
+        MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
+
+        /* Get a reference to the priority queue. */
+        auto &priority_queue = GetPriorityQueue();
+
+        /* Rotate the front of the queue to the end. */
+        KThread *top_thread  = priority_queue.GetScheduledFront(core_id, priority);
+        KThread *next_thread = nullptr;
+        if (top_thread != nullptr) {
+            next_thread = priority_queue.MoveToScheduledBack(top_thread);
+            if (next_thread != top_thread) {
+                IncrementScheduledCount(top_thread);
+                IncrementScheduledCount(next_thread);
+            }
+        }
+
+        /* While we have a suggested thread, try to migrate it! */
+        {
+            KThread *suggested = priority_queue.GetSuggestedFront(core_id, priority);
+            while (suggested != nullptr) {
+                /* Check if the suggested thread is the top thread on its core. */
+                const s32 suggested_core = suggested->GetActiveCore();
+                if (KThread *top_on_suggested_core = (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) : nullptr; top_on_suggested_core != suggested) {
+                    /* If the next thread is a new thread that has been waiting longer than our suggestion, we prefer it to our suggestion. */
+                    if (top_thread != next_thread && next_thread != nullptr && next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) {
+                        suggested = nullptr;
+                        break;
+                    }
+
+                    /* If we're allowed to do a migration, do one. */
+                    /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion to the front of the queue. */
+                    if (top_on_suggested_core == nullptr || top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
+                        suggested->SetActiveCore(core_id);
+                        priority_queue.ChangeCore(suggested_core, suggested, true);
+                        IncrementScheduledCount(suggested);
+                        break;
+                    }
+                }
+
+                /* Get the next suggestion. */
+                suggested = priority_queue.GetSamePriorityNext(core_id, suggested);
+            }
+        }
+
+        /* Now that we might have migrated a thread with the same priority, check if we can do better. */
+        {
+            KThread *best_thread = priority_queue.GetScheduledFront(core_id);
+            if (best_thread == GetCurrentThreadPointer()) {
+                best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
+            }
+
+            /* If the best thread we can choose has a priority the same or worse than ours, try to migrate a higher priority thread. */
+            if (best_thread != nullptr && best_thread->GetPriority() >= priority) {
+                KThread *suggested = priority_queue.GetSuggestedFront(core_id);
+                while (suggested != nullptr) {
+                    /* If the suggestion's priority is the same as ours, don't bother. */
+                    if (suggested->GetPriority() >= best_thread->GetPriority()) {
+                        break;
+                    }
+
+                    /* Check if the suggested thread is the top thread on its core. */
+                    const s32 suggested_core = suggested->GetActiveCore();
+                    if (KThread *top_on_suggested_core = (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) : nullptr; top_on_suggested_core != suggested) {
+                        /* If we're allowed to do a migration, do one. */
+                        /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion to the front of the queue. */
+                        if (top_on_suggested_core == nullptr || top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
+                            suggested->SetActiveCore(core_id);
+                            priority_queue.ChangeCore(suggested_core, suggested, true);
+                            IncrementScheduledCount(suggested);
+                            break;
+                        }
+                    }
+
+                    /* Get the next suggestion. */
+                    suggested = priority_queue.GetSuggestedNext(core_id, suggested);
+                }
+            }
+        }
+
+        /* After a rotation, we need a scheduler update. */
+        SetSchedulerUpdateNeeded();
+    }
+
+}
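
The first step of RotateScheduledQueue is plain round-robin: the thread at the front of its priority bucket is moved to the back so its equal-priority peers get the core. Reduced to a deque, the rotation looks like this (migration and the later "can we do better" pass are omitted):

    #include <deque>

    // Old front goes to the back of its priority bucket; the new front is
    // the next thread to run, as in priority_queue.MoveToScheduledBack above.
    template<typename T>
    T *RotateFront(std::deque<T *> &bucket) {
        if (bucket.empty()) { return nullptr; }
        T *top = bucket.front();
        bucket.pop_front();
        bucket.push_back(top);         /* Old front is now last in line. */
        return bucket.front();         /* New front is the next to run.  */
    }
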
diff --git a/libraries/libmesosphere/source/kern_k_synchronization.cpp b/libraries/libmesosphere/source/kern_k_synchronization.cpp
new file mode 100644
index 000000000..46817e035
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_k_synchronization.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    Result KSynchronization::Wait(s32 *out_index, KSynchronizationObject **objects, const s32 num_objects, s64 timeout) {
+        MESOSPHERE_ASSERT_THIS();
+
+        MESOSPHERE_UNIMPLEMENTED();
+    }
+
+    void KSynchronization::OnAvailable(KSynchronizationObject *object) {
+        MESOSPHERE_ASSERT_THIS();
+
+        KScopedSchedulerLock sl;
+
+        /* If we're not signaled, we've nothing to notify. */
+        if (!object->IsSignaled()) {
+            return;
+        }
+
+        /* Iterate over each thread. */
+        for (auto &thread : *object) {
+            if (thread.GetState() == KThread::ThreadState_Waiting) {
+                thread.SetSyncedObject(object, ResultSuccess());
+                thread.SetState(KThread::ThreadState_Runnable);
+            }
+        }
+    }
+
+    void KSynchronization::OnAbort(KSynchronizationObject *object, Result abort_reason) {
+        MESOSPHERE_ASSERT_THIS();
+
+        KScopedSchedulerLock sl;
+
+        /* Iterate over each thread. */
+        for (auto &thread : *object) {
+            if (thread.GetState() == KThread::ThreadState_Waiting) {
+                thread.SetSyncedObject(object, abort_reason);
+                thread.SetState(KThread::ThreadState_Runnable);
+            }
+        }
+    }
+
+}
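OnAvailable and OnAbort implement wake-all semantics: every thread currently in ThreadState_Waiting on the object is handed the object (plus ResultSuccess() or the abort reason) and made runnable; the still-unimplemented Wait is what will filter by index and timeout. A sketch of the producer side, with hypothetical names:

    /* Sketch: a derived signalable object handing off to KSynchronization.
       HypotheticalEvent and its signaled member are illustrative only. */
    void HypotheticalEvent::Signal() {
        KScopedSchedulerLock sl;
        this->signaled = true;
        this->NotifyAvailable(); /* -> KSynchronization::OnAvailable() wakes all waiters */
    }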
diff --git a/libraries/libmesosphere/source/kern_k_synchronization_object.cpp b/libraries/libmesosphere/source/kern_k_synchronization_object.cpp
new file mode 100644
index 000000000..9b093f1a8
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_k_synchronization_object.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    void KSynchronizationObject::NotifyAvailable() {
+        MESOSPHERE_ASSERT_THIS();
+
+        Kernel::GetSynchronization().OnAvailable(this);
+    }
+
+    void KSynchronizationObject::NotifyAbort(Result abort_reason) {
+        MESOSPHERE_ASSERT_THIS();
+
+        Kernel::GetSynchronization().OnAbort(this, abort_reason);
+    }
+
+    void KSynchronizationObject::Finalize() {
+        MESOSPHERE_ASSERT_THIS();
+
+        this->OnFinalizeSynchronizationObject();
+        KAutoObject::Finalize();
+    }
+
+    void KSynchronizationObject::DebugWaiters() {
+        MESOSPHERE_ASSERT_THIS();
+
+        MESOSPHERE_TODO("Do useful debug operation here.");
+    }
+
+    KSynchronizationObject::iterator KSynchronizationObject::AddWaiterThread(KThread *thread) {
+        MESOSPHERE_ASSERT_THIS();
+
+        return this->thread_list.insert(this->thread_list.end(), *thread);
+    }
+
+    KSynchronizationObject::iterator KSynchronizationObject::RemoveWaiterThread(KSynchronizationObject::iterator it) {
+        MESOSPHERE_ASSERT_THIS();
+
+        return this->thread_list.erase(it);
+    }
+
+    KSynchronizationObject::iterator KSynchronizationObject::begin() {
+        MESOSPHERE_ASSERT_THIS();
+
+        return this->thread_list.begin();
+    }
+
+    KSynchronizationObject::iterator KSynchronizationObject::end() {
+        MESOSPHERE_ASSERT_THIS();
+
+        return this->thread_list.end();
+    }
+
+}
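AddWaiterThread/RemoveWaiterThread deliberately traffic in iterators: insert returns the position that must later be handed back to erase, so a waiter can unlink itself in O(1) without searching the list. A usage sketch (not the eventual Wait implementation):

    /* Sketch of the intended iterator discipline. */
    auto it = object->AddWaiterThread(GetCurrentThreadPointer());
    /* ... sleep until the object is signaled or the wait is aborted ... */
    it = object->RemoveWaiterThread(it); /* erase() hands back the following iterator */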
diff --git a/libraries/libmesosphere/source/kern_k_thread.cpp b/libraries/libmesosphere/source/kern_k_thread.cpp
new file mode 100644
index 000000000..2fb52b283
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_k_thread.cpp
@@ -0,0 +1,610 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    namespace {
+
+        constexpr bool IsKernelAddressKey(KProcessAddress key) {
+            const uintptr_t key_uptr = GetInteger(key);
+            return KernelVirtualAddressSpaceBase <= key_uptr && key_uptr <= KernelVirtualAddressSpaceLast;
+        }
+
+        void CleanupKernelStack(uintptr_t stack_top) {
+            const uintptr_t stack_bottom = stack_top - PageSize;
+
+            KPhysicalAddress stack_paddr = Null<KPhysicalAddress>;
+            MESOSPHERE_ABORT_UNLESS(Kernel::GetKernelPageTable().GetPhysicalAddress(&stack_paddr, stack_bottom));
+
+            MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().UnmapPages(stack_bottom, 1, KMemoryState_Kernel));
+
+            /* Free the stack page. */
+            KPageBuffer::Free(KPageBuffer::FromPhysicalAddress(stack_paddr));
+        }
+
+    }
+
+    Result KThread::Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type) {
+        /* Assert parameters are valid. */
+        MESOSPHERE_ASSERT_THIS();
+        MESOSPHERE_ASSERT(kern_stack_top != nullptr);
+        MESOSPHERE_ASSERT((type == ThreadType_Main) || (ams::svc::HighestThreadPriority <= prio && prio <= ams::svc::LowestThreadPriority));
+        MESOSPHERE_ASSERT((owner != nullptr) || (type != ThreadType_User));
+        MESOSPHERE_ASSERT(0 <= core && core < static_cast<s32>(cpu::NumCores));
+
+        /* First, clear the TLS address. */
+        this->tls_address = Null<KProcessAddress>;
+
+        const uintptr_t kern_stack_top_address = reinterpret_cast<uintptr_t>(kern_stack_top);
+
+        /* Next, assert things based on the type. */
+        switch (type) {
+            case ThreadType_Main:
+                {
+                    MESOSPHERE_ASSERT(arg == 0);
+                }
+                [[fallthrough]];
+            case ThreadType_HighPriority:
+                {
+                    MESOSPHERE_ASSERT(core == GetCurrentCoreId());
+                }
+                [[fallthrough]];
+            case ThreadType_Kernel:
+                {
+                    MESOSPHERE_ASSERT(user_stack_top == 0);
+                    MESOSPHERE_ASSERT(util::IsAligned(kern_stack_top_address, PageSize));
+                }
+                [[fallthrough]];
+            case ThreadType_User:
+                {
+                    MESOSPHERE_ASSERT(((owner == nullptr) || (owner->GetCoreMask()     | (1ul << core)) == owner->GetCoreMask()));
+                    MESOSPHERE_ASSERT(((owner == nullptr) || (owner->GetPriorityMask() | (1ul << prio)) == owner->GetPriorityMask()));
+                }
+                break;
+            default:
+                MESOSPHERE_PANIC("KThread::Initialize: Unknown ThreadType %u", static_cast<u32>(type));
+                break;
+        }
+
+        /* Set the ideal core ID and affinity mask. */
+        this->ideal_core_id = core;
+        this->affinity_mask.SetAffinity(core, true);
+
+        /* Set the thread state. */
+        this->thread_state = (type == ThreadType_Main) ? ThreadState_Runnable : ThreadState_Initialized;
+
+        /* Set TLS address and TLS heap address. */
+        /* NOTE: Nintendo wrote TLS address above already, but official code really does write tls address twice. */
+        this->tls_address = 0;
+        this->tls_heap_address = 0;
+
+        /* Set parent and condvar tree. */
+        this->parent = nullptr;
+        this->cond_var = nullptr;
+
+        /* Set sync booleans. */
+        this->signaled = false;
+        this->ipc_cancelled = false;
+        this->termination_requested = false;
+        this->wait_cancelled = false;
+        this->cancellable = false;
+
+        /* Set core ID and wait result. */
+        this->core_id = this->ideal_core_id;
+        this->wait_result = svc::ResultNoSynchronizationObject();
+
+        /* Set the stack top. */
+        this->kernel_stack_top = kern_stack_top;
+
+        /* Set priorities. */
+        this->priority = prio;
+        this->base_priority = prio;
+
+        /* Set sync object and waiting lock to null. */
+        this->synced_object = nullptr;
+        this->waiting_lock = nullptr;
+
+        /* Initialize sleeping queue. */
+        this->sleeping_queue_entry.Initialize();
+        this->sleeping_queue = nullptr;
+
+        /* Set suspend flags. */
+        this->suspend_request_flags = 0;
+        this->suspend_allowed_flags = ThreadState_SuspendFlagMask;
+
+        /* We're neither debug attached, nor are we nesting our priority inheritance. */
+        this->debug_attached = false;
+        this->priority_inheritance_count = 0;
+
+        /* We haven't been scheduled, and we have done no light IPC. */
+        this->schedule_count = -1;
+        this->last_scheduled_tick = 0;
+        this->light_ipc_data = nullptr;
+
+        /* We're not waiting for a lock, and we haven't disabled migration. */
+        this->lock_owner = nullptr;
+        this->num_core_migration_disables = 0;
+
+        /* We have no waiters, but we do have an entrypoint. */
+        this->num_kernel_waiters = 0;
+        this->entrypoint = reinterpret_cast<uintptr_t>(func);
+
+        /* We don't need a release (probably), and we've spent no time on the cpu. */
+        this->resource_limit_release_hint = 0;
+        this->cpu_time = 0;
+
+        /* Clear our stack parameters. */
+        std::memset(static_cast<void *>(std::addressof(this->GetStackParameters())), 0, sizeof(StackParameters));
+
+        /* Setup the TLS, if needed. */
+        if (type == ThreadType_User) {
+            R_TRY(owner->CreateThreadLocalRegion(std::addressof(this->tls_address)));
+            this->tls_heap_address = owner->GetThreadLocalRegionPointer(this->tls_address);
+            std::memset(this->tls_heap_address, 0, ams::svc::ThreadLocalRegionSize);
+        }
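The [[fallthrough]] chain makes each thread type inherit every check below its own case, so the strictest type (Main) passes all of them. Summarized as a comment sketch:

    /* Checks accumulated by the fallthrough cascade:
         Main         : arg == 0, core == current core, page-aligned kernel stack, owner masks
         HighPriority : core == current core, page-aligned kernel stack, owner masks
         Kernel       : no user stack, page-aligned kernel stack, owner masks
         User         : owner's core/priority masks must permit (core, prio) */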
+        /* Set parent, if relevant. */
+        if (owner != nullptr) {
+            this->parent = owner;
+            this->parent->Open();
+            this->parent->IncrementThreadCount();
+        }
+
+        /* Initialize thread context. */
+        constexpr bool IsDefault64Bit = sizeof(uintptr_t) == sizeof(u64);
+        const bool is_64_bit = this->parent ? this->parent->Is64Bit() : IsDefault64Bit;
+        const bool is_user = (type == ThreadType_User);
+        const bool is_main = (type == ThreadType_Main);
+        this->thread_context.Initialize(this->entrypoint, reinterpret_cast<uintptr_t>(this->GetStackTop()), GetInteger(user_stack_top), arg, is_user, is_64_bit, is_main);
+
+        /* Setup the stack parameters. */
+        StackParameters &sp = this->GetStackParameters();
+        if (this->parent != nullptr) {
+            this->parent->CopySvcPermissionsTo(sp);
+        }
+        sp.context = std::addressof(this->thread_context);
+        sp.disable_count = 1;
+        this->SetInExceptionHandler();
+
+        /* Set thread ID. */
+        this->thread_id = s_next_thread_id++;
+
+        /* We initialized! */
+        this->initialized = true;
+
+        /* Register ourselves with our parent process. */
+        if (this->parent != nullptr) {
+            this->parent->RegisterThread(this);
+            if (this->parent->IsSuspended()) {
+                this->RequestSuspend(SuspendType_Process);
+            }
+        }
+
+        return ResultSuccess();
+    }
+
+    Result KThread::InitializeThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type) {
+        /* Get stack region for the thread. */
+        const auto &stack_region = KMemoryLayout::GetKernelStackRegion();
+
+        /* Allocate a page to use as the thread's kernel stack. */
+        KPageBuffer *page = KPageBuffer::Allocate();
+        R_UNLESS(page != nullptr, svc::ResultOutOfResource());
+
+        /* Map the stack page. */
+        KProcessAddress stack_top = Null<KProcessAddress>;
+        {
+            KProcessAddress stack_bottom = Null<KProcessAddress>;
+            auto page_guard = SCOPE_GUARD { KPageBuffer::Free(page); };
+            R_TRY(Kernel::GetKernelPageTable().MapPages(std::addressof(stack_bottom), 1, PageSize, page->GetPhysicalAddress(), stack_region.GetAddress(),
+                                                        stack_region.GetSize() / PageSize, KMemoryState_Kernel, KMemoryPermission_KernelReadWrite));
+            page_guard.Cancel();
+
+            /* Calculate top of the stack. */
+            stack_top = stack_bottom + PageSize;
+        }
+
+        /* Initialize the thread. */
+        auto map_guard = SCOPE_GUARD { CleanupKernelStack(GetInteger(stack_top)); };
+        R_TRY(thread->Initialize(func, arg, GetVoidPointer(stack_top), user_stack_top, prio, core, owner, type));
+        map_guard.Cancel();
+
+        return ResultSuccess();
+    }
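InitializeThread leans on the cancelable scope-guard idiom twice: each guard owns the cleanup for exactly one resource, and Cancel() transfers that ownership once the next step succeeds. The same pattern in isolation (UseBuffer is a hypothetical stand-in for the next fallible step):

    /* Sketch: the guard frees on any early R_TRY return; Cancel() on success. */
    KPageBuffer *buf = KPageBuffer::Allocate();
    R_UNLESS(buf != nullptr, svc::ResultOutOfResource());
    auto guard = SCOPE_GUARD { KPageBuffer::Free(buf); };
    R_TRY(UseBuffer(buf)); /* hypothetical fallible consumer */
    guard.Cancel();        /* success: ownership has moved elsewhere */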
+    void KThread::PostDestroy(uintptr_t arg) {
+        KProcess *owner = reinterpret_cast<KProcess *>(arg & ~1ul);
+        const bool resource_limit_release_hint = (arg & 1);
+        const s64 hint_value = (resource_limit_release_hint ? 0 : 1);
+        if (owner != nullptr) {
+            owner->ReleaseResource(ams::svc::LimitableResource_ThreadCountMax, 1, hint_value);
+            owner->Close();
+        } else {
+            Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_ThreadCountMax, 1, hint_value);
+        }
+    }
+
+    void KThread::ResumeThreadsSuspendedForInit() {
+        KThread::ListAccessor list_accessor;
+        {
+            KScopedSchedulerLock sl;
+
+            for (auto &thread : list_accessor) {
+                static_cast<KThread &>(thread).Resume(SuspendType_Init);
+            }
+        }
+    }
+
+    void KThread::Finalize() {
+        MESOSPHERE_UNIMPLEMENTED();
+    }
+
+    bool KThread::IsSignaled() const {
+        return this->signaled;
+    }
+
+    void KThread::Wakeup() {
+        MESOSPHERE_ASSERT_THIS();
+        KScopedSchedulerLock sl;
+
+        if (this->GetState() == ThreadState_Waiting) {
+            if (this->sleeping_queue != nullptr) {
+                this->sleeping_queue->WakeupThread(this);
+            } else {
+                this->SetState(ThreadState_Runnable);
+            }
+        }
+    }
+
+    void KThread::OnTimer() {
+        MESOSPHERE_ASSERT_THIS();
+        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
+
+        this->Wakeup();
+    }
+
+    void KThread::DoWorkerTask() {
+        MESOSPHERE_UNIMPLEMENTED();
+    }
+
+    void KThread::DisableCoreMigration() {
+        MESOSPHERE_ASSERT_THIS();
+        MESOSPHERE_ASSERT(this == GetCurrentThreadPointer());
+
+        KScopedSchedulerLock sl;
+        MESOSPHERE_ASSERT(this->num_core_migration_disables >= 0);
+        if ((this->num_core_migration_disables++) == 0) {
+            /* Save our ideal state to restore when we can migrate again. */
+            this->original_ideal_core_id = this->ideal_core_id;
+            this->original_affinity_mask = this->affinity_mask;
+
+            /* Bind ourselves to this core. */
+            const s32 active_core = this->GetActiveCore();
+            this->ideal_core_id = active_core;
+            this->affinity_mask.SetAffinityMask(1ul << active_core);
+
+            if (this->affinity_mask.GetAffinityMask() != this->original_affinity_mask.GetAffinityMask()) {
+                KScheduler::OnThreadAffinityMaskChanged(this, this->original_affinity_mask, active_core);
+            }
+        }
+    }
+
+    void KThread::EnableCoreMigration() {
+        MESOSPHERE_ASSERT_THIS();
+        MESOSPHERE_ASSERT(this == GetCurrentThreadPointer());
+
+        KScopedSchedulerLock sl;
+        MESOSPHERE_ASSERT(this->num_core_migration_disables > 0);
+        if ((--this->num_core_migration_disables) == 0) {
+            const KAffinityMask old_mask = this->affinity_mask;
+
+            /* Restore our ideals. */
+            this->ideal_core_id = this->original_ideal_core_id;
+            this->affinity_mask = this->original_affinity_mask;
+
+            if (this->affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
+                const s32 active_core = this->GetActiveCore();
+
+                if (!this->affinity_mask.GetAffinity(active_core)) {
+                    if (this->ideal_core_id >= 0) {
+                        this->SetActiveCore(this->ideal_core_id);
+                    } else {
+                        this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(this->affinity_mask.GetAffinityMask()));
+                    }
+                }
+                KScheduler::OnThreadAffinityMaskChanged(this, old_mask, active_core);
+            }
+        }
+    }
+
+    Result KThread::SetPriorityToIdle() {
+        MESOSPHERE_ASSERT_THIS();
+
+        KScopedSchedulerLock sl;
+
+        /* Change both our priorities to the idle thread priority. */
+        const s32 old_priority = this->priority;
+        this->priority = IdleThreadPriority;
+        this->base_priority = IdleThreadPriority;
+        KScheduler::OnThreadPriorityChanged(this, old_priority);
+
+        return ResultSuccess();
+    }
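Note that the restore in EnableCoreMigration reads above as "this->affinity_mask = this->original_affinity_mask;"; the section as received had the assignment reversed, which would make the comparison against old_mask always false and the restoration a no-op. The pair nests via num_core_migration_disables, and only the outermost transitions touch the saved state:

    /* Nesting sketch (runs on the current thread):                                  */
    GetCurrentThread().DisableCoreMigration(); /* 0 -> 1: saves ideals, pins to core */
    GetCurrentThread().DisableCoreMigration(); /* 1 -> 2: counter only               */
    GetCurrentThread().EnableCoreMigration();  /* 2 -> 1: still pinned               */
    GetCurrentThread().EnableCoreMigration();  /* 1 -> 0: restores mask, may migrate */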
+    void KThread::RequestSuspend(SuspendType type) {
+        MESOSPHERE_ASSERT_THIS();
+
+        KScopedSchedulerLock lk;
+
+        /* Note the request in our flags. */
+        this->suspend_request_flags |= (1u << (ThreadState_SuspendShift + type));
+
+        /* Try to perform the suspend. */
+        this->TrySuspend();
+    }
+
+    void KThread::Resume(SuspendType type) {
+        MESOSPHERE_ASSERT_THIS();
+
+        KScopedSchedulerLock sl;
+
+        /* Clear the request in our flags. */
+        this->suspend_request_flags &= ~(1u << (ThreadState_SuspendShift + type));
+
+        /* Update our state. */
+        const ThreadState old_state = this->thread_state;
+        this->thread_state = static_cast<ThreadState>(this->GetSuspendFlags() | (old_state & ThreadState_Mask));
+        if (this->thread_state != old_state) {
+            KScheduler::OnThreadStateChanged(this, old_state);
+        }
+    }
+
+    void KThread::TrySuspend() {
+        MESOSPHERE_ASSERT_THIS();
+        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
+        MESOSPHERE_ASSERT(this->IsSuspended());
+
+        /* Ensure that we have no waiters. */
+        if (this->GetNumKernelWaiters() > 0) {
+            return;
+        }
+        MESOSPHERE_ABORT_UNLESS(this->GetNumKernelWaiters() == 0);
+
+        /* Perform the suspend. */
+        this->Suspend();
+    }
+
+    void KThread::Suspend() {
+        MESOSPHERE_ASSERT_THIS();
+        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
+        MESOSPHERE_ASSERT(this->IsSuspended());
+
+        /* Set our suspend flags in state. */
+        const auto old_state = this->thread_state;
+        this->thread_state = static_cast<ThreadState>(this->GetSuspendFlags() | (old_state & ThreadState_Mask));
+
+        /* Note the state change in scheduler. */
+        KScheduler::OnThreadStateChanged(this, old_state);
+    }
+
+    void KThread::Continue() {
+        MESOSPHERE_ASSERT_THIS();
+        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
+
+        /* Clear our suspend flags in state. */
+        const auto old_state = this->thread_state;
+        this->thread_state = static_cast<ThreadState>(old_state & ThreadState_Mask);
+
+        /* Note the state change in scheduler. */
+        KScheduler::OnThreadStateChanged(this, old_state);
+    }
+
+    void KThread::AddWaiterImpl(KThread *thread) {
+        MESOSPHERE_ASSERT_THIS();
+        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
+
+        /* Find the right spot to insert the waiter. */
+        auto it = this->waiter_list.begin();
+        while (it != this->waiter_list.end()) {
+            if (it->GetPriority() > thread->GetPriority()) {
+                break;
+            }
+            it++;
+        }
+
+        /* Keep track of how many kernel waiters we have. */
+        if (IsKernelAddressKey(thread->GetAddressKey())) {
+            MESOSPHERE_ABORT_UNLESS((this->num_kernel_waiters++) >= 0);
+        }
+
+        /* Insert the waiter. */
+        this->waiter_list.insert(it, *thread);
+        thread->SetLockOwner(this);
+    }
+
+    void KThread::RemoveWaiterImpl(KThread *thread) {
+        MESOSPHERE_ASSERT_THIS();
+        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
+
+        /* Keep track of how many kernel waiters we have. */
+        if (IsKernelAddressKey(thread->GetAddressKey())) {
+            MESOSPHERE_ABORT_UNLESS((this->num_kernel_waiters--) > 0);
+        }
+
+        /* Remove the waiter. */
+        this->waiter_list.erase(this->waiter_list.iterator_to(*thread));
+        thread->SetLockOwner(nullptr);
+    }
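Suspension is encoded directly in thread_state: the low ThreadState_Mask bits hold the base state, and each SuspendType contributes one bit above ThreadState_SuspendShift. A worked example of Resume() recomputing the value, following the bit layout used in the code above:

    /* stored = (1u << (ThreadState_SuspendShift + SuspendType_Process))
              | (ThreadState_Runnable & ThreadState_Mask);
       Resume(SuspendType_Process) clears that request bit, then recomputes:
       new state = GetSuspendFlags() | (old & ThreadState_Mask) == ThreadState_Runnable,
       so the thread becomes runnable again and the scheduler is notified. */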
+    void KThread::RestorePriority(KThread *thread) {
+        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
+
+        while (true) {
+            /* We want to inherit priority where possible. */
+            s32 new_priority = thread->GetBasePriority();
+            if (thread->HasWaiters()) {
+                new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority());
+            }
+
+            /* If the priority we would inherit is not different from ours, don't do anything. */
+            if (new_priority == thread->GetPriority()) {
+                return;
+            }
+
+            /* Ensure we don't violate condition variable red black tree invariants. */
+            if (auto *cond_var = thread->GetConditionVariable(); cond_var != nullptr) {
+                cond_var->BeforeUpdatePriority(thread);
+            }
+
+            /* Change the priority. */
+            const s32 old_priority = thread->GetPriority();
+            thread->SetPriority(new_priority);
+
+            /* Restore the condition variable, if relevant. */
+            if (auto *cond_var = thread->GetConditionVariable(); cond_var != nullptr) {
+                cond_var->AfterUpdatePriority(thread);
+            }
+
+            /* Update the scheduler. */
+            KScheduler::OnThreadPriorityChanged(thread, old_priority);
+
+            /* Keep the lock owner up to date. */
+            KThread *lock_owner = thread->GetLockOwner();
+            if (lock_owner == nullptr) {
+                return;
+            }
+
+            /* Update the thread in the lock owner's sorted list, and continue inheriting. */
+            lock_owner->RemoveWaiterImpl(thread);
+            lock_owner->AddWaiterImpl(thread);
+            thread = lock_owner;
+        }
+    }
+
+    void KThread::AddWaiter(KThread *thread) {
+        MESOSPHERE_ASSERT_THIS();
+        this->AddWaiterImpl(thread);
+        RestorePriority(this);
+    }
+
+    void KThread::RemoveWaiter(KThread *thread) {
+        MESOSPHERE_ASSERT_THIS();
+        this->RemoveWaiterImpl(thread);
+        RestorePriority(this);
+    }
+
+    KThread *KThread::RemoveWaiterByKey(s32 *out_num_waiters, KProcessAddress key) {
+        MESOSPHERE_ASSERT_THIS();
+        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
+
+        s32 num_waiters = 0;
+        KThread *next_lock_owner = nullptr;
+        auto it = this->waiter_list.begin();
+        while (it != this->waiter_list.end()) {
+            if (it->GetAddressKey() == key) {
+                KThread *thread = std::addressof(*it);
+
+                /* Keep track of how many kernel waiters we have. */
+                if (IsKernelAddressKey(thread->GetAddressKey())) {
+                    MESOSPHERE_ABORT_UNLESS((this->num_kernel_waiters--) > 0);
+                }
+                it = this->waiter_list.erase(it);
+
+                /* Update the next lock owner. */
+                if (next_lock_owner == nullptr) {
+                    next_lock_owner = thread;
+                    next_lock_owner->SetLockOwner(nullptr);
+                } else {
+                    next_lock_owner->AddWaiterImpl(thread);
+                }
+                num_waiters++;
+            } else {
+                it++;
+            }
+        }
+
+        /* Do priority updates, if we have a next owner. */
+        if (next_lock_owner) {
+            RestorePriority(this);
+            RestorePriority(next_lock_owner);
+        }
+
+        /* Return output. */
+        *out_num_waiters = num_waiters;
+        return next_lock_owner;
+    }
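RestorePriority walks the lock-ownership chain iteratively rather than recursively: each hop re-sorts the thread in its owner's priority-ordered waiter list, then repeats the computation for the owner. A worked chain with hypothetical threads (lower number = higher priority):

    /* T3 (prio 10) blocks on a lock held by T2 (base 20); T2 blocks on T1 (base 30).
       RestorePriority(T2): T2 inherits 10 (T3 is its highest-priority waiter),
       T2 is re-inserted into T1's sorted waiter list, then the loop sets thread = T1.
       Next pass: T1 inherits 10 as well; T1 has no lock owner, so the loop ends. */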
+    Result KThread::Run() {
+        MESOSPHERE_ASSERT_THIS();
+
+        /* If the kernel hasn't finished initializing, then we should suspend. */
+        if (Kernel::GetState() != Kernel::State::Initialized) {
+            this->RequestSuspend(SuspendType_Init);
+        }
+        while (true) {
+            KScopedSchedulerLock lk;
+
+            /* If either this thread or the current thread are requesting termination, note it. */
+            R_UNLESS(!this->IsTerminationRequested(), svc::ResultTerminationRequested());
+            R_UNLESS(!GetCurrentThread().IsTerminationRequested(), svc::ResultTerminationRequested());
+
+            /* Ensure our thread state is correct. */
+            R_UNLESS(this->GetState() == ThreadState_Initialized, svc::ResultInvalidState());
+
+            /* If the current thread has been asked to suspend, suspend it and retry. */
+            if (GetCurrentThread().IsSuspended()) {
+                GetCurrentThread().Suspend();
+                continue;
+            }
+
+            /* If we're not a kernel thread and we've been asked to suspend, suspend ourselves. */
+            if (this->IsUserThread() && this->IsSuspended()) {
+                this->Suspend();
+            }
+
+            /* Set our state and finish. */
+            this->SetState(KThread::ThreadState_Runnable);
+            return ResultSuccess();
+        }
+    }
+
+    void KThread::Exit() {
+        MESOSPHERE_ASSERT_THIS();
+
+        MESOSPHERE_UNIMPLEMENTED();
+
+        MESOSPHERE_PANIC("KThread::Exit() would return");
+    }
+
+    void KThread::SetState(ThreadState state) {
+        MESOSPHERE_ASSERT_THIS();
+
+        KScopedSchedulerLock sl;
+
+        const ThreadState old_state = this->thread_state;
+        this->thread_state = static_cast<ThreadState>((old_state & ~ThreadState_Mask) | (state & ThreadState_Mask));
+        if (this->thread_state != old_state) {
+            KScheduler::OnThreadStateChanged(this, old_state);
+        }
+    }
+
+    KThreadContext *KThread::GetContextForSchedulerLoop() {
+        return std::addressof(this->GetContext());
+    }
+
+}
diff --git a/libraries/libmesosphere/source/kern_k_thread_local_page.cpp b/libraries/libmesosphere/source/kern_k_thread_local_page.cpp
new file mode 100644
index 000000000..d95811571
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_k_thread_local_page.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    Result KThreadLocalPage::Initialize(KProcess *process) {
+        MESOSPHERE_ASSERT_THIS();
+
+        /* Set that this process owns us. */
+        this->owner = process;
+
+        /* Allocate a new page. */
+        KPageBuffer *page_buf = KPageBuffer::Allocate();
+        R_UNLESS(page_buf != nullptr, svc::ResultOutOfMemory());
+        auto page_buf_guard = SCOPE_GUARD { KPageBuffer::Free(page_buf); };
+
+        /* Map the address in. */
+        R_TRY(this->owner->GetPageTable().MapPages(std::addressof(this->virt_addr), 1, PageSize, page_buf->GetPhysicalAddress(), KMemoryState_ThreadLocal, KMemoryPermission_UserReadWrite));
+
+        /* We succeeded. */
+        page_buf_guard.Cancel();
+        return ResultSuccess();
+    }
+    Result KThreadLocalPage::Finalize() {
+        MESOSPHERE_ASSERT_THIS();
+
+        /* Get the physical address of the page. */
+        KPhysicalAddress phys_addr = Null<KPhysicalAddress>;
+        MESOSPHERE_ABORT_UNLESS(this->owner->GetPageTable().GetPhysicalAddress(&phys_addr, this->GetAddress()));
+
+        /* Unmap the page. */
+        R_TRY(this->owner->GetPageTable().UnmapPages(this->GetAddress(), 1, KMemoryState_ThreadLocal));
+
+        /* Free the page. */
+        KPageBuffer::Free(KPageBuffer::FromPhysicalAddress(phys_addr));
+        return ResultSuccess();
+    }
+
+    KProcessAddress KThreadLocalPage::Reserve() {
+        MESOSPHERE_ASSERT_THIS();
+
+        for (size_t i = 0; i < util::size(this->is_region_free); i++) {
+            if (this->is_region_free[i]) {
+                this->is_region_free[i] = false;
+                return this->GetRegionAddress(i);
+            }
+        }
+
+        return Null<KProcessAddress>;
+    }
+
+    void KThreadLocalPage::Release(KProcessAddress addr) {
+        MESOSPHERE_ASSERT_THIS();
+
+        this->is_region_free[this->GetRegionIndex(addr)] = true;
+    }
+
+    void *KThreadLocalPage::GetPointer() const {
+        MESOSPHERE_ASSERT_THIS();
+
+        KPhysicalAddress phys_addr;
+        MESOSPHERE_ABORT_UNLESS(this->owner->GetPageTable().GetPhysicalAddress(std::addressof(phys_addr), this->GetAddress()));
+        return static_cast<void *>(KPageBuffer::FromPhysicalAddress(phys_addr));
+    }
+
+}
diff --git a/libraries/libmesosphere/source/kern_k_wait_object.cpp b/libraries/libmesosphere/source/kern_k_wait_object.cpp
new file mode 100644
index 000000000..dc7ef1f13
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_k_wait_object.cpp
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    void KWaitObject::OnTimer() {
+        MESOSPHERE_UNIMPLEMENTED();
+    }
+
+}
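Reserve/Release manage fixed-size slots within the single mapped page, so the slot count follows directly from the sizes involved. Worked numbers; the GetRegionAddress formula is an assumption about the (unshown) header, not something this patch states:

    /* With PageSize = 0x1000 and ams::svc::ThreadLocalRegionSize = 0x200,
       util::size(is_region_free) == 8 slots per page.
       Presumably GetRegionAddress(i) == GetAddress() + i * ams::svc::ThreadLocalRegionSize. */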
diff --git a/libraries/libmesosphere/source/kern_k_worker_task_manager.cpp b/libraries/libmesosphere/source/kern_k_worker_task_manager.cpp
new file mode 100644
index 000000000..5a156cd53
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_k_worker_task_manager.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    void KWorkerTaskManager::Initialize(WorkerType wt, s32 priority) {
+        /* Set type, other members already initialized in constructor. */
+        this->type = wt;
+
+        /* Reserve a thread from the system limit. */
+        MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_ThreadCountMax, 1));
+
+        /* Create a new thread. */
+        this->thread = KThread::Create();
+        MESOSPHERE_ABORT_UNLESS(this->thread != nullptr);
+
+        /* Launch the new thread. */
+        MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(this->thread, ThreadFunction, reinterpret_cast<uintptr_t>(this), priority, cpu::NumCores - 1));
+
+        /* Register the new thread. */
+        KThread::Register(this->thread);
+
+        /* Run the thread. */
+        this->thread->Run();
+    }
+
+    void KWorkerTaskManager::AddTask(WorkerType type, KWorkerTask *task) {
+        MESOSPHERE_ASSERT(type <= WorkerType_Count);
+        Kernel::GetWorkerTaskManager(type).AddTask(task);
+    }
+
+    void KWorkerTaskManager::ThreadFunction(uintptr_t arg) {
+        reinterpret_cast<KWorkerTaskManager *>(arg)->ThreadFunctionImpl();
+    }
+
+    void KWorkerTaskManager::ThreadFunctionImpl() {
+        while (true) {
+            KWorkerTask *task = nullptr;
+
+            /* Get a worker task. */
+            {
+                KScopedSchedulerLock sl;
+                task = this->GetTask();
+
+                if (task == nullptr) {
+                    /* If there's nothing to do, set ourselves as waiting. */
+                    this->active = false;
+                    this->thread->SetState(KThread::ThreadState_Waiting);
+                    continue;
+                }
+
+                this->active = true;
+            }
+
+            /* Do the task. */
+            task->DoWorkerTask();
+        }
+    }
+
+    KWorkerTask *KWorkerTaskManager::GetTask() {
+        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
+        KWorkerTask *next = this->head_task;
+        if (next) {
+            /* Advance the list. */
+            if (this->head_task == this->tail_task) {
+                this->head_task = nullptr;
+                this->tail_task = nullptr;
+            } else {
+                this->head_task = this->head_task->GetNextTask();
+            }
+
+            /* Clear the next task's next. */
+            next->SetNextTask(nullptr);
+        }
+        return next;
+    }
+
+    void KWorkerTaskManager::AddTask(KWorkerTask *task) {
+        KScopedSchedulerLock sl;
+        MESOSPHERE_ASSERT(task->GetNextTask() == nullptr);
+
+        /* Insert the task. */
+        if (this->tail_task) {
+            this->tail_task->SetNextTask(task);
+            this->tail_task = task;
+        } else {
+            this->head_task = task;
+            this->tail_task = task;
+
+            /* Make ourselves active if we need to. */
+            if (!this->active) {
+                this->thread->SetState(KThread::ThreadState_Runnable);
+            }
+        }
+    }
+
+}
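The worker manager is a scheduler-locked singly-linked FIFO: an empty queue parks the worker thread in ThreadState_Waiting, and the first AddTask() into a drained queue makes it runnable again. Usage sketch, where task is any KWorkerTask (for instance an exiting thread):

    /* Queue work for the exit worker created later in this patch. */
    KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_Exit, task);
    /* The worker thread pops tasks in FIFO order and calls task->DoWorkerTask(). */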
diff --git a/libraries/libmesosphere/source/kern_kernel.cpp b/libraries/libmesosphere/source/kern_kernel.cpp
new file mode 100644
index 000000000..d312d270d
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_kernel.cpp
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    namespace {
+
+        template<typename T>
+        ALWAYS_INLINE void PrintMemoryRegion(const char *prefix, const T &extents) {
+            static_assert(std::is_same<decltype(extents.GetAddress()),     uintptr_t>::value);
+            static_assert(std::is_same<decltype(extents.GetLastAddress()), uintptr_t>::value);
+            if constexpr (std::is_same<uintptr_t, u32>::value) {
+                MESOSPHERE_LOG("%-24s0x%08x - 0x%08x\n", prefix, extents.GetAddress(), extents.GetLastAddress());
+            } else if constexpr (std::is_same<uintptr_t, u64>::value) {
+                MESOSPHERE_LOG("%-24s0x%016lx - 0x%016lx\n", prefix, extents.GetAddress(), extents.GetLastAddress());
+            } else if constexpr (std::is_same<uintptr_t, unsigned long long>::value) {
+                MESOSPHERE_LOG("%-24s0x%016llx - 0x%016llx\n", prefix, extents.GetAddress(), extents.GetLastAddress());
+            } else {
+                static_assert(!std::is_same<T, T>::value, "Unknown uintptr_t width!");
+            }
+        }
+
+    }
+
+    void Kernel::InitializeCoreLocalRegion(s32 core_id) {
+        /* Construct the core local region object in place. */
+        KCoreLocalContext *clc = GetPointer<KCoreLocalContext>(KMemoryLayout::GetCoreLocalRegionAddress());
+        new (clc) KCoreLocalContext;
+
+        /* Set the core local region address into the global register. */
+        cpu::SetCoreLocalRegionAddress(reinterpret_cast<uintptr_t>(clc));
+
+        /* Initialize current context. */
+        clc->current.current_thread = nullptr;
+        clc->current.current_process = nullptr;
+        clc->current.scheduler = std::addressof(clc->scheduler);
+        clc->current.interrupt_task_manager = std::addressof(clc->interrupt_task_manager);
+        clc->current.core_id = core_id;
+        clc->current.exception_stack_top = GetVoidPointer(KMemoryLayout::GetExceptionStackTopAddress(core_id) - sizeof(KThread::StackParameters));
+
+        /* Clear debugging counters. */
+        clc->num_sw_interrupts = 0;
+        clc->num_hw_interrupts = 0;
+        clc->num_svc = 0;
+        clc->num_process_switches = 0;
+        clc->num_thread_switches = 0;
+        clc->num_fpu_switches = 0;
+
+        for (size_t i = 0; i < util::size(clc->perf_counters); i++) {
+            clc->perf_counters[i] = 0;
+        }
+    }
+
+    void Kernel::InitializeMainAndIdleThreads(s32 core_id) {
+        /* This function sets up the main thread and the idle thread for the core. */
+        KThread *main_thread = std::addressof(Kernel::GetMainThread(core_id));
+        void *main_thread_stack = GetVoidPointer(KMemoryLayout::GetMainStackTopAddress(core_id));
+        KThread *idle_thread = std::addressof(Kernel::GetIdleThread(core_id));
+        void *idle_thread_stack = GetVoidPointer(KMemoryLayout::GetIdleStackTopAddress(core_id));
+        KAutoObject::Create(main_thread);
+        KAutoObject::Create(idle_thread);
+        main_thread->Initialize(nullptr, 0, main_thread_stack, 0, KThread::MainThreadPriority, core_id, nullptr, KThread::ThreadType_Main);
+        idle_thread->Initialize(nullptr, 0, idle_thread_stack, 0, KThread::IdleThreadPriority, core_id, nullptr, KThread::ThreadType_Main);
+
+        /* Set the current thread to be the main thread, and we have no processes running yet. */
+        SetCurrentThread(main_thread);
+        SetCurrentProcess(nullptr);
+
+        /* Initialize the interrupt manager, hardware timer, and scheduler. */
+        GetInterruptManager().Initialize(core_id);
+        GetHardwareTimer().Initialize(core_id);
+        GetScheduler().Initialize(idle_thread);
+    }
+
+    void Kernel::InitializeResourceManagers(KVirtualAddress address, size_t size) {
+        /* Ensure that the buffer is suitable for our use. */
+        const size_t app_size   = ApplicationMemoryBlockSlabHeapSize * sizeof(KMemoryBlock);
+        const size_t sys_size   = SystemMemoryBlockSlabHeapSize * sizeof(KMemoryBlock);
+        const size_t info_size  = BlockInfoSlabHeapSize * sizeof(KBlockInfo);
+        const size_t fixed_size = util::AlignUp(app_size + sys_size + info_size, PageSize);
+        MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(address), PageSize));
+        MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
+        MESOSPHERE_ABORT_UNLESS(fixed_size < size);
+
+        size_t pt_size = size - fixed_size;
+        const size_t rc_size = util::AlignUp(KPageTableManager::CalculateReferenceCountSize(pt_size), PageSize);
+        MESOSPHERE_ABORT_UNLESS(rc_size < pt_size);
+        pt_size -= rc_size;
+
+        /* Initialize the slabheaps. */
+        s_app_memory_block_manager.Initialize(address + pt_size, app_size);
+        s_sys_memory_block_manager.Initialize(address + pt_size + app_size, sys_size);
+        s_block_info_manager.Initialize(address + pt_size + app_size + sys_size, info_size);
+        s_page_table_manager.Initialize(address, pt_size, GetPointer<KPageTableManager::RefCount>(address + pt_size + fixed_size));
+    }
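InitializeResourceManagers carves one donated region: the fixed slab heaps are sized first, the remainder goes to the page-table heap, and that remainder is then shrunk again by its own reference-count table. Worked layout under the definitions above (sizes symbolic):

    /* |<------------------------------- size ------------------------------>|
       | pt heap (pt_size) | app | sys | info (padded) | rc table (rc_size)  |
       fixed_size = AlignUp(app + sys + info, PageSize)
       rc_size    = AlignUp(CalculateReferenceCountSize(size - fixed_size), PageSize)
       pt_size    = size - fixed_size - rc_size                               */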
+    void Kernel::PrintLayout() {
+        /* Print out the kernel version. */
+        /* TODO: target firmware, if we support that? */
+        MESOSPHERE_LOG("Horizon Kernel (Mesosphere)\n");
+        MESOSPHERE_LOG("Built: %s %s\n", __DATE__, __TIME__);
+        MESOSPHERE_LOG("Atmosphere version: %d.%d.%d-%s\n", ATMOSPHERE_RELEASE_VERSION, ATMOSPHERE_GIT_REVISION);
+        MESOSPHERE_LOG("Supported OS version: %d.%d.%d\n", ATMOSPHERE_SUPPORTED_HOS_VERSION_MAJOR, ATMOSPHERE_SUPPORTED_HOS_VERSION_MINOR, ATMOSPHERE_SUPPORTED_HOS_VERSION_MICRO);
+        MESOSPHERE_LOG("\n");
+
+        /* Print relative memory usage. */
+        const auto [total, kernel] = KMemoryLayout::GetTotalAndKernelMemorySizes();
+        MESOSPHERE_LOG("Kernel Memory Usage: %zu/%zu MB\n", util::AlignUp(kernel, 1_MB) / 1_MB, util::AlignUp(total, 1_MB) / 1_MB);
+        MESOSPHERE_LOG("\n");
+
+        /* Print out important memory layout regions. */
+        MESOSPHERE_LOG("Virtual Memory Layout\n");
+        PrintMemoryRegion("    KernelRegion", KMemoryLayout::GetKernelRegionExtents());
+        PrintMemoryRegion("        Code", KMemoryLayout::GetKernelCodeRegionExtents());
+        PrintMemoryRegion("        Stack", KMemoryLayout::GetKernelStackRegionExtents());
+        PrintMemoryRegion("        Misc", KMemoryLayout::GetKernelMiscRegionExtents());
+        PrintMemoryRegion("        Slab", KMemoryLayout::GetKernelSlabRegionExtents());
+        PrintMemoryRegion("    CoreLocalRegion", KMemoryLayout::GetCoreLocalRegion());
+        PrintMemoryRegion("    LinearRegion", KMemoryLayout::GetLinearRegionExtents());
+        MESOSPHERE_LOG("\n");
+
+        MESOSPHERE_LOG("Physical Memory Layout\n");
+        PrintMemoryRegion("    LinearRegion", KMemoryLayout::GetLinearRegionPhysicalExtents());
+        PrintMemoryRegion("    CarveoutRegion", KMemoryLayout::GetCarveoutRegionExtents());
+        MESOSPHERE_LOG("\n");
+        PrintMemoryRegion("    KernelRegion", KMemoryLayout::GetKernelRegionPhysicalExtents());
+        PrintMemoryRegion("        Code", KMemoryLayout::GetKernelCodeRegionPhysicalExtents());
+        PrintMemoryRegion("        Slab", KMemoryLayout::GetKernelSlabRegionPhysicalExtents());
+        PrintMemoryRegion("        PageTableHeap", KMemoryLayout::GetKernelPageTableHeapRegionPhysicalExtents());
+        PrintMemoryRegion("        InitPageTable", KMemoryLayout::GetKernelInitPageTableRegionPhysicalExtents());
+        PrintMemoryRegion("    MemoryPoolRegion", KMemoryLayout::GetKernelPoolPartitionRegionPhysicalExtents());
+        PrintMemoryRegion("        System", KMemoryLayout::GetKernelSystemPoolRegionPhysicalExtents());
+        PrintMemoryRegion("        Internal", KMemoryLayout::GetKernelMetadataPoolRegionPhysicalExtents());
+        PrintMemoryRegion("        SystemUnsafe", KMemoryLayout::GetKernelSystemNonSecurePoolRegionPhysicalExtents());
+        PrintMemoryRegion("        Applet", KMemoryLayout::GetKernelAppletPoolRegionPhysicalExtents());
+        PrintMemoryRegion("        Application", KMemoryLayout::GetKernelApplicationPoolRegionPhysicalExtents());
+        MESOSPHERE_LOG("\n");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/kern_main.cpp b/libraries/libmesosphere/source/kern_main.cpp
new file mode 100644
index 000000000..063e6b301
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_main.cpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    namespace {
+
+        template<typename F>
+        ALWAYS_INLINE void DoOnEachCoreInOrder(s32 core_id, F f) {
+            cpu::SynchronizeAllCores();
+            for (size_t i = 0; i < cpu::NumCores; i++) {
+                if (static_cast<s32>(i) == core_id) {
+                    f();
+                }
+                cpu::SynchronizeAllCores();
+            }
+        }
+
+    }
+
+    NORETURN void HorizonKernelMain(s32 core_id) {
+        /* Setup the Core Local Region, and note that we're initializing. */
+        Kernel::InitializeCoreLocalRegion(core_id);
+        Kernel::SetState(Kernel::State::Initializing);
+
+        /* Ensure that all cores get to this point before proceeding. */
+        cpu::SynchronizeAllCores();
+
+        /* Initialize the main and idle thread for each core. */
+        DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
+            Kernel::InitializeMainAndIdleThreads(core_id);
+        });
+
+        if (core_id == 0) {
+            /* Initialize the carveout and the system resource limit. */
+            KSystemControl::InitializePhase1();
+
+            /* Initialize the memory manager and the KPageBuffer slabheap. */
+            {
+                const auto &metadata_region = KMemoryLayout::GetMetadataPoolRegion();
+                Kernel::GetMemoryManager().Initialize(metadata_region.GetAddress(), metadata_region.GetSize());
+                init::InitializeKPageBufferSlabHeap();
+            }
+
+            /* Copy the Initial Process Binary to safe memory. */
+            CopyInitialProcessBinaryToKernelMemory();
+
+            /* Print out information about the kernel. */
+            Kernel::PrintLayout();
+
+            /* Initialize the KObject Slab Heaps. */
+            init::InitializeSlabHeaps();
+
+            /* Initialize the Dynamic Slab Heaps. */
+            {
+                const auto &pt_heap_region = KMemoryLayout::GetPageTableHeapRegion();
+                Kernel::InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize());
+            }
+        }
+
+        /* Initialize the supervisor page table for each core. */
+        DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
+            KPageTable::Initialize(core_id);
+            Kernel::GetKernelPageTable().Initialize(core_id);
+        });
+
+        /* Activate the supervisor page table for each core. */
+        DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
+            Kernel::GetKernelPageTable().ActivateForInit();
+        });
+
+        /* NOTE: The retail kernel calls a nullsub on each core here. */
+
+        /* Register the main/idle threads and initialize the interrupt task manager. */
+        DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
+            KThread::Register(std::addressof(Kernel::GetMainThread(core_id)));
+            KThread::Register(std::addressof(Kernel::GetIdleThread(core_id)));
+            Kernel::GetInterruptTaskManager().Initialize();
+        });
+
+        /* Activate the scheduler and enable interrupts. */
+        DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
+            Kernel::GetScheduler().Activate();
+            KInterruptManager::EnableInterrupts();
+        });
+
+        /* Initialize cpu interrupt threads. */
+        cpu::InitializeInterruptThreads(core_id);
+
+        /* Initialize the DPC manager. */
+        KDpcManager::Initialize();
+        cpu::SynchronizeAllCores();
+
+        /* Perform more core-0 specific initialization. */
+        if (core_id == 0) {
+            /* Initialize the exit worker manager, so that threads and processes may exit cleanly. */
+            Kernel::GetWorkerTaskManager(KWorkerTaskManager::WorkerType_Exit).Initialize(KWorkerTaskManager::WorkerType_Exit, KWorkerTaskManager::ExitWorkerPriority);
+
+            /* Setup so that we may sleep later, and reserve memory for secure applets. */
+            KSystemControl::InitializePhase2();
+
+            /* Initialize the SMMU. */
+            KDeviceAddressSpace::Initialize();
+
+            /* Load the initial processes. */
+            CreateAndRunInitialProcesses();
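DoOnEachCoreInOrder is a barrier-sequenced loop: every core executes the same code, but the SynchronizeAllCores() fences guarantee f() runs on core 0, then core 1, and so on, one core at a time. Trace sketch for cpu::NumCores = 4:

    /* All cores enter DoOnEachCoreInOrder(core_id, f):
       sync -> (core 0 runs f) -> sync -> (core 1 runs f) -> sync
            -> (core 2 runs f) -> sync -> (core 3 runs f) -> sync */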
+
+            /* We're done initializing! */
+            Kernel::SetState(Kernel::State::Initialized);
+
+            /* Resume all threads suspended while we initialized. */
+            KThread::ResumeThreadsSuspendedForInit();
+        }
+        cpu::SynchronizeAllCores();
+
+        /* Set the current thread priority to idle. */
+        GetCurrentThread().SetPriorityToIdle();
+
+        /* Exit the main thread. */
+        {
+            auto &main_thread = Kernel::GetMainThread(core_id);
+            main_thread.Open();
+            main_thread.Exit();
+        }
+
+        /* Main() is done, and we should never get to this point. */
+        MESOSPHERE_PANIC("Main Thread continued after exit.");
+        while (true) { /* ... */ }
+    }
+
+}
diff --git a/libraries/libmesosphere/source/kern_panic.cpp b/libraries/libmesosphere/source/kern_panic.cpp
index 0e7c952da..a60e88093 100644
--- a/libraries/libmesosphere/source/kern_panic.cpp
+++ b/libraries/libmesosphere/source/kern_panic.cpp
@@ -15,6 +15,8 @@
  */
 #include <mesosphere.hpp>
 
+extern "C" void _start();
+
 namespace ams::result::impl {
 
     NORETURN void OnResultAssertion(Result result) {
@@ -27,14 +29,102 @@ namespace ams::kern {
 
     namespace {
 
+        constexpr std::array<s32, cpu::NumCores> NegativeArray = [] {
+            std::array<s32, cpu::NumCores> arr = {};
+            for (size_t i = 0; i < arr.size(); i++) {
+                arr[i] = -1;
+            }
+            return arr;
+        }();
+
+        std::atomic<s32> g_next_ticket    = 0;
+        std::atomic<s32> g_current_ticket = 0;
+
+        std::array<s32, cpu::NumCores> g_core_tickets = NegativeArray;
+
+        s32 GetCoreTicket() {
+            const s32 core_id = GetCurrentCoreId();
+            if (g_core_tickets[core_id] == -1) {
+                g_core_tickets[core_id] = 2 * g_next_ticket.fetch_add(1);
+            }
+            return g_core_tickets[core_id];
+        }
+
+        void WaitCoreTicket() {
+            const s32 expected = GetCoreTicket();
+            const s32 desired  = expected + 1;
+            s32 compare = g_current_ticket;
+            do {
+                if (compare == desired) {
+                    break;
+                }
+                compare = expected;
+            } while (!g_current_ticket.compare_exchange_weak(compare, desired));
+        }
+
+        void ReleaseCoreTicket() {
+            const s32 expected = GetCoreTicket() + 1;
+            const s32 desired  = expected + 1;
+            s32 compare = g_current_ticket;
+            do {
+                if (compare != expected) {
+                    break;
+                }
+            } while (!g_current_ticket.compare_exchange_weak(compare, desired));
+        }
+
+        [[gnu::unused]] void PrintCurrentState() {
+            /* Wait for it to be our turn to print. */
+            WaitCoreTicket();
+
+            const s32 core_id = GetCurrentCoreId();
+            MESOSPHERE_RELEASE_LOG("Core[%d] Current State:\n", core_id);
+
+            /* TODO: Dump register state. */
+
+            #ifdef ATMOSPHERE_ARCH_ARM64
+            MESOSPHERE_RELEASE_LOG("    Backtrace:\n");
+            uintptr_t fp = reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
+            for (size_t i = 0; i < 32 && fp && util::IsAligned(fp, 0x10) && cpu::GetPhysicalAddressWritable(nullptr, fp, true); i++) {
+                struct {
+                    uintptr_t fp;
+                    uintptr_t lr;
+                } *stack_frame = reinterpret_cast<decltype(stack_frame)>(fp);
+                MESOSPHERE_RELEASE_LOG("        [%02zx]: %p\n", i, reinterpret_cast<void *>(stack_frame->lr));
+                fp = stack_frame->fp;
+            }
+            #endif
+
+            MESOSPHERE_RELEASE_LOG("\n");
+
+            /* Allow the next core to print. */
+            ReleaseCoreTicket();
+        }
+
         NORETURN void StopSystem() {
+            #ifdef MESOSPHERE_BUILD_FOR_DEBUGGING
+            /* Print the current core. */
+            PrintCurrentState();
+            #endif
+
             KSystemControl::StopSystem();
         }
 
     }
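The panic path serializes multi-core output with an even/odd ticket protocol: a core's ticket is always even, the odd value marks "currently printing", and the next even value admits the following core. Worked trace:

    /* Core ticket = 2 * g_next_ticket.fetch_add(1)  ->  0, 2, 4, ...
       WaitCoreTicket():    spin until g_current_ticket == ticket, then CAS it
                            to ticket + 1 (odd = this core is printing).
       ReleaseCoreTicket(): CAS ticket + 1 -> ticket + 2, admitting the holder
                            of the next even ticket.
       Two cores panicking at once therefore advance 0 -> 1 -> 2 -> 3 and their
       logs never interleave. */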
 
     NORETURN WEAK_SYMBOL void Panic(const char *file, int line, const char *format, ...) {
-        /* TODO: Implement printing, log this information. */
+        #ifdef MESOSPHERE_BUILD_FOR_DEBUGGING
+        /* Wait for it to be our turn to print. */
+        WaitCoreTicket();
+
+        ::std::va_list vl;
+        va_start(vl, format);
+        MESOSPHERE_RELEASE_LOG("Core[%d]: Kernel Panic at %s:%d\n", GetCurrentCoreId(), file, line);
+        MESOSPHERE_RELEASE_VLOG(format, vl);
+        MESOSPHERE_RELEASE_LOG("\n");
+        va_end(vl);
+        #endif
+
         StopSystem();
     }
diff --git a/libraries/libmesosphere/source/libc/kern_cxx.cpp b/libraries/libmesosphere/source/libc/kern_cxx.cpp
new file mode 100644
index 000000000..9262db6f8
--- /dev/null
+++ b/libraries/libmesosphere/source/libc/kern_cxx.cpp
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* cxx implementation details to be stubbed here, as needed. */
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
diff --git a/libraries/libmesosphere/source/svc/kern_svc_activity.cpp b/libraries/libmesosphere/source/svc/kern_svc_activity.cpp
new file mode 100644
index 000000000..68a9e5e0a
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_activity.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result SetThreadActivity64(ams::svc::Handle thread_handle, ams::svc::ThreadActivity thread_activity) {
+        MESOSPHERE_PANIC("Stubbed SvcSetThreadActivity64 was called.");
+    }
+
+    Result SetProcessActivity64(ams::svc::Handle process_handle, ams::svc::ProcessActivity process_activity) {
+        MESOSPHERE_PANIC("Stubbed SvcSetProcessActivity64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result SetThreadActivity64From32(ams::svc::Handle thread_handle, ams::svc::ThreadActivity thread_activity) {
+        MESOSPHERE_PANIC("Stubbed SvcSetThreadActivity64From32 was called.");
+    }
+
+    Result SetProcessActivity64From32(ams::svc::Handle process_handle, ams::svc::ProcessActivity process_activity) {
+        MESOSPHERE_PANIC("Stubbed SvcSetProcessActivity64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_address_arbiter.cpp b/libraries/libmesosphere/source/svc/kern_svc_address_arbiter.cpp
new file mode 100644
index 000000000..3943f0421
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_address_arbiter.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result WaitForAddress64(ams::svc::Address address, ams::svc::ArbitrationType arb_type, int32_t value, int64_t timeout_ns) {
+        MESOSPHERE_PANIC("Stubbed SvcWaitForAddress64 was called.");
+    }
+
+    Result SignalToAddress64(ams::svc::Address address, ams::svc::SignalType signal_type, int32_t value, int32_t count) {
+        MESOSPHERE_PANIC("Stubbed SvcSignalToAddress64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result WaitForAddress64From32(ams::svc::Address address, ams::svc::ArbitrationType arb_type, int32_t value, int64_t timeout_ns) {
+        MESOSPHERE_PANIC("Stubbed SvcWaitForAddress64From32 was called.");
+    }
+
+    Result SignalToAddress64From32(ams::svc::Address address, ams::svc::SignalType signal_type, int32_t value, int32_t count) {
+        MESOSPHERE_PANIC("Stubbed SvcSignalToAddress64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_address_translation.cpp b/libraries/libmesosphere/source/svc/kern_svc_address_translation.cpp
new file mode 100644
index 000000000..ce2eb87ea
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_address_translation.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result QueryPhysicalAddress64(ams::svc::lp64::PhysicalMemoryInfo *out_info, ams::svc::Address address) {
+        MESOSPHERE_PANIC("Stubbed SvcQueryPhysicalAddress64 was called.");
+    }
+
+    Result QueryIoMapping64(ams::svc::Address *out_address, ams::svc::PhysicalAddress physical_address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcQueryIoMapping64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result QueryPhysicalAddress64From32(ams::svc::ilp32::PhysicalMemoryInfo *out_info, ams::svc::Address address) {
+        MESOSPHERE_PANIC("Stubbed SvcQueryPhysicalAddress64From32 was called.");
+    }
+
+    Result QueryIoMapping64From32(ams::svc::Address *out_address, ams::svc::PhysicalAddress physical_address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcQueryIoMapping64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_cache.cpp b/libraries/libmesosphere/source/svc/kern_svc_cache.cpp
new file mode 100644
index 000000000..9ba6d2463
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_cache.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    void FlushEntireDataCache64() {
+        MESOSPHERE_PANIC("Stubbed SvcFlushEntireDataCache64 was called.");
+    }
+
+    Result FlushDataCache64(ams::svc::Address address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcFlushDataCache64 was called.");
+    }
+
+    Result InvalidateProcessDataCache64(ams::svc::Handle process_handle, uint64_t address, uint64_t size) {
+        MESOSPHERE_PANIC("Stubbed SvcInvalidateProcessDataCache64 was called.");
+    }
+
+    Result StoreProcessDataCache64(ams::svc::Handle process_handle, uint64_t address, uint64_t size) {
+        MESOSPHERE_PANIC("Stubbed SvcStoreProcessDataCache64 was called.");
+    }
+
+    Result FlushProcessDataCache64(ams::svc::Handle process_handle, uint64_t address, uint64_t size) {
+        MESOSPHERE_PANIC("Stubbed SvcFlushProcessDataCache64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    void FlushEntireDataCache64From32() {
+        MESOSPHERE_PANIC("Stubbed SvcFlushEntireDataCache64From32 was called.");
+    }
+
+    Result FlushDataCache64From32(ams::svc::Address address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcFlushDataCache64From32 was called.");
+    }
+
+    Result InvalidateProcessDataCache64From32(ams::svc::Handle process_handle, uint64_t address, uint64_t size) {
+        MESOSPHERE_PANIC("Stubbed SvcInvalidateProcessDataCache64From32 was called.");
+    }
+
+    Result StoreProcessDataCache64From32(ams::svc::Handle process_handle, uint64_t address, uint64_t size) {
+        MESOSPHERE_PANIC("Stubbed SvcStoreProcessDataCache64From32 was called.");
+    }
+
+    Result FlushProcessDataCache64From32(ams::svc::Handle process_handle, uint64_t address, uint64_t size) {
+        MESOSPHERE_PANIC("Stubbed SvcFlushProcessDataCache64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_code_memory.cpp b/libraries/libmesosphere/source/svc/kern_svc_code_memory.cpp
new file mode 100644
index 000000000..e47135698
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_code_memory.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/libraries/libmesosphere/source/svc/kern_svc_code_memory.cpp b/libraries/libmesosphere/source/svc/kern_svc_code_memory.cpp
new file mode 100644
index 000000000..e47135698
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_code_memory.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result CreateCodeMemory64(ams::svc::Handle *out_handle, ams::svc::Address address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcCreateCodeMemory64 was called.");
+    }
+
+    Result ControlCodeMemory64(ams::svc::Handle code_memory_handle, ams::svc::CodeMemoryOperation operation, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) {
+        MESOSPHERE_PANIC("Stubbed SvcControlCodeMemory64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result CreateCodeMemory64From32(ams::svc::Handle *out_handle, ams::svc::Address address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcCreateCodeMemory64From32 was called.");
+    }
+
+    Result ControlCodeMemory64From32(ams::svc::Handle code_memory_handle, ams::svc::CodeMemoryOperation operation, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) {
+        MESOSPHERE_PANIC("Stubbed SvcControlCodeMemory64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_condition_variable.cpp b/libraries/libmesosphere/source/svc/kern_svc_condition_variable.cpp
new file mode 100644
index 000000000..8307dccae
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_condition_variable.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result WaitProcessWideKeyAtomic64(ams::svc::Address address, ams::svc::Address cv_key, uint32_t tag, int64_t timeout_ns) {
+        MESOSPHERE_PANIC("Stubbed SvcWaitProcessWideKeyAtomic64 was called.");
+    }
+
+    void SignalProcessWideKey64(ams::svc::Address cv_key, int32_t count) {
+        MESOSPHERE_PANIC("Stubbed SvcSignalProcessWideKey64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result WaitProcessWideKeyAtomic64From32(ams::svc::Address address, ams::svc::Address cv_key, uint32_t tag, int64_t timeout_ns) {
+        MESOSPHERE_PANIC("Stubbed SvcWaitProcessWideKeyAtomic64From32 was called.");
+    }
+
+    void SignalProcessWideKey64From32(ams::svc::Address cv_key, int32_t count) {
+        MESOSPHERE_PANIC("Stubbed SvcSignalProcessWideKey64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_debug.cpp b/libraries/libmesosphere/source/svc/kern_svc_debug.cpp
new file mode 100644
index 000000000..c995bd015
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_debug.cpp
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result DebugActiveProcess64(ams::svc::Handle *out_handle, uint64_t process_id) {
+        MESOSPHERE_PANIC("Stubbed SvcDebugActiveProcess64 was called.");
+    }
+
+    Result BreakDebugProcess64(ams::svc::Handle debug_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcBreakDebugProcess64 was called.");
+    }
+
+    Result TerminateDebugProcess64(ams::svc::Handle debug_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcTerminateDebugProcess64 was called.");
+    }
+
+    Result GetDebugEvent64(KUserPointer<ams::svc::lp64::DebugEventInfo *> out_info, ams::svc::Handle debug_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcGetDebugEvent64 was called.");
+    }
+
+    Result ContinueDebugEvent64(ams::svc::Handle debug_handle, uint32_t flags, KUserPointer<const uint64_t *> thread_ids, int32_t num_thread_ids) {
+        MESOSPHERE_PANIC("Stubbed SvcContinueDebugEvent64 was called.");
+    }
+
+    Result GetDebugThreadContext64(KUserPointer<ams::svc::ThreadContext *> out_context, ams::svc::Handle debug_handle, uint64_t thread_id, uint32_t context_flags) {
+        MESOSPHERE_PANIC("Stubbed SvcGetDebugThreadContext64 was called.");
+    }
+
+    Result SetDebugThreadContext64(ams::svc::Handle debug_handle, uint64_t thread_id, KUserPointer<const ams::svc::ThreadContext *> context, uint32_t context_flags) {
+        MESOSPHERE_PANIC("Stubbed SvcSetDebugThreadContext64 was called.");
+    }
+
+    Result QueryDebugProcessMemory64(KUserPointer<ams::svc::lp64::MemoryInfo *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle process_handle, ams::svc::Address address) {
+        MESOSPHERE_PANIC("Stubbed SvcQueryDebugProcessMemory64 was called.");
+    }
+
+    Result ReadDebugProcessMemory64(ams::svc::Address buffer, ams::svc::Handle debug_handle, ams::svc::Address address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcReadDebugProcessMemory64 was called.");
+    }
+
+    Result WriteDebugProcessMemory64(ams::svc::Handle debug_handle, ams::svc::Address buffer, ams::svc::Address address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcWriteDebugProcessMemory64 was called.");
+    }
+
+    Result SetHardwareBreakPoint64(ams::svc::HardwareBreakPointRegisterName name, uint64_t flags, uint64_t value) {
+        MESOSPHERE_PANIC("Stubbed SvcSetHardwareBreakPoint64 was called.");
+    }
+
+    Result GetDebugThreadParam64(uint64_t *out_64, uint32_t *out_32, ams::svc::Handle debug_handle, uint64_t thread_id, ams::svc::DebugThreadParam param) {
+        MESOSPHERE_PANIC("Stubbed SvcGetDebugThreadParam64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result DebugActiveProcess64From32(ams::svc::Handle *out_handle, uint64_t process_id) {
+        MESOSPHERE_PANIC("Stubbed SvcDebugActiveProcess64From32 was called.");
+    }
+
+    Result BreakDebugProcess64From32(ams::svc::Handle debug_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcBreakDebugProcess64From32 was called.");
+    }
+
+    Result TerminateDebugProcess64From32(ams::svc::Handle debug_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcTerminateDebugProcess64From32 was called.");
+    }
+
+    Result GetDebugEvent64From32(KUserPointer<ams::svc::ilp32::DebugEventInfo *> out_info, ams::svc::Handle debug_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcGetDebugEvent64From32 was called.");
+    }
+
+    Result ContinueDebugEvent64From32(ams::svc::Handle debug_handle, uint32_t flags, KUserPointer<const uint64_t *> thread_ids, int32_t num_thread_ids) {
+        MESOSPHERE_PANIC("Stubbed SvcContinueDebugEvent64From32 was called.");
+    }
+
+    Result GetDebugThreadContext64From32(KUserPointer<ams::svc::ThreadContext *> out_context, ams::svc::Handle debug_handle, uint64_t thread_id, uint32_t context_flags) {
+        MESOSPHERE_PANIC("Stubbed SvcGetDebugThreadContext64From32 was called.");
+    }
+
+    Result SetDebugThreadContext64From32(ams::svc::Handle debug_handle, uint64_t thread_id, KUserPointer<const ams::svc::ThreadContext *> context, uint32_t context_flags) {
+        MESOSPHERE_PANIC("Stubbed SvcSetDebugThreadContext64From32 was called.");
+    }
+
+    Result QueryDebugProcessMemory64From32(KUserPointer<ams::svc::ilp32::MemoryInfo *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle process_handle, ams::svc::Address address) {
+        MESOSPHERE_PANIC("Stubbed SvcQueryDebugProcessMemory64From32 was called.");
+    }
+
+    Result ReadDebugProcessMemory64From32(ams::svc::Address buffer, ams::svc::Handle debug_handle, ams::svc::Address address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcReadDebugProcessMemory64From32 was called.");
+    }
+
+    Result WriteDebugProcessMemory64From32(ams::svc::Handle debug_handle, ams::svc::Address buffer, ams::svc::Address address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcWriteDebugProcessMemory64From32 was called.");
+    }
+
+    Result SetHardwareBreakPoint64From32(ams::svc::HardwareBreakPointRegisterName name, uint64_t flags, uint64_t value) {
+        MESOSPHERE_PANIC("Stubbed SvcSetHardwareBreakPoint64From32 was called.");
+    }
+
+    Result GetDebugThreadParam64From32(uint64_t *out_64, uint32_t *out_32, ams::svc::Handle debug_handle, uint64_t thread_id, ams::svc::DebugThreadParam param) {
+        MESOSPHERE_PANIC("Stubbed SvcGetDebugThreadParam64From32 was called.");
+    }
+
+}
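The KUserPointer<T> parameters in the debug SVCs above wrap raw userspace addresses; kernel code is expected to copy through them rather than dereference them directly. A minimal sketch of how a stub like GetDebugEvent64 might eventually be filled in (hypothetical; the CopyFrom pattern mirrors the one kern_svc_query_memory.cpp uses later in this diff):

    Result GetDebugEvent64(KUserPointer<ams::svc::lp64::DebugEventInfo *> out_info, ams::svc::Handle debug_handle) {
        /* Build the event info in kernel memory first. */
        ams::svc::lp64::DebugEventInfo info = {};
        /* ... populate info from the debug object named by debug_handle ... */

        /* Copy the result out through the checked user pointer. */
        R_TRY(out_info.CopyFrom(std::addressof(info)));
        return ResultSuccess();
    }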
diff --git a/libraries/libmesosphere/source/svc/kern_svc_debug_string.cpp b/libraries/libmesosphere/source/svc/kern_svc_debug_string.cpp
new file mode 100644
index 000000000..547ae5f14
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_debug_string.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result OutputDebugString64(KUserPointer<const char *> debug_str, ams::svc::Size len) {
+        MESOSPHERE_PANIC("Stubbed SvcOutputDebugString64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result OutputDebugString64From32(KUserPointer<const char *> debug_str, ams::svc::Size len) {
+        MESOSPHERE_PANIC("Stubbed SvcOutputDebugString64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_device_address_space.cpp b/libraries/libmesosphere/source/svc/kern_svc_device_address_space.cpp
new file mode 100644
index 000000000..60cbebde9
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_device_address_space.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result CreateDeviceAddressSpace64(ams::svc::Handle *out_handle, uint64_t das_address, uint64_t das_size) {
+        MESOSPHERE_PANIC("Stubbed SvcCreateDeviceAddressSpace64 was called.");
+    }
+
+    Result AttachDeviceAddressSpace64(ams::svc::DeviceName device_name, ams::svc::Handle das_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcAttachDeviceAddressSpace64 was called.");
+    }
+
+    Result DetachDeviceAddressSpace64(ams::svc::DeviceName device_name, ams::svc::Handle das_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcDetachDeviceAddressSpace64 was called.");
+    }
+
+    Result MapDeviceAddressSpaceByForce64(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
+        MESOSPHERE_PANIC("Stubbed SvcMapDeviceAddressSpaceByForce64 was called.");
+    }
+
+    Result MapDeviceAddressSpaceAligned64(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
+        MESOSPHERE_PANIC("Stubbed SvcMapDeviceAddressSpaceAligned64 was called.");
+    }
+
+    Result MapDeviceAddressSpace64(ams::svc::Size *out_mapped_size, ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
+        MESOSPHERE_PANIC("Stubbed SvcMapDeviceAddressSpace64 was called.");
+    }
+
+    Result UnmapDeviceAddressSpace64(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address) {
+        MESOSPHERE_PANIC("Stubbed SvcUnmapDeviceAddressSpace64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result CreateDeviceAddressSpace64From32(ams::svc::Handle *out_handle, uint64_t das_address, uint64_t das_size) {
+        MESOSPHERE_PANIC("Stubbed SvcCreateDeviceAddressSpace64From32 was called.");
+    }
+
+    Result AttachDeviceAddressSpace64From32(ams::svc::DeviceName device_name, ams::svc::Handle das_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcAttachDeviceAddressSpace64From32 was called.");
+    }
+
+    Result DetachDeviceAddressSpace64From32(ams::svc::DeviceName device_name, ams::svc::Handle das_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcDetachDeviceAddressSpace64From32 was called.");
+    }
+
+    Result MapDeviceAddressSpaceByForce64From32(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
+        MESOSPHERE_PANIC("Stubbed SvcMapDeviceAddressSpaceByForce64From32 was called.");
+    }
+
+    Result MapDeviceAddressSpaceAligned64From32(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
+        MESOSPHERE_PANIC("Stubbed SvcMapDeviceAddressSpaceAligned64From32 was called.");
+    }
+
+    Result MapDeviceAddressSpace64From32(ams::svc::Size *out_mapped_size, ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
+        MESOSPHERE_PANIC("Stubbed SvcMapDeviceAddressSpace64From32 was called.");
+    }
+
+    Result UnmapDeviceAddressSpace64From32(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address) {
+        MESOSPHERE_PANIC("Stubbed SvcUnmapDeviceAddressSpace64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_event.cpp b/libraries/libmesosphere/source/svc/kern_svc_event.cpp
new file mode 100644
index 000000000..77aead4d5
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_event.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result SignalEvent64(ams::svc::Handle event_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcSignalEvent64 was called.");
+    }
+
+    Result ClearEvent64(ams::svc::Handle event_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcClearEvent64 was called.");
+    }
+
+    Result CreateEvent64(ams::svc::Handle *out_write_handle, ams::svc::Handle *out_read_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcCreateEvent64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result SignalEvent64From32(ams::svc::Handle event_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcSignalEvent64From32 was called.");
+    }
+
+    Result ClearEvent64From32(ams::svc::Handle event_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcClearEvent64From32 was called.");
+    }
+
+    Result CreateEvent64From32(ams::svc::Handle *out_write_handle, ams::svc::Handle *out_read_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcCreateEvent64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_exception.cpp b/libraries/libmesosphere/source/svc/kern_svc_exception.cpp
new file mode 100644
index 000000000..4df8e6d6b
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_exception.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    void Break64(ams::svc::BreakReason break_reason, ams::svc::Address arg, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcBreak64 was called.");
+    }
+
+    void ReturnFromException64(ams::Result result) {
+        MESOSPHERE_PANIC("Stubbed SvcReturnFromException64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    void Break64From32(ams::svc::BreakReason break_reason, ams::svc::Address arg, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcBreak64From32 was called.");
+    }
+
+    void ReturnFromException64From32(ams::Result result) {
+        MESOSPHERE_PANIC("Stubbed SvcReturnFromException64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_info.cpp b/libraries/libmesosphere/source/svc/kern_svc_info.cpp
new file mode 100644
index 000000000..b1dc3a534
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_info.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result GetInfo64(uint64_t *out, ams::svc::InfoType info_type, ams::svc::Handle handle, uint64_t info_subtype) {
+        MESOSPHERE_PANIC("Stubbed SvcGetInfo64 was called.");
+    }
+
+    Result GetSystemInfo64(uint64_t *out, ams::svc::SystemInfoType info_type, ams::svc::Handle handle, uint64_t info_subtype) {
+        MESOSPHERE_PANIC("Stubbed SvcGetSystemInfo64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result GetInfo64From32(uint64_t *out, ams::svc::InfoType info_type, ams::svc::Handle handle, uint64_t info_subtype) {
+        MESOSPHERE_PANIC("Stubbed SvcGetInfo64From32 was called.");
+    }
+
+    Result GetSystemInfo64From32(uint64_t *out, ams::svc::SystemInfoType info_type, ams::svc::Handle handle, uint64_t info_subtype) {
+        MESOSPHERE_PANIC("Stubbed SvcGetSystemInfo64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_interrupt_event.cpp b/libraries/libmesosphere/source/svc/kern_svc_interrupt_event.cpp
new file mode 100644
index 000000000..f724e223e
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_interrupt_event.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result CreateInterruptEvent64(ams::svc::Handle *out_read_handle, int32_t interrupt_id, ams::svc::InterruptType interrupt_type) {
+        MESOSPHERE_PANIC("Stubbed SvcCreateInterruptEvent64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result CreateInterruptEvent64From32(ams::svc::Handle *out_read_handle, int32_t interrupt_id, ams::svc::InterruptType interrupt_type) {
+        MESOSPHERE_PANIC("Stubbed SvcCreateInterruptEvent64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_ipc.cpp b/libraries/libmesosphere/source/svc/kern_svc_ipc.cpp
new file mode 100644
index 000000000..0d51e6a55
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_ipc.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result SendSyncRequest64(ams::svc::Handle session_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcSendSyncRequest64 was called.");
+    }
+
+    Result SendSyncRequestWithUserBuffer64(ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, ams::svc::Handle session_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcSendSyncRequestWithUserBuffer64 was called.");
+    }
+
+    Result SendAsyncRequestWithUserBuffer64(ams::svc::Handle *out_event_handle, ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, ams::svc::Handle session_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcSendAsyncRequestWithUserBuffer64 was called.");
+    }
+
+    Result ReplyAndReceive64(int32_t *out_index, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) {
+        MESOSPHERE_PANIC("Stubbed SvcReplyAndReceive64 was called.");
+    }
+
+    Result ReplyAndReceiveWithUserBuffer64(int32_t *out_index, ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) {
+        MESOSPHERE_PANIC("Stubbed SvcReplyAndReceiveWithUserBuffer64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result SendSyncRequest64From32(ams::svc::Handle session_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcSendSyncRequest64From32 was called.");
+    }
+
+    Result SendSyncRequestWithUserBuffer64From32(ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, ams::svc::Handle session_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcSendSyncRequestWithUserBuffer64From32 was called.");
+    }
+
+    Result SendAsyncRequestWithUserBuffer64From32(ams::svc::Handle *out_event_handle, ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, ams::svc::Handle session_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcSendAsyncRequestWithUserBuffer64From32 was called.");
+    }
+
+    Result ReplyAndReceive64From32(int32_t *out_index, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) {
+        MESOSPHERE_PANIC("Stubbed SvcReplyAndReceive64From32 was called.");
+    }
+
+    Result ReplyAndReceiveWithUserBuffer64From32(int32_t *out_index, ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) {
+        MESOSPHERE_PANIC("Stubbed SvcReplyAndReceiveWithUserBuffer64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_kernel_debug.cpp b/libraries/libmesosphere/source/svc/kern_svc_kernel_debug.cpp
new file mode 100644
index 000000000..ced75f3e2
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_kernel_debug.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    void KernelDebug64(ams::svc::KernelDebugType kern_debug_type, uint64_t arg0, uint64_t arg1, uint64_t arg2) {
+        MESOSPHERE_PANIC("Stubbed SvcKernelDebug64 was called.");
+    }
+
+    void ChangeKernelTraceState64(ams::svc::KernelTraceState kern_trace_state) {
+        MESOSPHERE_PANIC("Stubbed SvcChangeKernelTraceState64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    void KernelDebug64From32(ams::svc::KernelDebugType kern_debug_type, uint64_t arg0, uint64_t arg1, uint64_t arg2) {
+        MESOSPHERE_PANIC("Stubbed SvcKernelDebug64From32 was called.");
+    }
+
+    void ChangeKernelTraceState64From32(ams::svc::KernelTraceState kern_trace_state) {
+        MESOSPHERE_PANIC("Stubbed SvcChangeKernelTraceState64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_light_ipc.cpp b/libraries/libmesosphere/source/svc/kern_svc_light_ipc.cpp
new file mode 100644
index 000000000..8e80e3932
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_light_ipc.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result SendSyncRequestLight64(ams::svc::Handle session_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcSendSyncRequestLight64 was called.");
+    }
+
+    Result ReplyAndReceiveLight64(ams::svc::Handle handle) {
+        MESOSPHERE_PANIC("Stubbed SvcReplyAndReceiveLight64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result SendSyncRequestLight64From32(ams::svc::Handle session_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcSendSyncRequestLight64From32 was called.");
+    }
+
+    Result ReplyAndReceiveLight64From32(ams::svc::Handle handle) {
+        MESOSPHERE_PANIC("Stubbed SvcReplyAndReceiveLight64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_lock.cpp b/libraries/libmesosphere/source/svc/kern_svc_lock.cpp
new file mode 100644
index 000000000..1c264dcc4
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_lock.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result ArbitrateLock64(ams::svc::Handle thread_handle, ams::svc::Address address, uint32_t tag) {
+        MESOSPHERE_PANIC("Stubbed SvcArbitrateLock64 was called.");
+    }
+
+    Result ArbitrateUnlock64(ams::svc::Address address) {
+        MESOSPHERE_PANIC("Stubbed SvcArbitrateUnlock64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result ArbitrateLock64From32(ams::svc::Handle thread_handle, ams::svc::Address address, uint32_t tag) {
+        MESOSPHERE_PANIC("Stubbed SvcArbitrateLock64From32 was called.");
+    }
+
+    Result ArbitrateUnlock64From32(ams::svc::Address address) {
+        MESOSPHERE_PANIC("Stubbed SvcArbitrateUnlock64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_memory.cpp b/libraries/libmesosphere/source/svc/kern_svc_memory.cpp
new file mode 100644
index 000000000..a3770ddf5
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_memory.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result SetMemoryPermission64(ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission perm) {
+        MESOSPHERE_PANIC("Stubbed SvcSetMemoryPermission64 was called.");
+    }
+
+    Result SetMemoryAttribute64(ams::svc::Address address, ams::svc::Size size, uint32_t mask, uint32_t attr) {
+        MESOSPHERE_PANIC("Stubbed SvcSetMemoryAttribute64 was called.");
+    }
+
+    Result MapMemory64(ams::svc::Address dst_address, ams::svc::Address src_address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcMapMemory64 was called.");
+    }
+
+    Result UnmapMemory64(ams::svc::Address dst_address, ams::svc::Address src_address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcUnmapMemory64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result SetMemoryPermission64From32(ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission perm) {
+        MESOSPHERE_PANIC("Stubbed SvcSetMemoryPermission64From32 was called.");
+    }
+
+    Result SetMemoryAttribute64From32(ams::svc::Address address, ams::svc::Size size, uint32_t mask, uint32_t attr) {
+        MESOSPHERE_PANIC("Stubbed SvcSetMemoryAttribute64From32 was called.");
+    }
+
+    Result MapMemory64From32(ams::svc::Address dst_address, ams::svc::Address src_address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcMapMemory64From32 was called.");
+    }
+
+    Result UnmapMemory64From32(ams::svc::Address dst_address, ams::svc::Address src_address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcUnmapMemory64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_physical_memory.cpp b/libraries/libmesosphere/source/svc/kern_svc_physical_memory.cpp
new file mode 100644
index 000000000..1c800096c
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_physical_memory.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result SetHeapSize64(ams::svc::Address *out_address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcSetHeapSize64 was called.");
+    }
+
+    Result MapPhysicalMemory64(ams::svc::Address address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcMapPhysicalMemory64 was called.");
+    }
+
+    Result UnmapPhysicalMemory64(ams::svc::Address address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcUnmapPhysicalMemory64 was called.");
+    }
+
+    Result MapPhysicalMemoryUnsafe64(ams::svc::Address address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcMapPhysicalMemoryUnsafe64 was called.");
+    }
+
+    Result UnmapPhysicalMemoryUnsafe64(ams::svc::Address address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcUnmapPhysicalMemoryUnsafe64 was called.");
+    }
+
+    Result SetUnsafeLimit64(ams::svc::Size limit) {
+        MESOSPHERE_PANIC("Stubbed SvcSetUnsafeLimit64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result SetHeapSize64From32(ams::svc::Address *out_address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcSetHeapSize64From32 was called.");
+    }
+
+    Result MapPhysicalMemory64From32(ams::svc::Address address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcMapPhysicalMemory64From32 was called.");
+    }
+
+    Result UnmapPhysicalMemory64From32(ams::svc::Address address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcUnmapPhysicalMemory64From32 was called.");
+    }
+
+    Result MapPhysicalMemoryUnsafe64From32(ams::svc::Address address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcMapPhysicalMemoryUnsafe64From32 was called.");
+    }
+
+    Result UnmapPhysicalMemoryUnsafe64From32(ams::svc::Address address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcUnmapPhysicalMemoryUnsafe64From32 was called.");
+    }
+
+    Result SetUnsafeLimit64From32(ams::svc::Size limit) {
+        MESOSPHERE_PANIC("Stubbed SvcSetUnsafeLimit64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_port.cpp b/libraries/libmesosphere/source/svc/kern_svc_port.cpp
new file mode 100644
index 000000000..e8b1a73a2
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_port.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result ConnectToNamedPort64(ams::svc::Handle *out_handle, KUserPointer<const char *> name) {
+        MESOSPHERE_PANIC("Stubbed SvcConnectToNamedPort64 was called.");
+    }
+
+    Result CreatePort64(ams::svc::Handle *out_server_handle, ams::svc::Handle *out_client_handle, int32_t max_sessions, bool is_light, ams::svc::Address name) {
+        MESOSPHERE_PANIC("Stubbed SvcCreatePort64 was called.");
+    }
+
+    Result ManageNamedPort64(ams::svc::Handle *out_server_handle, KUserPointer<const char *> name, int32_t max_sessions) {
+        MESOSPHERE_PANIC("Stubbed SvcManageNamedPort64 was called.");
+    }
+
+    Result ConnectToPort64(ams::svc::Handle *out_handle, ams::svc::Handle port) {
+        MESOSPHERE_PANIC("Stubbed SvcConnectToPort64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result ConnectToNamedPort64From32(ams::svc::Handle *out_handle, KUserPointer<const char *> name) {
+        MESOSPHERE_PANIC("Stubbed SvcConnectToNamedPort64From32 was called.");
+    }
+
+    Result CreatePort64From32(ams::svc::Handle *out_server_handle, ams::svc::Handle *out_client_handle, int32_t max_sessions, bool is_light, ams::svc::Address name) {
+        MESOSPHERE_PANIC("Stubbed SvcCreatePort64From32 was called.");
+    }
+
+    Result ManageNamedPort64From32(ams::svc::Handle *out_server_handle, KUserPointer<const char *> name, int32_t max_sessions) {
+        MESOSPHERE_PANIC("Stubbed SvcManageNamedPort64From32 was called.");
+    }
+
+    Result ConnectToPort64From32(ams::svc::Handle *out_handle, ams::svc::Handle port) {
+        MESOSPHERE_PANIC("Stubbed SvcConnectToPort64From32 was called.");
+    }
+
+}
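For context, these port SVCs are the kernel side of userspace name resolution; a client connects to a named port roughly like this (illustrative userland code using the libnx wrappers, not part of this diff):

    Handle session;
    Result rc = svcConnectToNamedPort(&session, "sm:");
    if (R_SUCCEEDED(rc)) {
        /* session is now a client session to the named port's server. */
        svcCloseHandle(session);
    }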
diff --git a/libraries/libmesosphere/source/svc/kern_svc_power_management.cpp b/libraries/libmesosphere/source/svc/kern_svc_power_management.cpp
new file mode 100644
index 000000000..41b0b8e87
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_power_management.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    void SleepSystem64() {
+        MESOSPHERE_PANIC("Stubbed SvcSleepSystem64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    void SleepSystem64From32() {
+        MESOSPHERE_PANIC("Stubbed SvcSleepSystem64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_process.cpp b/libraries/libmesosphere/source/svc/kern_svc_process.cpp
new file mode 100644
index 000000000..17350db51
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_process.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    void ExitProcess64() {
+        MESOSPHERE_PANIC("Stubbed SvcExitProcess64 was called.");
+    }
+
+    Result GetProcessId64(uint64_t *out_process_id, ams::svc::Handle process_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcGetProcessId64 was called.");
+    }
+
+    Result GetProcessList64(int32_t *out_num_processes, KUserPointer<uint64_t *> out_process_ids, int32_t max_out_count) {
+        MESOSPHERE_PANIC("Stubbed SvcGetProcessList64 was called.");
+    }
+
+    Result CreateProcess64(ams::svc::Handle *out_handle, KUserPointer<const ams::svc::lp64::CreateProcessParameter *> parameters, KUserPointer<const uint32_t *> caps, int32_t num_caps) {
+        MESOSPHERE_PANIC("Stubbed SvcCreateProcess64 was called.");
+    }
+
+    Result StartProcess64(ams::svc::Handle process_handle, int32_t priority, int32_t core_id, uint64_t main_thread_stack_size) {
+        MESOSPHERE_PANIC("Stubbed SvcStartProcess64 was called.");
+    }
+
+    Result TerminateProcess64(ams::svc::Handle process_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcTerminateProcess64 was called.");
+    }
+
+    Result GetProcessInfo64(int64_t *out_info, ams::svc::Handle process_handle, ams::svc::ProcessInfoType info_type) {
+        MESOSPHERE_PANIC("Stubbed SvcGetProcessInfo64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    void ExitProcess64From32() {
+        MESOSPHERE_PANIC("Stubbed SvcExitProcess64From32 was called.");
+    }
+
+    Result GetProcessId64From32(uint64_t *out_process_id, ams::svc::Handle process_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcGetProcessId64From32 was called.");
+    }
+
+    Result GetProcessList64From32(int32_t *out_num_processes, KUserPointer<uint64_t *> out_process_ids, int32_t max_out_count) {
+        MESOSPHERE_PANIC("Stubbed SvcGetProcessList64From32 was called.");
+    }
+
+    Result CreateProcess64From32(ams::svc::Handle *out_handle, KUserPointer<const ams::svc::ilp32::CreateProcessParameter *> parameters, KUserPointer<const uint32_t *> caps, int32_t num_caps) {
+        MESOSPHERE_PANIC("Stubbed SvcCreateProcess64From32 was called.");
+    }
+
+    Result StartProcess64From32(ams::svc::Handle process_handle, int32_t priority, int32_t core_id, uint64_t main_thread_stack_size) {
+        MESOSPHERE_PANIC("Stubbed SvcStartProcess64From32 was called.");
+    }
+
+    Result TerminateProcess64From32(ams::svc::Handle process_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcTerminateProcess64From32 was called.");
+    }
+
+    Result GetProcessInfo64From32(int64_t *out_info, ams::svc::Handle process_handle, ams::svc::ProcessInfoType info_type) {
+        MESOSPHERE_PANIC("Stubbed SvcGetProcessInfo64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_process_memory.cpp b/libraries/libmesosphere/source/svc/kern_svc_process_memory.cpp
new file mode 100644
index 000000000..2e6348493
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_process_memory.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result SetProcessMemoryPermission64(ams::svc::Handle process_handle, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) {
+        MESOSPHERE_PANIC("Stubbed SvcSetProcessMemoryPermission64 was called.");
+    }
+
+    Result MapProcessMemory64(ams::svc::Address dst_address, ams::svc::Handle process_handle, uint64_t src_address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcMapProcessMemory64 was called.");
+    }
+
+    Result UnmapProcessMemory64(ams::svc::Address dst_address, ams::svc::Handle process_handle, uint64_t src_address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcUnmapProcessMemory64 was called.");
+    }
+
+    Result MapProcessCodeMemory64(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
+        MESOSPHERE_PANIC("Stubbed SvcMapProcessCodeMemory64 was called.");
+    }
+
+    Result UnmapProcessCodeMemory64(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
+        MESOSPHERE_PANIC("Stubbed SvcUnmapProcessCodeMemory64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result SetProcessMemoryPermission64From32(ams::svc::Handle process_handle, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) {
+        MESOSPHERE_PANIC("Stubbed SvcSetProcessMemoryPermission64From32 was called.");
+    }
+
+    Result MapProcessMemory64From32(ams::svc::Address dst_address, ams::svc::Handle process_handle, uint64_t src_address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcMapProcessMemory64From32 was called.");
+    }
+
+    Result UnmapProcessMemory64From32(ams::svc::Address dst_address, ams::svc::Handle process_handle, uint64_t src_address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcUnmapProcessMemory64From32 was called.");
+    }
+
+    Result MapProcessCodeMemory64From32(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
+        MESOSPHERE_PANIC("Stubbed SvcMapProcessCodeMemory64From32 was called.");
+    }
+
+    Result UnmapProcessCodeMemory64From32(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
+        MESOSPHERE_PANIC("Stubbed SvcUnmapProcessCodeMemory64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_processor.cpp b/libraries/libmesosphere/source/svc/kern_svc_processor.cpp
new file mode 100644
index 000000000..c89185f51
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_processor.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    int32_t GetCurrentProcessorNumber64() {
+        MESOSPHERE_PANIC("Stubbed SvcGetCurrentProcessorNumber64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    int32_t GetCurrentProcessorNumber64From32() {
+        MESOSPHERE_PANIC("Stubbed SvcGetCurrentProcessorNumber64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_query_memory.cpp b/libraries/libmesosphere/source/svc/kern_svc_query_memory.cpp
new file mode 100644
index 000000000..cf1480ef4
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_query_memory.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+        Result QueryProcessMemory(ams::svc::MemoryInfo *out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle process_handle, uintptr_t address) {
+            MESOSPHERE_LOG("%s: QueryProcessMemory(0x%08x, 0x%zx) was called\n", GetCurrentProcess().GetName(), process_handle, address);
+
+            /* Get the process. */
+            KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObject<KProcess>(process_handle);
+            R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle());
+
+            /* Query the mapping's info. */
+            KMemoryInfo info;
+            R_TRY(process->GetPageTable().QueryInfo(std::addressof(info), out_page_info, address));
+
+            /* Write output. */
+            *out_memory_info = info.GetSvcMemoryInfo();
+            return ResultSuccess();
+        }
+
+        Result QueryMemory(ams::svc::MemoryInfo *out_memory_info, ams::svc::PageInfo *out_page_info, uintptr_t address) {
+            /* Query memory is just QueryProcessMemory on the current process. */
+            return QueryProcessMemory(out_memory_info, out_page_info, ams::svc::PseudoHandle::CurrentProcess, address);
+        }
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result QueryMemory64(KUserPointer<ams::svc::lp64::MemoryInfo *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Address address) {
+        /* Get an ams::svc::MemoryInfo for the region. */
+        ams::svc::MemoryInfo info = {};
+        R_TRY(QueryMemory(std::addressof(info), out_page_info, address));
+
+        /* Try to copy to userspace. In the 64-bit case, ams::svc::lp64::MemoryInfo is the same as ams::svc::MemoryInfo. */
+        static_assert(sizeof(ams::svc::MemoryInfo) == sizeof(ams::svc::lp64::MemoryInfo));
+        R_TRY(out_memory_info.CopyFrom(std::addressof(info)));
+
+        return ResultSuccess();
+    }
+
+    Result QueryProcessMemory64(KUserPointer<ams::svc::lp64::MemoryInfo *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle process_handle, uint64_t address) {
+        MESOSPHERE_PANIC("Stubbed SvcQueryProcessMemory64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result QueryMemory64From32(KUserPointer<ams::svc::ilp32::MemoryInfo *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Address address) {
+        MESOSPHERE_PANIC("Stubbed SvcQueryMemory64From32 was called.");
+    }
+
+    Result QueryProcessMemory64From32(KUserPointer<ams::svc::ilp32::MemoryInfo *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle process_handle, uint64_t address) {
+        MESOSPHERE_PANIC("Stubbed SvcQueryProcessMemory64From32 was called.");
+    }
+
+}
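Note the asymmetry QueryMemory64 relies on: in the lp64 case the kernel-internal ams::svc::MemoryInfo layout matches the user-visible one, so a static_assert plus a direct CopyFrom suffices. The still-stubbed QueryMemory64From32 cannot take that shortcut, because the ilp32 layout differs. A plausible shape for it, once implemented, is the following sketch (hypothetical; the field-by-field conversion is elided):

    Result QueryMemory64From32(KUserPointer<ams::svc::ilp32::MemoryInfo *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Address address) {
        /* Query via the shared helper in the anonymous namespace. */
        ams::svc::MemoryInfo info = {};
        R_TRY(QueryMemory(std::addressof(info), out_page_info, address));

        /* The ilp32 layout differs from the native one, so convert before copying out. */
        ams::svc::ilp32::MemoryInfo converted = {};
        /* ... copy each field of info into converted ... */

        R_TRY(out_memory_info.CopyFrom(std::addressof(converted)));
        return ResultSuccess();
    }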
diff --git a/libraries/libmesosphere/source/svc/kern_svc_register.cpp b/libraries/libmesosphere/source/svc/kern_svc_register.cpp
new file mode 100644
index 000000000..fedf164b1
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_register.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result ReadWriteRegister64(uint32_t *out_value, ams::svc::PhysicalAddress address, uint32_t mask, uint32_t value) {
+        MESOSPHERE_PANIC("Stubbed SvcReadWriteRegister64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result ReadWriteRegister64From32(uint32_t *out_value, ams::svc::PhysicalAddress address, uint32_t mask, uint32_t value) {
+        MESOSPHERE_PANIC("Stubbed SvcReadWriteRegister64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_resource_limit.cpp b/libraries/libmesosphere/source/svc/kern_svc_resource_limit.cpp
new file mode 100644
index 000000000..af51a61b1
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_resource_limit.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result GetResourceLimitLimitValue64(int64_t *out_limit_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
+        MESOSPHERE_PANIC("Stubbed SvcGetResourceLimitLimitValue64 was called.");
+    }
+
+    Result GetResourceLimitCurrentValue64(int64_t *out_current_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
+        MESOSPHERE_PANIC("Stubbed SvcGetResourceLimitCurrentValue64 was called.");
+    }
+
+    Result CreateResourceLimit64(ams::svc::Handle *out_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcCreateResourceLimit64 was called.");
+    }
+
+    Result SetResourceLimitLimitValue64(ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which, int64_t limit_value) {
+        MESOSPHERE_PANIC("Stubbed SvcSetResourceLimitLimitValue64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result GetResourceLimitLimitValue64From32(int64_t *out_limit_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
+        MESOSPHERE_PANIC("Stubbed SvcGetResourceLimitLimitValue64From32 was called.");
+    }
+
+    Result GetResourceLimitCurrentValue64From32(int64_t *out_current_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
+        MESOSPHERE_PANIC("Stubbed SvcGetResourceLimitCurrentValue64From32 was called.");
+    }
+
+    Result CreateResourceLimit64From32(ams::svc::Handle *out_handle) {
+        MESOSPHERE_PANIC("Stubbed SvcCreateResourceLimit64From32 was called.");
+    }
+
SvcSetResourceLimitLimitValue64From32 was called."); + } + +} diff --git a/libraries/libmesosphere/source/svc/kern_svc_secure_monitor_call.cpp b/libraries/libmesosphere/source/svc/kern_svc_secure_monitor_call.cpp new file mode 100644 index 000000000..ccbb1b906 --- /dev/null +++ b/libraries/libmesosphere/source/svc/kern_svc_secure_monitor_call.cpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern::svc { + + /* ============================= Common ============================= */ + + namespace { + + + + } + + /* ============================= 64 ABI ============================= */ + + void CallSecureMonitor64(ams::svc::lp64::SecureMonitorArguments *args) { + MESOSPHERE_PANIC("Stubbed SvcCallSecureMonitor64 was called."); + } + + /* ============================= 64From32 ABI ============================= */ + + void CallSecureMonitor64From32(ams::svc::ilp32::SecureMonitorArguments *args) { + MESOSPHERE_PANIC("Stubbed SvcCallSecureMonitor64From32 was called."); + } + +} diff --git a/libraries/libmesosphere/source/svc/kern_svc_session.cpp b/libraries/libmesosphere/source/svc/kern_svc_session.cpp new file mode 100644 index 000000000..36e540c0b --- /dev/null +++ b/libraries/libmesosphere/source/svc/kern_svc_session.cpp @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#include + +namespace ams::kern::svc { + + /* ============================= Common ============================= */ + + namespace { + + + + } + + /* ============================= 64 ABI ============================= */ + + Result CreateSession64(ams::svc::Handle *out_server_session_handle, ams::svc::Handle *out_client_session_handle, bool is_light, ams::svc::Address name) { + MESOSPHERE_PANIC("Stubbed SvcCreateSession64 was called."); + } + + Result AcceptSession64(ams::svc::Handle *out_handle, ams::svc::Handle port) { + MESOSPHERE_PANIC("Stubbed SvcAcceptSession64 was called."); + } + + /* ============================= 64From32 ABI ============================= */ + + Result CreateSession64From32(ams::svc::Handle *out_server_session_handle, ams::svc::Handle *out_client_session_handle, bool is_light, ams::svc::Address name) { + MESOSPHERE_PANIC("Stubbed SvcCreateSession64From32 was called."); + } + + Result AcceptSession64From32(ams::svc::Handle *out_handle, ams::svc::Handle port) { + MESOSPHERE_PANIC("Stubbed SvcAcceptSession64From32 was called."); + } + +} diff --git a/libraries/libmesosphere/source/svc/kern_svc_shared_memory.cpp b/libraries/libmesosphere/source/svc/kern_svc_shared_memory.cpp new file mode 100644 index 000000000..fbd77c023 --- /dev/null +++ b/libraries/libmesosphere/source/svc/kern_svc_shared_memory.cpp @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#include + +namespace ams::kern::svc { + + /* ============================= Common ============================= */ + + namespace { + + + + } + + /* ============================= 64 ABI ============================= */ + + Result MapSharedMemory64(ams::svc::Handle shmem_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission map_perm) { + MESOSPHERE_PANIC("Stubbed SvcMapSharedMemory64 was called."); + } + + Result UnmapSharedMemory64(ams::svc::Handle shmem_handle, ams::svc::Address address, ams::svc::Size size) { + MESOSPHERE_PANIC("Stubbed SvcUnmapSharedMemory64 was called."); + } + + Result CreateSharedMemory64(ams::svc::Handle *out_handle, ams::svc::Size size, ams::svc::MemoryPermission owner_perm, ams::svc::MemoryPermission remote_perm) { + MESOSPHERE_PANIC("Stubbed SvcCreateSharedMemory64 was called."); + } + + /* ============================= 64From32 ABI ============================= */ + + Result MapSharedMemory64From32(ams::svc::Handle shmem_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission map_perm) { + MESOSPHERE_PANIC("Stubbed SvcMapSharedMemory64From32 was called."); + } + + Result UnmapSharedMemory64From32(ams::svc::Handle shmem_handle, ams::svc::Address address, ams::svc::Size size) { + MESOSPHERE_PANIC("Stubbed SvcUnmapSharedMemory64From32 was called."); + } + + Result CreateSharedMemory64From32(ams::svc::Handle *out_handle, ams::svc::Size size, ams::svc::MemoryPermission owner_perm, ams::svc::MemoryPermission remote_perm) { + MESOSPHERE_PANIC("Stubbed SvcCreateSharedMemory64From32 was called."); + } + +} diff --git a/libraries/libmesosphere/source/svc/kern_svc_synchronization.cpp b/libraries/libmesosphere/source/svc/kern_svc_synchronization.cpp new file mode 100644 index 000000000..7564da89a --- /dev/null +++ b/libraries/libmesosphere/source/svc/kern_svc_synchronization.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result CloseHandle64(ams::svc::Handle handle) {
+        MESOSPHERE_PANIC("Stubbed SvcCloseHandle64 was called.");
+    }
+
+    Result ResetSignal64(ams::svc::Handle handle) {
+        MESOSPHERE_PANIC("Stubbed SvcResetSignal64 was called.");
+    }
+
+    Result WaitSynchronization64(int32_t *out_index, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, int64_t timeout_ns) {
+        MESOSPHERE_PANIC("Stubbed SvcWaitSynchronization64 was called.");
+    }
+
+    Result CancelSynchronization64(ams::svc::Handle handle) {
+        MESOSPHERE_PANIC("Stubbed SvcCancelSynchronization64 was called.");
+    }
+
+    void SynchronizePreemptionState64() {
+        MESOSPHERE_PANIC("Stubbed SvcSynchronizePreemptionState64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result CloseHandle64From32(ams::svc::Handle handle) {
+        MESOSPHERE_PANIC("Stubbed SvcCloseHandle64From32 was called.");
+    }
+
+    Result ResetSignal64From32(ams::svc::Handle handle) {
+        MESOSPHERE_PANIC("Stubbed SvcResetSignal64From32 was called.");
+    }
+
+    Result WaitSynchronization64From32(int32_t *out_index, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, int64_t timeout_ns) {
+        MESOSPHERE_PANIC("Stubbed SvcWaitSynchronization64From32 was called.");
+    }
+
+    Result CancelSynchronization64From32(ams::svc::Handle handle) {
+        MESOSPHERE_PANIC("Stubbed SvcCancelSynchronization64From32 was called.");
+    }
+
+    void SynchronizePreemptionState64From32() {
+        MESOSPHERE_PANIC("Stubbed SvcSynchronizePreemptionState64From32 was called.");
+    }
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_thread.cpp b/libraries/libmesosphere/source/svc/kern_svc_thread.cpp
new file mode 100644
index 000000000..5c25342ee
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_thread.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */ +#include + +namespace ams::kern::svc { + + /* ============================= Common ============================= */ + + namespace { + + + + } + + /* ============================= 64 ABI ============================= */ + + Result CreateThread64(ams::svc::Handle *out_handle, ams::svc::ThreadFunc func, ams::svc::Address arg, ams::svc::Address stack_bottom, int32_t priority, int32_t core_id) { + MESOSPHERE_PANIC("Stubbed SvcCreateThread64 was called."); + } + + Result StartThread64(ams::svc::Handle thread_handle) { + MESOSPHERE_PANIC("Stubbed SvcStartThread64 was called."); + } + + void ExitThread64() { + MESOSPHERE_PANIC("Stubbed SvcExitThread64 was called."); + } + + void SleepThread64(int64_t ns) { + MESOSPHERE_PANIC("Stubbed SvcSleepThread64 was called."); + } + + Result GetThreadPriority64(int32_t *out_priority, ams::svc::Handle thread_handle) { + MESOSPHERE_PANIC("Stubbed SvcGetThreadPriority64 was called."); + } + + Result SetThreadPriority64(ams::svc::Handle thread_handle, int32_t priority) { + MESOSPHERE_PANIC("Stubbed SvcSetThreadPriority64 was called."); + } + + Result GetThreadCoreMask64(int32_t *out_core_id, uint64_t *out_affinity_mask, ams::svc::Handle thread_handle) { + MESOSPHERE_PANIC("Stubbed SvcGetThreadCoreMask64 was called."); + } + + Result SetThreadCoreMask64(ams::svc::Handle thread_handle, int32_t core_id, uint64_t affinity_mask) { + MESOSPHERE_PANIC("Stubbed SvcSetThreadCoreMask64 was called."); + } + + Result GetThreadId64(uint64_t *out_thread_id, ams::svc::Handle thread_handle) { + MESOSPHERE_PANIC("Stubbed SvcGetThreadId64 was called."); + } + + Result GetDebugFutureThreadInfo64(ams::svc::lp64::LastThreadContext *out_context, uint64_t *thread_id, ams::svc::Handle debug_handle, int64_t ns) { + MESOSPHERE_PANIC("Stubbed SvcGetDebugFutureThreadInfo64 was called."); + } + + Result GetLastThreadInfo64(ams::svc::lp64::LastThreadContext *out_context, ams::svc::Address *out_tls_address, uint32_t *out_flags) { + MESOSPHERE_PANIC("Stubbed SvcGetLastThreadInfo64 was called."); + } + + Result GetThreadContext364(KUserPointer out_context, ams::svc::Handle thread_handle) { + MESOSPHERE_PANIC("Stubbed SvcGetThreadContext364 was called."); + } + + Result GetThreadList64(int32_t *out_num_threads, KUserPointer out_thread_ids, int32_t max_out_count, ams::svc::Handle debug_handle) { + MESOSPHERE_PANIC("Stubbed SvcGetThreadList64 was called."); + } + + /* ============================= 64From32 ABI ============================= */ + + Result CreateThread64From32(ams::svc::Handle *out_handle, ams::svc::ThreadFunc func, ams::svc::Address arg, ams::svc::Address stack_bottom, int32_t priority, int32_t core_id) { + MESOSPHERE_PANIC("Stubbed SvcCreateThread64From32 was called."); + } + + Result StartThread64From32(ams::svc::Handle thread_handle) { + MESOSPHERE_PANIC("Stubbed SvcStartThread64From32 was called."); + } + + void ExitThread64From32() { + MESOSPHERE_PANIC("Stubbed SvcExitThread64From32 was called."); + } + + void SleepThread64From32(int64_t ns) { + MESOSPHERE_PANIC("Stubbed SvcSleepThread64From32 was called."); + } + + Result GetThreadPriority64From32(int32_t *out_priority, ams::svc::Handle thread_handle) { + MESOSPHERE_PANIC("Stubbed SvcGetThreadPriority64From32 was called."); + } + + Result SetThreadPriority64From32(ams::svc::Handle thread_handle, int32_t priority) { + MESOSPHERE_PANIC("Stubbed SvcSetThreadPriority64From32 was called."); + } + + Result GetThreadCoreMask64From32(int32_t *out_core_id, uint64_t *out_affinity_mask, ams::svc::Handle thread_handle) { + 
MESOSPHERE_PANIC("Stubbed SvcGetThreadCoreMask64From32 was called."); + } + + Result SetThreadCoreMask64From32(ams::svc::Handle thread_handle, int32_t core_id, uint64_t affinity_mask) { + MESOSPHERE_PANIC("Stubbed SvcSetThreadCoreMask64From32 was called."); + } + + Result GetThreadId64From32(uint64_t *out_thread_id, ams::svc::Handle thread_handle) { + MESOSPHERE_PANIC("Stubbed SvcGetThreadId64From32 was called."); + } + + Result GetDebugFutureThreadInfo64From32(ams::svc::ilp32::LastThreadContext *out_context, uint64_t *thread_id, ams::svc::Handle debug_handle, int64_t ns) { + MESOSPHERE_PANIC("Stubbed SvcGetDebugFutureThreadInfo64From32 was called."); + } + + Result GetLastThreadInfo64From32(ams::svc::ilp32::LastThreadContext *out_context, ams::svc::Address *out_tls_address, uint32_t *out_flags) { + MESOSPHERE_PANIC("Stubbed SvcGetLastThreadInfo64From32 was called."); + } + + Result GetThreadContext364From32(KUserPointer out_context, ams::svc::Handle thread_handle) { + MESOSPHERE_PANIC("Stubbed SvcGetThreadContext364From32 was called."); + } + + Result GetThreadList64From32(int32_t *out_num_threads, KUserPointer out_thread_ids, int32_t max_out_count, ams::svc::Handle debug_handle) { + MESOSPHERE_PANIC("Stubbed SvcGetThreadList64From32 was called."); + } + +} diff --git a/libraries/libmesosphere/source/svc/kern_svc_tick.cpp b/libraries/libmesosphere/source/svc/kern_svc_tick.cpp new file mode 100644 index 000000000..991e9b5e4 --- /dev/null +++ b/libraries/libmesosphere/source/svc/kern_svc_tick.cpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern::svc { + + /* ============================= Common ============================= */ + + namespace { + + + + } + + /* ============================= 64 ABI ============================= */ + + int64_t GetSystemTick64() { + MESOSPHERE_PANIC("Stubbed SvcGetSystemTick64 was called."); + } + + /* ============================= 64From32 ABI ============================= */ + + int64_t GetSystemTick64From32() { + MESOSPHERE_PANIC("Stubbed SvcGetSystemTick64From32 was called."); + } + +} diff --git a/libraries/libmesosphere/source/svc/kern_svc_transfer_memory.cpp b/libraries/libmesosphere/source/svc/kern_svc_transfer_memory.cpp new file mode 100644 index 000000000..922c06ba7 --- /dev/null +++ b/libraries/libmesosphere/source/svc/kern_svc_transfer_memory.cpp @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::svc {
+
+    /* ============================= Common ============================= */
+
+    namespace {
+
+
+
+    }
+
+    /* ============================= 64 ABI ============================= */
+
+    Result MapTransferMemory64(ams::svc::Handle trmem_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission owner_perm) {
+        MESOSPHERE_PANIC("Stubbed SvcMapTransferMemory64 was called.");
+    }
+
+    Result UnmapTransferMemory64(ams::svc::Handle trmem_handle, ams::svc::Address address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcUnmapTransferMemory64 was called.");
+    }
+
+    Result CreateTransferMemory64(ams::svc::Handle *out_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission map_perm) {
+        MESOSPHERE_PANIC("Stubbed SvcCreateTransferMemory64 was called.");
+    }
+
+    /* ============================= 64From32 ABI ============================= */
+
+    Result MapTransferMemory64From32(ams::svc::Handle trmem_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission owner_perm) {
+        MESOSPHERE_PANIC("Stubbed SvcMapTransferMemory64From32 was called.");
+    }
+
+    Result UnmapTransferMemory64From32(ams::svc::Handle trmem_handle, ams::svc::Address address, ams::svc::Size size) {
+        MESOSPHERE_PANIC("Stubbed SvcUnmapTransferMemory64From32 was called.");
+    }
+
+    Result CreateTransferMemory64From32(ams::svc::Handle *out_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission map_perm) {
+        MESOSPHERE_PANIC("Stubbed SvcCreateTransferMemory64From32 was called.");
+    }
+
+}
diff --git a/libraries/libstratosphere/include/stratosphere/ncm/ncm_types.hpp b/libraries/libstratosphere/include/stratosphere/ncm/ncm_types.hpp
index 5c1b9dd06..10992f9e6 100644
--- a/libraries/libstratosphere/include/stratosphere/ncm/ncm_types.hpp
+++ b/libraries/libstratosphere/include/stratosphere/ncm/ncm_types.hpp
@@ -34,9 +34,9 @@ namespace ams::ncm {
 
     /* Program IDs (Formerly: Title IDs).
*/
     struct ProgramId {
-        u64 value;
+        svc::ProgramId value;
 
-        inline explicit operator u64() const {
+        inline explicit operator svc::ProgramId() const {
             return this->value;
         }
diff --git a/libraries/libstratosphere/include/stratosphere/util/util_tinymt.hpp b/libraries/libstratosphere/include/stratosphere/util/util_tinymt.hpp
index a5fea59d4..6c1a86233 100644
--- a/libraries/libstratosphere/include/stratosphere/util/util_tinymt.hpp
+++ b/libraries/libstratosphere/include/stratosphere/util/util_tinymt.hpp
@@ -41,6 +41,14 @@ namespace ams::util {
 
             static constexpr int MinimumInitIterations   = 8;
             static constexpr int NumDiscardedInitOutputs = 8;
+
+            static constexpr inline u32 XorByShifted27(u32 value) {
+                return value ^ (value >> 27);
+            }
+
+            static constexpr inline u32 XorByShifted30(u32 value) {
+                return value ^ (value >> 30);
+            }
         private:
             State state;
         private:
@@ -49,22 +57,147 @@ namespace ams::util {
             u32 GenerateRandomU24() { return (this->GenerateRandomU32() >> 8); }
 
-            static void GenerateInitialValuePlus(TinyMT::State *state, int index, u32 value);
-            static void GenerateInitialValueXor(TinyMT::State *state, int index);
+            static void GenerateInitialValuePlus(TinyMT::State *state, int index, u32 value) {
+                u32 &state0 = state->data[(index + 0) % NumStateWords];
+                u32 &state1 = state->data[(index + 1) % NumStateWords];
+                u32 &state2 = state->data[(index + 2) % NumStateWords];
+                u32 &state3 = state->data[(index + 3) % NumStateWords];
+
+                const u32 x = XorByShifted27(state0 ^ state1 ^ state3) * ParamPlus;
+                const u32 y = x + index + value;
+
+                state0  = y;
+                state1 += x;
+                state2 += y;
+            }
+
+            static void GenerateInitialValueXor(TinyMT::State *state, int index) {
+                u32 &state0 = state->data[(index + 0) % NumStateWords];
+                u32 &state1 = state->data[(index + 1) % NumStateWords];
+                u32 &state2 = state->data[(index + 2) % NumStateWords];
+                u32 &state3 = state->data[(index + 3) % NumStateWords];
+
+                const u32 x = XorByShifted27(state0 + state1 + state3) * ParamXor;
+                const u32 y = x - index;
+
+                state0  = y;
+                state1 ^= x;
+                state2 ^= y;
+            }
         public:
+            constexpr TinyMT() : state() { /* ... */ }
+
             /* Public API. */
 
             /* Initialization. */
-            void Initialize(u32 seed);
-            void Initialize(const u32 *seed, int seed_count);
+            void Initialize(u32 seed) {
+                this->state.data[0] = seed;
+                this->state.data[1] = ParamMat1;
+                this->state.data[2] = ParamMat2;
+                this->state.data[3] = ParamTmat;
+
+                for (int i = 1; i < MinimumInitIterations; i++) {
+                    const u32 mixed = XorByShifted30(this->state.data[(i - 1) % NumStateWords]);
+                    this->state.data[i % NumStateWords] ^= mixed * ParamMult + i;
+                }
+
+                this->FinalizeInitialization();
+            }
+
+            void Initialize(const u32 *seed, int seed_count) {
+                this->state.data[0] = 0;
+                this->state.data[1] = ParamMat1;
+                this->state.data[2] = ParamMat2;
+                this->state.data[3] = ParamTmat;
+
+                {
+                    const int num_init_iterations = std::max(seed_count + 1, MinimumInitIterations) - 1;
+
+                    GenerateInitialValuePlus(&this->state, 0, seed_count);
+
+                    for (int i = 0; i < num_init_iterations; i++) {
+                        GenerateInitialValuePlus(&this->state, (i + 1) % NumStateWords, (i < seed_count) ? seed[i] : 0);
+                    }
+
+                    for (int i = 0; i < static_cast<int>(NumStateWords); i++) {
+                        GenerateInitialValueXor(&this->state, (i + 1 + num_init_iterations) % NumStateWords);
+                    }
+                }
+
+                this->FinalizeInitialization();
+            }
 
             /* State management.
*/ - void GetState(TinyMT::State *out) const; - void SetState(const TinyMT::State *state); + void GetState(TinyMT::State *out) const { + std::memcpy(out->data, this->state.data, sizeof(this->state)); + } + + void SetState(const TinyMT::State *state) { + std::memcpy(this->state.data, state->data, sizeof(this->state)); + } /* Random generation. */ - void GenerateRandomBytes(void *dst, size_t size); - u32 GenerateRandomU32(); + NOINLINE void GenerateRandomBytes(void *dst, size_t size) { + const uintptr_t start = reinterpret_cast(dst); + const uintptr_t end = start + size; + const uintptr_t aligned_start = util::AlignUp(start, 4); + const uintptr_t aligned_end = util::AlignDown(end, 4); + + /* Make sure we're aligned. */ + if (start < aligned_start) { + const u32 rnd = this->GenerateRandomU32(); + std::memcpy(dst, &rnd, aligned_start - start); + } + + /* Write as many aligned u32s as we can. */ + { + u32 * cur_dst = reinterpret_cast(aligned_start); + u32 * const end_dst = reinterpret_cast(aligned_end); + + while (cur_dst < end_dst) { + *(cur_dst++) = this->GenerateRandomU32(); + } + } + + /* Handle any leftover unaligned data. */ + if (aligned_end < end) { + const u32 rnd = this->GenerateRandomU32(); + std::memcpy(reinterpret_cast(aligned_end), &rnd, end - aligned_end); + } + } + + NOINLINE u32 GenerateRandomU32() { + /* Advance state. */ + const u32 x0 = (this->state.data[0] & TopBitmask) ^ this->state.data[1] ^ this->state.data[2]; + const u32 y0 = this->state.data[3]; + const u32 x1 = x0 ^ (x0 << 1); + const u32 y1 = y0 ^ (y0 >> 1) ^ x1; + + const u32 state0 = this->state.data[1]; + u32 state1 = this->state.data[2]; + u32 state2 = x1 ^ (y1 << 10); + const u32 state3 = y1; + + if ((y1 & 1) != 0) { + state1 ^= ParamMat1; + state2 ^= ParamMat2; + } + + this->state.data[0] = state0; + this->state.data[1] = state1; + this->state.data[2] = state2; + this->state.data[3] = state3; + + /* Temper. 
*/ + const u32 t1 = state0 + (state2 >> 8); + u32 t0 = state3 ^ t1; + + if ((t1 & 1) != 0) { + t0 ^= ParamTmat; + } + + return t0; + } inline u64 GenerateRandomU64() { const u32 lo = this->GenerateRandomU32(); diff --git a/libraries/libstratosphere/source/map/map_api.cpp b/libraries/libstratosphere/source/map/map_api.cpp index 2bbffff44..5c482c903 100644 --- a/libraries/libstratosphere/source/map/map_api.cpp +++ b/libraries/libstratosphere/source/map/map_api.cpp @@ -100,7 +100,7 @@ namespace ams::map { MappedCodeMemory tmp_mcm(process_handle, try_address, base_address, size); R_TRY_CATCH(tmp_mcm.GetResult()) { - R_CATCH(svc::ResultInvalidCurrentMemoryState) { continue; } + R_CATCH(svc::ResultInvalidCurrentMemory) { continue; } } R_END_TRY_CATCH; if (!CanAddGuardRegionsInProcess(process_handle, try_address, size)) { @@ -136,7 +136,7 @@ namespace ams::map { MappedCodeMemory tmp_mcm(process_handle, try_address, base_address, size); R_TRY_CATCH(tmp_mcm.GetResult()) { - R_CATCH(svc::ResultInvalidCurrentMemoryState) { continue; } + R_CATCH(svc::ResultInvalidCurrentMemory) { continue; } } R_END_TRY_CATCH; if (!CanAddGuardRegionsInProcess(process_handle, try_address, size)) { diff --git a/libraries/libstratosphere/source/util/util_tinymt.cpp b/libraries/libstratosphere/source/util/util_tinymt.cpp deleted file mode 100644 index 9506dc69e..000000000 --- a/libraries/libstratosphere/source/util/util_tinymt.cpp +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Copyright (c) 2018-2020 Atmosphère-NX - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
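With the implementation moved inline (the source file deleted below), TinyMT is usable header-only. A short usage sketch of the public API shown above; the umbrella include is an assumption:

    #include <stratosphere.hpp> /* assumed umbrella header */

    void ExampleTinyMT() {
        ams::util::TinyMT rng;
        rng.Initialize(0xC0FFEEu);

        const u32 word = rng.GenerateRandomU32();
        const u64 wide = rng.GenerateRandomU64();

        /* GenerateRandomBytes handles the unaligned head and tail itself. */
        u8 buffer[13];
        rng.GenerateRandomBytes(buffer, sizeof(buffer));

        /* State can be snapshotted and restored for reproducible streams. */
        ams::util::TinyMT::State snapshot;
        rng.GetState(std::addressof(snapshot));
        rng.SetState(std::addressof(snapshot));

        static_cast<void>(word);
        static_cast<void>(wide);
    }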
- */ - -#include - -namespace ams::util { - - namespace { - - constexpr inline u32 XorByShifted27(u32 value) { - return value ^ (value >> 27); - } - - constexpr inline u32 XorByShifted30(u32 value) { - return value ^ (value >> 30); - } - - } - - void TinyMT::GenerateInitialValuePlus(TinyMT::State *state, int index, u32 value) { - u32 &state0 = state->data[(index + 0) % NumStateWords]; - u32 &state1 = state->data[(index + 1) % NumStateWords]; - u32 &state2 = state->data[(index + 2) % NumStateWords]; - u32 &state3 = state->data[(index + 3) % NumStateWords]; - - const u32 x = XorByShifted27(state0 ^ state1 ^ state3) * ParamPlus; - const u32 y = x + index + value; - - state0 = y; - state1 += x; - state2 += y; - } - - void TinyMT::GenerateInitialValueXor(TinyMT::State *state, int index) { - u32 &state0 = state->data[(index + 0) % NumStateWords]; - u32 &state1 = state->data[(index + 1) % NumStateWords]; - u32 &state2 = state->data[(index + 2) % NumStateWords]; - u32 &state3 = state->data[(index + 3) % NumStateWords]; - - const u32 x = XorByShifted27(state0 + state1 + state3) * ParamXor; - const u32 y = x - index; - - state0 = y; - state1 ^= x; - state2 ^= y; - } - - void TinyMT::Initialize(u32 seed) { - this->state.data[0] = seed; - this->state.data[1] = ParamMat1; - this->state.data[2] = ParamMat2; - this->state.data[3] = ParamTmat; - - for (int i = 1; i < MinimumInitIterations; i++) { - const u32 mixed = XorByShifted30(this->state.data[(i - 1) % NumStateWords]); - this->state.data[i % NumStateWords] ^= mixed * ParamMult + i; - } - - this->FinalizeInitialization(); - } - - void TinyMT::Initialize(const u32 *seed, int seed_count) { - this->state.data[0] = 0; - this->state.data[1] = ParamMat1; - this->state.data[2] = ParamMat2; - this->state.data[3] = ParamTmat; - - { - const int num_init_iterations = std::max(seed_count + 1, MinimumInitIterations) - 1; - - GenerateInitialValuePlus(&this->state, 0, seed_count); - - for (int i = 0; i < num_init_iterations; i++) { - GenerateInitialValuePlus(&this->state, (i + 1) % NumStateWords, (i < seed_count) ? seed[i] : 0); - } - - for (int i = 0; i < static_cast(NumStateWords); i++) { - GenerateInitialValueXor(&this->state, (i + 1 + num_init_iterations) % NumStateWords); - } - } - - this->FinalizeInitialization(); - } - - void TinyMT::FinalizeInitialization() { - const u32 state0 = this->state.data[0] & TopBitmask; - const u32 state1 = this->state.data[1]; - const u32 state2 = this->state.data[2]; - const u32 state3 = this->state.data[3]; - - if (state0 == 0 && state1 == 0 && state2 == 0 && state3 == 0) { - this->state.data[0] = 'T'; - this->state.data[1] = 'I'; - this->state.data[2] = 'N'; - this->state.data[3] = 'Y'; - } - - for (int i = 0; i < NumDiscardedInitOutputs; i++) { - this->GenerateRandomU32(); - } - } - - - void TinyMT::GetState(TinyMT::State *out) const { - std::memcpy(out->data, this->state.data, sizeof(this->state)); - } - - void TinyMT::SetState(const TinyMT::State *state) { - std::memcpy(this->state.data, state->data, sizeof(this->state)); - } - - void TinyMT::GenerateRandomBytes(void *dst, size_t size) { - const uintptr_t start = reinterpret_cast(dst); - const uintptr_t end = start + size; - const uintptr_t aligned_start = util::AlignUp(start, 4); - const uintptr_t aligned_end = util::AlignDown(end, 4); - - /* Make sure we're aligned. */ - if (start < aligned_start) { - const u32 rnd = this->GenerateRandomU32(); - std::memcpy(dst, &rnd, aligned_start - start); - } - - /* Write as many aligned u32s as we can. 
*/ - { - u32 * cur_dst = reinterpret_cast(aligned_start); - u32 * const end_dst = reinterpret_cast(aligned_end); - - while (cur_dst < end_dst) { - *(cur_dst++) = this->GenerateRandomU32(); - } - } - - /* Handle any leftover unaligned data. */ - if (aligned_end < end) { - const u32 rnd = this->GenerateRandomU32(); - std::memcpy(reinterpret_cast(aligned_end), &rnd, end - aligned_end); - } - } - - u32 TinyMT::GenerateRandomU32() { - /* Advance state. */ - const u32 x0 = (this->state.data[0] & TopBitmask) ^ this->state.data[1] ^ this->state.data[2]; - const u32 y0 = this->state.data[3]; - const u32 x1 = x0 ^ (x0 << 1); - const u32 y1 = y0 ^ (y0 >> 1) ^ x1; - - const u32 state0 = this->state.data[1]; - u32 state1 = this->state.data[2]; - u32 state2 = x1 ^ (y1 << 10); - const u32 state3 = y1; - - if ((y1 & 1) != 0) { - state1 ^= ParamMat1; - state2 ^= ParamMat2; - } - - this->state.data[0] = state0; - this->state.data[1] = state1; - this->state.data[2] = state2; - this->state.data[3] = state3; - - /* Temper. */ - const u32 t1 = state0 + (state2 >> 8); - u32 t0 = state3 ^ t1; - - if ((t1 & 1) != 0) { - t0 ^= ParamTmat; - } - - return t0; - } - -} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours.hpp b/libraries/libvapours/include/vapours.hpp index 431313b80..68cadda3e 100644 --- a/libraries/libvapours/include/vapours.hpp +++ b/libraries/libvapours/include/vapours.hpp @@ -19,6 +19,8 @@ #include "vapours/defines.hpp" #include "vapours/literals.hpp" +#include "vapours/timespan.hpp" + #include "vapours/util.hpp" #include "vapours/results.hpp" #include "vapours/svc.hpp" diff --git a/libraries/libvapours/include/vapours/results/results_common.hpp b/libraries/libvapours/include/vapours/results/results_common.hpp index ed34f13d1..cb977ab99 100644 --- a/libraries/libvapours/include/vapours/results/results_common.hpp +++ b/libraries/libvapours/include/vapours/results/results_common.hpp @@ -57,8 +57,8 @@ namespace ams { using BaseType = typename ResultTraits::BaseType; static constexpr BaseType SuccessValue = ResultTraits::SuccessValue; public: - constexpr inline BaseType GetModule() const { return ResultTraits::GetModuleFromValue(static_cast(this)->GetValue()); } - constexpr inline BaseType GetDescription() const { return ResultTraits::GetDescriptionFromValue(static_cast(this)->GetValue()); } + constexpr ALWAYS_INLINE BaseType GetModule() const { return ResultTraits::GetModuleFromValue(static_cast(this)->GetValue()); } + constexpr ALWAYS_INLINE BaseType GetDescription() const { return ResultTraits::GetDescriptionFromValue(static_cast(this)->GetValue()); } }; class ResultConstructor; @@ -81,15 +81,15 @@ namespace ams { /* TODO: It sure would be nice to make this private. 
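The inline to ALWAYS_INLINE churn in this header is mechanical: ALWAYS_INLINE is the vapours forced-inlining macro. Its definition lives in vapours/defines.hpp (not shown in this diff) and is assumed to be roughly:

    /* Assumed shape; see vapours/defines.hpp for the authoritative definition. */
    #define ALWAYS_INLINE inline __attribute__((always_inline))
    #define NOINLINE      __attribute__((noinline))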
*/ constexpr Result(typename Base::BaseType v) : value(v) { static_assert(std::is_same::value); } - constexpr inline operator ResultSuccess() const; + constexpr ALWAYS_INLINE operator ResultSuccess() const; NX_CONSTEXPR bool CanAccept(Result result) { return true; } - constexpr inline bool IsSuccess() const { return this->GetValue() == Base::SuccessValue; } - constexpr inline bool IsFailure() const { return !this->IsSuccess(); } - constexpr inline typename Base::BaseType GetModule() const { return Base::GetModule(); } - constexpr inline typename Base::BaseType GetDescription() const { return Base::GetDescription(); } + constexpr ALWAYS_INLINE bool IsSuccess() const { return this->GetValue() == Base::SuccessValue; } + constexpr ALWAYS_INLINE bool IsFailure() const { return !this->IsSuccess(); } + constexpr ALWAYS_INLINE typename Base::BaseType GetModule() const { return Base::GetModule(); } + constexpr ALWAYS_INLINE typename Base::BaseType GetDescription() const { return Base::GetDescription(); } - constexpr inline typename Base::BaseType GetValue() const { return this->value; } + constexpr ALWAYS_INLINE typename Base::BaseType GetValue() const { return this->value; } }; static_assert(sizeof(Result) == sizeof(Result::Base::BaseType), "sizeof(Result) == sizeof(Result::Base::BaseType)"); static_assert(std::is_trivially_destructible::value, "std::is_trivially_destructible::value"); @@ -98,12 +98,12 @@ namespace ams { class ResultConstructor { public: - static constexpr inline Result MakeResult(ResultTraits::BaseType value) { + static constexpr ALWAYS_INLINE Result MakeResult(ResultTraits::BaseType value) { return Result(value); } }; - constexpr inline Result MakeResult(ResultTraits::BaseType value) { + constexpr ALWAYS_INLINE Result MakeResult(ResultTraits::BaseType value) { return ResultConstructor::MakeResult(value); } @@ -116,12 +116,12 @@ namespace ams { constexpr operator Result() const { return result::impl::MakeResult(Base::SuccessValue); } NX_CONSTEXPR bool CanAccept(Result result) { return result.IsSuccess(); } - constexpr inline bool IsSuccess() const { return true; } - constexpr inline bool IsFailure() const { return !this->IsSuccess(); } - constexpr inline typename Base::BaseType GetModule() const { return Base::GetModule(); } - constexpr inline typename Base::BaseType GetDescription() const { return Base::GetDescription(); } + constexpr ALWAYS_INLINE bool IsSuccess() const { return true; } + constexpr ALWAYS_INLINE bool IsFailure() const { return !this->IsSuccess(); } + constexpr ALWAYS_INLINE typename Base::BaseType GetModule() const { return Base::GetModule(); } + constexpr ALWAYS_INLINE typename Base::BaseType GetDescription() const { return Base::GetDescription(); } - constexpr inline typename Base::BaseType GetValue() const { return Base::SuccessValue; } + constexpr ALWAYS_INLINE typename Base::BaseType GetValue() const { return Base::SuccessValue; } }; namespace result::impl { @@ -130,7 +130,7 @@ namespace ams { } - constexpr inline Result::operator ResultSuccess() const { + constexpr ALWAYS_INLINE Result::operator ResultSuccess() const { if (!ResultSuccess::CanAccept(*this)) { result::impl::OnResultAssertion(*this); } @@ -151,10 +151,10 @@ namespace ams { constexpr operator Result() const { return MakeResult(Value); } constexpr operator ResultSuccess() const { OnResultAssertion(Value); } - constexpr inline bool IsSuccess() const { return false; } - constexpr inline bool IsFailure() const { return !this->IsSuccess(); } + constexpr ALWAYS_INLINE bool IsSuccess() const { 
return false; } + constexpr ALWAYS_INLINE bool IsFailure() const { return !this->IsSuccess(); } - constexpr inline typename Base::BaseType GetValue() const { return Value; } + constexpr ALWAYS_INLINE typename Base::BaseType GetValue() const { return Value; } }; template diff --git a/libraries/libvapours/include/vapours/results/svc_results.hpp b/libraries/libvapours/include/vapours/results/svc_results.hpp index 6dfbe9b6c..715232ed2 100644 --- a/libraries/libvapours/include/vapours/results/svc_results.hpp +++ b/libraries/libvapours/include/vapours/results/svc_results.hpp @@ -27,7 +27,9 @@ namespace ams::svc { R_DEFINE_ERROR_RESULT(NotImplemented, 33); - R_DEFINE_ERROR_RESULT(ThreadTerminating, 59); + R_DEFINE_ERROR_RESULT(NoSynchronizationObject, 57); + + R_DEFINE_ERROR_RESULT(TerminationRequested, 59); R_DEFINE_ERROR_RESULT(NoEvent, 70); @@ -36,7 +38,7 @@ namespace ams::svc { R_DEFINE_ERROR_RESULT(OutOfResource, 103); R_DEFINE_ERROR_RESULT(OutOfMemory, 104); R_DEFINE_ERROR_RESULT(OutOfHandles, 105); - R_DEFINE_ERROR_RESULT(InvalidCurrentMemoryState, 106); + R_DEFINE_ERROR_RESULT(InvalidCurrentMemory, 106); R_DEFINE_ERROR_RESULT(InvalidNewMemoryPermissions, 108); @@ -56,7 +58,7 @@ namespace ams::svc { R_DEFINE_ERROR_RESULT(SessionClosed, 123); R_DEFINE_ERROR_RESULT(NotHandled, 124); R_DEFINE_ERROR_RESULT(InvalidState, 125); - R_DEFINE_ERROR_RESULT(ReservedValue, 126); + R_DEFINE_ERROR_RESULT(ReservedUsed, 126); R_DEFINE_ERROR_RESULT(NotSupported, 127); R_DEFINE_ERROR_RESULT(Debug, 128); R_DEFINE_ERROR_RESULT(ThreadNotOwned, 129); diff --git a/libraries/libvapours/include/vapours/svc/board/nintendo/switch/svc_device_name.hpp b/libraries/libvapours/include/vapours/svc/board/nintendo/nx/svc_device_name.hpp similarity index 97% rename from libraries/libvapours/include/vapours/svc/board/nintendo/switch/svc_device_name.hpp rename to libraries/libvapours/include/vapours/svc/board/nintendo/nx/svc_device_name.hpp index 9f4ffafb8..fb289dd83 100644 --- a/libraries/libvapours/include/vapours/svc/board/nintendo/switch/svc_device_name.hpp +++ b/libraries/libvapours/include/vapours/svc/board/nintendo/nx/svc_device_name.hpp @@ -16,7 +16,7 @@ #pragma once #include -namespace ams::svc { +namespace ams::svc::board::nintendo::nx { enum DeviceName { DeviceName_Afi = 0, @@ -62,4 +62,4 @@ namespace ams::svc { DeviceName_Count, }; -} \ No newline at end of file +} diff --git a/libraries/libvapours/include/vapours/svc/board/nintendo/nx/svc_hardware_constants.hpp b/libraries/libvapours/include/vapours/svc/board/nintendo/nx/svc_hardware_constants.hpp new file mode 100644 index 000000000..8b270b125 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/board/nintendo/nx/svc_hardware_constants.hpp @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
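The result renames above (InvalidCurrentMemoryState to InvalidCurrentMemory, ReservedValue to ReservedUsed, ThreadTerminating to TerminationRequested) are identifier-only; the packed values do not change. As a worked example of the packing R_DEFINE_ERROR_RESULT produces (assuming the 9-bit module / 13-bit description split used by ResultTraits, with module 1 for svc):

    constexpr u32 ModuleBits = 9;
    constexpr u32 MakeResultValue(u32 module, u32 description) {
        return module | (description << ModuleBits);
    }

    static_assert(MakeResultValue(1, 106) == 0xD401); /* svc::ResultInvalidCurrentMemory */
    static_assert(MakeResultValue(1,  59) == 0x7601); /* svc::ResultTerminationRequested */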
+ */ +#pragma once +#include + +namespace ams::svc::board::nintendo::nx { + + constexpr inline const s64 TicksPerSecond = 19'200'000; + +} diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_code_generator.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_code_generator.hpp new file mode 100644 index 000000000..e435c7569 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_code_generator.hpp @@ -0,0 +1,276 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include "svc_codegen_impl_common.hpp" + +namespace ams::svc::codegen::impl { + + #define SVC_CODEGEN_FOR_I_FROM_0_TO_64(HANDLER, ...) \ + HANDLER( 0, ## __VA_ARGS__); HANDLER( 1, ## __VA_ARGS__); HANDLER( 2, ## __VA_ARGS__); HANDLER( 3, ## __VA_ARGS__); \ + HANDLER( 4, ## __VA_ARGS__); HANDLER( 5, ## __VA_ARGS__); HANDLER( 6, ## __VA_ARGS__); HANDLER( 7, ## __VA_ARGS__); \ + HANDLER( 8, ## __VA_ARGS__); HANDLER( 9, ## __VA_ARGS__); HANDLER(10, ## __VA_ARGS__); HANDLER(11, ## __VA_ARGS__); \ + HANDLER(12, ## __VA_ARGS__); HANDLER(13, ## __VA_ARGS__); HANDLER(14, ## __VA_ARGS__); HANDLER(15, ## __VA_ARGS__); \ + HANDLER(16, ## __VA_ARGS__); HANDLER(17, ## __VA_ARGS__); HANDLER(18, ## __VA_ARGS__); HANDLER(19, ## __VA_ARGS__); \ + HANDLER(20, ## __VA_ARGS__); HANDLER(21, ## __VA_ARGS__); HANDLER(22, ## __VA_ARGS__); HANDLER(23, ## __VA_ARGS__); \ + HANDLER(24, ## __VA_ARGS__); HANDLER(25, ## __VA_ARGS__); HANDLER(26, ## __VA_ARGS__); HANDLER(27, ## __VA_ARGS__); \ + HANDLER(28, ## __VA_ARGS__); HANDLER(29, ## __VA_ARGS__); HANDLER(30, ## __VA_ARGS__); HANDLER(31, ## __VA_ARGS__); \ + HANDLER(32, ## __VA_ARGS__); HANDLER(33, ## __VA_ARGS__); HANDLER(34, ## __VA_ARGS__); HANDLER(35, ## __VA_ARGS__); \ + HANDLER(36, ## __VA_ARGS__); HANDLER(37, ## __VA_ARGS__); HANDLER(38, ## __VA_ARGS__); HANDLER(39, ## __VA_ARGS__); \ + HANDLER(40, ## __VA_ARGS__); HANDLER(41, ## __VA_ARGS__); HANDLER(42, ## __VA_ARGS__); HANDLER(43, ## __VA_ARGS__); \ + HANDLER(44, ## __VA_ARGS__); HANDLER(45, ## __VA_ARGS__); HANDLER(46, ## __VA_ARGS__); HANDLER(47, ## __VA_ARGS__); \ + HANDLER(48, ## __VA_ARGS__); HANDLER(49, ## __VA_ARGS__); HANDLER(50, ## __VA_ARGS__); HANDLER(51, ## __VA_ARGS__); \ + HANDLER(52, ## __VA_ARGS__); HANDLER(53, ## __VA_ARGS__); HANDLER(54, ## __VA_ARGS__); HANDLER(55, ## __VA_ARGS__); \ + HANDLER(56, ## __VA_ARGS__); HANDLER(57, ## __VA_ARGS__); HANDLER(58, ## __VA_ARGS__); HANDLER(59, ## __VA_ARGS__); \ + HANDLER(60, ## __VA_ARGS__); HANDLER(61, ## __VA_ARGS__); HANDLER(62, ## __VA_ARGS__); HANDLER(63, ## __VA_ARGS__); + + + class Aarch64CodeGenerator { + private: + struct RegisterPair { + size_t First; + size_t Second; + }; + + template + struct RegisterPairHelper; + + template + struct RegisterPairHelper { + static constexpr size_t PairCount = 1 + RegisterPairHelper::PairCount; + static constexpr std::array Pairs = [] { + std::array pairs = {}; + pairs[0] = 
RegisterPair{First, Second}; + if constexpr (RegisterPairHelper::PairCount) { + for (size_t i = 0; i < RegisterPairHelper::PairCount; i++) { + pairs[1+i] = RegisterPairHelper::Pairs[i]; + } + } + return pairs; + }(); + }; + + template + struct RegisterPairHelper { + static constexpr size_t PairCount = 1; + static constexpr std::array Pairs = { RegisterPair{First, Second} }; + }; + + template + struct RegisterPairHelper { + static constexpr size_t PairCount = 0; + static constexpr std::array Pairs = {}; + }; + + template + static ALWAYS_INLINE void ClearRegister() { + __asm__ __volatile__("mov x%c[r], xzr" :: [r]"i"(Reg) : "memory"); + } + + template + static ALWAYS_INLINE void SaveRegister() { + __asm__ __volatile__("str x%c[r], [sp, -16]!" :: [r]"i"(Reg) : "memory"); + } + + template + static ALWAYS_INLINE void RestoreRegister() { + __asm__ __volatile__("ldr x%c[r], [sp], 16" :: [r]"i"(Reg) : "memory"); + } + + template + static ALWAYS_INLINE void SaveRegisterPair() { + __asm__ __volatile__("stp x%c[r0], x%c[r1], [sp, -16]!" :: [r0]"i"(Reg0), [r1]"i"(Reg1) : "memory"); + } + + template + static ALWAYS_INLINE void RestoreRegisterPair() { + __asm__ __volatile__("ldp x%c[r0], x%c[r1], [sp], 16" :: [r0]"i"(Reg0), [r1]"i"(Reg1) : "memory"); + } + + template + static ALWAYS_INLINE void SaveRegistersImpl() { + #define SVC_CODEGEN_HANDLER(n) \ + do { if constexpr ((63 - n) < Pairs.size()) { SaveRegisterPair(); } } while (0) + + if constexpr (sizeof...(Rest) % 2 == 1) { + /* Even number of registers. */ + constexpr auto Pairs = RegisterPairHelper::Pairs; + static_assert(Pairs.size() <= 8); + SVC_CODEGEN_FOR_I_FROM_0_TO_64(SVC_CODEGEN_HANDLER) + } else if constexpr (sizeof...(Rest) > 0) { + /* Odd number of registers. */ + constexpr auto Pairs = RegisterPairHelper::Pairs; + static_assert(Pairs.size() <= 8); + SVC_CODEGEN_FOR_I_FROM_0_TO_64(SVC_CODEGEN_HANDLER) + + SaveRegister(); + } else { + /* Only one register. */ + SaveRegister(); + } + + #undef SVC_CODEGEN_HANDLER + } + + template + static ALWAYS_INLINE void RestoreRegistersImpl() { + #define SVC_CODEGEN_HANDLER(n) \ + do { if constexpr (n < Pairs.size()) { RestoreRegisterPair(); } } while (0) + + if constexpr (sizeof...(Rest) % 2 == 1) { + /* Even number of registers. */ + constexpr auto Pairs = RegisterPairHelper::Pairs; + static_assert(Pairs.size() <= 8); + SVC_CODEGEN_FOR_I_FROM_0_TO_64(SVC_CODEGEN_HANDLER) + } else if constexpr (sizeof...(Rest) > 0) { + /* Odd number of registers. */ + RestoreRegister(); + + constexpr auto Pairs = RegisterPairHelper::Pairs; + static_assert(Pairs.size() <= 8); + SVC_CODEGEN_FOR_I_FROM_0_TO_64(SVC_CODEGEN_HANDLER) + } else { + /* Only one register. 
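The even/odd branching above is easiest to see instantiated. A sketch (aarch64 target assumed; the wrapper function is hypothetical) of what the generator emits for three registers:

    ALWAYS_INLINE void ExampleSaveRestore() {
        /* sizeof...(Rest) == 2, so the pair helper covers {1, 2} and x0 is the leftover: */
        Aarch64CodeGenerator::SaveRegisters<0, 1, 2>();    /* stp x1, x2, [sp, -16]! ; str x0, [sp, -16]! */

        /* Restore mirrors the save in LIFO order: */
        Aarch64CodeGenerator::RestoreRegisters<0, 1, 2>(); /* ldr x0, [sp], 16 ; ldp x1, x2, [sp], 16 */
    }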
*/ + RestoreRegister(); + } + + #undef SVC_CODEGEN_HANDLER + } + + public: + template + static ALWAYS_INLINE void SaveRegisters() { + if constexpr (sizeof...(Registers) > 0) { + SaveRegistersImpl(); + } + } + + template + static ALWAYS_INLINE void RestoreRegisters() { + if constexpr (sizeof...(Registers) > 0) { + RestoreRegistersImpl(); + } + } + + template + static ALWAYS_INLINE void ClearRegisters() { + static_assert(sizeof...(Registers) <= 8); + (ClearRegister(), ...); + } + + template + static ALWAYS_INLINE void AllocateStackSpace() { + if constexpr (Size > 0) { + __asm__ __volatile__("sub sp, sp, %c[size]" :: [size]"i"(util::AlignUp(Size, 16)) : "memory"); + } + } + + template + static ALWAYS_INLINE void FreeStackSpace() { + if constexpr (Size > 0) { + __asm__ __volatile__("add sp, sp, %c[size]" :: [size]"i"(util::AlignUp(Size, 16)) : "memory"); + } + } + + template + static ALWAYS_INLINE void MoveRegister() { + __asm__ __volatile__("mov x%c[dst], x%c[src]" :: [dst]"i"(Dst), [src]"i"(Src) : "memory"); + } + + template + static ALWAYS_INLINE void LoadFromStack() { + if constexpr (Size == 4) { + __asm__ __volatile__("ldr w%c[r], [sp, %c[offset]]" :: [r]"i"(Reg), [offset]"i"(Offset) : "memory"); + } else if constexpr (Size == 8) { + __asm__ __volatile__("ldr x%c[r], [sp, %c[offset]]" :: [r]"i"(Reg), [offset]"i"(Offset) : "memory"); + } else { + static_assert(Size != Size); + } + } + + template + static ALWAYS_INLINE void LoadPairFromStack() { + if constexpr (Size == 4) { + __asm__ __volatile__("ldp w%c[r0], w%c[r1], [sp, %c[offset]]" :: [r0]"i"(Reg0), [r1]"i"(Reg1), [offset]"i"(Offset) : "memory"); + } else if constexpr (Size == 8) { + __asm__ __volatile__("ldp x%c[r0], x%c[r1], [sp, %c[offset]]" :: [r0]"i"(Reg0), [r1]"i"(Reg1), [offset]"i"(Offset) : "memory"); + } else { + static_assert(Size != Size); + } + } + + template + static ALWAYS_INLINE void StoreToStack() { + if constexpr (Size == 4) { + __asm__ __volatile__("str w%c[r], [sp, %c[offset]]" :: [r]"i"(Reg), [offset]"i"(Offset) : "memory"); + } else if constexpr (Size == 8) { + __asm__ __volatile__("str x%c[r], [sp, %c[offset]]" :: [r]"i"(Reg), [offset]"i"(Offset) : "memory"); + } else { + static_assert(Size != Size); + } + } + + template + static ALWAYS_INLINE void StorePairToStack() { + if constexpr (Size == 4) { + __asm__ __volatile__("stp w%c[r0], w%c[r1], [sp, %c[offset]]" :: [r0]"i"(Reg0), [r1]"i"(Reg1), [offset]"i"(Offset) : "memory"); + } else if constexpr (Size == 8) { + __asm__ __volatile__("stp x%c[r0], x%c[r1], [sp, %c[offset]]" :: [r0]"i"(Reg0), [r1]"i"(Reg1), [offset]"i"(Offset) : "memory"); + } else { + static_assert(Size != Size); + } + } + + template + static ALWAYS_INLINE void Pack() { + __asm__ __volatile__("orr x%c[dst], x%c[low], x%c[high], lsl #32" :: [dst]"i"(Dst), [low]"i"(Low), [high]"i"(High) : "memory"); + } + + template + static ALWAYS_INLINE void Unpack() { + if constexpr (Src != Low) { + MoveRegister(); + } + + __asm__ __volatile__("lsr x%c[high], x%c[src], #32" :: [high]"i"(High), [src]"i"(Src) : "memory"); + } + + template + static ALWAYS_INLINE void LoadStackAddress() { + if constexpr (Offset > 0) { + __asm__ __volatile__("add x%c[dst], sp, %c[offset]" :: [dst]"i"(Dst), [offset]"i"(Offset) : "memory"); + } else if constexpr (Offset == 0) { + __asm__ __volatile__("mov x%c[dst], sp" :: [dst]"i"(Dst) : "memory"); + } + } + }; + + class Aarch32CodeGenerator { + /* TODO */ + }; + + template + static ALWAYS_INLINE void GenerateCodeForMetaCode(MetaCodeHolder) { + constexpr auto MetaCode = 
UNWRAP_TEMPLATE_CONSTANT(MetaCodeHolder); + constexpr size_t NumOperations = MetaCode.GetNumOperations(); + static_assert(NumOperations <= 64); + #define SVC_CODEGEN_HANDLER(n) do { if constexpr (n < NumOperations) { constexpr auto Operation = MetaCode.GetOperation(n); GenerateCodeForOperation(WRAP_TEMPLATE_CONSTANT(Operation)); } } while (0) + SVC_CODEGEN_FOR_I_FROM_0_TO_64(SVC_CODEGEN_HANDLER) + #undef SVC_CODEGEN_HANDLER + } + + #undef SVC_CODEGEN_FOR_I_FROM_0_TO_64 + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_common.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_common.hpp new file mode 100644 index 000000000..6e2bf8d7f --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_common.hpp @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once + +namespace ams::svc::codegen::impl { + + template + constexpr inline bool IsIntegral = std::is_integral::value; + + template<> + constexpr inline bool IsIntegral<::ams::svc::Address> = true; + + template<> + constexpr inline bool IsIntegral<::ams::svc::Size> = true; + + template + constexpr inline bool IsKUserPointer = std::is_base_of::value; + + template + constexpr inline bool IsIntegralOrUserPointer = IsIntegral || IsUserPointer || IsKUserPointer; + + template + constexpr std::index_sequence IndexSequenceCat(std::index_sequence, std::index_sequence) { + return std::index_sequence{}; + } + + template + constexpr inline std::array ConvertToArray(std::index_sequence) { + return std::array{ Is... 
}; + } + + template + class FunctionTraits { + private: + template + static R GetReturnTypeImpl(R(*)(A...)); + + template + static std::tuple GetArgsImpl(R(*)(A...)); + public: + using ReturnType = decltype(GetReturnTypeImpl(Function)); + using ArgsType = decltype(GetArgsImpl(Function)); + }; + + enum class CodeGenerationKind { + SvcInvocationToKernelProcedure, + PrepareForKernelProcedureToSvcInvocation, + KernelProcedureToSvcInvocation, + Invalid, + }; + + enum class ArgumentType { + In, + Out, + InUserPointer, + OutUserPointer, + Invalid, + }; + + template + constexpr inline ArgumentType GetArgumentType = [] { + static_assert(!std::is_reference::value, "SVC ABI: Reference types not allowed."); + static_assert(sizeof(T) <= sizeof(uint64_t), "SVC ABI: Type too large"); + if constexpr (std::is_pointer::value) { + static_assert(!std::is_const::type>::value, "SVC ABI: Output (T*) must not be const"); + return ArgumentType::Out; + } else if constexpr (IsUserPointer || IsKUserPointer) { + if constexpr (T::IsInput) { + return ArgumentType::InUserPointer; + } else { + return ArgumentType::OutUserPointer; + } + } else { + return ArgumentType::In; + } + }(); + + template + struct AbiType { + static constexpr size_t RegisterSize = RS; + static constexpr size_t RegisterCount = RC; + static constexpr size_t ArgumentRegisterCount = ARC; + static constexpr size_t PointerSize = PC; + + template + static constexpr size_t GetSize() { + if constexpr (std::is_same::value || std::is_same::value || IsUserPointer || IsKUserPointer) { + return PointerSize; + } else if constexpr(std::is_pointer::value) { + /* Out parameter. */ + return GetSize::type>(); + } else if constexpr (std::is_same::value) { + return 0; + } else { + return sizeof(T); + } + } + + template + static constexpr inline size_t Size = GetSize(); + }; + + using Aarch64Lp64Abi = AbiType<8, 8, 8, 8>; + using Aarch64Ilp32Abi = AbiType<8, 8, 8, 4>; + using Aarch32Ilp32Abi = AbiType<4, 4, 4, 4>; + + using Aarch64SvcInvokeAbi = AbiType<8, 8, 8, 8>; + using Aarch32SvcInvokeAbi = AbiType<4, 8, 4, 4>; + + struct Abi { + size_t register_size; + size_t register_count; + size_t pointer_size; + + template + static constexpr Abi Convert() { return { AbiType::RegisterSize, AbiType::RegisterCount, AbiType::PointerSize }; } + }; + + template + constexpr inline bool IsPassedByPointer = [] { + if (GetArgumentType != ArgumentType::In) { + return true; + } + + return (!IsIntegral && AbiType::template Size > AbiType::RegisterSize); + }(); + + template + class RegisterAllocator { + private: + std::array map; + public: + constexpr explicit RegisterAllocator() : map() { /* ... 
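A few compile-time checks may make the AbiType size rules above concrete (illustrative, not from the patch):

    static_assert(Aarch64Lp64Abi::Size<uint32_t> == 4);
    static_assert(Aarch64Lp64Abi::Size<ams::svc::Address> == 8);  /* pointer-sized under lp64    */
    static_assert(Aarch64Ilp32Abi::Size<ams::svc::Address> == 4); /* pointer-sized under ilp32   */
    static_assert(Aarch64Lp64Abi::Size<uint64_t *> == 8);         /* out parameter: pointee size */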
*/ } + + constexpr bool IsAllocated(size_t i) const { return this->map[i]; } + constexpr bool IsFree(size_t i) const { return !this->IsAllocated(i); } + + constexpr void Allocate(size_t i) { + if (this->IsAllocated(i)) { + std::abort(); + } + + this->map[i] = true; + } + + constexpr bool TryAllocate(size_t i) { + if (this->IsAllocated(i)) { + return false; + } + + this->map[i] = true; + return true; + } + + constexpr size_t AllocateFirstFree() { + for (size_t i = 0; i < N; i++) { + if (!this->IsAllocated(i)) { + this->map[i] = true; + return i; + } + } + + std::abort(); + } + + constexpr void Free(size_t i) { + if (!this->IsAllocated(i)) { + std::abort(); + } + + this->map[i] = false; + } + + constexpr size_t GetRegisterCount() const { + return N; + } + }; + + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_kernel_svc_wrapper.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_kernel_svc_wrapper.hpp new file mode 100644 index 000000000..58d45246b --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_kernel_svc_wrapper.hpp @@ -0,0 +1,540 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
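Because every member of RegisterAllocator above is constexpr, the SVC wrapper machinery below can make register-allocation decisions entirely at compile time; a minimal illustration:

    constexpr size_t ExampleAllocateFirstFree() {
        RegisterAllocator<8> allocator;       /* e.g. the eight aarch64 argument registers */
        allocator.Allocate(0);                /* reserve register 0 for the return value   */
        return allocator.AllocateFirstFree(); /* yields register 1 */
    }
    static_assert(ExampleAllocateFirstFree() == 1);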
+ */ +#pragma once +#include "svc_codegen_impl_common.hpp" +#include "svc_codegen_impl_parameter.hpp" +#include "svc_codegen_impl_layout.hpp" +#include "svc_codegen_impl_meta_code.hpp" +#include "svc_codegen_impl_layout_conversion.hpp" +#include "svc_codegen_impl_code_generator.hpp" + +namespace ams::svc::codegen::impl { + + template + class KernelSvcWrapperHelperImpl; + + template + class KernelSvcWrapperHelperImpl<_SvcAbiType, _UserAbiType, _KernelAbiType, ReturnType, std::tuple> { + private: + static constexpr bool TryToPerformCoalescingOptimizations = true; + + template + static constexpr void CoalesceOperations(MetaCodeGenerator &out_mcg, const std::array stack_modified, size_t stack_top) { + enum class State { WaitingForRegister, ParsingRegister, ParsedRegister, EmittingCode }; + State cur_state = State::WaitingForRegister; + size_t num_regs = 0; + size_t registers[2] = { InvalidRegisterId, InvalidRegisterId }; + size_t widths[2] = {}; + size_t index = 0; + size_t store_base = 0; + while (index < stack_top) { + if (cur_state == State::WaitingForRegister) { + while (stack_modified[index] == InvalidRegisterId && index < stack_top) { + index++; + } + cur_state = State::ParsingRegister; + } else if (cur_state == State::ParsingRegister) { + const size_t start_index = index; + if (num_regs == 0) { + store_base = start_index; + } + const size_t reg = stack_modified[index]; + registers[num_regs] = reg; + while (index < stack_top && index < start_index + KernelAbiType::RegisterSize && stack_modified[index] == reg) { + widths[num_regs]++; + index++; + } + num_regs++; + cur_state = State::ParsedRegister; + } else if (cur_state == State::ParsedRegister) { + if (num_regs == 2 || stack_modified[index] == InvalidRegisterId) { + cur_state = State::EmittingCode; + } else { + cur_state = State::ParsingRegister; + } + } else if (cur_state == State::EmittingCode) { + /* Emit an operation! */ + MetaCode::Operation st_op = {}; + + if (num_regs == 2) { + if (registers[0] == registers[1]) { + std::abort(); + } + if (widths[0] == widths[1]) { + st_op.kind = PairKind; + st_op.num_parameters = 4; + st_op.parameters[0] = registers[0]; + st_op.parameters[1] = registers[1]; + st_op.parameters[2] = store_base; + st_op.parameters[3] = widths[0]; + } else { + std::abort(); + } + } else if (num_regs == 1) { + st_op.kind = SingleKind; + st_op.num_parameters = 3; + st_op.parameters[0] = registers[0]; + st_op.parameters[1] = store_base; + st_op.parameters[2] = widths[0]; + } else { + std::abort(); + } + + out_mcg.AddOperationDirectly(st_op); + + /* Go back to beginning of parse. */ + for (size_t i = 0; i < num_regs; i++) { + registers[i] = InvalidRegisterId; + widths[i] = 0; + } + num_regs = 0; + cur_state = State::WaitingForRegister; + } else { + std::abort(); + } + } + + if (cur_state == State::ParsedRegister) { + /* Emit an operation! 
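Concretely: if the conversion code stored register 1 to stack bytes 0-3 and register 2 to bytes 4-7, the scan above sees stack_modified = {1,1,1,1, 2,2,2,2} and emits a single pair store rather than two singles. A sketch of the resulting operation, using the fields shown in this function:

    MetaCode::Operation st_op = {};
    st_op.kind           = PairKind;  /* e.g. a StorePairToStack operation      */
    st_op.num_parameters = 4;
    st_op.parameters[0]  = 1;         /* first register                         */
    st_op.parameters[1]  = 2;         /* second register                        */
    st_op.parameters[2]  = 0;         /* store_base: stack offset of the pair   */
    st_op.parameters[3]  = 4;         /* width of each register's store, bytes  */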
*/ + if (num_regs == 2 && widths[0] == widths[1]) { + MetaCode::Operation st_op = {}; + st_op.kind = PairKind; + st_op.num_parameters = 4; + st_op.parameters[0] = registers[0]; + st_op.parameters[1] = registers[1]; + st_op.parameters[2] = store_base; + st_op.parameters[3] = widths[0]; + out_mcg.AddOperationDirectly(st_op); + } else { + for (size_t i = 0; i < num_regs; i++) { + MetaCode::Operation st_op = {}; + st_op.kind = SingleKind; + st_op.num_parameters = 3; + st_op.parameters[0] = registers[i]; + st_op.parameters[1] = store_base; + st_op.parameters[2] = widths[i]; + + store_base += widths[i]; + out_mcg.AddOperationDirectly(st_op); + } + } + } + } + + /* Basic optimization of store coalescing. */ + template + static constexpr bool TryPrepareForKernelProcedureToSvcInvocationCoalescing(std::tuple, MetaCodeGenerator &out_mcg, RegisterAllocator &out_allocator) { + /* For debugging, allow ourselves to disable these optimizations. */ + if constexpr (!TryToPerformCoalescingOptimizations) { + return false; + } + + /* Generate expected code. */ + MetaCodeGenerator mcg; + RegisterAllocator allocator = out_allocator; + (Conversion::template GenerateCode(mcg, allocator), ...); + MetaCode mc = mcg.GetMetaCode(); + + /* This is a naive optimization pass. */ + /* We want to reorder code of the form: */ + /* - Store to Stack sequence 0... */ + /* - Load Stack Address 0 */ + /* - Store to Stack 1... */ + /* - Load Stack Address 1 */ + /* Into the form: */ + /* - Store to stack Sequence 0 + 1... */ + /* - Load Stack Address 0 + 1... */ + /* But only if they are semantically equivalent. */ + + /* We'll do a simple, naive pass to check if any registers are stored to stack that are modified. */ + /* This shouldn't happen in any cases we care about, so we can probably get away with it. */ + /* TODO: Eventually this should be e.g. operation.ModifiesRegister() / operation.CanReorderBefore() */ + /* However, this will be more work, and if it's not necessary it can be put off until it is. */ + constexpr size_t MaxStackIndex = 0x100; + constexpr size_t InvalidRegisterId = N; + bool register_modified[N] = {}; + std::array stack_address_loaded = {}; + for (size_t i = 0; i < N; i++) { stack_address_loaded[i] = MaxStackIndex; } + std::array stack_modified = {}; + for (size_t i = 0; i < MaxStackIndex; i++) { stack_modified[i] = InvalidRegisterId; } + size_t stack_top = 0; + for (size_t i = 0; i < mc.GetNumOperations(); i++) { + const auto mco = mc.GetOperation(i); + if (mco.kind == MetaCode::OperationKind::StoreToStack) { + if (register_modified[mco.parameters[0]]) { + return false; + } + const size_t offset = mco.parameters[1]; + const size_t width = mco.parameters[2] == 0 ? KernelAbiType::RegisterSize : mco.parameters[2]; + for (size_t j = 0; j < width; j++) { + const size_t index = offset + j; + if (index >= MaxStackIndex) { + std::abort(); + } + if (stack_modified[index] != InvalidRegisterId) { + return false; + } + stack_modified[index] = mco.parameters[0]; + stack_top = std::max(index + 1, stack_top); + } + } else if (mco.kind == MetaCode::OperationKind::LoadStackAddress) { + if (stack_address_loaded[mco.parameters[0]] != MaxStackIndex) { + return false; + } + if (register_modified[mco.parameters[0]]) { + return false; + } + if (mco.parameters[1] >= MaxStackIndex) { + std::abort(); + } + stack_address_loaded[mco.parameters[0]] = mco.parameters[1]; + register_modified[mco.parameters[0]] = true; + } else { + /* TODO: Better operation reasoning process. */ + return false; + } + } + + /* Looks like we can reorder! 
*/
+                /* Okay, let's do this the naive way, too. */
+                constexpr auto PairKind = MetaCode::OperationKind::StorePairToStack;
+                constexpr auto SingleKind = MetaCode::OperationKind::StoreToStack;
+                CoalesceOperations(out_mcg, stack_modified, stack_top);
+                for (size_t i = 0; i < N; i++) {
+                    if (stack_address_loaded[i] != MaxStackIndex) {
+                        MetaCode::Operation load_op = {};
+                        load_op.kind = MetaCode::OperationKind::LoadStackAddress;
+                        load_op.num_parameters = 2;
+                        load_op.parameters[0] = i;
+                        load_op.parameters[1] = stack_address_loaded[i];
+                        out_mcg.AddOperationDirectly(load_op);
+                    }
+                }
+
+                /* Ensure the out allocator state is correct. */
+                out_allocator = allocator;
+
+                return true;
+            }
+
+            /* Basic optimization of load coalescing. */
+            template
+            static constexpr bool TryKernelProcedureToSvcInvocationCoalescing(std::tuple, MetaCodeGenerator &out_mcg, RegisterAllocator &out_allocator) {
+                /* For debugging, allow ourselves to disable these optimizations. */
+                if constexpr (!TryToPerformCoalescingOptimizations) {
+                    return false;
+                }
+
+                /* Generate expected code. */
+                MetaCodeGenerator mcg;
+                RegisterAllocator allocator = out_allocator;
+                (Conversion::template GenerateCode(mcg, allocator), ...);
+                MetaCode mc = mcg.GetMetaCode();
+
+                /* This is a naive optimization pass. */
+                /* We want to coalesce all sequential stack loads, if possible. */
+                /* But only if they are semantically equivalent. */
+
+                /* We'll do a simple, naive pass to check if any registers are used after being loaded from the stack. */
+                /* This shouldn't happen in any cases we care about, so we can probably get away with it. */
+                /* TODO: Eventually this should be e.g. operation.ModifiesRegister() / operation.CanReorderBefore() */
+                /* However, this will be more work, and if it's not necessary it can be put off until it is. */
+                constexpr size_t MaxStackIndex = 0x100;
+                constexpr size_t InvalidRegisterId = N;
+                bool register_modified[N] = {};
+                std::array stack_offset_loaded = {};
+                for (size_t i = 0; i < N; i++) { stack_offset_loaded[i] = MaxStackIndex; }
+                std::array stack_modified = {};
+                for (size_t i = 0; i < MaxStackIndex; i++) { stack_modified[i] = InvalidRegisterId; }
+                size_t stack_top = 0;
+                for (size_t i = 0; i < mc.GetNumOperations(); i++) {
+                    const auto mco = mc.GetOperation(i);
+                    if (mco.kind == MetaCode::OperationKind::Unpack) {
+                        if (register_modified[mco.parameters[0]] || register_modified[mco.parameters[1]] || register_modified[mco.parameters[2]]) {
+                            return false;
+                        }
+                        register_modified[mco.parameters[0]] = true;
+                        register_modified[mco.parameters[1]] = true;
+                    } else if (mco.kind == MetaCode::OperationKind::LoadFromStack) {
+                        if (stack_offset_loaded[mco.parameters[0]] != MaxStackIndex) {
+                            return false;
+                        }
+                        if (register_modified[mco.parameters[0]] != false) {
+                            return false;
+                        }
+                        if (mco.parameters[1] >= MaxStackIndex) {
+                            std::abort();
+                        }
+                        stack_offset_loaded[mco.parameters[0]] = mco.parameters[1];
+                        register_modified[mco.parameters[0]] = true;
+
+                        const size_t offset = mco.parameters[1];
+                        const size_t width = mco.parameters[2] == 0 ? KernelAbiType::RegisterSize : mco.parameters[2];
+                        for (size_t j = 0; j < width; j++) {
+                            const size_t index = offset + j;
+                            if (index >= MaxStackIndex) {
+                                std::abort();
+                            }
+                            if (stack_modified[index] != InvalidRegisterId) {
+                                return false;
+                            }
+                            stack_modified[index] = mco.parameters[0];
+                            stack_top = std::max(index + 1, stack_top);
+                        }
+                    } else {
+                        /* TODO: Better operation reasoning process.
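Both passes rest on the same safety rule: a register's stored value may only be treated as reorderable while the register still holds what it held at SVC entry. The rule in isolation, as a compile-time check (illustrative types; the real passes walk MetaCode operations):

    #include <array>
    #include <cstddef>

    /* Illustrative: the reorder-safety rule shared by both passes. Once a
     * register has been redefined (e.g. by LoadStackAddress), a later store
     * of it can no longer be folded into the leading store sequence. */
    enum class Kind { StoreToStack, LoadStackAddress };
    struct SimpleOp { Kind kind; std::size_t reg; };

    template <std::size_t N>
    constexpr bool CanReorder(const std::array<SimpleOp, N> &ops) {
        bool modified[32] = {};
        for (std::size_t i = 0; i < N; i++) {
            if (ops[i].kind == Kind::StoreToStack) {
                if (modified[ops[i].reg]) { return false; }  /* value is no longer the SVC input */
            } else {
                if (modified[ops[i].reg]) { return false; }  /* double definition */
                modified[ops[i].reg] = true;                 /* LoadStackAddress defines reg */
            }
        }
        return true;
    }

    static_assert( CanReorder(std::array<SimpleOp, 3>{{ { Kind::StoreToStack, 1 }, { Kind::StoreToStack, 2 }, { Kind::LoadStackAddress, 1 } }}));
    static_assert(!CanReorder(std::array<SimpleOp, 2>{{ { Kind::LoadStackAddress, 1 }, { Kind::StoreToStack, 1 } }}));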
*/ + return false; + } + } + + /* Any operations that don't load from stack, we can just re-add. */ + for (size_t i = 0; i < mc.GetNumOperations(); i++) { + const auto mco = mc.GetOperation(i); + if (mco.kind != MetaCode::OperationKind::LoadFromStack) { + out_mcg.AddOperationDirectly(mco); + } + } + constexpr auto PairKind = MetaCode::OperationKind::LoadPairFromStack; + constexpr auto SingleKind = MetaCode::OperationKind::LoadFromStack; + CoalesceOperations(out_mcg, stack_modified, stack_top); + + /* Ensure the out allocator state is correct. */ + out_allocator = allocator; + + return true; + } + + template + struct TypeIndexFilter { + template + static constexpr auto GetFilteredTupleImpl(UseArrayHolder, std::tuple, std::index_sequence) { + constexpr auto UseArray = UNWRAP_TEMPLATE_CONSTANT(UseArrayHolder); + static_assert(sizeof...(TailType) == sizeof...(TailIndex)); + static_assert(HeadIndex <= UseArray.size()); + + if constexpr (sizeof...(TailType) == 0) { + if constexpr (!UseArray[HeadIndex]) { + return std::tuple{}; + } else { + return std::tuple<>{}; + } + } else { + auto tail_tuple = GetFilteredTupleImpl(UseArrayHolder{}, std::tuple{}, std::index_sequence{}); + if constexpr (!UseArray[HeadIndex]) { + return std::tuple_cat(std::tuple{}, tail_tuple); + } else { + return std::tuple_cat(std::tuple<>{}, tail_tuple); + } + } + } + + template + static constexpr auto GetFilteredTuple(UseArrayHolder) { + return GetFilteredTupleImpl(UseArrayHolder{}, std::tuple{}, std::make_index_sequence()); + } + }; + + template + static constexpr auto GetModifiedOperations(AllocatorHolder, std::tuple ops) { + constexpr size_t ModifyRegister = [] { + auto allocator = UNWRAP_TEMPLATE_CONSTANT(AllocatorHolder); + return allocator.AllocateFirstFree(); + }(); + + using ModifiedFirstOperation = typename FirstOperation::template ModifiedType; + using NewMoveOperation = typename LayoutConversionBase::template OperationMove; + return std::tuple{}; + } + + template + static constexpr auto GenerateBeforeOperations(MetaCodeGenerator &mcg, AllocatorHolder, std::tuple ops) -> RegisterAllocator { + constexpr size_t NumOperations = 1 + sizeof...(OtherOperations); + using OperationsTuple = decltype(ops); + using FilterHelper = TypeIndexFilter; + + constexpr auto ProcessOperation = [](MetaCodeGenerator &pr_mcg, auto &allocator, Operation) { + if (Conversion::template CanGenerateCode(allocator)) { + Conversion::template GenerateCode(pr_mcg, allocator); + return true; + } + return false; + }; + + constexpr auto ProcessResults = [ProcessOperation](std::tuple) { + auto allocator = UNWRAP_TEMPLATE_CONSTANT(AllocatorHolder); + MetaCodeGenerator pr_mcg; + auto use_array = std::array{ ProcessOperation(pr_mcg, allocator, Operations{})... 
}; + return std::make_tuple(use_array, allocator, pr_mcg); + }(OperationsTuple{}); + + constexpr auto CanGenerate = std::get<0>(ProcessResults); + constexpr auto AfterAllocator = std::get<1>(ProcessResults); + constexpr auto GeneratedCode = std::get<2>(ProcessResults).GetMetaCode(); + + for (size_t i = 0; i < GeneratedCode.GetNumOperations(); i++) { + mcg.AddOperationDirectly(GeneratedCode.GetOperation(i)); + } + + constexpr auto FilteredOperations = FilterHelper::template GetFilteredTuple(WRAP_TEMPLATE_CONSTANT(CanGenerate)); + static_assert(std::tuple_size::value <= NumOperations); + if constexpr (std::tuple_size::value > 0) { + if constexpr (std::tuple_size::value != NumOperations) { + return GenerateBeforeOperations(mcg, WRAP_TEMPLATE_CONSTANT(AfterAllocator), FilteredOperations); + } else { + /* No progress was made, so we need to make a change. */ + constexpr auto ModifiedOperations = GetModifiedOperations(WRAP_TEMPLATE_CONSTANT(AfterAllocator), FilteredOperations); + return GenerateBeforeOperations(mcg, WRAP_TEMPLATE_CONSTANT(AfterAllocator), ModifiedOperations); + } + } else { + return AfterAllocator; + } + } + + static constexpr MetaCode GenerateOriginalBeforeMetaCode() { + MetaCodeGenerator mcg; + RegisterAllocator allocator; + static_assert(SvcAbiType::RegisterCount == KernelAbiType::RegisterCount); + + /* Reserve registers used by the input layout. */ + constexpr auto InitialAllocator = [] { + RegisterAllocator initial_allocator; + for (size_t i = 0; i < SvcAbiType::RegisterCount; i++) { + if (Conversion::LayoutForSvc.GetInputLayout().UsesRegister(i)) { + initial_allocator.Allocate(i); + } + } + return initial_allocator; + }(); + + /* Save every register that needs to be preserved to the stack. */ + if constexpr (Conversion::NumPreserveRegisters > 0) { + [&mcg](std::index_sequence) { + mcg.template SaveRegisters(); + }(typename Conversion::PreserveRegisters{}); + } + + /* Allocate space on the stack for parameters that need it. */ + if constexpr (UsedStackSpace > 0) { + mcg.template AllocateStackSpace(); + } + + /* Generate code for before operations. */ + if constexpr (Conversion::NumBeforeOperations > 0) { + allocator = GenerateBeforeOperations(mcg, WRAP_TEMPLATE_CONSTANT(InitialAllocator), typename Conversion::BeforeOperations{}); + } else { + allocator = InitialAllocator; + } + + /* Generate code for after operations. */ + if constexpr (Conversion::NumAfterOperations > 0) { + if (!TryPrepareForKernelProcedureToSvcInvocationCoalescing(typename Conversion::AfterOperations{}, mcg, allocator)) { + /* We're not eligible for the straightforward optimization. */ + [&mcg, &allocator](std::index_sequence) { + (Conversion::template GenerateCode::type, CodeGenerationKind::PrepareForKernelProcedureToSvcInvocation>(mcg, allocator), ...); + }(std::make_index_sequence()); + } + } + + return mcg.GetMetaCode(); + } + public: + using SvcAbiType = _SvcAbiType; + using UserAbiType = _UserAbiType; + using KernelAbiType = _KernelAbiType; + + using Conversion = LayoutConversion; + + static constexpr size_t UsedStackSpace = Conversion::NonAbiUsedStackIndices * KernelAbiType::RegisterSize; + + static constexpr MetaCode OriginalBeforeMetaCode = [] { + return GenerateOriginalBeforeMetaCode(); + }(); + + static constexpr MetaCode OriginalAfterMetaCode = [] { + MetaCodeGenerator mcg; + RegisterAllocator allocator; + static_assert(SvcAbiType::RegisterCount == KernelAbiType::RegisterCount); + + /* Generate code for after operations. 
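GenerateOriginalBeforeMetaCode computes InitialAllocator with a constexpr immediately-invoked lambda, a pattern used throughout these headers: the lambda body may loop and mutate freely, as long as the final result is a constant expression. The idiom in isolation (values are illustrative):

    #include <cstddef>

    /* Illustrative: computing a nontrivial constant with an immediately
     * invoked constexpr lambda, as InitialAllocator is computed above. The
     * body may loop and mutate; only the returned value must be constant. */
    constexpr auto InitialMask = [] {
        unsigned mask = 0;
        for (std::size_t i = 0; i < 8; i++) {
            if (i % 2 == 0) {       /* stands in for "the layout uses register i" */
                mask |= 1u << i;
            }
        }
        return mask;
    }();
    static_assert(InitialMask == 0b01010101);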
*/ + if constexpr (Conversion::NumAfterOperations > 0) { + if (!TryKernelProcedureToSvcInvocationCoalescing(typename Conversion::AfterOperations{}, mcg, allocator)) { + [&mcg, &allocator](std::index_sequence) { + (Conversion::template GenerateCode::type, CodeGenerationKind::KernelProcedureToSvcInvocation>(mcg, allocator), ...); + }(std::make_index_sequence()); + } + } + + /* Allocate space on the stack for parameters that need it. */ + if constexpr (UsedStackSpace > 0) { + mcg.template FreeStackSpace(); + } + + if constexpr (Conversion::NumClearRegisters > 0) { + [&mcg](std::index_sequence) { + mcg.template ClearRegisters(); + }(typename Conversion::ClearRegisters{}); + } + + /* Restore registers we previously saved to the stack. */ + if constexpr (Conversion::NumPreserveRegisters > 0) { + [&mcg](std::index_sequence) { + mcg.template RestoreRegisters(); + }(typename Conversion::PreserveRegisters{}); + } + + return mcg.GetMetaCode(); + }(); + + /* TODO: Implement meta code optimization via separate layer. */ + /* Right now some basic optimizations are just implemented by the above generators. */ + static constexpr MetaCode OptimizedBeforeMetaCode = OriginalBeforeMetaCode; + static constexpr MetaCode OptimizedAfterMetaCode = OriginalAfterMetaCode; + }; + + template + class KernelSvcWrapperHelper { + private: + using Traits = FunctionTraits; + public: + using Impl = KernelSvcWrapperHelperImpl<_SvcAbiType, _UserAbiType, _KernelAbiType, typename Traits::ReturnType, typename Traits::ArgsType>; + + static constexpr bool IsAarch64Kernel = std::is_same<_KernelAbiType, Aarch64Lp64Abi>::value; + static constexpr bool IsAarch32Kernel = std::is_same<_KernelAbiType, Aarch32Ilp32Abi>::value; + static_assert(IsAarch64Kernel || IsAarch32Kernel); + + using CodeGenerator = typename std::conditional::type; + + static constexpr auto BeforeMetaCode = Impl::OptimizedBeforeMetaCode; + static constexpr auto AfterMetaCode = Impl::OptimizedAfterMetaCode; + + +/* Set omit-frame-pointer to prevent GCC from emitting MOV X29, SP instructions. */ +#pragma GCC push_options +#pragma GCC optimize ("omit-frame-pointer") + + static ALWAYS_INLINE void WrapSvcFunction() { + /* Generate appropriate assembly. */ + GenerateCodeForMetaCode(WRAP_TEMPLATE_CONSTANT(BeforeMetaCode)); + ON_SCOPE_EXIT { GenerateCodeForMetaCode(WRAP_TEMPLATE_CONSTANT(AfterMetaCode)); }; + + return reinterpret_cast(Function)(); + } + +#pragma GCC pop_options + }; + + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout.hpp new file mode 100644 index 000000000..54ef0e020 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout.hpp @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include "svc_codegen_impl_common.hpp" +#include "svc_codegen_impl_parameter.hpp" + +namespace ams::svc::codegen::impl { + + class ParameterLayout { + public: + static constexpr size_t MaxParameters = 8; + private: + static constexpr size_t InvalidIndex = std::numeric_limits::max(); + private: + /* ABI parameters. */ + Abi abi; + + /* Parameter storage. */ + size_t num_parameters; + Parameter parameters[MaxParameters]; + public: + constexpr explicit ParameterLayout(Abi a) + : abi(a), num_parameters(0), parameters() + { /* ... */ } + + constexpr void AddSingle(Parameter::Identifier id, ArgumentType type, size_t ts, size_t ps, bool p, Storage s, size_t idx) { + for (size_t i = 0; i < this->num_parameters; i++) { + if (this->parameters[i].Is(id)) { + this->parameters[i].AddLocation(Location(s, idx)); + return; + } + } + this->parameters[this->num_parameters++] = Parameter(id, type, ts, ps, p, Location(s, idx)); + } + + constexpr size_t Add(Parameter::Identifier id, ArgumentType type, size_t ts, size_t ps, bool p, Storage s, size_t i) { + size_t required_registers = 0; + + while (required_registers * this->abi.register_size < ps) { + this->AddSingle(id, type, ts, ps, p, s, i++); + required_registers++; + } + + return required_registers; + } + + constexpr bool UsesLocation(Location l) const { + for (size_t i = 0; i < this->num_parameters; i++) { + if (this->parameters[i].UsesLocation(l)) { + return true; + } + } + return false; + } + + constexpr bool UsesRegister(size_t i) const { + return this->UsesLocation(Location(Storage::Register, i)); + } + + constexpr bool IsRegisterFree(size_t i) const { + return !(this->UsesRegister(i)); + } + + constexpr size_t GetNumParameters() const { + return this->num_parameters; + } + + constexpr Parameter GetParameter(size_t i) const { + return this->parameters[i]; + } + + constexpr bool HasParameter(Parameter::Identifier id) const { + for (size_t i = 0; i < this->num_parameters; i++) { + if (this->parameters[i].Is(id)) { + return true; + } + } + return false; + } + + constexpr Parameter GetParameter(Parameter::Identifier id) const { + for (size_t i = 0; i < this->num_parameters; i++) { + if (this->parameters[i].Is(id)) { + return this->parameters[i]; + } + } + std::abort(); + } + }; + + class ProcedureLayout { + private: + Abi abi; + ParameterLayout input; + ParameterLayout output; + private: + template + constexpr void ProcessArgument(size_t i, size_t &NGRN, size_t &NSAA) { + /* We currently don't implement support for floating point types. */ + static_assert(!std::is_floating_point::value); + static_assert(!std::is_same::value); + + constexpr size_t ArgumentTypeSize = AbiType::template Size; + constexpr bool PassedByPointer = IsPassedByPointer; + constexpr size_t ArgumentPassSize = PassedByPointer ? AbiType::PointerSize : ArgumentTypeSize; + + /* TODO: Is there ever a case where this is not the correct alignment? */ + constexpr size_t ArgumentAlignment = ArgumentPassSize; + + /* Ensure NGRN is aligned. */ + if constexpr (ArgumentAlignment > AbiType::RegisterSize) { + NGRN += (NGRN & 1); + } + + /* TODO: We don't support splitting arguments between registers and stack, but AAPCS32 does. */ + /* Is this a problem? Nintendo seems to not ever do this. */ + + auto id = Parameter::Identifier("FunctionParameter", i); + + /* Allocate integral types specially per aapcs. 
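ParameterLayout::Add, above, assigns one location per ABI register until the passed size is covered, i.e. ceil(passed_size / register_size) locations in total; this is how a u64 comes to occupy two SVC registers for a 32-bit caller but a single kernel register. The arithmetic restated as a compile-time check:

    #include <cstddef>

    /* Illustrative restatement of the loop in ParameterLayout::Add: a
     * parameter of passed size ps occupies ceil(ps / register_size) slots. */
    constexpr std::size_t RequiredRegisters(std::size_t ps, std::size_t register_size) {
        std::size_t required = 0;
        while (required * register_size < ps) {
            required++;
        }
        return required;
    }

    static_assert(RequiredRegisters(8, 4) == 2);  /* u64 for a 32-bit caller: two SVC registers */
    static_assert(RequiredRegisters(8, 8) == 1);  /* u64 in the LP64 kernel: one register */
    static_assert(RequiredRegisters(4, 8) == 1);  /* u32 still occupies a whole slot */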
*/ + constexpr ArgumentType Type = GetArgumentType; + const size_t registers_available = AbiType::RegisterCount - NGRN; + if constexpr (!PassedByPointer && IsIntegralOrUserPointer && ArgumentTypeSize > AbiType::RegisterSize) { + if (registers_available >= 2) { + this->input.Add(id, Type, ArgumentTypeSize, ArgumentPassSize, PassedByPointer, Storage::Register, NGRN); + NGRN += 2; + } else { + /* Argument went on stack, so stop allocating arguments in registers. */ + NGRN = AbiType::RegisterCount; + + NSAA += (NSAA & 1); + this->input.Add(id, Type, ArgumentTypeSize, ArgumentPassSize, PassedByPointer, Storage::Stack, NSAA); + NSAA += 2; + } + } else { + if (ArgumentPassSize <= AbiType::RegisterSize * registers_available) { + NGRN += this->input.Add(id, Type, ArgumentTypeSize, ArgumentPassSize, PassedByPointer, Storage::Register, NGRN); + } else { + /* Argument went on stack, so stop allocating arguments in registers. */ + NGRN = AbiType::RegisterCount; + + /* TODO: Stack pointer alignment is only ensured for aapcs64. */ + /* What should we do here? */ + + NSAA += this->input.Add(id, Type, ArgumentTypeSize, ArgumentPassSize, PassedByPointer, Storage::Stack, NSAA); + } + } + } + public: + constexpr explicit ProcedureLayout(Abi a) : abi(a), input(a), output(a) { /* ... */ } + + template + static constexpr ProcedureLayout Create() { + ProcedureLayout layout(Abi::Convert()); + + /* 1. The Next General-purpose Register Number (NGRN) is set to zero. */ + [[maybe_unused]] size_t NGRN = 0; + + /* 2. The next stacked argument address (NSAA) is set to the current stack-pointer value (SP). */ + [[maybe_unused]] size_t NSAA = 0; /* Should be considered an offset from stack pointer. */ + + /* 3. Handle the return type. */ + /* TODO: It's unclear how to handle the non-integral and too-large case. */ + if constexpr (!std::is_same::value) { + constexpr size_t ReturnTypeSize = AbiType::template Size; + layout.output.Add(Parameter::Identifier("ReturnType"), ArgumentType::Invalid, ReturnTypeSize, ReturnTypeSize, false, Storage::Register, 0); + static_assert(IsIntegral || ReturnTypeSize <= AbiType::RegisterSize); + } + + /* Process all arguments, in order. */ + size_t i = 0; + (layout.ProcessArgument(i++, NGRN, NSAA), ...); + + return layout; + } + + constexpr ParameterLayout GetInputLayout() const { + return this->input; + } + + constexpr ParameterLayout GetOutputLayout() const { + return this->output; + } + + constexpr Parameter GetParameter(Parameter::Identifier id) const { + if (this->input.HasParameter(id)) { + return this->input.GetParameter(id); + } else { + return this->output.GetParameter(id); + } + } + }; + + class SvcInvocationLayout { + private: + Abi abi; + ParameterLayout input; + ParameterLayout output; + private: + template + constexpr void ForEachInputArgument(ParameterLayout param_layout, F f) { + /* We want to iterate over the parameters in sorted order. 
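The NGRN/NSAA bookkeeping in Create() above follows the AAPCS outline: arguments consume registers until none remain, and from then on everything goes to the stack (NGRN is pinned to RegisterCount so no later argument can slip back into a register). A worked example with a hypothetical nine-argument call under an assumed LP64-style ABI with eight argument registers:

    #include <cstddef>

    /* Hypothetical: nine 8-byte arguments under an assumed LP64-style ABI
     * with eight argument registers, mirroring the NGRN/NSAA bookkeeping. */
    constexpr std::size_t RegisterCount = 8;
    constexpr std::size_t RegisterSize  = 8;

    struct Slot { bool on_stack; std::size_t index; };

    constexpr Slot Classify(std::size_t &NGRN, std::size_t &NSAA) {
        if (NGRN < RegisterCount) {
            return { false, NGRN++ };
        }
        NGRN = RegisterCount;             /* stop allocating arguments in registers */
        const Slot s = { true, NSAA };
        NSAA += RegisterSize;
        return s;
    }

    constexpr bool NinthArgumentGoesToStack = [] {
        std::size_t NGRN = 0, NSAA = 0;
        Slot last = {};
        for (std::size_t i = 0; i < 9; i++) {
            last = Classify(NGRN, NSAA);  /* arguments 0..7 take registers 0..7 */
        }
        return last.on_stack && last.index == 0;
    }();
    static_assert(NinthArgumentGoesToStack);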
*/ + std::array map = {}; + const size_t num_parameters = param_layout.GetNumParameters(); + for (size_t i = 0; i < num_parameters; i++) { + map[i] = i; + } + for (size_t i = 1; i < num_parameters; i++) { + for (size_t j = i; j > 0 && param_layout.GetParameter(map[j-1]).GetLocation(0) > param_layout.GetParameter(map[j]).GetLocation(0); j--) { + /* std::swap is not constexpr until c++20 :( */ + /* TODO: std::swap(map[j], map[j-1]); */ + const size_t tmp = map[j]; + map[j] = map[j-1]; + map[j-1] = tmp; + } + } + + for (size_t i = 0; i < param_layout.GetNumParameters(); i++) { + const auto Parameter = param_layout.GetParameter(map[i]); + if (Parameter.GetArgumentType() == ArgumentType::In && !Parameter.IsPassedByPointer()) { + f(Parameter); + } + } + for (size_t i = 0; i < param_layout.GetNumParameters(); i++) { + const auto Parameter = param_layout.GetParameter(map[i]); + if (Parameter.GetArgumentType() == ArgumentType::InUserPointer) { + f(Parameter); + } + } + for (size_t i = 0; i < param_layout.GetNumParameters(); i++) { + const auto Parameter = param_layout.GetParameter(map[i]); + if (Parameter.GetArgumentType() == ArgumentType::OutUserPointer) { + f(Parameter); + } + } + } + + template + constexpr void ForEachInputPointerArgument(ParameterLayout param_layout, F f) { + for (size_t i = 0; i < param_layout.GetNumParameters(); i++) { + const auto Parameter = param_layout.GetParameter(i); + if (Parameter.GetArgumentType() == ArgumentType::In && Parameter.IsPassedByPointer()) { + f(Parameter); + } + } + } + + template + constexpr void ForEachOutputArgument(ParameterLayout param_layout, F f) { + for (size_t i = 0; i < param_layout.GetNumParameters(); i++) { + const auto Parameter = param_layout.GetParameter(i); + if (Parameter.GetArgumentType() == ArgumentType::Out) { + f(Parameter); + } + } + } + + template + static constexpr void AddRegisterParameter(ParameterLayout &dst_layout, RegisterAllocator ®_allocator, Parameter param) { + for (size_t i = 0; i < param.GetNumLocations(); i++) { + const auto location = param.GetLocation(i); + if (location.GetStorage() == Storage::Register) { + reg_allocator.Allocate(location.GetIndex()); + dst_layout.AddSingle(param.GetIdentifier(), param.GetArgumentType(), param.GetTypeSize(), param.GetPassedSize(), param.IsPassedByPointer(), Storage::Register, location.GetIndex()); + } + } + } + + template + static constexpr void AddStackParameter(ParameterLayout &dst_layout, RegisterAllocator ®_allocator, Parameter param) { + for (size_t i = 0; i < param.GetNumLocations(); i++) { + const auto location = param.GetLocation(i); + if (location.GetStorage() == Storage::Stack) { + const size_t free_reg = reg_allocator.AllocateFirstFree(); + dst_layout.AddSingle(param.GetIdentifier(), param.GetArgumentType(), param.GetTypeSize(), param.GetPassedSize(), param.IsPassedByPointer(), Storage::Register, free_reg); + } + } + } + + template + static constexpr void AddIndirectParameter(ParameterLayout &dst_layout, RegisterAllocator ®_allocator, Parameter param) { + const size_t type_size = param.GetTypeSize(); + for (size_t sz = 0; sz < type_size; sz += AbiType::RegisterSize) { + const size_t free_reg = reg_allocator.AllocateFirstFree(); + dst_layout.AddSingle(param.GetIdentifier(), param.GetArgumentType(), type_size, type_size, false, Storage::Register, free_reg); + } + } + public: + constexpr explicit SvcInvocationLayout(Abi a) : abi(a), input(a), output(a) { /* ... 
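The three-line exchange in the insertion sort above exists only because std::swap is not constexpr until C++20, as the TODO comments note; under C++20 the TODO resolves to a direct std::swap(map[j], map[j-1]). A self-contained restatement of the C++17-legal form:

    #include <array>
    #include <cstddef>

    /* Illustrative: the manual exchange is constexpr-legal in C++17; under
     * C++20 the three lines collapse to std::swap(a[j], a[j-1]). */
    constexpr std::array<std::size_t, 3> SortedManually() {
        std::array<std::size_t, 3> a = {{ 2, 0, 1 }};
        for (std::size_t i = 1; i < a.size(); i++) {
            for (std::size_t j = i; j > 0 && a[j-1] > a[j]; j--) {
                const std::size_t tmp = a[j];
                a[j] = a[j-1];
                a[j-1] = tmp;
            }
        }
        return a;
    }
    static_assert(SortedManually()[0] == 0 && SortedManually()[1] == 1 && SortedManually()[2] == 2);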
*/ } + + template + static constexpr SvcInvocationLayout Create(ProcedureLayout procedure_layout) { + SvcInvocationLayout layout(Abi::Convert()); + RegisterAllocator input_register_allocator, output_register_allocator; + + /* Input first wants to map in register -> register */ + layout.ForEachInputArgument(procedure_layout.GetInputLayout(), [&](Parameter parameter) { + AddRegisterParameter(layout.input, input_register_allocator, parameter); + }); + + /* And then input wants to map in stack -> stack */ + layout.ForEachInputArgument(procedure_layout.GetInputLayout(), [&](Parameter parameter) { + AddStackParameter(layout.input, input_register_allocator, parameter); + }); + + /* And then input wants to map in indirects -> register */ + layout.ForEachInputPointerArgument(procedure_layout.GetInputLayout(), [&](Parameter parameter) { + AddIndirectParameter(layout.input, input_register_allocator, parameter); + }); + + /* Handle the return type. */ + if (procedure_layout.GetOutputLayout().GetNumParameters() > 0) { + if (procedure_layout.GetOutputLayout().GetNumParameters() != 1) { + std::abort(); + } + const auto return_param = procedure_layout.GetOutputLayout().GetParameter(0); + if (return_param.GetIdentifier() != Parameter::Identifier("ReturnType")) { + std::abort(); + } + AddRegisterParameter(layout.output, output_register_allocator, return_param); + } + + /* Handle other outputs. */ + layout.ForEachOutputArgument(procedure_layout.GetInputLayout(), [&](Parameter parameter) { + AddIndirectParameter(layout.output, output_register_allocator, parameter); + }); + + return layout; + } + + constexpr ParameterLayout GetInputLayout() const { + return this->input; + } + + constexpr ParameterLayout GetOutputLayout() const { + return this->output; + } + }; + + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout_conversion.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout_conversion.hpp new file mode 100644 index 000000000..d4abc7a18 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout_conversion.hpp @@ -0,0 +1,491 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include "svc_codegen_impl_common.hpp" +#include "svc_codegen_impl_parameter.hpp" +#include "svc_codegen_impl_layout.hpp" +#include "svc_codegen_impl_meta_code.hpp" + +namespace ams::svc::codegen::impl { + + class LayoutConversionBase { + public: + enum class OperationKind { + Move, + LoadAndStore, + PackAndUnpack, + Scatter, + Invalid, + }; + + class OperationMoveImpl; + class OperationLoadAndStoreImpl; + class OperationPackAndUnpackImpl; + class OperationScatterImpl; + + class OperationBase{}; + + template + class Operation : public OperationBase { + public: + static constexpr OperationKind Kind = _Kind; + static constexpr size_t RegisterSize = RS; + static constexpr size_t PassedSize = PS; + static constexpr size_t StackOffset = SO; + static constexpr size_t ProcedureIndex = PIdx; + + static constexpr size_t NumSvcIndices = sizeof...(SIdx); + static constexpr std::array SvcIndices = { SIdx... }; + static constexpr std::index_sequence SvcIndexSequence = {}; + + template + static constexpr size_t SvcIndex = SvcIndices[I]; + + template + static void ForEachSvcIndex(F f) { + (f(SIdx), ...); + } + + using ImplType = typename std::conditional::type>::type>::type>::type; + + template + using ModifiedType = Operation; + }; + + template + using OperationMove = Operation; + + template + using OperationLoadAndStore = Operation; + + template + using OperationPackAndUnpack = Operation; + + template + using OperationScatter = Operation; + + class OperationMoveImpl { + public: + template + static constexpr bool CanGenerateCodeForSvcInvocationToKernelProcedure(RegisterAllocator allocator) { + static_assert(Operation::Kind == OperationKind::Move); + allocator.Free(Operation::template SvcIndex<0>); + return allocator.TryAllocate(Operation::ProcedureIndex); + } + + template + static constexpr void GenerateCodeForSvcInvocationToKernelProcedure(MetaCodeGenerator &mcg, RegisterAllocator &allocator) { + static_assert(Operation::Kind == OperationKind::Move); + allocator.Free(Operation::template SvcIndex<0>); + allocator.Allocate(Operation::ProcedureIndex); + mcg.template MoveRegister>(); + } + }; + + class OperationLoadAndStoreImpl { + public: + template + static constexpr bool CanGenerateCodeForSvcInvocationToKernelProcedure(RegisterAllocator allocator) { + static_assert(Operation::Kind == OperationKind::LoadAndStore); + allocator.Free(Operation::template SvcIndex<0>); + return true; + } + + template + static constexpr void GenerateCodeForSvcInvocationToKernelProcedure(MetaCodeGenerator &mcg, RegisterAllocator &allocator) { + static_assert(Operation::Kind == OperationKind::LoadAndStore); + allocator.Free(Operation::template SvcIndex<0>); + constexpr size_t StackOffset = Operation::ProcedureIndex * Operation::RegisterSize; + mcg.template StoreToStack, StackOffset>(); + } + }; + + class OperationPackAndUnpackImpl { + public: + template + static constexpr bool CanGenerateCodeForSvcInvocationToKernelProcedure(RegisterAllocator allocator) { + static_assert(Operation::Kind == OperationKind::PackAndUnpack); + allocator.Free(Operation::template SvcIndex<0>); + allocator.Free(Operation::template SvcIndex<1>); + return allocator.TryAllocate(Operation::ProcedureIndex); + } + + template + static constexpr void GenerateCodeForSvcInvocationToKernelProcedure(MetaCodeGenerator &mcg, RegisterAllocator &allocator) { + static_assert(Operation::Kind == OperationKind::PackAndUnpack); + allocator.Free(Operation::template SvcIndex<0>); + allocator.Free(Operation::template SvcIndex<1>); + 
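Pack and Unpack handle the 32-bit-caller case: a 64-bit kernel argument arrives split across two 32-bit SVC registers and must be recombined before the kernel procedure runs, then split again for the return path. The underlying bit manipulation (illustrative; the actual operations are emitted as assembly by the code generator):

    #include <cstdint>

    /* Illustrative: combine two 32-bit SVC registers into one 64-bit kernel
     * register (Pack), and split it back for the return path (Unpack). */
    constexpr std::uint64_t Pack(std::uint32_t lo, std::uint32_t hi) {
        return (static_cast<std::uint64_t>(hi) << 32) | lo;
    }

    constexpr void Unpack(std::uint32_t &lo, std::uint32_t &hi, std::uint64_t packed) {
        lo = static_cast<std::uint32_t>(packed);
        hi = static_cast<std::uint32_t>(packed >> 32);
    }

    static_assert(Pack(0xDEADBEEF, 0x00C0FFEE) == 0x00C0FFEEDEADBEEFull);
    static_assert([] {
        std::uint32_t lo = 0, hi = 0;
        Unpack(lo, hi, Pack(0xDEADBEEF, 0x00C0FFEE));
        return lo == 0xDEADBEEF && hi == 0x00C0FFEE;
    }());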
allocator.Allocate(Operation::ProcedureIndex); + mcg.template Pack, Operation::template SvcIndex<1>>(); + } + + template + static constexpr void GenerateCodeForPrepareForKernelProcedureToSvcInvocation(MetaCodeGenerator &mcg) { + static_assert(Operation::Kind == OperationKind::PackAndUnpack); + /* ... */ + } + + template + static constexpr void GenerateCodeForKernelProcedureToSvcInvocation(MetaCodeGenerator &mcg) { + static_assert(Operation::Kind == OperationKind::PackAndUnpack); + mcg.template Unpack, Operation::template SvcIndex<1>, Operation::ProcedureIndex>(); + } + }; + + class OperationScatterImpl { + public: + template + static constexpr bool CanGenerateCodeForSvcInvocationToKernelProcedure(RegisterAllocator allocator) { + static_assert(Operation::Kind == OperationKind::Scatter); + [&allocator](std::index_sequence) { + (allocator.Free(SvcIndex), ...); + }(Operation::SvcIndexSequence); + return allocator.TryAllocate(Operation::ProcedureIndex); + } + + template + static constexpr void GenerateCodeForSvcInvocationToKernelProcedure(MetaCodeGenerator &mcg, RegisterAllocator &allocator) { + static_assert(Operation::Kind == OperationKind::Scatter); + [&allocator](std::index_sequence) { + (allocator.Free(SvcIndex), ...); + }(Operation::SvcIndexSequence); + allocator.Allocate(Operation::ProcedureIndex); + + [&mcg](std::index_sequence) { + (mcg.template StoreToStack, Operation::StackOffset + Operation::RegisterSize * (Is), Operation::RegisterSize>(), ...); + }(std::make_index_sequence()); + + mcg.template LoadStackAddress(); + } + + template + static constexpr void GenerateCodeForPrepareForKernelProcedureToSvcInvocation(MetaCodeGenerator &mcg) { + static_assert(Operation::Kind == OperationKind::Scatter); + + [&mcg](std::index_sequence) { + (mcg.template StoreToStack, Operation::StackOffset + Operation::RegisterSize * (Is), Operation::RegisterSize>(), ...); + }(std::make_index_sequence()); + + mcg.template LoadStackAddress(); + } + + template + static constexpr void GenerateCodeForKernelProcedureToSvcInvocation(MetaCodeGenerator &mcg) { + static_assert(Operation::Kind == OperationKind::Scatter); + + [&mcg](std::index_sequence) { + (mcg.template LoadFromStack, Operation::StackOffset + Operation::RegisterSize * (Is), Operation::RegisterSize>(), ...); + }(std::make_index_sequence()); + } + }; + }; + + template + class LayoutConversion { + public: + using SvcAbiType = _SvcAbiType; + using UserAbiType = _UserAbiType; + using KernelAbiType = _KernelAbiType; + + static constexpr auto LayoutForUser = ProcedureLayout::Create(); + static constexpr auto LayoutForSvc = SvcInvocationLayout::Create(LayoutForUser); + static constexpr auto LayoutForKernel = ProcedureLayout::Create(); + private: + template + static constexpr size_t DetermineUsedStackIndices() { + [[maybe_unused]] constexpr auto Procedure = LayoutForKernel; + [[maybe_unused]] constexpr ParameterLayout Svc = Input ? LayoutForSvc.GetInputLayout() : LayoutForSvc.GetOutputLayout(); + + if constexpr (ParameterIndex >= Svc.GetNumParameters()) { + /* Base case: we're done. */ + return Used; + } else { + /* We're processing more parameters. */ + constexpr Parameter SvcParam = Svc.GetParameter(ParameterIndex); + constexpr Parameter ProcedureParam = Procedure.GetParameter(SvcParam.GetIdentifier()); + + if constexpr (SvcParam.IsPassedByPointer() == ProcedureParam.IsPassedByPointer()) { + /* We're not scattering, so stack won't be used. */ + return DetermineUsedStackIndices(); + } else { + /* We're scattering, and so we're using stack. 
*/ + static_assert(ProcedureParam.GetNumLocations() == 1); + + constexpr size_t IndicesPerRegister = KernelAbiType::RegisterSize / SvcAbiType::RegisterSize; + static_assert(IndicesPerRegister > 0); + + constexpr size_t RequiredCount = util::AlignUp(SvcParam.GetNumLocations(), IndicesPerRegister) / IndicesPerRegister; + + return DetermineUsedStackIndices(); + } + } + } + + static constexpr size_t AbiUsedStackIndices = [] { + constexpr auto KernLayout = LayoutForKernel.GetInputLayout(); + + size_t used = 0; + for (size_t i = 0; i < KernLayout.GetNumParameters(); i++) { + const auto Param = KernLayout.GetParameter(i); + for (size_t j = 0; j < Param.GetNumLocations(); j++) { + const auto Loc = Param.GetLocation(j); + if (Loc.GetStorage() == Storage::Stack) { + used = std::max(used, Loc.GetIndex() + 1); + } + } + } + + return used; + }(); + + static constexpr size_t BeforeUsedStackIndices = DetermineUsedStackIndices(); + static constexpr size_t AfterUsedStackIndices = DetermineUsedStackIndices(); + + template + static constexpr auto ZipMoveOperations() { + constexpr auto Procedure = LayoutForKernel; + constexpr ParameterLayout Svc = Input ? LayoutForSvc.GetInputLayout() : LayoutForSvc.GetOutputLayout(); + + static_assert(ParameterIndex < Svc.GetNumParameters()); + + constexpr Parameter SvcParam = Svc.GetParameter(ParameterIndex); + constexpr Parameter ProcedureParam = Procedure.GetParameter(SvcParam.GetIdentifier()); + + static_assert(SvcParam.IsPassedByPointer() == ProcedureParam.IsPassedByPointer()); + static_assert(SvcParam.GetNumLocations() == ProcedureParam.GetNumLocations()); + + if constexpr (LocationIndex >= SvcParam.GetNumLocations()) { + /* Base case: we're done. */ + return std::tuple<>{}; + } else { + constexpr Location SvcLoc = SvcParam.GetLocation(LocationIndex); + constexpr Location ProcedureLoc = ProcedureParam.GetLocation(LocationIndex); + + if constexpr (SvcLoc == ProcedureLoc) { + /* No need to emit an operation if we're not changing where we are. */ + return ZipMoveOperations(); + } else { + /* Svc location needs to be in a register. */ + static_assert(SvcLoc.GetStorage() == Storage::Register); + + constexpr size_t Size = KernelAbiType::RegisterSize; + + if constexpr (ProcedureLoc.GetStorage() == Storage::Register) { + using OperationType = LayoutConversionBase::OperationMove; + constexpr auto cur_op = std::make_tuple(OperationType{}); + return std::tuple_cat(cur_op, ZipMoveOperations()); + } else { + using OperationType = LayoutConversionBase::OperationLoadAndStore; + constexpr auto cur_op = std::make_tuple(OperationType{}); + return std::tuple_cat(cur_op, ZipMoveOperations()); + } + } + } + } + + template + static constexpr auto DetermineConversionOperations() { + [[maybe_unused]] constexpr auto Procedure = LayoutForKernel; + [[maybe_unused]] constexpr ParameterLayout Svc = Input ? LayoutForSvc.GetInputLayout() : LayoutForSvc.GetOutputLayout(); + [[maybe_unused]] constexpr std::array ParameterMap = [](SvcHolder){ + /* We want to iterate over the parameters in sorted order. 
*/ + constexpr ParameterLayout CapturedSvc = UNWRAP_TEMPLATE_CONSTANT(SvcHolder); + std::array map{}; + const size_t num_parameters = CapturedSvc.GetNumParameters(); + for (size_t i = 0; i < num_parameters; i++) { + map[i] = i; + } + for (size_t i = 1; i < num_parameters; i++) { + for (size_t j = i; j > 0 && CapturedSvc.GetParameter(map[j-1]).GetLocation(0) > CapturedSvc.GetParameter(map[j]).GetLocation(0); j--) { + /* std::swap is not constexpr until c++20 :( */ + /* TODO: std::swap(map[j], map[j-1]); */ + const size_t tmp = map[j]; + map[j] = map[j-1]; + map[j-1] = tmp; + } + } + return map; + }(WRAP_TEMPLATE_CONSTANT(Svc)); + + if constexpr (ParameterIndex >= Svc.GetNumParameters()) { + /* Base case: we're done. */ + if constexpr (Input) { + static_assert(StackIndex == BeforeUsedStackIndices + AbiUsedStackIndices); + } else { + static_assert(StackIndex == AfterUsedStackIndices + BeforeUsedStackIndices + AbiUsedStackIndices); + } + return std::tuple<>{}; + } else { + /* We're processing more parameters. */ + constexpr Parameter SvcParam = Svc.GetParameter(ParameterMap[ParameterIndex]); + constexpr Parameter ProcedureParam = Procedure.GetParameter(SvcParam.GetIdentifier()); + + if constexpr (SvcParam.IsPassedByPointer() == ProcedureParam.IsPassedByPointer()) { + if constexpr (SvcParam.GetNumLocations() == ProcedureParam.GetNumLocations()) { + /* Normal moves and loads/stores. */ + return std::tuple_cat(ZipMoveOperations(), DetermineConversionOperations()); + } else { + /* We're packing. */ + /* Make sure we're handling the 2 -> 1 case. */ + static_assert(SvcParam.GetNumLocations() == 2); + static_assert(ProcedureParam.GetNumLocations() == 1); + + constexpr Location ProcedureLoc = ProcedureParam.GetLocation(0); + constexpr Location SvcLoc0 = SvcParam.GetLocation(0); + constexpr Location SvcLoc1 = SvcParam.GetLocation(1); + static_assert(ProcedureLoc.GetStorage() == Storage::Register); + static_assert(SvcLoc0.GetStorage() == Storage::Register); + static_assert(SvcLoc1.GetStorage() == Storage::Register); + + constexpr size_t Size = KernelAbiType::RegisterSize; + + using OperationType = LayoutConversionBase::OperationPackAndUnpack; + + constexpr auto cur_op = std::make_tuple(OperationType{}); + + return std::tuple_cat(cur_op, DetermineConversionOperations()); + } + } else { + /* One operation, since we're scattering. */ + static_assert(ProcedureParam.GetNumLocations() == 1); + constexpr Location ProcedureLoc = ProcedureParam.GetLocation(0); + + constexpr size_t IndicesPerRegister = KernelAbiType::RegisterSize / SvcAbiType::RegisterSize; + static_assert(IndicesPerRegister > 0); + + constexpr size_t RequiredCount = util::AlignUp(SvcParam.GetNumLocations(), IndicesPerRegister) / IndicesPerRegister; + + if constexpr (ProcedureLoc.GetStorage() == Storage::Register) { + /* Scattering. In register during kernel call. */ + constexpr size_t RegisterSize = SvcAbiType::RegisterSize; + constexpr size_t PassedSize = ProcedureParam.GetTypeSize(); + + /* TODO: C++20 templated lambdas. For now, use GCC extension syntax. 
*/ + constexpr auto SvcIndexSequence = [](SvcParamWrapper, std::index_sequence) { + constexpr Parameter CapturedSvcParam = UNWRAP_TEMPLATE_CONSTANT(SvcParamWrapper); + return std::index_sequence{}; + }(WRAP_TEMPLATE_CONSTANT(SvcParam), std::make_index_sequence()); + + constexpr auto OperationValue = [](ProcedureLocWrapper, std::index_sequence) { + constexpr Location CapturedProcedureLoc = UNWRAP_TEMPLATE_CONSTANT(ProcedureLocWrapper); + return LayoutConversionBase::OperationScatter{}; + }(WRAP_TEMPLATE_CONSTANT(ProcedureLoc), SvcIndexSequence); + + constexpr auto cur_op = std::make_tuple(OperationValue); + + return std::tuple_cat(cur_op, DetermineConversionOperations()); + } else { + /* TODO: How should on-stack-during-kernel-call be handled? */ + static_assert(ProcedureLoc.GetStorage() == Storage::Register); + } + } + } + } + + static constexpr size_t PreserveRegisterStartIndex = SvcAbiType::ArgumentRegisterCount; + static constexpr size_t PreserveRegisterEndIndex = std::min(KernelAbiType::ArgumentRegisterCount, SvcAbiType::RegisterCount); + static constexpr size_t ClearRegisterStartIndex = 0; + static constexpr size_t ClearRegisterEndIndex = std::min(KernelAbiType::ArgumentRegisterCount, SvcAbiType::RegisterCount); + + template + static constexpr bool ShouldPreserveRegister = (PreserveRegisterStartIndex <= Index && Index < PreserveRegisterEndIndex) && + LayoutForSvc.GetInputLayout().IsRegisterFree(Index) && LayoutForSvc.GetOutputLayout().IsRegisterFree(Index); + + template + static constexpr bool ShouldClearRegister = (ClearRegisterStartIndex <= Index && Index < ClearRegisterEndIndex) && + LayoutForSvc.GetOutputLayout().IsRegisterFree(Index) && !ShouldPreserveRegister; + + template + static constexpr auto DeterminePreserveRegisters() { + static_assert(PreserveRegisterStartIndex <= Index && Index <= PreserveRegisterEndIndex); + + if constexpr (Index >= PreserveRegisterEndIndex) { + /* Base case: we're done. */ + return std::index_sequence<>{}; + } else { + if constexpr (ShouldPreserveRegister) { + /* Preserve this register. */ + return IndexSequenceCat(std::index_sequence{}, DeterminePreserveRegisters()); + } else { + /* We don't need to preserve register, so we can skip onwards. */ + return IndexSequenceCat(std::index_sequence<>{}, DeterminePreserveRegisters()); + } + } + } + + template + static constexpr auto DetermineClearRegisters() { + static_assert(ClearRegisterStartIndex <= Index && Index <= ClearRegisterEndIndex); + + if constexpr (Index >= ClearRegisterEndIndex) { + /* Base case: we're done. */ + return std::index_sequence<>{}; + } else { + if constexpr (ShouldClearRegister) { + /* Clear this register. */ + return IndexSequenceCat(std::index_sequence{}, DetermineClearRegisters()); + } else { + /* We don't need to preserve register, so we can skip onwards. 
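DeterminePreserveRegisters and DetermineClearRegisters accumulate their results by concatenating index_sequences, one index at a time, keeping only the indices that pass the predicate. A standalone sketch with an is-even predicate standing in for ShouldPreserveRegister / ShouldClearRegister:

    #include <cstddef>
    #include <type_traits>
    #include <utility>

    /* Illustrative: concatenate index_sequences, then recursively keep only
     * indices matching a predicate (is-even here, standing in for
     * ShouldPreserveRegister / ShouldClearRegister). */
    template <std::size_t... Ls, std::size_t... Rs>
    constexpr auto IndexSequenceCat(std::index_sequence<Ls...>, std::index_sequence<Rs...>) {
        return std::index_sequence<Ls..., Rs...>{};
    }

    template <std::size_t Index, std::size_t End>
    constexpr auto FilterEven() {
        if constexpr (Index >= End) {
            return std::index_sequence<>{};
        } else if constexpr (Index % 2 == 0) {
            return IndexSequenceCat(std::index_sequence<Index>{}, FilterEven<Index + 1, End>());
        } else {
            return IndexSequenceCat(std::index_sequence<>{}, FilterEven<Index + 1, End>());
        }
    }

    static_assert(std::is_same<decltype(FilterEven<0, 5>()), std::index_sequence<0, 2, 4>>::value);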
*/ + return IndexSequenceCat(std::index_sequence<>{}, DetermineClearRegisters()); + } + } + } + public: + static constexpr size_t NonAbiUsedStackIndices = AfterUsedStackIndices + BeforeUsedStackIndices; + using BeforeOperations = decltype(DetermineConversionOperations()); + using AfterOperations = decltype(DetermineConversionOperations()); + + static constexpr size_t NumBeforeOperations = std::tuple_size::value; + static constexpr size_t NumAfterOperations = std::tuple_size::value; + + using PreserveRegisters = decltype(DeterminePreserveRegisters()); + using ClearRegisters = decltype(DetermineClearRegisters()); + + static constexpr size_t NumPreserveRegisters = PreserveRegisters::size(); + static constexpr size_t NumClearRegisters = ClearRegisters::size(); + + static constexpr auto PreserveRegistersArray = ConvertToArray(PreserveRegisters{}); + static constexpr auto ClearRegistersArray = ConvertToArray(ClearRegisters{}); + public: + template + static constexpr bool CanGenerateCode(RegisterAllocator allocator) { + if constexpr (CodeGenKind == CodeGenerationKind::SvcInvocationToKernelProcedure) { + return Operation::ImplType::template CanGenerateCodeForSvcInvocationToKernelProcedure(allocator); + } else { + static_assert(CodeGenKind != CodeGenKind, "Invalid CodeGenerationKind"); + } + } + + template + static constexpr void GenerateCode(MetaCodeGenerator &mcg, RegisterAllocator &allocator) { + if constexpr (CodeGenKind == CodeGenerationKind::SvcInvocationToKernelProcedure) { + Operation::ImplType::template GenerateCodeForSvcInvocationToKernelProcedure(mcg, allocator); + } else if constexpr (CodeGenKind == CodeGenerationKind::PrepareForKernelProcedureToSvcInvocation) { + Operation::ImplType::template GenerateCodeForPrepareForKernelProcedureToSvcInvocation(mcg); + } else if constexpr (CodeGenKind == CodeGenerationKind::KernelProcedureToSvcInvocation) { + Operation::ImplType::template GenerateCodeForKernelProcedureToSvcInvocation(mcg); + } else { + static_assert(CodeGenKind != CodeGenKind, "Invalid CodeGenerationKind"); + } + } + }; + + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_meta_code.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_meta_code.hpp new file mode 100644 index 000000000..e7544702b --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_meta_code.hpp @@ -0,0 +1,236 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include "svc_codegen_impl_common.hpp" + +namespace ams::svc::codegen::impl { + + class MetaCode { + public: + static constexpr size_t MaxOperations = 0x40; + + enum class OperationKind { + SaveRegisters, + RestoreRegisters, + ClearRegisters, + AllocateStackSpace, + FreeStackSpace, + MoveRegister, + LoadFromStack, + LoadPairFromStack, + StoreToStack, + StorePairToStack, + Pack, + Unpack, + LoadStackAddress, + }; + + static constexpr const char *GetOperationKindName(OperationKind k) { + #define META_CODE_OPERATION_KIND_ENUM_CASE(s) case OperationKind::s: return #s + switch (k) { + META_CODE_OPERATION_KIND_ENUM_CASE(SaveRegisters); + META_CODE_OPERATION_KIND_ENUM_CASE(RestoreRegisters); + META_CODE_OPERATION_KIND_ENUM_CASE(ClearRegisters); + META_CODE_OPERATION_KIND_ENUM_CASE(AllocateStackSpace); + META_CODE_OPERATION_KIND_ENUM_CASE(FreeStackSpace); + META_CODE_OPERATION_KIND_ENUM_CASE(MoveRegister); + META_CODE_OPERATION_KIND_ENUM_CASE(LoadFromStack); + META_CODE_OPERATION_KIND_ENUM_CASE(LoadPairFromStack); + META_CODE_OPERATION_KIND_ENUM_CASE(StoreToStack); + META_CODE_OPERATION_KIND_ENUM_CASE(StorePairToStack); + META_CODE_OPERATION_KIND_ENUM_CASE(Pack); + META_CODE_OPERATION_KIND_ENUM_CASE(Unpack); + META_CODE_OPERATION_KIND_ENUM_CASE(LoadStackAddress); + default: + std::abort(); + } + #undef META_CODE_OPERATION_KIND_ENUM_CASE + } + + struct Operation { + OperationKind kind; + size_t num_parameters; + size_t parameters[16]; + }; + + template + static constexpr inline Operation MakeOperation() { + Operation op = {}; + static_assert(sizeof...(Is) <= sizeof(op.parameters) / sizeof(op.parameters[0])); + + op.kind = Kind; + op.num_parameters = sizeof...(Is); + + size_t i = 0; + ((op.parameters[i++] = Is), ...); + + return op; + } + private: + size_t num_operations; + std::array operations; + public: + constexpr explicit MetaCode() : num_operations(0), operations() { /* ... */ } + + constexpr size_t GetNumOperations() const { + return this->num_operations; + } + + constexpr Operation GetOperation(size_t i) const { + return this->operations[i]; + } + + constexpr void AddOperation(Operation op) { + this->operations[this->num_operations++] = op; + } + }; + + template + static constexpr auto GetOperationParameterSequence() { + constexpr auto _Operation = UNWRAP_TEMPLATE_CONSTANT(_OperationHolder); + constexpr size_t NumParameters = _Operation.num_parameters; + + return [](OperationHolder, std::index_sequence) { + constexpr auto Operation = UNWRAP_TEMPLATE_CONSTANT(OperationHolder); + return std::index_sequence{}; + }(_OperationHolder{}, std::make_index_sequence()); + } + + template + static ALWAYS_INLINE void GenerateCodeForOperationImpl(std::index_sequence) { + #define META_CODE_OPERATION_KIND_GENERATE_CODE(KIND) else if constexpr (Kind == MetaCode::OperationKind::KIND) { CodeGenerator::template KIND(); } + if constexpr (false) { /* ... 
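Because MetaCode is a fixed-capacity aggregate with constexpr members, a whole operation sequence can be built and stored as a compile-time constant, then replayed by the code generator. A small usage sketch (assumed to live inside namespace ams::svc::codegen::impl; MakeOperation's template head, elided above, takes the OperationKind followed by the parameter values, as its body shows):

    /* Usage sketch: build a tiny constexpr "program" of two operations and
     * inspect it at compile time. The template arguments to MakeOperation
     * are assumed from its body above. */
    constexpr MetaCode ExampleCode = [] {
        MetaCode mc;
        mc.AddOperation(MetaCode::MakeOperation<MetaCode::OperationKind::AllocateStackSpace, 0x10>());
        mc.AddOperation(MetaCode::MakeOperation<MetaCode::OperationKind::StoreToStack, 1, 0, 8>());
        return mc;
    }();
    static_assert(ExampleCode.GetNumOperations() == 2);
    static_assert(ExampleCode.GetOperation(0).kind == MetaCode::OperationKind::AllocateStackSpace);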
*/ } + META_CODE_OPERATION_KIND_GENERATE_CODE(SaveRegisters) + META_CODE_OPERATION_KIND_GENERATE_CODE(RestoreRegisters) + META_CODE_OPERATION_KIND_GENERATE_CODE(ClearRegisters) + META_CODE_OPERATION_KIND_GENERATE_CODE(AllocateStackSpace) + META_CODE_OPERATION_KIND_GENERATE_CODE(FreeStackSpace) + META_CODE_OPERATION_KIND_GENERATE_CODE(MoveRegister) + META_CODE_OPERATION_KIND_GENERATE_CODE(LoadFromStack) + META_CODE_OPERATION_KIND_GENERATE_CODE(LoadPairFromStack) + META_CODE_OPERATION_KIND_GENERATE_CODE(StoreToStack) + META_CODE_OPERATION_KIND_GENERATE_CODE(StorePairToStack) + META_CODE_OPERATION_KIND_GENERATE_CODE(Pack) + META_CODE_OPERATION_KIND_GENERATE_CODE(Unpack) + META_CODE_OPERATION_KIND_GENERATE_CODE(LoadStackAddress) + else { static_assert(Kind != Kind, "Unknown MetaOperationKind"); } + #undef META_CODE_OPERATION_KIND_GENERATE_CODE + } + + template + static ALWAYS_INLINE void GenerateCodeForOperation(OperationHolder) { + constexpr auto Operation = UNWRAP_TEMPLATE_CONSTANT(OperationHolder); + GenerateCodeForOperationImpl(GetOperationParameterSequence()); + } + + class MetaCodeGenerator { + private: + using OperationKind = typename MetaCode::OperationKind; + private: + MetaCode meta_code; + public: + constexpr explicit MetaCodeGenerator() : meta_code() { /* ... */ } + + constexpr MetaCode GetMetaCode() const { + return this->meta_code; + } + + constexpr void AddOperationDirectly(MetaCode::Operation op) { + this->meta_code.AddOperation(op); + } + + template + constexpr void SaveRegisters() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void RestoreRegisters() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void ClearRegisters() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void AllocateStackSpace() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void FreeStackSpace() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void MoveRegister() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void LoadFromStack() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void LoadPairFromStack() { + static_assert(Offset % Size == 0); + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void StoreToStack() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void StorePairToStack() { + static_assert(Offset % Size == 0); + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void Pack() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void Unpack() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void LoadStackAddress() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + }; + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_parameter.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_parameter.hpp new file 
mode 100644 index 000000000..3f5d74526 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_parameter.hpp @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include "svc_codegen_impl_common.hpp" + +namespace ams::svc::codegen::impl { + + enum class Storage { + Register, + Stack, + Invalid, + }; + + class Location { + private: + static constexpr size_t InvalidIndex = std::numeric_limits::max(); + private: + Storage storage; + size_t index; + public: + constexpr explicit Location() : storage(Storage::Invalid), index(InvalidIndex) { /* ... */ } + constexpr explicit Location(Storage s, size_t i) : storage(s), index(i) { /* ... */ } + + constexpr size_t GetIndex() const { return this->index; } + constexpr Storage GetStorage() const { return this->storage; } + + constexpr bool IsValid() const { + return this->index != InvalidIndex && this->storage != Storage::Invalid; + } + + constexpr bool operator==(const Location &rhs) const { + return this->index == rhs.index && this->storage == rhs.storage; + } + + constexpr bool operator<(const Location &rhs) const { + if (this->storage < rhs.storage) { + return true; + } else if (this->storage > rhs.storage) { + return false; + } else { + return this->index < rhs.index; + } + } + + constexpr bool operator>(const Location &rhs) const { + if (this->storage > rhs.storage) { + return true; + } else if (this->storage < rhs.storage) { + return false; + } else { + return this->index > rhs.index; + } + } + + constexpr bool operator!=(const Location &rhs) const { + return !(*this == rhs); + } + }; + + class Parameter { + public: + static constexpr size_t MaxLocations = 8; + static constexpr size_t IdentifierLengthMax = 0x40; + class Identifier { + private: + char name[IdentifierLengthMax]; + size_t index; + public: + constexpr explicit Identifier() : name(), index() { /* ... */ } + constexpr explicit Identifier(const char *nm, size_t idx = 0) : name(), index(idx) { + for (size_t i = 0; i < IdentifierLengthMax && nm[i]; i++) { + this->name[i] = nm[i]; + } + } + + constexpr bool operator==(const Identifier &rhs) const { + for (size_t i = 0; i < IdentifierLengthMax; i++) { + if (this->name[i] != rhs.name[i]) { + return false; + } + } + return this->index == rhs.index; + } + + constexpr bool operator!=(const Identifier &rhs) const { + return !(*this == rhs); + } + }; + private: + Identifier identifier; + ArgumentType type; + size_t type_size; + size_t passed_size; + bool passed_by_pointer; + size_t num_locations; + Location locations[MaxLocations]; + public: + constexpr explicit Parameter() + : identifier(), type(ArgumentType::Invalid), type_size(0), passed_size(0), passed_by_pointer(0), num_locations(0), locations() + { /* ... 
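Location's operator< is what the insertion sorts in the layout code key on: Storage::Register precedes Storage::Stack by enumerator order, with ties broken by index, so parameters are visited register-first in ascending register order. Restated compactly (standalone types, illustrative):

    #include <tuple>

    /* Standalone restatement: Location's operator< is a lexicographic
     * (storage, index) comparison, with Register < Stack by enumerator order. */
    enum class Storage { Register, Stack, Invalid };

    constexpr bool Less(Storage ls, unsigned li, Storage rs, unsigned ri) {
        return std::tie(ls, li) < std::tie(rs, ri);
    }

    static_assert( Less(Storage::Register, 7, Storage::Stack, 0));   /* any register before any stack slot */
    static_assert( Less(Storage::Register, 1, Storage::Register, 2));
    static_assert(!Less(Storage::Stack, 0, Storage::Register, 7));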
*/ } + + constexpr explicit Parameter(Identifier id, ArgumentType t, size_t ts, size_t ps, bool p, Location l) + : identifier(id), type(t), type_size(ts), passed_size(ps), passed_by_pointer(p), num_locations(1), locations() + { + this->locations[0] = l; + } + + constexpr Identifier GetIdentifier() const { + return this->identifier; + } + + constexpr bool Is(Identifier rhs) const { + return this->identifier == rhs; + } + + constexpr ArgumentType GetArgumentType() const { + return this->type; + } + + constexpr size_t GetTypeSize() const { + return this->type_size; + } + + constexpr size_t GetPassedSize() const { + return this->passed_size; + } + + constexpr bool IsPassedByPointer() const { + return this->passed_by_pointer; + } + + constexpr size_t GetNumLocations() const { + return this->num_locations; + } + + constexpr Location GetLocation(size_t i) const { + return this->locations[i]; + } + + constexpr void AddLocation(Location l) { + this->locations[this->num_locations++] = l; + } + + constexpr bool UsesLocation(Location l) const { + for (size_t i = 0; i < this->num_locations; i++) { + if (this->locations[i] == l) { + return true; + } + } + return false; + } + + constexpr bool operator==(const Parameter &rhs) const { + if (!(this->identifier == rhs.identifier && + this->type == rhs.type && + this->type_size == rhs.type_size && + this->passed_size == rhs.passed_size && + this->passed_by_pointer == rhs.passed_by_pointer && + this->num_locations == rhs.num_locations)) + { + return false; + } + + for (size_t i = 0; i < this->num_locations; i++) { + if (!(this->locations[i] == rhs.locations[i])) { + return false; + } + } + + return true; + } + + constexpr bool operator!=(const Parameter &rhs) const { + return !(*this == rhs); + } + }; + + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_kernel_svc_wrapper.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_kernel_svc_wrapper.hpp new file mode 100644 index 000000000..beaf8d318 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_kernel_svc_wrapper.hpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include "svc_codegen_impl_kernel_svc_wrapper.hpp" + +namespace ams::svc::codegen { + +#if defined(ATMOSPHERE_ARCH_ARM64) || defined(ATMOSPHERE_ARCH_ARM) + + template + class KernelSvcWrapper { + private: + /* TODO: using Aarch32 = */ + using Aarch64 = impl::KernelSvcWrapperHelper; + using Aarch64From32 = impl::KernelSvcWrapperHelper; + public: +/* Set omit-frame-pointer to prevent GCC from emitting MOV X29, SP instructions. 
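These wrappers execute at SVC entry while caller state is still live in the argument registers, before the generated meta code has saved anything, so a compiler-emitted frame-pointer save would clobber X29. For context, a rough and purely hypothetical sketch of a consumer (the names and table shape are illustrative assumptions, not code from this change): a kernel dispatch table might be populated with entries along the lines of table[svc_id] = KernelSvcWrapper<::ams::kern::svc::SetHeapSize64, ::ams::kern::svc::SetHeapSize64From32>::Call64, with Call64From32 selected instead when dispatching on behalf of 32-bit callers.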
*/ +#pragma GCC push_options +#pragma GCC optimize ("omit-frame-pointer") + + static ALWAYS_INLINE void Call64() { + Aarch64::WrapSvcFunction(); + } + + static ALWAYS_INLINE void Call64From32() { + Aarch64From32::WrapSvcFunction(); + } + +#pragma GCC pop_options + }; + +#else + + #error "Unknown architecture for Kernel SVC Code Generation" + +#endif + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/svc_codegen.hpp b/libraries/libvapours/include/vapours/svc/svc_codegen.hpp new file mode 100644 index 000000000..4095a22a3 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/svc_codegen.hpp @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once + +/* NOTE: This header must not be included by svc.hpp. */ +#include "svc_common.hpp" +#include "svc_types.hpp" +#include "svc_definitions.hpp" + +#include "codegen/svc_codegen_kernel_svc_wrapper.hpp" diff --git a/libraries/libvapours/include/vapours/svc/svc_common.hpp b/libraries/libvapours/include/vapours/svc/svc_common.hpp index f60170667..c32d3fdd8 100644 --- a/libraries/libvapours/include/vapours/svc/svc_common.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_common.hpp @@ -15,7 +15,7 @@ */ #pragma once -#include "../results.hpp" +#include namespace ams::svc { @@ -28,6 +28,33 @@ namespace ams::svc { #error "Unknown target for svc::Handle" #endif + static constexpr size_t MaxWaitSynchronizationHandleCount = 0x40; + + enum PseudoHandle : Handle { + CurrentThread = 0xFFFF8000, + CurrentProcess = 0xFFFF8001, + }; + + constexpr ALWAYS_INLINE bool operator==(const Handle &lhs, const PseudoHandle &rhs) { + return static_cast(lhs) == static_cast(rhs); + } + + constexpr ALWAYS_INLINE bool operator==(const PseudoHandle &lhs, const Handle &rhs) { + return static_cast(lhs) == static_cast(rhs); + } + + constexpr ALWAYS_INLINE bool operator!=(const Handle &lhs, const PseudoHandle &rhs) { + return !(lhs == rhs); + } + + constexpr ALWAYS_INLINE bool operator!=(const PseudoHandle &lhs, const Handle &rhs) { + return !(lhs == rhs); + } + + constexpr ALWAYS_INLINE bool IsPseudoHandle(const Handle &handle) { + return handle == PseudoHandle::CurrentProcess || handle == PseudoHandle::CurrentThread; + } + #ifdef ATMOSPHERE_ARCH_ARM64 diff --git a/libraries/libvapours/include/vapours/svc/svc_definitions.hpp b/libraries/libvapours/include/vapours/svc/svc_definitions.hpp index ea0062e83..fa305f536 100644 --- a/libraries/libvapours/include/vapours/svc/svc_definitions.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_definitions.hpp @@ -20,13 +20,13 @@ #define AMS_SVC_KERN_INPUT_HANDLER(TYPE, NAME) TYPE NAME #define AMS_SVC_KERN_OUTPUT_HANDLER(TYPE, NAME) TYPE *NAME -#define AMS_SVC_KERN_INPTR_HANDLER(TYPE, NAME) ::ams::kern::KUserPointer NAME -#define AMS_SVC_KERN_OUTPTR_HANDLER(TYPE, NAME) ::ams::kern::KUserPointer NAME +#define AMS_SVC_KERN_INPTR_HANDLER(TYPE, NAME) ::ams::kern::svc::KUserPointer NAME +#define 
AMS_SVC_KERN_OUTPTR_HANDLER(TYPE, NAME) ::ams::kern::svc::KUserPointer NAME #define AMS_SVC_USER_INPUT_HANDLER(TYPE, NAME) TYPE NAME #define AMS_SVC_USER_OUTPUT_HANDLER(TYPE, NAME) TYPE *NAME -#define AMS_SVC_USER_INPTR_HANDLER(TYPE, NAME) const TYPE *NAME -#define AMS_SVC_USER_OUTPTR_HANDLER(TYPE, NAME) TYPE *NAME +#define AMS_SVC_USER_INPTR_HANDLER(TYPE, NAME) ::ams::svc::UserPointer NAME +#define AMS_SVC_USER_OUTPTR_HANDLER(TYPE, NAME) ::ams::svc::UserPointer NAME #define AMS_SVC_FOREACH_DEFINITION_IMPL(HANDLER, NAMESPACE, INPUT, OUTPUT, INPTR, OUTPTR) \ HANDLER(0x01, Result, SetHeapSize, OUTPUT(::ams::svc::Address, out_address), INPUT(::ams::svc::Size, size)) \ @@ -181,5 +181,49 @@ namespace ams::svc { } +/* NOTE: Change this to 1 to test the SVC definitions for user-pointer validity. */ +#if 0 +namespace ams::svc::test { + + namespace impl { + + template + struct Validator { + private: + std::array valid; + public: + constexpr Validator(Ts... args) : valid{static_cast(args)...} { /* ... */ } + + constexpr bool IsValid() const { + for (size_t i = 0; i < sizeof...(Ts); i++) { + if (!this->valid[i]) { + return false; + } + } + return true; + } + }; + + } + + + #define AMS_SVC_TEST_EMPTY_HANDLER(TYPE, NAME) true + #define AMS_SVC_TEST_INPTR_HANDLER(TYPE, NAME) (sizeof(::ams::svc::UserPointer) == sizeof(uintptr_t) && std::is_trivially_destructible<::ams::svc::UserPointer>::value) + #define AMS_SVC_TEST_OUTPTR_HANDLER(TYPE, NAME) (sizeof(::ams::svc::UserPointer) == sizeof(uintptr_t) && std::is_trivially_destructible<::ams::svc::UserPointer>::value) + + #define AMS_SVC_TEST_VERIFY_USER_POINTERS(ID, RETURN_TYPE, NAME, ...) \ + static_assert(impl::Validator(__VA_ARGS__).IsValid(), "Invalid User Pointer in svc::" #NAME); + + AMS_SVC_FOREACH_DEFINITION_IMPL(AMS_SVC_TEST_VERIFY_USER_POINTERS, lp64, AMS_SVC_TEST_EMPTY_HANDLER, AMS_SVC_TEST_EMPTY_HANDLER, AMS_SVC_TEST_INPTR_HANDLER, AMS_SVC_TEST_OUTPTR_HANDLER); + AMS_SVC_FOREACH_DEFINITION_IMPL(AMS_SVC_TEST_VERIFY_USER_POINTERS, ilp32, AMS_SVC_TEST_EMPTY_HANDLER, AMS_SVC_TEST_EMPTY_HANDLER, AMS_SVC_TEST_INPTR_HANDLER, AMS_SVC_TEST_OUTPTR_HANDLER); + + #undef AMS_SVC_TEST_VERIFY_USER_POINTERS + #undef AMS_SVC_TEST_INPTR_HANDLER + #undef AMS_SVC_TEST_OUTPTR_HANDLER + #undef AMS_SVC_TEST_EMPTY_HANDLER + +} #endif +#endif /* ATMOSPHERE_IS_STRATOSPHERE */ + diff --git a/libraries/libvapours/include/vapours/svc/svc_select_device_name.hpp b/libraries/libvapours/include/vapours/svc/svc_select_device_name.hpp new file mode 100644 index 000000000..31e91b351 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/svc_select_device_name.hpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#pragma once +#include "svc_common.hpp" + +#if defined(ATMOSPHERE_BOARD_NINTENDO_NX) + + #include "board/nintendo/nx/svc_device_name.hpp" + namespace ams::svc { + using namespace ams::svc::board::nintendo::nx; + } + +#else + + #error "Unknown board for svc::DeviceName" + +#endif diff --git a/libraries/libvapours/include/vapours/svc/svc_select_hardware_constants.hpp b/libraries/libvapours/include/vapours/svc/svc_select_hardware_constants.hpp new file mode 100644 index 000000000..5ab60ee5d --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/svc_select_hardware_constants.hpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#pragma once +#include "svc_common.hpp" + +#if defined(ATMOSPHERE_BOARD_NINTENDO_NX) + + #include "board/nintendo/nx/svc_hardware_constants.hpp" + namespace ams::svc { + using namespace ams::svc::board::nintendo::nx; + } + +#else + + #error "Unknown board for svc hardware constants" + +#endif diff --git a/libraries/libvapours/include/vapours/svc/svc_tick.hpp b/libraries/libvapours/include/vapours/svc/svc_tick.hpp new file mode 100644 index 000000000..bc393cffa --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/svc_tick.hpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#pragma once +#include "svc_common.hpp" +#include "svc_select_hardware_constants.hpp" + +namespace ams::svc { + + class Tick { + public: + static constexpr s64 TicksPerSecond = ::ams::svc::TicksPerSecond; + static constexpr s64 GetTicksPerSecond() { return TicksPerSecond; } + private: + s64 tick; + private: + static constexpr s64 NanoSecondsPerSecond = INT64_C(1'000'000'000); + + static constexpr void DivNs(s64 &out, const s64 value) { + out = value / NanoSecondsPerSecond; + } + + static constexpr void DivModNs(s64 &out_div, s64 &out_mod, const s64 value) { + out_div = value / NanoSecondsPerSecond; + out_mod = value % NanoSecondsPerSecond; + } + + static constexpr s64 ConvertTimeSpanToTickImpl(TimeSpan ts) { + /* Split up timespan and ticks-per-second by ns. */ + s64 ts_div = 0, ts_mod = 0; + s64 tick_div = 0, tick_mod = 0; + DivModNs(ts_div, ts_mod, ts.GetNanoSeconds()); + DivModNs(tick_div, tick_mod, TicksPerSecond); + + /* Convert the timespan into a tick count. 
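In detail: writing ts.GetNanoSeconds() = ts_div * NS + ts_mod and TicksPerSecond = tick_div * NS + tick_mod, with NS = NanoSecondsPerSecond, the exact result ts * TicksPerSecond / NS expands to ts_div * tick_div * NS + ts_div * tick_mod + ts_mod * tick_div + (ts_mod * tick_mod) / NS. Only the final cross term requires a division; it is rounded up below by adding NS - 1 before dividing, so a nonzero timespan never converts to too few ticks, and because ts_mod and tick_mod are each less than NS, the cross-term product stays comfortably within s64 range.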
*/ + s64 value = 0; + DivNs(value, ts_mod * tick_mod + NanoSecondsPerSecond - 1); + + return (ts_div * tick_div) * NanoSecondsPerSecond + ts_div * tick_mod + ts_mod * tick_div + value; + } + public: + constexpr explicit Tick(s64 t = 0) : tick(t) { /* ... */ } + constexpr Tick(TimeSpan ts) : tick(ConvertTimeSpanToTickImpl(ts)) { /* ... */ } + + constexpr operator s64() const { return this->tick; } + + /* Tick arithmetic. */ + constexpr Tick &operator+=(Tick rhs) { this->tick += rhs.tick; return *this; } + constexpr Tick &operator-=(Tick rhs) { this->tick -= rhs.tick; return *this; } + constexpr Tick operator+(Tick rhs) const { Tick r(*this); return r += rhs; } + constexpr Tick operator-(Tick rhs) const { Tick r(*this); return r -= rhs; } + + constexpr Tick &operator+=(TimeSpan rhs) { this->tick += Tick(rhs).tick; return *this; } + constexpr Tick &operator-=(TimeSpan rhs) { this->tick -= Tick(rhs).tick; return *this; } + constexpr Tick operator+(TimeSpan rhs) const { Tick r(*this); return r += rhs; } + constexpr Tick operator-(TimeSpan rhs) const { Tick r(*this); return r -= rhs; } + }; + +} diff --git a/libraries/libvapours/include/vapours/svc/svc_types.hpp b/libraries/libvapours/include/vapours/svc/svc_types.hpp index 70ebb09a3..078172c16 100644 --- a/libraries/libvapours/include/vapours/svc/svc_types.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_types.hpp @@ -16,6 +16,7 @@ #pragma once #include "svc_common.hpp" +#include "svc_tick.hpp" #include "svc_types_common.hpp" #include "svc_types_base.hpp" #include "svc_types_dd.hpp" diff --git a/libraries/libvapours/include/vapours/svc/svc_types_common.hpp b/libraries/libvapours/include/vapours/svc/svc_types_common.hpp index 72f5b13df..e10c313de 100644 --- a/libraries/libvapours/include/vapours/svc/svc_types_common.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_types_common.hpp @@ -17,6 +17,12 @@ #pragma once #include "svc_common.hpp" +namespace ams::kern::svc::impl { + + struct KUserPointerTag{}; + +} + namespace ams::svc { /* Utility classes required to encode information into the type system for SVC veneers. */ @@ -40,6 +46,25 @@ namespace ams::svc { static_assert(sizeof(Address) == sizeof(uintptr_t)); static_assert(std::is_trivially_destructible
::value); + namespace impl { + + struct UserPointerTag{}; + + } + + template + struct UserPointer : impl::UserPointerTag { + public: + static_assert(std::is_pointer::value); + static constexpr bool IsInput = std::is_const::type>::value; + private: + T pointer; + }; + + template + static constexpr inline bool IsUserPointer = std::is_base_of::value; + + using ProgramId = u64; using PhysicalAddress = u64; /* Memory types. */ @@ -172,9 +197,9 @@ namespace ams::svc { /* Synchronization types. */ enum SignalType : u32 { - SignalType_Signal = 0, - SignalType_SignalAndIfEqual = 1, - SignalType_SignalAndModifyBasedOnWaitingThreadCountIfEqual = 2, + SignalType_Signal = 0, + SignalType_SignalAndIncrementIfEqual = 1, + SignalType_SignalAndModifyByWaitingCountIfEqual = 2, }; enum ArbitrationType : u32 { @@ -251,6 +276,11 @@ namespace ams::svc { ThreadActivity_Paused = 1, }; + constexpr size_t ThreadLocalRegionSize = 0x200; + + constexpr s32 LowestThreadPriority = 63; + constexpr s32 HighestThreadPriority = 0; + /* Process types. */ enum ProcessInfoType : u32 { ProcessInfoType_ProcessState = 0, @@ -262,9 +292,9 @@ namespace ams::svc { ProcessState_Running = 2, ProcessState_Crashed = 3, ProcessState_RunningAttached = 4, - ProcessState_Exiting = 5, - ProcessState_Exited = 6, - ProcessState_DebugSuspended = 7, + ProcessState_Terminating = 5, + ProcessState_Terminated = 6, + ProcessState_DebugBreak = 7, }; enum ProcessExitReason : u32 { diff --git a/libraries/libvapours/include/vapours/svc/svc_types_dd.hpp b/libraries/libvapours/include/vapours/svc/svc_types_dd.hpp index ece84ef6e..2a2529496 100644 --- a/libraries/libvapours/include/vapours/svc/svc_types_dd.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_types_dd.hpp @@ -15,16 +15,8 @@ */ #pragma once #include "svc_types_common.hpp" - -#ifdef ATMOSPHERE_BOARD_NINTENDO_SWITCH - - #include "board/nintendo/switch/svc_device_name.hpp" - -#else - - #error "Unknown board for svc::DeviceName" - -#endif +#include "svc_select_hardware_constants.hpp" +#include "svc_select_device_name.hpp" namespace ams::svc { diff --git a/libraries/libvapours/include/vapours/timespan.hpp b/libraries/libvapours/include/vapours/timespan.hpp new file mode 100644 index 000000000..b269e5d4b --- /dev/null +++ b/libraries/libvapours/include/vapours/timespan.hpp @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include "defines.hpp" +#include + +namespace ams { + + class TimeSpan { + private: + s64 ns; + private: + constexpr explicit ALWAYS_INLINE TimeSpan(s64 v) : ns(v) { /* ... */ } + public: + constexpr explicit ALWAYS_INLINE TimeSpan() : TimeSpan(0) { /* ... 
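A zero-length span. The factory functions below chain unit by unit, so, for example, TimeSpan::FromSeconds(1), TimeSpan::FromMilliSeconds(1000), and TimeSpan(std::chrono::seconds(1)) all hold the same 1'000'000'000 nanoseconds and compare equal.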
*/ } + + static constexpr ALWAYS_INLINE TimeSpan FromNanoSeconds(s64 ns) { return TimeSpan(ns); } + static constexpr ALWAYS_INLINE TimeSpan FromMicroSeconds(s64 us) { return FromNanoSeconds(us * INT64_C(1000)); } + static constexpr ALWAYS_INLINE TimeSpan FromMilliSeconds(s64 ms) { return FromMicroSeconds(ms * INT64_C(1000)); } + static constexpr ALWAYS_INLINE TimeSpan FromSeconds(s64 s) { return FromMilliSeconds(s * INT64_C(1000)); } + static constexpr ALWAYS_INLINE TimeSpan FromMinutes(s64 m) { return FromSeconds(m * INT64_C(60)); } + static constexpr ALWAYS_INLINE TimeSpan FromHours(s64 h) { return FromMinutes(h * INT64_C(60)); } + static constexpr ALWAYS_INLINE TimeSpan FromDays(s64 d) { return FromHours(d * INT64_C(24)); } + + template<typename Rep, typename Period> + constexpr explicit ALWAYS_INLINE TimeSpan(const std::chrono::duration<Rep, Period>& c) : TimeSpan(static_cast<std::chrono::nanoseconds>(c).count()) { /* ... */ } + public: + constexpr ALWAYS_INLINE s64 GetNanoSeconds() const { return this->ns; } + constexpr ALWAYS_INLINE s64 GetMicroSeconds() const { return this->GetNanoSeconds() / (INT64_C(1000)); } + constexpr ALWAYS_INLINE s64 GetMilliSeconds() const { return this->GetNanoSeconds() / (INT64_C(1000) * INT64_C(1000)); } + constexpr ALWAYS_INLINE s64 GetSeconds() const { return this->GetNanoSeconds() / (INT64_C(1000) * INT64_C(1000) * INT64_C(1000)); } + constexpr ALWAYS_INLINE s64 GetMinutes() const { return this->GetNanoSeconds() / (INT64_C(1000) * INT64_C(1000) * INT64_C(1000) * INT64_C( 60)); } + constexpr ALWAYS_INLINE s64 GetHours() const { return this->GetNanoSeconds() / (INT64_C(1000) * INT64_C(1000) * INT64_C(1000) * INT64_C( 60) * INT64_C( 60)); } + constexpr ALWAYS_INLINE s64 GetDays() const { return this->GetNanoSeconds() / (INT64_C(1000) * INT64_C(1000) * INT64_C(1000) * INT64_C( 60) * INT64_C( 60) * INT64_C( 24)); } + + constexpr ALWAYS_INLINE bool operator==(const TimeSpan &rhs) const { return this->ns == rhs.ns; } + constexpr ALWAYS_INLINE bool operator!=(const TimeSpan &rhs) const { return this->ns != rhs.ns; } + constexpr ALWAYS_INLINE bool operator<=(const TimeSpan &rhs) const { return this->ns <= rhs.ns; } + constexpr ALWAYS_INLINE bool operator>=(const TimeSpan &rhs) const { return this->ns >= rhs.ns; } + constexpr ALWAYS_INLINE bool operator< (const TimeSpan &rhs) const { return this->ns < rhs.ns; } + constexpr ALWAYS_INLINE bool operator> (const TimeSpan &rhs) const { return this->ns > rhs.ns; } + + constexpr ALWAYS_INLINE TimeSpan &operator+=(TimeSpan rhs) { this->ns += rhs.ns; return *this; } + constexpr ALWAYS_INLINE TimeSpan &operator-=(TimeSpan rhs) { this->ns -= rhs.ns; return *this; } + constexpr ALWAYS_INLINE TimeSpan operator+(TimeSpan rhs) const { TimeSpan r(*this); return r += rhs; } + constexpr ALWAYS_INLINE TimeSpan operator-(TimeSpan rhs) const { TimeSpan r(*this); return r -= rhs; } + }; + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/util.hpp b/libraries/libvapours/include/vapours/util.hpp index 532abd3c5..2f1035ecc 100644 --- a/libraries/libvapours/include/vapours/util.hpp +++ b/libraries/libvapours/include/vapours/util.hpp @@ -21,8 +21,11 @@ #include "util/util_size.hpp" #include "util/util_fourcc.hpp" #include "util/util_bitpack.hpp" +#include "util/util_bitset.hpp" #include "util/util_scope_guard.hpp" +#include "util/util_specialization_of.hpp" #include "util/util_typed_storage.hpp" #include "util/util_intrusive_list.hpp" #include "util/util_intrusive_red_black_tree.hpp" -#include "util/util_bitutil.hpp" +#include "util/util_tinymt.hpp" +#include 
"util/util_bitutil.hpp" \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/util/util_bitset.hpp b/libraries/libvapours/include/vapours/util/util_bitset.hpp new file mode 100644 index 000000000..e3f9ae732 --- /dev/null +++ b/libraries/libvapours/include/vapours/util/util_bitset.hpp @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include "../defines.hpp" + +namespace ams::util { + + namespace impl { + + template + class BitSet { + private: + static_assert(std::is_integral::value); + static_assert(std::is_unsigned::value); + static_assert(sizeof(Storage) <= sizeof(u64)); + + static constexpr size_t FlagsPerWord = BITSIZEOF(Storage); + static constexpr size_t NumWords = util::AlignUp(N, FlagsPerWord) / FlagsPerWord; + + static constexpr ALWAYS_INLINE auto CountLeadingZeroImpl(Storage word) { + return __builtin_clzll(static_cast(word)) - (BITSIZEOF(unsigned long long) - FlagsPerWord); + } + + static constexpr ALWAYS_INLINE Storage GetBitMask(size_t bit) { + return Storage(1) << (FlagsPerWord - 1 - bit); + } + private: + Storage words[NumWords]; + public: + constexpr ALWAYS_INLINE BitSet() : words() { /* ... */ } + + constexpr ALWAYS_INLINE void SetBit(size_t i) { + this->words[i / FlagsPerWord] |= GetBitMask(i % FlagsPerWord); + } + + constexpr ALWAYS_INLINE void ClearBit(size_t i) { + this->words[i / FlagsPerWord] &= ~GetBitMask(i % FlagsPerWord); + } + + constexpr ALWAYS_INLINE size_t CountLeadingZero() const { + for (size_t i = 0; i < NumWords; i++) { + if (this->words[i]) { + return FlagsPerWord * i + CountLeadingZeroImpl(this->words[i]); + } + } + return FlagsPerWord * NumWords; + } + + constexpr ALWAYS_INLINE size_t GetNextSet(size_t n) const { + for (size_t i = (n + 1) / FlagsPerWord; i < NumWords; i++) { + Storage word = this->words[i]; + if (!util::IsAligned(n + 1, FlagsPerWord)) { + word &= GetBitMask(n % FlagsPerWord) - 1; + } + if (word) { + return FlagsPerWord * i + CountLeadingZeroImpl(word); + } + } + return FlagsPerWord * NumWords; + } + }; + + } + + template + using BitSet8 = impl::BitSet; + + template + using BitSet16 = impl::BitSet; + + template + using BitSet32 = impl::BitSet; + + template + using BitSet64 = impl::BitSet; + +} diff --git a/libraries/libvapours/include/vapours/util/util_intrusive_list.hpp b/libraries/libvapours/include/vapours/util/util_intrusive_list.hpp index 0c1cf1468..771aa3843 100644 --- a/libraries/libvapours/include/vapours/util/util_intrusive_list.hpp +++ b/libraries/libvapours/include/vapours/util/util_intrusive_list.hpp @@ -183,7 +183,7 @@ namespace ams::util { } }; public: - IntrusiveListImpl() : root_node() { /* ... */ } + constexpr IntrusiveListImpl() : root_node() { /* ... */ } /* Iterator accessors. */ iterator begin() { @@ -405,7 +405,7 @@ namespace ams::util { return Traits::GetParent(node); } public: - IntrusiveList() : impl() { /* ... */ } + constexpr IntrusiveList() : impl() { /* ... */ } /* Iterator accessors. 
*/ iterator begin() { @@ -566,6 +566,38 @@ namespace ams::util { static_assert(std::addressof(GetParent(GetNode(GetReference(DerivedStorage)))) == GetPointer(DerivedStorage)); }; + template> + class IntrusiveListMemberTraitsDeferredAssert; + + template + class IntrusiveListMemberTraitsDeferredAssert { + public: + using ListType = IntrusiveList; + + static constexpr bool IsValid() { + TYPED_STORAGE(Derived) DerivedStorage = {}; + return std::addressof(GetParent(GetNode(GetReference(DerivedStorage)))) == GetPointer(DerivedStorage); + } + private: + friend class IntrusiveList; + + static constexpr IntrusiveListNode &GetNode(Derived &parent) { + return parent.*Member; + } + + static constexpr IntrusiveListNode const &GetNode(Derived const &parent) { + return parent.*Member; + } + + static constexpr Derived &GetParent(IntrusiveListNode &node) { + return util::GetParentReference(&node); + } + + static constexpr Derived const &GetParent(IntrusiveListNode const &node) { + return util::GetParentReference(&node); + } + }; + template class IntrusiveListBaseNode : public IntrusiveListNode{}; diff --git a/libraries/libvapours/include/vapours/util/util_intrusive_red_black_tree.hpp b/libraries/libvapours/include/vapours/util/util_intrusive_red_black_tree.hpp index 538dd5ee5..8d63f1982 100644 --- a/libraries/libvapours/include/vapours/util/util_intrusive_red_black_tree.hpp +++ b/libraries/libvapours/include/vapours/util/util_intrusive_red_black_tree.hpp @@ -125,7 +125,7 @@ namespace ams::util { } static constexpr inline IntrusiveRedBlackTreeNode *GetPrev(IntrusiveRedBlackTreeNode *node) { - return RB_NEXT(IntrusiveRedBlackTreeRoot, nullptr, node); + return RB_PREV(IntrusiveRedBlackTreeRoot, nullptr, node); } static constexpr inline IntrusiveRedBlackTreeNode const *GetPrev(IntrusiveRedBlackTreeNode const *node) { @@ -133,7 +133,7 @@ namespace ams::util { } /* Define accessors using RB_* functions. 
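These wrap the BSD-style sys/tree.h red-black tree macros used by the codebase: RB_INIT prepares an empty root, RB_INSERT and RB_REMOVE rebalance on mutation, RB_MIN and RB_MAX fetch the ordered extremes, and RB_NEXT and RB_PREV step through nodes in key order; the surrounding class only layers type-safe iterators and parent recovery on top.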
*/ - void InitializeImpl() { + constexpr ALWAYS_INLINE void InitializeImpl() { RB_INIT(&this->root); } @@ -146,7 +146,7 @@ namespace ams::util { } IntrusiveRedBlackTreeNode *GetMaxImpl() const { - return RB_MIN(IntrusiveRedBlackTreeRoot, const_cast(&this->root)); + return RB_MAX(IntrusiveRedBlackTreeRoot, const_cast(&this->root)); } IntrusiveRedBlackTreeNode *InsertImpl(IntrusiveRedBlackTreeNode *node) { @@ -166,7 +166,7 @@ namespace ams::util { } public: - IntrusiveRedBlackTree() { + constexpr ALWAYS_INLINE IntrusiveRedBlackTree() : root() { this->InitializeImpl(); } @@ -187,6 +187,14 @@ namespace ams::util { return const_iterator(Traits::GetParent(static_cast(nullptr))); } + const_iterator cbegin() const { + return this->begin(); + } + + const_iterator cend() const { + return this->end(); + } + iterator iterator_to(reference ref) { return iterator(&ref); } @@ -201,19 +209,19 @@ namespace ams::util { } reference back() { - return Traits::GetParent(this->GetMaxImpl()); + return *Traits::GetParent(this->GetMaxImpl()); } const_reference back() const { - return Traits::GetParent(this->GetMaxImpl()); + return *Traits::GetParent(this->GetMaxImpl()); } reference front() { - return Traits::GetParent(this->GetMinImpl()); + return *Traits::GetParent(this->GetMinImpl()); } const_reference front() const { - return Traits::GetParent(this->GetMinImpl()); + return *Traits::GetParent(this->GetMinImpl()); } iterator insert(reference ref) { @@ -244,7 +252,7 @@ namespace ams::util { class IntrusiveRedBlackTreeMemberTraits { public: template - using ListType = IntrusiveRedBlackTree; + using TreeType = IntrusiveRedBlackTree; private: template friend class IntrusiveRedBlackTree; @@ -266,7 +274,7 @@ namespace ams::util { } private: static constexpr TYPED_STORAGE(Derived) DerivedStorage = {}; - static_assert(std::addressof(GetParent(GetNode(GetPointer(DerivedStorage)))) == GetPointer(DerivedStorage)); + static_assert(GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage)); }; template @@ -276,7 +284,7 @@ namespace ams::util { class IntrusiveRedBlackTreeBaseTraits { public: template - using ListType = IntrusiveRedBlackTree; + using TreeType = IntrusiveRedBlackTree; private: template friend class IntrusiveRedBlackTree; diff --git a/libraries/libvapours/include/vapours/util/util_parent_of_member.hpp b/libraries/libvapours/include/vapours/util/util_parent_of_member.hpp index 7f7445d30..66f9b1b99 100644 --- a/libraries/libvapours/include/vapours/util/util_parent_of_member.hpp +++ b/libraries/libvapours/include/vapours/util/util_parent_of_member.hpp @@ -26,13 +26,15 @@ namespace ams::util { struct OffsetOfUnionHolder { template union UnionImpl { - using PaddingMember = std::array; + using PaddingMember = char; static constexpr size_t GetOffset() { return Offset; } + #pragma pack(push, 1) struct { PaddingMember padding[Offset]; MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1]; } data; + #pragma pack(pop) UnionImpl next_union; }; @@ -47,12 +49,12 @@ namespace ams::util { }; template - union UnionImpl { /* Empty */ }; + union UnionImpl { /* Empty ... 
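This is the terminal specialization that stops the recursive chain. Each non-empty level overlays, at byte offset Offset (made exact by the pack(1) padding array), an array of MemberType on top of the parent's storage; OffsetOfImpl walks these levels until the target pointer-to-member resolves to an exact element of that level's array, and (next - start) * sizeof(MemberType) + Offset then yields the member's byte offset. The indirection exists because a pointer-to-member cannot be converted to a raw byte offset directly inside a constant expression.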
*/ }; }; template struct OffsetOfCalculator { - using UnionHolder = typename OffsetOfUnionHolder::template UnionImpl; + using UnionHolder = typename OffsetOfUnionHolder::template UnionImpl; union Union { char c; UnionHolder first_union; @@ -82,14 +84,14 @@ namespace ams::util { const auto next = GetNextAddress(start, target); if (next != target) { - if constexpr (Offset < sizeof(MemberType) / alignof(MemberType)) { + if constexpr (Offset < sizeof(MemberType) - 1) { return OffsetOfImpl(member, cur_union.next_union); } else { - std::abort(); + __builtin_unreachable(); } } - return (next - start) * sizeof(MemberType) + Offset * alignof(MemberType); + return (next - start) * sizeof(MemberType) + Offset; } diff --git a/libraries/libvapours/include/vapours/util/util_specialization_of.hpp b/libraries/libvapours/include/vapours/util/util_specialization_of.hpp new file mode 100644 index 000000000..ce3454def --- /dev/null +++ b/libraries/libvapours/include/vapours/util/util_specialization_of.hpp @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include "../defines.hpp" + +namespace ams::util { + + template class Template> + struct is_specialization_of : std::false_type{}; + + template