diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index 74a22e6b0..28c48beb5 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -86,6 +86,7 @@ add_library(citra_common STATIC file_util.cpp file_util.h hash.h + intrusive_list.h linear_disk_cache.h literals.h logging/backend.cpp @@ -107,8 +108,11 @@ add_library(citra_common STATIC microprofile.h microprofileui.h misc.cpp + page_table.cpp + page_table.h param_package.cpp param_package.h + parent_of_member.h polyfill_thread.h precompiled_headers.h quaternion.h diff --git a/src/common/common_funcs.h b/src/common/common_funcs.h index ff7452f6c..296e0d522 100644 --- a/src/common/common_funcs.h +++ b/src/common/common_funcs.h @@ -110,6 +110,14 @@ __declspec(dllimport) void __stdcall DebugBreak(void); return static_cast(key) == 0; \ } +#define CITRA_NON_COPYABLE(cls) \ + cls(const cls&) = delete; \ + cls& operator=(const cls&) = delete + +#define CITRA_NON_MOVEABLE(cls) \ + cls(cls&&) = delete; \ + cls& operator=(cls&&) = delete + // Generic function to get last error message. // Call directly after the command or use the error num. // This function might change the error code. diff --git a/src/common/intrusive_list.h b/src/common/intrusive_list.h new file mode 100644 index 000000000..edc9a9853 --- /dev/null +++ b/src/common/intrusive_list.h @@ -0,0 +1,631 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "common/common_funcs.h" +#include "common/parent_of_member.h" + +namespace Common { + +// Forward declare implementation class for Node. +namespace impl { + +class IntrusiveListImpl; + +} + +class IntrusiveListNode { + CITRA_NON_COPYABLE(IntrusiveListNode); + +private: + friend class impl::IntrusiveListImpl; + + IntrusiveListNode* m_prev; + IntrusiveListNode* m_next; + +public: + constexpr IntrusiveListNode() : m_prev(this), m_next(this) {} + + constexpr bool IsLinked() const { + return m_next != this; + } + +private: + constexpr void LinkPrev(IntrusiveListNode* node) { + // We can't link an already linked node. + ASSERT(!node->IsLinked()); + this->SplicePrev(node, node); + } + + constexpr void SplicePrev(IntrusiveListNode* first, IntrusiveListNode* last) { + // Splice a range into the list. + auto last_prev = last->m_prev; + first->m_prev = m_prev; + last_prev->m_next = this; + m_prev->m_next = first; + m_prev = last_prev; + } + + constexpr void LinkNext(IntrusiveListNode* node) { + // We can't link an already linked node. + ASSERT(!node->IsLinked()); + return this->SpliceNext(node, node); + } + + constexpr void SpliceNext(IntrusiveListNode* first, IntrusiveListNode* last) { + // Splice a range into the list. + auto last_prev = last->m_prev; + first->m_prev = this; + last_prev->m_next = m_next; + m_next->m_prev = last_prev; + m_next = first; + } + + constexpr void Unlink() { + this->Unlink(m_next); + } + + constexpr void Unlink(IntrusiveListNode* last) { + // Unlink a node from a next node. 
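+        // This removes the range of nodes [this, last) from the list containing them: the
+        // surrounding list is re-linked around the gap (m_prev <-> last), and the removed
+        // range is closed into its own ring so IsLinked() stays meaningful for those nodes.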
+ auto last_prev = last->m_prev; + m_prev->m_next = last; + last->m_prev = m_prev; + last_prev->m_next = this; + m_prev = last_prev; + } + + constexpr IntrusiveListNode* GetPrev() { + return m_prev; + } + + constexpr const IntrusiveListNode* GetPrev() const { + return m_prev; + } + + constexpr IntrusiveListNode* GetNext() { + return m_next; + } + + constexpr const IntrusiveListNode* GetNext() const { + return m_next; + } +}; +// DEPRECATED: static_assert(std::is_literal_type::value); + +namespace impl { + +class IntrusiveListImpl { + CITRA_NON_COPYABLE(IntrusiveListImpl); + +private: + IntrusiveListNode m_root_node; + +public: + template + class Iterator; + + using value_type = IntrusiveListNode; + using size_type = size_t; + using difference_type = ptrdiff_t; + using pointer = value_type*; + using const_pointer = const value_type*; + using reference = value_type&; + using const_reference = const value_type&; + using iterator = Iterator; + using const_iterator = Iterator; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + template + class Iterator { + public: + using iterator_category = std::bidirectional_iterator_tag; + using value_type = typename IntrusiveListImpl::value_type; + using difference_type = typename IntrusiveListImpl::difference_type; + using pointer = + std::conditional_t; + using reference = std::conditional_t; + + private: + pointer m_node; + + public: + constexpr explicit Iterator(pointer n) : m_node(n) {} + + constexpr bool operator==(const Iterator& rhs) const { + return m_node == rhs.m_node; + } + + constexpr pointer operator->() const { + return m_node; + } + + constexpr reference operator*() const { + return *m_node; + } + + constexpr Iterator& operator++() { + m_node = m_node->m_next; + return *this; + } + + constexpr Iterator& operator--() { + m_node = m_node->m_prev; + return *this; + } + + constexpr Iterator operator++(int) { + const Iterator it{*this}; + ++(*this); + return it; + } + + constexpr Iterator operator--(int) { + const Iterator it{*this}; + --(*this); + return it; + } + + constexpr operator Iterator() const { + return Iterator(m_node); + } + + constexpr Iterator GetNonConstIterator() const { + return Iterator(const_cast(m_node)); + } + }; + +public: + constexpr IntrusiveListImpl() : m_root_node() {} + + // Iterator accessors. + constexpr iterator begin() { + return iterator(m_root_node.GetNext()); + } + + constexpr const_iterator begin() const { + return const_iterator(m_root_node.GetNext()); + } + + constexpr iterator end() { + return iterator(std::addressof(m_root_node)); + } + + constexpr const_iterator end() const { + return const_iterator(std::addressof(m_root_node)); + } + + constexpr iterator iterator_to(reference v) { + // Only allow iterator_to for values in lists. + ASSERT(v.IsLinked()); + return iterator(std::addressof(v)); + } + + constexpr const_iterator iterator_to(const_reference v) const { + // Only allow iterator_to for values in lists. + ASSERT(v.IsLinked()); + return const_iterator(std::addressof(v)); + } + + // Content management. 
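+    // Note that size() walks every node (std::distance over the list), so it is O(n);
+    // prefer empty() when only an emptiness check is needed.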
+ constexpr bool empty() const { + return !m_root_node.IsLinked(); + } + + constexpr size_type size() const { + return static_cast(std::distance(this->begin(), this->end())); + } + + constexpr reference back() { + return *m_root_node.GetPrev(); + } + + constexpr const_reference back() const { + return *m_root_node.GetPrev(); + } + + constexpr reference front() { + return *m_root_node.GetNext(); + } + + constexpr const_reference front() const { + return *m_root_node.GetNext(); + } + + constexpr void push_back(reference node) { + m_root_node.LinkPrev(std::addressof(node)); + } + + constexpr void push_front(reference node) { + m_root_node.LinkNext(std::addressof(node)); + } + + constexpr void pop_back() { + m_root_node.GetPrev()->Unlink(); + } + + constexpr void pop_front() { + m_root_node.GetNext()->Unlink(); + } + + constexpr iterator insert(const_iterator pos, reference node) { + pos.GetNonConstIterator()->LinkPrev(std::addressof(node)); + return iterator(std::addressof(node)); + } + + constexpr void splice(const_iterator pos, IntrusiveListImpl& o) { + splice_impl(pos, o.begin(), o.end()); + } + + constexpr void splice(const_iterator pos, IntrusiveListImpl&, const_iterator first) { + const_iterator last(first); + std::advance(last, 1); + splice_impl(pos, first, last); + } + + constexpr void splice(const_iterator pos, IntrusiveListImpl&, const_iterator first, + const_iterator last) { + splice_impl(pos, first, last); + } + + constexpr iterator erase(const_iterator pos) { + if (pos == this->end()) { + return this->end(); + } + iterator it(pos.GetNonConstIterator()); + (it++)->Unlink(); + return it; + } + + constexpr void clear() { + while (!this->empty()) { + this->pop_front(); + } + } + +private: + constexpr void splice_impl(const_iterator _pos, const_iterator _first, const_iterator _last) { + if (_first == _last) { + return; + } + iterator pos(_pos.GetNonConstIterator()); + iterator first(_first.GetNonConstIterator()); + iterator last(_last.GetNonConstIterator()); + first->Unlink(std::addressof(*last)); + pos->SplicePrev(std::addressof(*first), std::addressof(*first)); + } +}; + +} // namespace impl + +template +class IntrusiveList { + CITRA_NON_COPYABLE(IntrusiveList); + +private: + impl::IntrusiveListImpl m_impl; + +public: + template + class Iterator; + + using value_type = T; + using size_type = size_t; + using difference_type = ptrdiff_t; + using pointer = value_type*; + using const_pointer = const value_type*; + using reference = value_type&; + using const_reference = const value_type&; + using iterator = Iterator; + using const_iterator = Iterator; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + template + class Iterator { + public: + friend class Common::IntrusiveList; + + using ImplIterator = + std::conditional_t; + + using iterator_category = std::bidirectional_iterator_tag; + using value_type = typename IntrusiveList::value_type; + using difference_type = typename IntrusiveList::difference_type; + using pointer = + std::conditional_t; + using reference = + std::conditional_t; + + private: + ImplIterator m_iterator; + + private: + constexpr explicit Iterator(ImplIterator it) : m_iterator(it) {} + + constexpr ImplIterator GetImplIterator() const { + return m_iterator; + } + + public: + constexpr bool operator==(const Iterator& rhs) const { + return m_iterator == rhs.m_iterator; + } + + constexpr pointer operator->() const { + return std::addressof(Traits::GetParent(*m_iterator)); + } + + constexpr reference operator*() 
const { + return Traits::GetParent(*m_iterator); + } + + constexpr Iterator& operator++() { + ++m_iterator; + return *this; + } + + constexpr Iterator& operator--() { + --m_iterator; + return *this; + } + + constexpr Iterator operator++(int) { + const Iterator it{*this}; + ++m_iterator; + return it; + } + + constexpr Iterator operator--(int) { + const Iterator it{*this}; + --m_iterator; + return it; + } + + constexpr operator Iterator() const { + return Iterator(m_iterator); + } + }; + +private: + static constexpr IntrusiveListNode& GetNode(reference ref) { + return Traits::GetNode(ref); + } + + static constexpr IntrusiveListNode const& GetNode(const_reference ref) { + return Traits::GetNode(ref); + } + + static constexpr reference GetParent(IntrusiveListNode& node) { + return Traits::GetParent(node); + } + + static constexpr const_reference GetParent(IntrusiveListNode const& node) { + return Traits::GetParent(node); + } + +public: + constexpr IntrusiveList() : m_impl() {} + + // Iterator accessors. + constexpr iterator begin() { + return iterator(m_impl.begin()); + } + + constexpr const_iterator begin() const { + return const_iterator(m_impl.begin()); + } + + constexpr iterator end() { + return iterator(m_impl.end()); + } + + constexpr const_iterator end() const { + return const_iterator(m_impl.end()); + } + + constexpr const_iterator cbegin() const { + return this->begin(); + } + + constexpr const_iterator cend() const { + return this->end(); + } + + constexpr reverse_iterator rbegin() { + return reverse_iterator(this->end()); + } + + constexpr const_reverse_iterator rbegin() const { + return const_reverse_iterator(this->end()); + } + + constexpr reverse_iterator rend() { + return reverse_iterator(this->begin()); + } + + constexpr const_reverse_iterator rend() const { + return const_reverse_iterator(this->begin()); + } + + constexpr const_reverse_iterator crbegin() const { + return this->rbegin(); + } + + constexpr const_reverse_iterator crend() const { + return this->rend(); + } + + constexpr iterator iterator_to(reference v) { + return iterator(m_impl.iterator_to(GetNode(v))); + } + + constexpr const_iterator iterator_to(const_reference v) const { + return const_iterator(m_impl.iterator_to(GetNode(v))); + } + + // Content management. 
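+    // The list never owns its elements: the operations below only link or unlink the embedded
+    // IntrusiveListNode, so erase() and clear() do not destroy the referenced objects.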
+ constexpr bool empty() const { + return m_impl.empty(); + } + + constexpr size_type size() const { + return m_impl.size(); + } + + constexpr reference back() { + return GetParent(m_impl.back()); + } + + constexpr const_reference back() const { + return GetParent(m_impl.back()); + } + + constexpr reference front() { + return GetParent(m_impl.front()); + } + + constexpr const_reference front() const { + return GetParent(m_impl.front()); + } + + constexpr void push_back(reference ref) { + m_impl.push_back(GetNode(ref)); + } + + constexpr void push_front(reference ref) { + m_impl.push_front(GetNode(ref)); + } + + constexpr void pop_back() { + m_impl.pop_back(); + } + + constexpr void pop_front() { + m_impl.pop_front(); + } + + constexpr iterator insert(const_iterator pos, reference ref) { + return iterator(m_impl.insert(pos.GetImplIterator(), GetNode(ref))); + } + + constexpr void splice(const_iterator pos, IntrusiveList& o) { + m_impl.splice(pos.GetImplIterator(), o.m_impl); + } + + constexpr void splice(const_iterator pos, IntrusiveList& o, const_iterator first) { + m_impl.splice(pos.GetImplIterator(), o.m_impl, first.GetImplIterator()); + } + + constexpr void splice(const_iterator pos, IntrusiveList& o, const_iterator first, + const_iterator last) { + m_impl.splice(pos.GetImplIterator(), o.m_impl, first.GetImplIterator(), + last.GetImplIterator()); + } + + constexpr iterator erase(const_iterator pos) { + return iterator(m_impl.erase(pos.GetImplIterator())); + } + + constexpr void clear() { + m_impl.clear(); + } +}; + +template > +class IntrusiveListMemberTraits; + +template +class IntrusiveListMemberTraits { +public: + using ListType = IntrusiveList; + +private: + friend class IntrusiveList; + + static constexpr IntrusiveListNode& GetNode(Derived& parent) { + return parent.*Member; + } + + static constexpr IntrusiveListNode const& GetNode(Derived const& parent) { + return parent.*Member; + } + + static Derived& GetParent(IntrusiveListNode& node) { + return Common::GetParentReference(std::addressof(node)); + } + + static Derived const& GetParent(IntrusiveListNode const& node) { + return Common::GetParentReference(std::addressof(node)); + } +}; + +template > +class IntrusiveListMemberTraitsByNonConstexprOffsetOf; + +template +class IntrusiveListMemberTraitsByNonConstexprOffsetOf { +public: + using ListType = IntrusiveList; + +private: + friend class IntrusiveList; + + static constexpr IntrusiveListNode& GetNode(Derived& parent) { + return parent.*Member; + } + + static constexpr IntrusiveListNode const& GetNode(Derived const& parent) { + return parent.*Member; + } + + static Derived& GetParent(IntrusiveListNode& node) { + return *reinterpret_cast(reinterpret_cast(std::addressof(node)) - + GetOffset()); + } + + static Derived const& GetParent(IntrusiveListNode const& node) { + return *reinterpret_cast( + reinterpret_cast(std::addressof(node)) - GetOffset()); + } + + static uintptr_t GetOffset() { + return reinterpret_cast(std::addressof(reinterpret_cast(0)->*Member)); + } +}; + +template +class IntrusiveListBaseNode : public IntrusiveListNode {}; + +template +class IntrusiveListBaseTraits { +public: + using ListType = IntrusiveList; + +private: + friend class IntrusiveList; + + static constexpr IntrusiveListNode& GetNode(Derived& parent) { + return static_cast( + static_cast&>(parent)); + } + + static constexpr IntrusiveListNode const& GetNode(Derived const& parent) { + return static_cast( + static_cast&>(parent)); + } + + static constexpr Derived& GetParent(IntrusiveListNode& node) { + 
return static_cast(static_cast&>(node)); + } + + static constexpr Derived const& GetParent(IntrusiveListNode const& node) { + return static_cast( + static_cast&>(node)); + } +}; + +} // namespace Common diff --git a/src/common/page_table.cpp b/src/common/page_table.cpp new file mode 100644 index 000000000..270f6e334 --- /dev/null +++ b/src/common/page_table.cpp @@ -0,0 +1,64 @@ +// Copyright 2023 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/page_table.h" + +namespace Common { + +PageTable::PageTable() = default; + +PageTable::~PageTable() noexcept = default; + +bool PageTable::BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context, VAddr address) const { + // Setup invalid defaults. + out_entry->phys_addr = 0; + out_entry->block_size = page_size; + out_context->next_page = 0; + + // Validate that we can read the actual entry. + const auto page = address / page_size; + if (page >= backing_addr.size()) { + return false; + } + + // Validate that the entry is mapped. + const auto phys_addr = backing_addr[page]; + if (phys_addr == 0) { + return false; + } + + // Populate the results. + out_entry->phys_addr = phys_addr + address; + out_context->next_page = page + 1; + out_context->next_offset = address + page_size; + + return true; +} + +bool PageTable::ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const { + // Setup invalid defaults. + out_entry->phys_addr = 0; + out_entry->block_size = page_size; + + // Validate that we can read the actual entry. + const auto page = context->next_page; + if (page >= backing_addr.size()) { + return false; + } + + // Validate that the entry is mapped. + const auto phys_addr = backing_addr[page]; + if (phys_addr == 0) { + return false; + } + + // Populate the results. + out_entry->phys_addr = phys_addr + context->next_offset; + context->next_page = page + 1; + context->next_offset += page_size; + + return true; +} + +} // namespace Common diff --git a/src/common/page_table.h b/src/common/page_table.h new file mode 100644 index 000000000..eef81ccbf --- /dev/null +++ b/src/common/page_table.h @@ -0,0 +1,116 @@ +// Copyright 2023 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "common/common_types.h" + +namespace Common { + +enum class PageType : u8 { + /// Page is unmapped and should cause an access error. + Unmapped, + /// Page is mapped to regular memory. This is the only type you can get pointers to. + Memory, + /// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and + /// invalidation + RasterizerCachedMemory, +}; + +/** + * A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely + * mimics the way a real CPU page table works. + */ +struct PageTable { + struct TraversalEntry { + u64 phys_addr{}; + std::size_t block_size{}; + }; + + struct TraversalContext { + u64 next_page{}; + u64 next_offset{}; + }; + + /// Number of bits reserved for attribute tagging. + /// This can be at most the guaranteed alignment of the pointers in the page table. + static constexpr int ATTRIBUTE_BITS = 2; + static constexpr size_t PAGE_BITS = 12; + static constexpr size_t NUM_ENTRIES = 1 << (32 - PAGE_BITS); + + /** + * Pair of host pointer and page type attribute. + * This uses the lower bits of a given pointer to store the attribute tag. 
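+     * For example, with ATTRIBUTE_BITS = 2 the two lowest bits hold the PageType and the
+     * stored pointer is assumed to be at least 4-byte aligned, so no information is lost.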
+ * Writing and reading the pointer attribute pair is guaranteed to be atomic for the same method + * call. In other words, they are guaranteed to be synchronized at all times. + */ + class PageInfo { + public: + /// Returns the page pointer + [[nodiscard]] uintptr_t Pointer() const noexcept { + return ExtractPointer(raw); + } + + /// Returns the page type attribute + [[nodiscard]] PageType Type() const noexcept { + return ExtractType(raw); + } + + /// Returns the page pointer and attribute pair, extracted from the same atomic read + [[nodiscard]] std::pair PointerType() const noexcept { + return {ExtractPointer(raw), ExtractType(raw)}; + } + + /// Returns the raw representation of the page information. + /// Use ExtractPointer and ExtractType to unpack the value. + [[nodiscard]] uintptr_t Raw() const noexcept { + return raw; + } + + /// Write a page pointer and type pair atomically + void Store(uintptr_t pointer, PageType type) noexcept { + raw = pointer | static_cast(type); + } + + /// Unpack a pointer from a page info raw representation + [[nodiscard]] static uintptr_t ExtractPointer(uintptr_t raw) noexcept { + return raw & (~uintptr_t{0} << ATTRIBUTE_BITS); + } + + /// Unpack a page type from a page info raw representation + [[nodiscard]] static PageType ExtractType(uintptr_t raw) noexcept { + return static_cast(raw & ((uintptr_t{1} << ATTRIBUTE_BITS) - 1)); + } + + private: + uintptr_t raw; + }; + + PageTable(); + ~PageTable() noexcept; + + PageTable(const PageTable&) = delete; + PageTable& operator=(const PageTable&) = delete; + + PageTable(PageTable&&) noexcept = default; + PageTable& operator=(PageTable&&) noexcept = default; + + bool BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context, VAddr address) const; + bool ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const; + + PAddr GetPhysicalAddress(VAddr virt_addr) const { + return backing_addr[virt_addr / page_size] + virt_addr; + } + + /** + * Vector of memory pointers backing each page. An entry can only be non-null if the + * corresponding attribute element is of type `Memory`. 
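+     * The backing_addr array below is indexed by the same page number and stores, for each
+     * mapped page, the offset that converts a virtual address into a physical one (see
+     * GetPhysicalAddress); an entry of zero is treated as unmapped by the traversal helpers.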
+ */ + std::array pointers; + std::array blocks; + std::array backing_addr; + std::size_t page_size{}; +}; + +} // namespace Common diff --git a/src/common/parent_of_member.h b/src/common/parent_of_member.h new file mode 100644 index 000000000..8e03f17d8 --- /dev/null +++ b/src/common/parent_of_member.h @@ -0,0 +1,190 @@ +// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include + +#include "common/assert.h" + +namespace Common { +namespace detail { +template +struct TypedStorageImpl { + alignas(Align) u8 storage_[Size]; +}; +} // namespace detail + +template +using TypedStorage = detail::TypedStorageImpl; + +template +static constexpr T* GetPointer(TypedStorage& ts) { + return static_cast(static_cast(std::addressof(ts.storage_))); +} + +template +static constexpr const T* GetPointer(const TypedStorage& ts) { + return static_cast(static_cast(std::addressof(ts.storage_))); +} + +namespace impl { + +template +struct OffsetOfUnionHolder { + template + union UnionImpl { + using PaddingMember = char; + static constexpr size_t GetOffset() { + return Offset; + } + +#pragma pack(push, 1) + struct { + PaddingMember padding[Offset]; + MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1]; + } data; +#pragma pack(pop) + UnionImpl next_union; + }; + + template + union UnionImpl { + static constexpr size_t GetOffset() { + return 0; + } + + struct { + MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1]; + } data; + UnionImpl next_union; + }; + + template + union UnionImpl {}; +}; + +template +struct OffsetOfCalculator { + using UnionHolder = + typename OffsetOfUnionHolder::template UnionImpl; + union Union { + char c{}; + UnionHolder first_union; + TypedStorage parent; + + constexpr Union() : c() {} + }; + static constexpr Union U = {}; + + static constexpr const MemberType* GetNextAddress(const MemberType* start, + const MemberType* target) { + while (start < target) { + start++; + } + return start; + } + + static constexpr std::ptrdiff_t GetDifference(const MemberType* start, + const MemberType* target) { + return (target - start) * sizeof(MemberType); + } + + template + static constexpr std::ptrdiff_t OffsetOfImpl(MemberType ParentType::*member, + CurUnion& cur_union) { + constexpr size_t Offset = CurUnion::GetOffset(); + const auto target = std::addressof(GetPointer(U.parent)->*member); + const auto start = std::addressof(cur_union.data.members[0]); + const auto next = GetNextAddress(start, target); + + if (next != target) { + if constexpr (Offset < sizeof(MemberType) - 1) { + return OffsetOfImpl(member, cur_union.next_union); + } else { + UNREACHABLE(); + } + } + + return static_cast(static_cast(next - start) * sizeof(MemberType) + + Offset); + } + + static constexpr std::ptrdiff_t OffsetOf(MemberType ParentType::*member) { + return OffsetOfImpl(member, U.first_union); + } +}; + +template +struct GetMemberPointerTraits; + +template +struct GetMemberPointerTraits { + using Parent = P; + using Member = M; +}; + +template +using GetParentType = typename GetMemberPointerTraits::Parent; + +template +using GetMemberType = typename GetMemberPointerTraits::Member; + +template > +constexpr std::ptrdiff_t OffsetOf() { + using DeducedParentType = GetParentType; + using MemberType = GetMemberType; + static_assert(std::is_base_of::value || + std::is_same::value); + + return OffsetOfCalculator::OffsetOf(MemberPtr); +}; + +} // namespace impl + +template > +constexpr RealParentType& 
GetParentReference(impl::GetMemberType* member) { + std::ptrdiff_t Offset = impl::OffsetOf(); + return *static_cast( + static_cast(static_cast(static_cast(member)) - Offset)); +} + +template > +constexpr RealParentType const& GetParentReference(impl::GetMemberType const* member) { + std::ptrdiff_t Offset = impl::OffsetOf(); + return *static_cast(static_cast( + static_cast(static_cast(member)) - Offset)); +} + +template > +constexpr RealParentType* GetParentPointer(impl::GetMemberType* member) { + return std::addressof(GetParentReference(member)); +} + +template > +constexpr RealParentType const* GetParentPointer(impl::GetMemberType const* member) { + return std::addressof(GetParentReference(member)); +} + +template > +constexpr RealParentType& GetParentReference(impl::GetMemberType& member) { + return GetParentReference(std::addressof(member)); +} + +template > +constexpr RealParentType const& GetParentReference(impl::GetMemberType const& member) { + return GetParentReference(std::addressof(member)); +} + +template > +constexpr RealParentType* GetParentPointer(impl::GetMemberType& member) { + return std::addressof(GetParentReference(member)); +} + +template > +constexpr RealParentType const* GetParentPointer(impl::GetMemberType const& member) { + return std::addressof(GetParentReference(member)); +} + +} // namespace Common diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 15c532c87..37917e79a 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -149,6 +149,27 @@ add_library(citra_core STATIC hle/kernel/ipc_debugger/recorder.h hle/kernel/kernel.cpp hle/kernel/kernel.h + hle/kernel/k_auto_object.cpp + hle/kernel/k_auto_object.h + hle/kernel/k_class_token.cpp + hle/kernel/k_class_token.h + hle/kernel/k_linked_list.cpp + hle/kernel/k_linked_list.h + hle/kernel/k_memory_block.cpp + hle/kernel/k_memory_block.h + hle/kernel/k_memory_block_manager.cpp + hle/kernel/k_memory_block_manager.h + hle/kernel/k_memory_manager.cpp + hle/kernel/k_memory_manager.h + hle/kernel/k_page_group.cpp + hle/kernel/k_page_group.h + hle/kernel/k_page_heap.cpp + hle/kernel/k_page_heap.h + hle/kernel/k_page_manager.cpp + hle/kernel/k_page_manager.h + hle/kernel/k_page_table.cpp + hle/kernel/k_page_table.h + hle/kernel/k_slab_heap.h hle/kernel/memory.cpp hle/kernel/memory.h hle/kernel/mutex.cpp @@ -473,6 +494,7 @@ add_library(citra_core STATIC tracer/citrace.h tracer/recorder.cpp tracer/recorder.h + hle/kernel/slab_helpers.h ) create_target_directory_groups(citra_core) diff --git a/src/core/hle/kernel/k_auto_object.cpp b/src/core/hle/kernel/k_auto_object.cpp new file mode 100644 index 000000000..9cd7a9fd5 --- /dev/null +++ b/src/core/hle/kernel/k_auto_object.cpp @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "core/hle/kernel/k_auto_object.h" +#include "core/hle/kernel/kernel.h" + +namespace Kernel { + +KAutoObject* KAutoObject::Create(KAutoObject* obj) { + obj->m_ref_count = 1; + return obj; +} + +void KAutoObject::RegisterWithKernel() { + m_kernel.RegisterKernelObject(this); +} + +void KAutoObject::UnregisterWithKernel(KernelCore& kernel, KAutoObject* self) { + kernel.UnregisterKernelObject(self); +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h new file mode 100644 index 000000000..8783ce6c9 --- /dev/null +++ b/src/core/hle/kernel/k_auto_object.h @@ -0,0 +1,268 @@ +// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator 
Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include +#include + +#include "common/assert.h" +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "core/hle/kernel/k_class_token.h" + +namespace Kernel { + +class KernelSystem; +class KProcess; + +#define KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, ATTRIBUTE) \ +private: \ + friend class ::Kernel::KClassTokenGenerator; \ + static constexpr inline auto ObjectType = ::Kernel::KClassTokenGenerator::ObjectType::CLASS; \ + static constexpr inline const char* const TypeName = #CLASS; \ + static constexpr inline ClassTokenType ClassToken() { return ::Kernel::ClassToken; } \ + \ +public: \ + CITRA_NON_COPYABLE(CLASS); \ + CITRA_NON_MOVEABLE(CLASS); \ + \ + using BaseClass = BASE_CLASS; \ + static constexpr TypeObj GetStaticTypeObj() { \ + constexpr ClassTokenType Token = ClassToken(); \ + return TypeObj(TypeName, Token); \ + } \ + static constexpr const char* GetStaticTypeName() { return TypeName; } \ + virtual TypeObj GetTypeObj() ATTRIBUTE { return GetStaticTypeObj(); } \ + virtual const char* GetTypeName() ATTRIBUTE { return GetStaticTypeName(); } \ + \ +private: \ + constexpr bool operator!=(const TypeObj& rhs) + +#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \ + KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, const override) + +class KAutoObject { +protected: + class TypeObj { + public: + constexpr explicit TypeObj(const char* n, ClassTokenType tok) + : m_name(n), m_class_token(tok) {} + + constexpr const char* GetName() const { + return m_name; + } + constexpr ClassTokenType GetClassToken() const { + return m_class_token; + } + + constexpr bool operator==(const TypeObj& rhs) const { + return this->GetClassToken() == rhs.GetClassToken(); + } + + constexpr bool operator!=(const TypeObj& rhs) const { + return this->GetClassToken() != rhs.GetClassToken(); + } + + constexpr bool IsDerivedFrom(const TypeObj& rhs) const { + return (this->GetClassToken() | rhs.GetClassToken()) == this->GetClassToken(); + } + + private: + const char* m_name; + ClassTokenType m_class_token; + }; + +private: + KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const); + +public: + explicit KAutoObject(KernelSystem& kernel) : m_kernel(kernel) { + RegisterWithKernel(); + } + virtual ~KAutoObject() = default; + + static KAutoObject* Create(KAutoObject* ptr); + + // Destroy is responsible for destroying the auto object's resources when ref_count hits zero. + virtual void Destroy() { + UNIMPLEMENTED(); + } + + // Finalize is responsible for cleaning up resource, but does not destroy the object. 
+ virtual void Finalize() {} + + virtual KProcess* GetOwner() const { + return nullptr; + } + + u32 GetReferenceCount() const { + return m_ref_count.load(); + } + + bool IsDerivedFrom(const TypeObj& rhs) const { + return this->GetTypeObj().IsDerivedFrom(rhs); + } + + bool IsDerivedFrom(const KAutoObject& rhs) const { + return this->IsDerivedFrom(rhs.GetTypeObj()); + } + + template + Derived DynamicCast() { + static_assert(std::is_pointer_v); + using DerivedType = std::remove_pointer_t; + + if (this->IsDerivedFrom(DerivedType::GetStaticTypeObj())) { + return static_cast(this); + } else { + return nullptr; + } + } + + template + const Derived DynamicCast() const { + static_assert(std::is_pointer_v); + using DerivedType = std::remove_pointer_t; + + if (this->IsDerivedFrom(DerivedType::GetStaticTypeObj())) { + return static_cast(this); + } else { + return nullptr; + } + } + + bool Open() { + // Atomically increment the reference count, only if it's positive. + u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire); + do { + if (cur_ref_count == 0) { + return false; + } + ASSERT(cur_ref_count < cur_ref_count + 1); + } while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count + 1, + std::memory_order_relaxed)); + + return true; + } + + void Close() { + // Atomically decrement the reference count, not allowing it to become negative. + u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire); + do { + ASSERT(cur_ref_count > 0); + } while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1, + std::memory_order_acq_rel)); + + // If ref count hits zero, destroy the object. + if (cur_ref_count - 1 == 0) { + KernelSystem& kernel = m_kernel; + this->Destroy(); + KAutoObject::UnregisterWithKernel(kernel, this); + } + } + +private: + void RegisterWithKernel(); + static void UnregisterWithKernel(KernelSystem& kernel, KAutoObject* self); + +protected: + KernelSystem& m_kernel; + +private: + std::atomic m_ref_count{}; +}; + +template +class KScopedAutoObject { +public: + CITRA_NON_COPYABLE(KScopedAutoObject); + + constexpr KScopedAutoObject() = default; + + constexpr KScopedAutoObject(T* o) : m_obj(o) { + if (m_obj != nullptr) { + m_obj->Open(); + } + } + + ~KScopedAutoObject() { + if (m_obj != nullptr) { + m_obj->Close(); + } + m_obj = nullptr; + } + + template + requires(std::derived_from || std::derived_from) + constexpr KScopedAutoObject(KScopedAutoObject&& rhs) { + if constexpr (std::derived_from) { + // Upcast. + m_obj = rhs.m_obj; + rhs.m_obj = nullptr; + } else { + // Downcast. 
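+            // Try to convert the incoming object to T; if the dynamic cast fails, the
+            // reference held by rhs must still be released here to avoid leaking it.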
+ T* derived = nullptr; + if (rhs.m_obj != nullptr) { + derived = rhs.m_obj->template DynamicCast(); + if (derived == nullptr) { + rhs.m_obj->Close(); + } + } + + m_obj = derived; + rhs.m_obj = nullptr; + } + } + + constexpr KScopedAutoObject& operator=(KScopedAutoObject&& rhs) { + rhs.Swap(*this); + return *this; + } + + constexpr T* operator->() { + return m_obj; + } + constexpr T& operator*() { + return *m_obj; + } + + constexpr void Reset(T* o) { + KScopedAutoObject(o).Swap(*this); + } + + constexpr T* GetPointerUnsafe() { + return m_obj; + } + + constexpr T* GetPointerUnsafe() const { + return m_obj; + } + + constexpr T* ReleasePointerUnsafe() { + T* ret = m_obj; + m_obj = nullptr; + return ret; + } + + constexpr bool IsNull() const { + return m_obj == nullptr; + } + constexpr bool IsNotNull() const { + return m_obj != nullptr; + } + +private: + template + friend class KScopedAutoObject; + +private: + T* m_obj{}; + +private: + constexpr void Swap(KScopedAutoObject& rhs) noexcept { + std::swap(m_obj, rhs.m_obj); + } +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_class_token.cpp b/src/core/hle/kernel/k_class_token.cpp new file mode 100644 index 000000000..a850db3c4 --- /dev/null +++ b/src/core/hle/kernel/k_class_token.cpp @@ -0,0 +1,125 @@ +// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "core/hle/kernel/k_auto_object.h" +#include "core/hle/kernel/k_class_token.h" +#include "core/hle/kernel/k_client_port.h" +#include "core/hle/kernel/k_client_session.h" +#include "core/hle/kernel/k_code_memory.h" +#include "core/hle/kernel/k_event.h" +#include "core/hle/kernel/k_port.h" +#include "core/hle/kernel/k_process.h" +#include "core/hle/kernel/k_readable_event.h" +#include "core/hle/kernel/k_resource_limit.h" +#include "core/hle/kernel/k_server_port.h" +#include "core/hle/kernel/k_server_session.h" +#include "core/hle/kernel/k_session.h" +#include "core/hle/kernel/k_shared_memory.h" +#include "core/hle/kernel/k_synchronization_object.h" +#include "core/hle/kernel/k_system_resource.h" +#include "core/hle/kernel/k_thread.h" +#include "core/hle/kernel/k_transfer_memory.h" + +namespace Kernel { + +// Ensure that we generate correct class tokens for all types. + +// Ensure that the absolute token values are correct. 
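+// A final class' token ORs its unique bit pattern (shifted past the base-class bits) with the
+// tokens of every base class it derives from, which is what allows IsDerivedFrom() to be a
+// simple bitwise subset check.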
+static_assert(ClassToken == 0b00000000'00000000); +static_assert(ClassToken == 0b00000000'00000001); +static_assert(ClassToken == 0b00000000'00000011); +// static_assert(ClassToken == 0b00000111'00000011); +// static_assert(ClassToken == 0b00001011'00000001); +static_assert(ClassToken == 0b00010011'00000001); +static_assert(ClassToken == 0b00100011'00000001); +static_assert(ClassToken == 0b01000011'00000001); +static_assert(ClassToken == 0b10000011'00000001); +static_assert(ClassToken == 0b00001101'00000000); +static_assert(ClassToken == 0b00010101'00000001); +static_assert(ClassToken == 0b00100101'00000000); +// static_assert(ClassToken == 0b01000101'00000000); +static_assert(ClassToken == 0b10000101'00000000); +static_assert(ClassToken == 0b00011001'00000000); +static_assert(ClassToken == 0b00101001'00000000); +static_assert(ClassToken == 0b01001001'00000000); +// static_assert(ClassToken == 0b00110001'00000000); +// static_assert(ClassToken == 0b01010001'00000000); +static_assert(ClassToken == 0b01010001'00000000); +// static_assert(ClassToken == 0b01100001'00000000); +// static_assert(ClassToken == 0b10100001'00000000); +static_assert(ClassToken == 0b10100001'00000000); + +// Ensure that the token hierarchy is correct. + +// Base classes +static_assert(ClassToken == (0b00000000)); +static_assert(ClassToken == (0b00000001 | ClassToken)); +static_assert(ClassToken == (0b00000010 | ClassToken)); + +// Final classes +// static_assert(ClassToken == ((0b00000111 << 8) | ClassToken)); +// static_assert(ClassToken == ((0b00001011 << 8) | ClassToken)); +static_assert(ClassToken == ((0b00010011 << 8) | ClassToken)); +static_assert(ClassToken == ((0b00100011 << 8) | ClassToken)); +static_assert(ClassToken == + ((0b01000011 << 8) | ClassToken)); +static_assert(ClassToken == ((0b10000011 << 8) | ClassToken)); +static_assert(ClassToken == ((0b00001101 << 8) | ClassToken)); +static_assert(ClassToken == ((0b00010101 << 8) | ClassToken)); +static_assert(ClassToken == ((0b00100101 << 8) | ClassToken)); +// static_assert(ClassToken == ((0b01000101 << 8) | ClassToken)); +static_assert(ClassToken == ((0b10000101 << 8) | ClassToken)); +static_assert(ClassToken == ((0b00011001 << 8) | ClassToken)); +static_assert(ClassToken == ((0b00101001 << 8) | ClassToken)); +static_assert(ClassToken == ((0b01001001 << 8) | ClassToken)); +// static_assert(ClassToken == ((0b00110001 << 8) | ClassToken)); +// static_assert(ClassToken == ((0b01010001 << 8) | ClassToken)); +static_assert(ClassToken == ((0b01010001 << 8) | ClassToken)); +// static_assert(ClassToken == ((0b01100001 << 8) | ClassToken)); +// static_assert(ClassToken == ((0b10100001 << 8) | ClassToken)); +static_assert(ClassToken == ((0b10100001 << 8) | ClassToken)); + +// Ensure that the token hierarchy reflects the class hierarchy. + +// Base classes. 
+static_assert(!std::is_final_v && + std::is_base_of_v); +static_assert(!std::is_final_v && + std::is_base_of_v); + +// Final classes +// static_assert(std::is_final_v && +// std::is_base_of_v); +// static_assert(std::is_final_v && +// std::is_base_of_v); +static_assert(std::is_final_v && std::is_base_of_v); +static_assert(std::is_final_v && + std::is_base_of_v); +static_assert(std::is_final_v && + std::is_base_of_v); +static_assert(std::is_final_v && + std::is_base_of_v); +static_assert(std::is_final_v && std::is_base_of_v); +static_assert(std::is_final_v && std::is_base_of_v); +static_assert(std::is_final_v && std::is_base_of_v); +// static_assert(std::is_final_v && +// std::is_base_of_v); +static_assert(std::is_final_v && std::is_base_of_v); +static_assert(std::is_final_v && std::is_base_of_v); +static_assert(std::is_final_v && std::is_base_of_v); +static_assert(std::is_final_v && std::is_base_of_v); +// static_assert(std::is_final_v && +// std::is_base_of_v); +// static_assert(std::is_final_v && +// std::is_base_of_v); +static_assert(std::is_final_v && std::is_base_of_v); +// static_assert(std::is_final_v && +// std::is_base_of_v); +// static_assert(std::is_final_v && +// std::is_base_of_v); +// static_assert(std::is_final_v && +// std::is_base_of_v); + +static_assert(std::is_base_of_v); + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_class_token.h b/src/core/hle/kernel/k_class_token.h new file mode 100644 index 000000000..469863423 --- /dev/null +++ b/src/core/hle/kernel/k_class_token.h @@ -0,0 +1,111 @@ +// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "common/common_types.h" + +namespace Kernel { + +class KAutoObject; +class KSynchronizationObject; + +class KClassTokenGenerator { +public: + using TokenBaseType = u8; + +public: + static constexpr size_t BaseClassBits = 1; + static constexpr size_t FinalClassBits = (sizeof(TokenBaseType) * CHAR_BIT) - BaseClassBits - 1; + // One bit per base class. + static constexpr size_t NumBaseClasses = BaseClassBits; + // Final classes are permutations of three bits. 
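+    // i.e. NumFinalClasses is the number of ways to choose three distinct bits out of
+    // FinalClassBits; the constexpr lambda below simply enumerates those combinations.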
+ static constexpr size_t NumFinalClasses = [] { + TokenBaseType index = 0; + for (size_t i = 0; i < FinalClassBits; i++) { + for (size_t j = i + 1; j < FinalClassBits; j++) { + for (size_t k = j + 1; k < FinalClassBits; k++) { + index++; + } + } + } + return index; + }(); + +private: + template + static constexpr inline TokenBaseType BaseClassToken = 1U << Index; + + template + static constexpr inline TokenBaseType FinalClassToken = [] { + TokenBaseType index = 0; + for (size_t i = 0; i < FinalClassBits; i++) { + for (size_t j = i + 1; j < FinalClassBits; j++) { + for (size_t k = j + 1; k < FinalClassBits; k++) { + if ((index++) == Index) { + return static_cast(((1ULL << i) | (1ULL << j) | (1ULL << k)) + << BaseClassBits); + } + } + } + } + UNREACHABLE(); + }(); + + template + static constexpr inline TokenBaseType GetClassToken() { + static_assert(std::is_base_of::value); + if constexpr (std::is_same::value) { + static_assert(T::ObjectType == ObjectType::KAutoObject); + return 0; + } else if constexpr (std::is_same::value) { + static_assert(T::ObjectType == ObjectType::KSynchronizationObject); + return 1; + } else if constexpr (ObjectType::FinalClassesStart <= T::ObjectType && + T::ObjectType < ObjectType::FinalClassesEnd) { + constexpr auto ClassIndex = static_cast(T::ObjectType) - + static_cast(ObjectType::FinalClassesStart); + return FinalClassToken | GetClassToken(); + } else { + static_assert(!std::is_same::value, "GetClassToken: Invalid Type"); + } + }; + +public: + enum class ObjectType { + KAutoObject, + KSynchronizationObject, + + FinalClassesStart, + KSemaphore, + KEvent, + KTimer, + KMutex, + KDebug, + KServerPort, + KDmaObject, + KClientPort, + KCodeSet, + KSession, + KThread, + KServerSession, + KAddressArbiter, + KClientSession, + KPort, + KSharedMemory, + KProcess, + KResourceLimit, + + FinalClassesEnd = FinalClassesStart + NumFinalClasses, + }; + + template + static constexpr inline TokenBaseType ClassToken = GetClassToken(); +}; + +using ClassTokenType = KClassTokenGenerator::TokenBaseType; + +template +static constexpr inline ClassTokenType ClassToken = KClassTokenGenerator::ClassToken; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_linked_list.cpp b/src/core/hle/kernel/k_linked_list.cpp new file mode 100644 index 000000000..e69de29bb diff --git a/src/core/hle/kernel/k_linked_list.h b/src/core/hle/kernel/k_linked_list.h new file mode 100644 index 000000000..c2de2facf --- /dev/null +++ b/src/core/hle/kernel/k_linked_list.h @@ -0,0 +1,237 @@ +// Copyright 2023 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
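+// Usage sketch (illustrative only; `kernel` and `block` are placeholder names): unlike
+// Common::IntrusiveList, which requires the element type to embed its own node, KLinkedList
+// allocates a KLinkedListNode wrapper per element from the kernel slab allocator, so arbitrary
+// objects can be chained without modification:
+//
+//     KLinkedList<KMemoryBlock> list{kernel};
+//     list.push_back(*block);                   // allocates a node that points at block
+//     for (KMemoryBlock& b : list) { /* ... */ }
+//     // the destructor erases any remaining nodes and returns them to the slab heap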
+ +#pragma once + +#include "common/intrusive_list.h" +#include "core/hle/kernel/slab_helpers.h" + +namespace Kernel { + +class KernelSystem; + +class KLinkedListNode : public Common::IntrusiveListBaseNode, + public KSlabAllocated { + +public: + explicit KLinkedListNode(KernelSystem&) {} + KLinkedListNode() = default; + + void Initialize(void* it) { + m_item = it; + } + + void* GetItem() const { + return m_item; + } + +private: + void* m_item = nullptr; +}; + +template +class KLinkedList : private Common::IntrusiveListBaseTraits::ListType { +private: + using BaseList = Common::IntrusiveListBaseTraits::ListType; + +public: + template + class Iterator; + + using value_type = T; + using size_type = size_t; + using difference_type = ptrdiff_t; + using pointer = value_type*; + using const_pointer = const value_type*; + using reference = value_type&; + using const_reference = const value_type&; + using iterator = Iterator; + using const_iterator = Iterator; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + template + class Iterator { + private: + using BaseIterator = BaseList::iterator; + friend class KLinkedList; + + public: + using iterator_category = std::bidirectional_iterator_tag; + using value_type = typename KLinkedList::value_type; + using difference_type = typename KLinkedList::difference_type; + using pointer = std::conditional_t; + using reference = + std::conditional_t; + + public: + explicit Iterator(BaseIterator it) : m_base_it(it) {} + + pointer GetItem() const { + return static_cast(m_base_it->GetItem()); + } + + bool operator==(const Iterator& rhs) const { + return m_base_it == rhs.m_base_it; + } + + bool operator!=(const Iterator& rhs) const { + return !(*this == rhs); + } + + pointer operator->() const { + return this->GetItem(); + } + + reference operator*() const { + return *this->GetItem(); + } + + Iterator& operator++() { + ++m_base_it; + return *this; + } + + Iterator& operator--() { + --m_base_it; + return *this; + } + + Iterator operator++(int) { + const Iterator it{*this}; + ++(*this); + return it; + } + + Iterator operator--(int) { + const Iterator it{*this}; + --(*this); + return it; + } + + operator Iterator() const { + return Iterator(m_base_it); + } + + private: + BaseIterator m_base_it; + }; + +public: + constexpr KLinkedList(KernelSystem& kernel_) : BaseList(), kernel{kernel_} {} + + ~KLinkedList() { + // Erase all elements. + for (auto it = begin(); it != end(); it = erase(it)) { + } + + // Ensure we succeeded. + ASSERT(this->empty()); + } + + // Iterator accessors. + iterator begin() { + return iterator(BaseList::begin()); + } + + const_iterator begin() const { + return const_iterator(BaseList::begin()); + } + + iterator end() { + return iterator(BaseList::end()); + } + + const_iterator end() const { + return const_iterator(BaseList::end()); + } + + const_iterator cbegin() const { + return this->begin(); + } + + const_iterator cend() const { + return this->end(); + } + + reverse_iterator rbegin() { + return reverse_iterator(this->end()); + } + + const_reverse_iterator rbegin() const { + return const_reverse_iterator(this->end()); + } + + reverse_iterator rend() { + return reverse_iterator(this->begin()); + } + + const_reverse_iterator rend() const { + return const_reverse_iterator(this->begin()); + } + + const_reverse_iterator crbegin() const { + return this->rbegin(); + } + + const_reverse_iterator crend() const { + return this->rend(); + } + + // Content management. 
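+    // empty() and size() come straight from the underlying intrusive list; the operations
+    // below additionally allocate or free the KLinkedListNode wrappers through the slab heap.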
+ using BaseList::empty; + using BaseList::size; + + reference back() { + return *(--this->end()); + } + + const_reference back() const { + return *(--this->end()); + } + + reference front() { + return *this->begin(); + } + + const_reference front() const { + return *this->begin(); + } + + iterator insert(const_iterator pos, reference ref) { + KLinkedListNode* new_node = KLinkedListNode::Allocate(kernel); + ASSERT(new_node != nullptr); + new_node->Initialize(std::addressof(ref)); + return iterator(BaseList::insert(pos.m_base_it, *new_node)); + } + + void push_back(reference ref) { + this->insert(this->end(), ref); + } + + void push_front(reference ref) { + this->insert(this->begin(), ref); + } + + void pop_back() { + this->erase(--this->end()); + } + + void pop_front() { + this->erase(this->begin()); + } + + iterator erase(const iterator pos) { + KLinkedListNode* freed_node = std::addressof(*pos.m_base_it); + iterator ret = iterator(BaseList::erase(pos.m_base_it)); + KLinkedListNode::Free(kernel, freed_node); + + return ret; + } + +private: + KernelSystem& kernel; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_block.cpp b/src/core/hle/kernel/k_memory_block.cpp new file mode 100644 index 000000000..6ebbd6089 --- /dev/null +++ b/src/core/hle/kernel/k_memory_block.cpp @@ -0,0 +1,41 @@ +// Copyright 2023 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "core/hle/kernel/k_memory_block.h" + +namespace Kernel { + +void KMemoryBlock::ShrinkBlock(VAddr addr, u32 num_pages) { + const VAddr end_addr = addr + (num_pages << Memory::CITRA_PAGE_BITS) - 1; + const VAddr last_addr = this->GetLastAddress(); + if (m_base_addr < end_addr && end_addr < last_addr) { + m_base_addr = end_addr + 1; + m_num_pages = (last_addr - end_addr) >> Memory::CITRA_PAGE_BITS; + return; + } + if (m_base_addr < addr && addr < last_addr) { + m_num_pages = (addr - m_base_addr) >> Memory::CITRA_PAGE_BITS; + return; + } +} + +void KMemoryBlock::GrowBlock(VAddr addr, u32 num_pages) { + const u32 end_addr = addr + (num_pages << Memory::CITRA_PAGE_BITS) - 1; + const u32 last_addr = this->GetLastAddress(); + if (addr < m_base_addr) { + m_base_addr = addr; + m_num_pages = (last_addr - addr + 1) >> Memory::CITRA_PAGE_BITS; + } + if (last_addr < end_addr) { + m_num_pages = (end_addr - m_base_addr + 1) >> Memory::CITRA_PAGE_BITS; + } +} + +bool KMemoryBlock::IncludesRange(VAddr addr, u32 num_pages) { + const u32 end_addr = addr + (num_pages << Memory::CITRA_PAGE_BITS) - 1; + const u32 last_addr = this->GetLastAddress(); + return m_base_addr >= addr && last_addr <= end_addr; +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h new file mode 100644 index 000000000..336b773fe --- /dev/null +++ b/src/core/hle/kernel/k_memory_block.h @@ -0,0 +1,187 @@ +// Copyright 2023 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
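+// Illustrative sketch (placeholder names, not prescriptive): a KMemoryBlock describes one
+// page-aligned run of memory with a single state/permission/tag pair and is itself allocated
+// from the kernel slab heap:
+//
+//     KMemoryBlock* b = KMemoryBlock::Allocate(kernel);
+//     b->Initialize(addr, num_pages, /*tag*/ 0, KMemoryState::Private,
+//                   KMemoryPermission::UserReadWrite);
+//     const KMemoryInfo info = b->GetInfo();    // base address, size, permissions and state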
+ +#pragma once + +#include "common/common_funcs.h" +#include "core/hle/kernel/slab_helpers.h" +#include "core/memory.h" + +namespace Kernel { + +enum class KMemoryPermission : u32 { + None = 0x0, + UserRead = 0x1, + UserWrite = 0x2, + UserReadWrite = UserRead | UserWrite, + UserExecute = 0x4, + UserReadExecute = UserRead | UserExecute, + KernelRead = 0x8, + KernelWrite = 0x10, + KernelExecute = 0x20, + KernelReadWrite = KernelRead | KernelWrite, + DontCare = 0x10000000, +}; +DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission) + +enum class KMemoryState : u32 { + Free = 0x0, + Reserved = 0x1, + Io = 0x2, + Static = 0x3, + Code = 0x4, + Private = 0x5, + Shared = 0x6, + Continuous = 0x7, + Aliased = 0x8, + Alias = 0x9, + Aliascode = 0xA, + Locked = 0xB, + KernelMask = 0xFF, + + FlagDeallocatable = 0x100, + FlagProtectible = 0x200, + FlagDebuggable = 0x400, + FlagIpcAllowed = 0x800, + FlagMapped = 0x1000, + FlagPrivate = 0x2000, + FlagShared = 0x4000, + FlagsPrivateOrShared = 0x6000, + FlagCodeAllowed = 0x8000, + FlagsIpc = 0x1800, + FlagsPrivateData = 0x3800, + FlagsPrivateCodeAllowed = 0xB800, + FlagsPrivateCode = 0xBC00, + FlagsCode = 0x9C00, + + KernelIo = 0x1002, + KernelStatic = 0x1003, + KernelShared = 0x5806, + KernelLinear = 0x3907, + KernelAliased = 0x3A08, + KernelAlias = 0x1A09, + KernelAliasCode = 0x9C0A, + PrivateAliasCode = 0xBC0A, + PrivateCode = 0xBC04, + PrivateData = 0xBB05, + KernelLocked = 0x380B, + FlagsAny = 0xFFFFFFFF, +}; +DECLARE_ENUM_FLAG_OPERATORS(KMemoryState) + +struct KMemoryInfo { + VAddr m_base_address; + u32 m_size; + KMemoryPermission m_perms; + KMemoryState m_state; + + constexpr VAddr GetAddress() const { + return m_base_address; + } + + constexpr u32 GetSize() const { + return m_size; + } + + constexpr u32 GetNumPages() const { + return this->GetSize() >> Memory::CITRA_PAGE_BITS; + } + + constexpr VAddr GetEndAddress() const { + return this->GetAddress() + this->GetSize(); + } + + constexpr VAddr GetLastAddress() const { + return this->GetEndAddress() - 1; + } + + constexpr KMemoryPermission GetPerms() const { + return m_perms; + } + + constexpr KMemoryState GetState() const { + return m_state; + } +}; + +struct KMemoryBlock : public KSlabAllocated { +public: + explicit KMemoryBlock() = default; + + constexpr void Initialize(VAddr base_addr, u32 num_pages, u32 tag, KMemoryState state, + KMemoryPermission perms) { + m_base_addr = base_addr; + m_num_pages = num_pages; + m_permission = perms; + m_memory_state = state; + m_tag = tag; + } + + constexpr bool Contains(VAddr addr) const { + return this->GetAddress() <= addr && addr <= this->GetLastAddress(); + } + + constexpr KMemoryInfo GetInfo() const { + return { + .m_base_address = m_base_addr, + .m_size = this->GetSize(), + .m_perms = m_permission, + .m_state = m_memory_state, + }; + } + + constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, u32 t) const { + return m_memory_state == s && m_permission == p && m_tag == t; + } + + constexpr bool HasSameProperties(const KMemoryBlock& rhs) const { + return m_memory_state == rhs.m_memory_state && m_permission == rhs.m_permission && + m_tag == rhs.m_tag; + } + + constexpr u32 GetSize() const { + return m_num_pages << Memory::CITRA_PAGE_BITS; + } + + constexpr u32 GetEndAddress() const { + return this->GetAddress() + this->GetSize(); + } + + constexpr u32 GetLastAddress() const { + return this->GetEndAddress() - 1; + } + + constexpr u32 GetAddress() const { + return m_base_addr; + } + + constexpr u32 GetNumPages() const { + return m_num_pages; + } + + 
constexpr KMemoryPermission GetPermission() const { + return m_permission; + } + + constexpr KMemoryState GetState() const { + return m_memory_state; + } + + constexpr u32 GetTag() const { + return m_tag; + } + + void ShrinkBlock(VAddr addr, u32 num_pages); + void GrowBlock(VAddr addr, u32 num_pages); + bool IncludesRange(VAddr addr, u32 num_pages); + +private: + u32 m_base_addr{}; + u32 m_num_pages{}; + KMemoryPermission m_permission{}; + KMemoryState m_memory_state{}; + u32 m_tag{}; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp new file mode 100644 index 000000000..f2ca347de --- /dev/null +++ b/src/core/hle/kernel/k_memory_block_manager.cpp @@ -0,0 +1,199 @@ +// Copyright 2023 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/scope_exit.h" +#include "core/hle/kernel/k_memory_block_manager.h" + +namespace Kernel { + +void KMemoryBlockManager::Initialize(u32 addr_space_start, u32 addr_space_end) { + const u32 num_pages = (addr_space_end - addr_space_start) >> Memory::CITRA_PAGE_BITS; + KMemoryBlock* block = KMemoryBlock::Allocate(m_kernel); + block->Initialize(addr_space_start, num_pages, 0, KMemoryState::Free, KMemoryPermission::None); + m_blocks.push_back(*block); +} + +s64 KMemoryBlockManager::GetTotalCommittedMemory() { + u32 total_commited_memory{}; + for (const auto& block : m_blocks) { + const KMemoryInfo info = block.GetInfo(); + if (info.GetAddress() - 0x1C000000 >= 0x4000000 && + True(info.GetState() & KMemoryState::Private)) { + total_commited_memory += info.GetSize(); + } + } + return total_commited_memory; +} + +KMemoryBlock* KMemoryBlockManager::FindFreeBlockInRegion(VAddr start, u32 num_pages, + u32 block_num_pages) { + const VAddr end = start + (num_pages << Memory::CITRA_PAGE_BITS); + const u32 block_size = block_num_pages << Memory::CITRA_PAGE_BITS; + for (auto& block : m_blocks) { + const KMemoryInfo info = block.GetInfo(); + if (info.GetState() != KMemoryState::Free) { + continue; + } + const VAddr block_start = std::max(info.GetAddress(), start); + const VAddr block_end = block_start + block_size; + if (block_end <= end && block_end <= info.GetEndAddress()) { + return std::addressof(block); + } + } + return nullptr; +} + +void KMemoryBlockManager::CoalesceBlocks() { + auto it = m_blocks.begin(); + while (true) { + iterator prev = it++; + if (it == m_blocks.end()) { + break; + } + + // Merge adjacent blocks with the same properties. + if (prev->HasSameProperties(*it)) { + KMemoryBlock* block = std::addressof(*it); + const KMemoryInfo info = block->GetInfo(); + prev->GrowBlock(info.GetAddress(), info.GetNumPages()); + KMemoryBlock::Free(m_kernel, block); + m_blocks.erase(it); + it = prev; + } + } +} + +ResultCode KMemoryBlockManager::MutateRange(VAddr addr, u32 num_pages, KMemoryState state, + KMemoryPermission perms, u32 tag) { + // Initialize iterators. + const VAddr last_addr = addr + (num_pages << Memory::CITRA_PAGE_BITS) - 1; + iterator begin = FindIterator(addr); + iterator end = FindIterator(last_addr); + + // Before returning we have to coalesce. + SCOPE_EXIT({ this->CoalesceBlocks(); }); + + // Begin and end addresses are in different blocks. We need to shrink/remove + // any blocks in that range and insert a new one with the new attributes. + if (begin != end) { + // Any blocks in-between begin and end can be completely erased. 
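+        // (their whole range lies inside [addr, last_addr], so their attributes are being
+        // overwritten anyway and the blocks can simply be returned to the slab allocator)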
+ for (auto it = std::next(begin); it != end;) { + KMemoryBlock::Free(m_kernel, std::addressof(*it)); + it = m_blocks.erase(it); + } + + // If begin block has same properties, grow it to accomodate the range. + if (begin->HasProperties(state, perms, tag)) { + begin->GrowBlock(addr, num_pages); + // If the end block is fully overwritten, remove it. + if (end->GetLastAddress() == last_addr) { + KMemoryBlock::Free(m_kernel, std::addressof(*end)); + m_blocks.erase(end); + R_SUCCEED(); + } + } else if (end->HasProperties(state, perms, tag)) { + // If end block has same properties, grow it to accomodate the range. + end->GrowBlock(addr, num_pages); + + // Remove start block if fully overwritten + if (begin->GetAddress() == addr) { + KMemoryBlock::Free(m_kernel, std::addressof(*begin)); + m_blocks.erase(begin); + R_SUCCEED(); + } + } else { + // Neither begin and end blocks have required properties. + // Shrink them both and create a new block in-between. + if (begin->IncludesRange(addr, num_pages)) { + KMemoryBlock::Free(m_kernel, std::addressof(*begin)); + begin = m_blocks.erase(begin); + } else { + // Otherwise cut off the part that inside our range + begin->ShrinkBlock(addr, num_pages); + } + + // If the end block is fully inside the range, remove it + if (end->IncludesRange(addr, num_pages)) { + KMemoryBlock::Free(m_kernel, std::addressof(*end)); + end = m_blocks.erase(end); + } else { + // Otherwise cut off the part that inside our range + end->ShrinkBlock(addr, num_pages); + } + + // The range [va, endVa] is now void, create new block in its place. + KMemoryBlock* block = KMemoryBlock::Allocate(m_kernel); + block->Initialize(addr, num_pages, 0, state, perms); + + // Insert it to the block list + m_blocks.insert(end, *block); + R_SUCCEED(); + } + + // Shrink the block containing the start va + begin->ShrinkBlock(addr, num_pages); + R_SUCCEED(); + } + + // Start and end address are in same block, we have to split that. + if (!begin->HasProperties(state, perms, tag)) { + const KMemoryInfo info = begin->GetInfo(); + const u32 pages_in_block = (addr - info.GetAddress()) >> Memory::CITRA_PAGE_BITS; + + // Block has same starting address, we can just adjust the size. + if (info.GetAddress() == addr) { + // Block size matches, simply change attributes. + if (info.GetSize() == num_pages << Memory::CITRA_PAGE_BITS) { + begin->Initialize(addr, num_pages, tag, state, perms); + R_SUCCEED(); + } + // Block size is bigger, split, insert new block after and update + begin->ShrinkBlock(addr, num_pages); + KMemoryBlock* block = KMemoryBlock::Allocate(m_kernel); + block->Initialize(addr, num_pages, tag, state, perms); + + // Insert it to the block list. + m_blocks.insert(begin, *block); + R_SUCCEED(); + } + + // Same end address, but different base addr. + if (info.GetLastAddress() == last_addr) { + begin->ShrinkBlock(addr, num_pages); + KMemoryBlock* block = KMemoryBlock::Allocate(m_kernel); + block->Initialize(addr, num_pages, tag, state, perms); + + // Insert it to the block list + m_blocks.insert(++begin, *block); + R_SUCCEED(); + } + + // Block fully contains start and end addresses. Shrink it to [last_addr, block_end] range. + begin->ShrinkBlock(0, num_pages + (addr >> Memory::CITRA_PAGE_BITS)); + + // Create a new block for [addr, last_addr] with the provided attributes. 
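+        // The original block is effectively split in three: the leading piece keeps the
+        // old attributes, the middle piece [addr, last_addr] receives the new ones below,
+        // and the trailing piece remains in the shrunk original block.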
+ KMemoryBlock* middle_block = KMemoryBlock::Allocate(m_kernel); + middle_block->Initialize(addr, num_pages, tag, state, perms); + begin = m_blocks.insert(begin, *middle_block); + + // Create another block for the third range [block_addr, addr]. + KMemoryBlock* start_block = KMemoryBlock::Allocate(m_kernel); + start_block->Initialize(info.GetAddress(), pages_in_block, 0, info.GetState(), + info.GetPerms()); + m_blocks.insert(begin, *start_block); + } + + // We are done :) + R_SUCCEED(); +} + +void KMemoryBlockManager::Finalize() { + auto it = m_blocks.begin(); + while (it != m_blocks.end()) { + KMemoryBlock::Free(m_kernel, std::addressof(*it)); + it = m_blocks.erase(it); + } +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h new file mode 100644 index 000000000..7478d8ea9 --- /dev/null +++ b/src/core/hle/kernel/k_memory_block_manager.h @@ -0,0 +1,41 @@ +// Copyright 2023 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "core/hle/kernel/k_linked_list.h" +#include "core/hle/kernel/k_memory_block.h" + +namespace Kernel { + +class KMemoryBlockManager final { + using BlockList = KLinkedList; + using iterator = BlockList::iterator; + +public: + explicit KMemoryBlockManager(KernelSystem& kernel) : m_kernel{kernel}, m_blocks{kernel} {} + ~KMemoryBlockManager() = default; + + void Initialize(u32 addr_space_start, u32 addr_sce_end); + void Finalize(); + + void CoalesceBlocks(); + s64 GetTotalCommittedMemory(); + ResultCode MutateRange(VAddr addr, u32 num_pages, KMemoryState state, KMemoryPermission perms, + u32 tag); + + KMemoryBlock* GetMemoryBlockContainingAddr(u32 addr); + KMemoryBlock* FindFreeBlockInRegion(VAddr start, u32 num_pages, u32 block_num_pages); + + iterator FindIterator(VAddr address) { + return std::find_if(m_blocks.begin(), m_blocks.end(), + [address](auto& block) { return block.Contains(address); }); + } + +private: + KernelSystem& m_kernel; + BlockList m_blocks; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp new file mode 100644 index 000000000..6fb846f24 --- /dev/null +++ b/src/core/hle/kernel/k_memory_manager.cpp @@ -0,0 +1,73 @@ +// Copyright 2023 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
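+//
+// KMemoryManager owns the three FCRAM page heaps (APPLICATION, SYSTEM and BASE)
+// and the KPageManager that tracks per-page reference counts for the FCRAM region.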
+ +#include "core/hle/kernel/k_memory_manager.h" + +namespace Kernel { + +void KMemoryManager::Initialize(FcramLayout* layout, u32 fcram_addr, u32 fcram_size) { + m_application_heap.Initialize(layout->application_addr, layout->application_size); + m_system_heap.Initialize(layout->system_addr, layout->system_size); + m_base_heap.Initialize(layout->base_addr, layout->base_size); + m_page_manager.Initialize(fcram_addr, fcram_size >> Memory::CITRA_PAGE_BITS); +} + +u32 KMemoryManager::ConvertSharedMemPaLinearWithAppMemType(PAddr addr) { + int v2; // r1 + + const u32 fcram_offset = addr - Memory::FCRAM_PADDR; + if ((unsigned __int8)g_kernelSharedConfigPagePtr->appMemType == 7) { + v2 = 0x30000000; + } else { + v2 = 0x14000000; + } + return fcram_offset + v2; +} + +VAddr KMemoryManager::AllocateContiguous(u32 num_pages, u32 page_alignment, MemoryOperation op) { + // KLightScopedMutex m{m_page_manager.GetMutex()}; + + if (True(op & MemoryOperation::Kernel)) { + m_page_manager.GetKernelMemoryUsage() += num_pages << Memory::CITRA_PAGE_BITS; + } + + switch (op & MemoryOperation::RegionMask) { + case MemoryOperation::RegionApplication: + return m_application_heap.AllocateContiguous(num_pages, page_alignment); + case MemoryOperation::RegionSystem: + return m_system_heap.AllocateContiguous(num_pages, page_alignment); + case MemoryOperation::RegionBase: + return m_base_heap.AllocateContiguous(num_pages, page_alignment); + default: + UNREACHABLE(); + return 0; + } +} + +VAddr KMemoryManager::AllocateContiguousBackwards(u32 num_pages, MemoryOperation op) { + // KLightScopedMutex m{m_page_manager.GetMutex()}; + + if (True(op & MemoryOperation::Kernel)) { + m_page_manager.GetKernelMemoryUsage() += num_pages << Memory::CITRA_PAGE_BITS; + } + + switch (op & MemoryOperation::RegionMask) { + case MemoryOperation::RegionApplication: + return m_application_heap.AllocateBackwards(num_pages); + case MemoryOperation::RegionSystem: + return m_system_heap.AllocateBackwards(num_pages); + case MemoryOperation::RegionBase: + return m_base_heap.AllocateBackwards(num_pages); + default: + UNREACHABLE(); + return 0; + } +} + +void KMemoryManager::FreeContiguousLocked(u32 addr, u32 num_pages) { + // KLightScopedMutex m{m_page_manager.GetMutex()}; + m_page_manager.FreeContiguous(addr, num_pages, MemoryOperation::None); +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h new file mode 100644 index 000000000..517507a2b --- /dev/null +++ b/src/core/hle/kernel/k_memory_manager.h @@ -0,0 +1,60 @@ +// Copyright 2023 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+
+#pragma once
+
+#include "core/hle/kernel/k_page_heap.h"
+#include "core/hle/kernel/k_page_manager.h"
+
+namespace Memory {
+class MemorySystem;
+}
+
+namespace Kernel {
+
+struct FcramLayout {
+    u32 application_addr;
+    u32 application_size;
+    u32 system_addr;
+    u32 system_size;
+    u32 base_addr;
+    u32 base_size;
+};
+
+class KMemoryManager {
+public:
+    explicit KMemoryManager(Memory::MemorySystem& memory)
+        : m_application_heap{memory}, m_system_heap{memory}, m_base_heap{memory},
+          m_page_manager{memory, this} {}
+    ~KMemoryManager() = default;
+
+    void Initialize(FcramLayout* layout, u32 fcram_addr, u32 fcram_size);
+
+    u32 ConvertSharedMemPaLinearWithAppMemType(PAddr addr);
+
+    KPageHeap& GetApplicationHeap() noexcept {
+        return m_application_heap;
+    }
+
+    KPageHeap& GetSystemHeap() noexcept {
+        return m_system_heap;
+    }
+
+    KPageHeap& GetBaseHeap() noexcept {
+        return m_base_heap;
+    }
+
+    VAddr AllocateContiguous(u32 num_pages, u32 page_alignment, MemoryOperation op);
+    VAddr AllocateContiguousBackwards(u32 num_pages, MemoryOperation op);
+    void FreeContiguousLocked(u32 addr, u32 num_pages);
+
+private:
+    KPageHeap m_application_heap;
+    KPageHeap m_system_heap;
+    KPageHeap m_base_heap;
+    KPageManager m_page_manager;
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_group.cpp b/src/core/hle/kernel/k_page_group.cpp
new file mode 100644
index 000000000..7934ff67d
--- /dev/null
+++ b/src/core/hle/kernel/k_page_group.cpp
@@ -0,0 +1,82 @@
+// Copyright 2023 Citra Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/k_page_group.h"
+#include "core/hle/kernel/k_page_manager.h"
+
+namespace Kernel {
+
+KPageGroup::~KPageGroup() {
+    EraseAll();
+}
+
+void KPageGroup::AddRange(u32 addr, u32 num_pages) {
+    // If the provided range is empty there is nothing to do.
+    if (num_pages == 0 || addr + (num_pages << Memory::CITRA_PAGE_BITS) == 0) {
+        return;
+    }
+
+    // KScopedSchedulerLock lk{m_kernel};
+
+    // Attempt to coalesce with the last block if possible.
+    if (!m_blocks.empty()) {
+        KBlockInfo& last = m_blocks.back();
+        if (addr != 0 && addr == last.GetEndAddress()) {
+            last.m_num_pages += num_pages;
+            return;
+        }
+    }
+
+    // Allocate and initialize the new block.
+    KBlockInfo* new_block = KBlockInfo::Allocate(m_kernel);
+    new_block->Initialize(addr, num_pages);
+
+    // Push the block to the list.
+    m_blocks.push_back(*new_block);
+}
+
+void KPageGroup::IncrefPages() {
+    // Iterate over the block list and increment page reference counts.
+    for (const auto& block : m_blocks) {
+        m_page_manager->IncrefPages(block.GetAddress(), block.GetNumPages());
+    }
+}
+
+u32 KPageGroup::GetTotalNumPages() {
+    // Iterate over the block list and sum the number of pages.
+    u32 total_num_pages{};
+    for (const auto& block : m_blocks) {
+        total_num_pages += block.GetNumPages();
+    }
+    return total_num_pages;
+}
+
+void KPageGroup::EraseAll() {
+    // Free all blocks referenced in the linked list.
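+    // Every KBlockInfo came from the slab allocator, so return each one there
+    // before unlinking its node.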
+ auto it = m_blocks.begin(); + while (it != m_blocks.end()) { + KBlockInfo::Free(m_kernel, std::addressof(*it)); + it = m_blocks.erase(it); + } +} + +bool KPageGroup::IsEquivalentTo(const KPageGroup& rhs) const { + auto lit = m_blocks.begin(); + auto rit = rhs.m_blocks.begin(); + auto lend = m_blocks.end(); + auto rend = rhs.m_blocks.end(); + + while (lit != lend && rit != rend) { + if (*lit != *rit) { + return false; + } + + ++lit; + ++rit; + } + + return lit == lend && rit == rend; +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h new file mode 100644 index 000000000..30bc942f3 --- /dev/null +++ b/src/core/hle/kernel/k_page_group.h @@ -0,0 +1,102 @@ +// Copyright 2023 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "core/hle/kernel/k_linked_list.h" +#include "core/hle/kernel/slab_helpers.h" +#include "core/memory.h" + +namespace Kernel { + +struct KBlockInfo final : public KSlabAllocated { +public: + explicit KBlockInfo() = default; + ~KBlockInfo() = default; + + void Initialize(u32 address, u32 num_pages) { + m_base_address = address; + m_num_pages = num_pages; + } + + constexpr u32 GetAddress() const { + return m_base_address; + } + + constexpr u32 GetEndAddress() const { + return this->GetAddress() + this->GetSize(); + } + + constexpr u32 GetSize() const { + return m_num_pages << Memory::CITRA_PAGE_BITS; + } + + constexpr u32 GetNumPages() const { + return m_num_pages; + } + + constexpr bool IsEquivalentTo(const KBlockInfo& rhs) const { + return m_base_address == rhs.m_base_address && m_num_pages == rhs.m_num_pages; + } + + constexpr bool operator==(const KBlockInfo& rhs) const { + return this->IsEquivalentTo(rhs); + } + + constexpr bool operator!=(const KBlockInfo& rhs) const { + return !(*this == rhs); + } + +public: + u32 m_base_address; + u32 m_num_pages; +}; + +class KPageManager; +class KernelSystem; + +class KPageGroup { + using BlockInfoList = KLinkedList; + using iterator = BlockInfoList::const_iterator; + +public: + explicit KPageGroup(KernelSystem& kernel, KPageManager* page_manager) + : m_kernel{kernel}, m_page_manager{page_manager}, m_blocks{kernel} {} + ~KPageGroup(); + + iterator begin() const { + return this->m_blocks.begin(); + } + iterator end() const { + return this->m_blocks.end(); + } + bool empty() const { + return this->m_blocks.empty(); + } + + void AddRange(u32 addr, u32 num_pages); + void IncrefPages(); + + void EraseAll(); + void FreeMemory(); + + u32 GetTotalNumPages(); + + bool IsEquivalentTo(const KPageGroup& rhs) const; + + bool operator==(const KPageGroup& rhs) const { + return this->IsEquivalentTo(rhs); + } + + bool operator!=(const KPageGroup& rhs) const { + return !(*this == rhs); + } + +private: + KernelSystem& m_kernel; + KPageManager* m_page_manager{}; + BlockInfoList m_blocks; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_page_heap.cpp b/src/core/hle/kernel/k_page_heap.cpp new file mode 100644 index 000000000..cb0360a70 --- /dev/null +++ b/src/core/hle/kernel/k_page_heap.cpp @@ -0,0 +1,408 @@ +// Copyright 2023 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "core/hle/kernel/k_page_heap.h" + +namespace Kernel { + +void KPageHeap::Initialize(VAddr region_start, u32 region_size) { + m_region_start = region_start; + m_region_size = region_size; + + // Retrieve the first block in the provided region. 
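+    // The free list is intrusive: the Block header lives inside the managed memory
+    // itself, so a freshly initialized heap consists of one free block spanning the
+    // whole region (e.g. a 64 MiB region starts as a single block of 0x4000 pages).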
+ Block* first_block = m_memory.GetPointer(m_region_start); + ASSERT(first_block); + + // Initialize the block. + first_block->num_pages = this->GetNumPages(); + first_block->current = first_block; + + // Insert the block to our block list. + m_blocks.push_front(*first_block); +} + +u32 KPageHeap::GetTotalNumPages() { + // Iterate over the blocks. + u32 total_num_pages{}; + for (const auto& block : m_blocks) { + total_num_pages = block.num_pages; + } + return total_num_pages; +} + +void KPageHeap::FreeBlock(u32 addr, u32 num_pages) { + // Return if there are no pages to free. + if (num_pages == 0) { + return; + } + + // Return if unable to insert block at the beginning. + auto start_block = std::addressof(m_blocks.front()); + if (this->TryInsert(addr, num_pages, nullptr, start_block)) { + return; + } + + // Iterate over the blocks. + for (auto it = m_blocks.begin(); it != m_blocks.end();) { + // Attempt to insert. + Block* block = std::addressof(*it++); + Block* next_block = std::addressof(*it++); + if (this->TryInsert(addr, num_pages, block, next_block)) { + break; + } + } +} + +void* KPageHeap::AllocateBackwards(u32 size) { + // Ensure allocation is possible. + if (size == 0) [[unlikely]] { + return nullptr; + } + + // Iterate over block list backwards. + u32 remaining = size; + for (auto it = m_blocks.rbegin(); it != m_blocks.rend(); it++) { + // If block does not cover remaining pages continue. + auto block = std::addressof(*it); + const u32 num_pages = block->num_pages; + if (remaining > num_pages) { + remaining -= num_pages; + continue; + } + + // Split last block at our boundary. + const u32 new_block_pages = num_pages - remaining; + auto new_block = this->SplitBlock(block, new_block_pages); + ASSERT(new_block && new_block->num_pages == new_block_pages); + + // new_block.prev = 0; + this->SetLastBlock(block); + + // Return final block which points to our allocated memory. + return new_block; + } + + return nullptr; +} + +void* KPageHeap::AllocateContiguous(u32 size, u32 page_alignment) { + KPageHeapBlock* next; // r6 + KPageHeapBlock* v13; // r1 + KPageHeapBlock* prev; // [sp+0h] [bp-30h] + KPageHeapBlock* block; // [sp+4h] [bp-2Ch] + + // Ensure allocation is possible. + if (m_blocks.empty() || size == 0) [[unlikely]] { + return nullptr; + } + + for (auto it = m_blocks.begin(); it != m_blocks.end(); it++) { + // Ensure block is valid. 
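+        // (Every block in the free list is validated before the allocation walk below.)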
+ auto block = std::addressof(*it); + this->ValidateBlock(block); + } + + KPageHeapBlock* current_node = m_link.next; + while (current_node) { + u32 misalignment = 0; + KPageHeap::ValidateBlock(current_node); + const u32 num_pages = current_node->num_pages; + // if (current_node->num_pages > this->GetNumPages() || this->GetRegionEnd() < (unsigned + // int)current_node + 4096 * num_pages) { + // UNREACHABLE(); + // } + if (page_alignment > 1) { + const u32 v11 = ((unsigned int)current_node >> 12) % page_alignment; + if (v11) { + misalignment = page_alignment - v11; + } + } + + if (size + misalignment <= num_pages) { + block = current_node; + if (misalignment) { + block = KPageHeap::SplitBlock(current_node, misalignment); + } + + KPageHeap::SplitBlock(block, size); + KPageHeap::ValidateBlock(block); + prev = block->link.prev; + next = block->link.next; + KPageHeap::ValidateBlock(prev); + KPageHeap::ValidateBlock(next); + + if (prev) { + prev->link.next = next; + v13 = prev; + } else { + m_link.next = next; + if (!next) { + m_link.prev = 0; + goto LABEL_28; + } + m_link.next->link.prev = 0; + v13 = m_link.next; + } + KPageHeap::UpdateBlockMac(v13); + if (next) { + next->link.prev = prev; + KPageHeap::UpdateBlockMac(next); + LABEL_29: + if (block->num_pages != size) { + UNREACHABLE(); + } + return block; + } + LABEL_28: + KPageHeap::SetLastBlock(prev); + goto LABEL_29; + } + current_node = current_node->link.next; + } + + for (KPageHeapBlock* j = m_link.next; j; j = j->link.next) { + KPageHeap::ValidateBlock(j); + } + + return 0; +} + +void KPageHeap::SetLastBlock(KPageHeapBlock* block) { + m_link.prev = block; + if (!block) [[unlikely]] { + m_link.next = nullptr; + return; + } + + /*u32 v2 = m_key[0]; + u32 v3 = m_key[1]; + u32 v4 = m_key[2]; + u32 v5 = m_key[3]; + for (int i = 0; i < 2; i++) { + int v7 = 0; + do { + v2 -= *(u32 *)((char *)&block->num_pages + v7) - __ROR4__(v3, 3); + v7 += 4; + v3 -= __ROR4__(v4, (v5 & 0xF) + 3) ^ __ROR4__(v5, (v2 & 0xF) + 13); + v4 -= __ROR4__(v5, v2) * v3; + v5 -= __ROR4__(v2, v3) * v4; + } while ( v7 < 20 ); + } + + if ((v2 ^ v3) != block->mac) { + UNREACHABLE(); + }*/ + + m_link.prev->link.next = nullptr; +} + +KPageHeap::Block* KPageHeap::SplitBlock(Block* block, u32 new_block_size) { + const u32 num_pages = block->num_pages; + ASSERT(block->num_pages <= this->GetNumPages()); + // if (block->num_pages > this->GetNumPages() || this->GetRegionEnd() < (unsigned int)block + + // 4096 * num_pages) { + // UNREACHABLE(); + // } + + if (!new_block_size || num_pages == new_block_size) [[unlikely]] { + return nullptr; + } + + Block* new_block = (Block*)((char*)block + Memory::CITRA_PAGE_SIZE * new_block_size); + Block* next = block->link.next; + const u32 v12 = num_pages - new_block_size; + new_block->nonce = 0; + new_block->num_pages = v12; + new_block->mac = 0; + new_block->link.next = next; + new_block->link.prev = block; + new_block->current = new_block; + + if (new_block->num_pages != v12) { + UNREACHABLE(); + } + + block->link.next = new_block; + block->num_pages = new_block_size; + + if (block->num_pages != new_block_size) { + UNREACHABLE(); + } + + KPageHeapBlock* v13 = new_block->link.next; + KPageHeap::ValidateBlock(v13); + if (v13) { + v13->link.prev = new_block; + KPageHeap::UpdateBlockMac(v13); + } else { + KPageHeap::SetLastBlock(new_block); + } + + return new_block; +} + +bool KPageHeap::TryInsert(u32 freedAddr, u32 freedNumPages, Block* prev, Block* next) { + KPageHeapBlock* v6; // r5 + u32 numPages; // r8 + u32 freedAddrEnd; // r11 + u32 
regionSize; // r0 + u32 prevRightNeighour; // r10 + KPageHeapBlock* nxt; // r9 + u32 regionStart; // r0 + bool v14; // cc + BOOL result; // r0 + bool v16; // zf + u32 v17; // r8 + u32 v18; // r1 + u32 v19; // r9 + KPageHeapBlock* next; // r5 + u32 v21; // r3 + u32 v22; // r0 + u32 v23; // r1 + u32 v24; // r2 + int i; // r11 + int v26; // r12 + int v27; // r10 + bool v28; // zf + u32 v29; // r4 + u32 v30; // r1 + u32 v31; // r8 + KPageHeapBlock* v32; // r4 + u32 v33; // [sp+4h] [bp-3Ch] + + v6 = (KPageHeapBlock*)freedAddr; + numPages = 0; + freedAddrEnd = freedAddr + (freedNumPages << 12); + v33 = 0; + regionSize = this->regionSize; + if (freedNumPages > regionSize >> 12 || regionSize + this->regionStart < freedAddrEnd) + kernelpanic(); + if (prev) { + KPageHeap::ValidateBlock(this, prev); + numPages = prev->numPages; + } + if (next) { + KPageHeap::ValidateBlock(this, next); + v33 = next->numPages; + } + if (prev) { + if ((KPageHeapBlock*)((char*)prev + 4096 * prev->numPages - 1) < prev) + kernelpanic(); + prevRightNeighour = (u32)prev + 4096 * prev->numPages; + } else { + prevRightNeighour = this->regionStart; + } + if (next) + nxt = next; + else + nxt = (KPageHeapBlock*)(this->regionStart + this->regionSize); + regionStart = this->regionStart; + if (regionStart > prevRightNeighour || regionStart + this->regionSize < (unsigned int)nxt) + kernelpanic(); + v14 = prevRightNeighour > (unsigned int)v6; + if (prevRightNeighour <= (unsigned int)v6) + v14 = freedAddrEnd > (unsigned int)nxt; + result = 0; + if (!v14) { + v6->nonce = 0; + v6->link.prev = prev; + v6->mac = 0; + v6->numPages = freedNumPages; + v6->link.next = next; + v6->current = v6; + KPageHeap::UpdateBlockMac(this, v6); + if (v6->numPages != freedNumPages) + kernelpanic(); + if (prev) { + prev->link.next = v6; + KPageHeap::UpdateBlockMac(this, prev); + if (prev->numPages != numPages) + kernelpanic(); + } else { + this->link.next = v6; + if (v6) { + v21 = this->key[2]; + v22 = this->key[0]; + v23 = this->key[1]; + v24 = this->key[3]; + for (i = 0; i < 2; ++i) { + v26 = 0; + do { + v27 = *(u32*)((char*)&v6->numPages + v26); + v26 += 4; + v22 -= v27 - __ROR4__(v23, 3); + v23 -= __ROR4__(v21, (v24 & 0xF) + 3) ^ __ROR4__(v24, (v22 & 0xF) + 13); + v21 -= __ROR4__(v24, v22) * v23; + v24 -= __ROR4__(v22, v23) * v21; + } while (v26 < 20); + } + if ((v22 ^ v23) != v6->mac) + kernelpanic(); + this->link.next->link.prev = 0; + KPageHeap::UpdateBlockMac(this, this->link.next); + } else { + this->link.prev = 0; + } + } + if (next) { + next->link.prev = v6; + KPageHeap::UpdateBlockMac(this, next); + if (next->numPages != v33) + kernelpanic(); + } else { + KPageHeap::SetLastBlock(this, v6); + } + v16 = prev == 0; + if (prev) + v16 = v6 == 0; + if (!v16 && (KPageHeapBlock*)((char*)prev + 4096 * prev->numPages) == v6) { + KPageHeap::ValidateBlock(this, prev); + v17 = prev->numPages; + KPageHeap::ValidateBlock(this, v6); + v18 = prev->numPages; + v19 = v6->numPages; + prev->link.next = v6->link.next; + prev->numPages = v18 + v19; + KPageHeap::UpdateBlockMac(this, prev); + if (prev->numPages != v17 + v19) + kernelpanic(); + next = v6->link.next; + KPageHeap::ValidateBlock(this, next); + if (next) { + next->link.prev = prev; + KPageHeap::UpdateBlockMac(this, next); + } else { + KPageHeap::SetLastBlock(this, prev); + } + v6 = prev; + } + v28 = v6 == 0; + if (v6) + v28 = next == 0; + if (!v28 && (KPageHeapBlock*)((char*)v6 + 4096 * v6->numPages) == next) { + KPageHeap::ValidateBlock(this, v6); + v29 = v6->numPages; + KPageHeap::ValidateBlock(this, next); 
+ v30 = v6->numPages; + v31 = next->numPages; + v6->link.next = next->link.next; + v6->numPages = v30 + v31; + KPageHeap::UpdateBlockMac(this, v6); + if (v6->numPages != v29 + v31) + kernelpanic(); + v32 = next->link.next; + KPageHeap::ValidateBlock(this, v32); + if (v32) { + v32->link.prev = v6; + KPageHeap::UpdateBlockMac(this, v32); + } else { + KPageHeap::SetLastBlock(this, v6); + } + } + return 1; + } + return result; +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_page_heap.h b/src/core/hle/kernel/k_page_heap.h new file mode 100644 index 000000000..2fd7939c8 --- /dev/null +++ b/src/core/hle/kernel/k_page_heap.h @@ -0,0 +1,64 @@ +// Copyright 2023 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "common/intrusive_list.h" +#include "core/memory.h" + +namespace Kernel { + +class KPageHeap final { +public: + explicit KPageHeap(Memory::MemorySystem& memory) : m_memory{memory} {} + ~KPageHeap() = default; + + constexpr u32 GetNumPages() const { + return m_region_size >> Memory::CITRA_PAGE_BITS; + } + + constexpr u32 GetRegionStart() const { + return m_region_start; + } + + constexpr u32 GetRegionEnd() const { + return m_region_start + m_region_size; + } + + constexpr bool Contains(u32 addr) const { + return this->GetRegionStart() <= addr && addr < this->GetRegionEnd(); + } + +public: + void Initialize(VAddr region_start, u32 region_size); + u32 GetTotalNumPages(); + + VAddr AllocateBackwards(u32 size); + VAddr AllocateContiguous(u32 size, u32 page_alignment); + void FreeBlock(u32 addr, u32 num_pages); + +private: + struct Block final : public Common::IntrusiveListBaseNode { + u32 num_pages; + Block* current; + u32 nonce; + u32 mac; + }; + + using BlockList = Common::IntrusiveListBaseTraits::ListType; + using iterator = BlockList::iterator; + + Block* SplitBlock(Block* block, u32 new_block_size); + bool TryInsert(u32 freed_addr, u32 num_freed_pages, Block* prev_block, Block* next_block); + void SetLastBlock(Block* block); + +private: + BlockList m_blocks{}; + Memory::MemorySystem& m_memory; + u32 m_region_start{}; + u32 m_region_size{}; + std::array m_key{}; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_page_manager.cpp b/src/core/hle/kernel/k_page_manager.cpp new file mode 100644 index 000000000..45eabeee2 --- /dev/null +++ b/src/core/hle/kernel/k_page_manager.cpp @@ -0,0 +1,107 @@ +// Copyright 2023 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "core/hle/kernel/k_memory_manager.h" +#include "core/hle/kernel/k_page_manager.h" +#include "core/memory.h" + +namespace Kernel { + +void KPageManager::Initialize(u32 start_addr, u32 num_pages) { + // Initialize page manager address range. + m_start_addr = start_addr; + m_num_pages = num_pages; + + // Compute the number of pages to allocate from the base heap. + const u32 num_ref_counts_pages = ((sizeof(u32) * num_pages - 1) >> Memory::CITRA_PAGE_BITS) + 1; + auto& base_heap = m_memory_manager->GetBaseHeap(); + + // Allocate page refcounting memory. + u32 ref_counts_addr{}; + { + // KLightScopedMutex m{m_mutex}; + m_kernel_memory_usage += num_ref_counts_pages << Memory::CITRA_PAGE_BITS; + ref_counts_addr = base_heap.AllocateContiguous(num_ref_counts_pages, 0); + m_page_ref_counts = m_memory.GetPointer(ref_counts_addr); + ASSERT(m_page_ref_counts); + } + + // Zero-initialize reference counts. 
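+    // One u32 reference count is kept per tracked page, e.g. tracking 0x1000 pages
+    // needs 0x1000 * sizeof(u32) = 0x4000 bytes = 4 pages of refcount storage.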
+    if (num_pages) {
+        std::memset(m_page_ref_counts, 0, num_ref_counts_pages << Memory::CITRA_PAGE_BITS);
+    }
+
+    // Track allocated pages.
+    this->IncrefPages(ref_counts_addr, num_ref_counts_pages);
+}
+
+void KPageManager::IncrefPages(u32 addr, u32 num_pages) {
+    // KLightScopedMutex m{m_mutex};
+
+    // Increment page reference counts.
+    const u32 page_start = (addr - m_start_addr) >> Memory::CITRA_PAGE_BITS;
+    const u32 page_end = num_pages + page_start;
+    for (u32 page = page_start; page < page_end; page++) {
+        m_page_ref_counts[page]++;
+    }
+}
+
+void KPageManager::FreeContiguous(u32 addr, u32 num_pages, MemoryOperation op) {
+    // Nothing to do if the provided range is empty.
+    const u32 page_start = (addr - m_start_addr) >> Memory::CITRA_PAGE_BITS;
+    const u32 page_end = page_start + num_pages;
+    if (page_start >= page_end) [[unlikely]] {
+        return;
+    }
+
+    // Retrieve page heaps from the memory manager.
+    auto& application_heap = m_memory_manager->GetApplicationHeap();
+    auto& base_heap = m_memory_manager->GetBaseHeap();
+    auto& system_heap = m_memory_manager->GetSystemHeap();
+
+    // Free the provided range of pages from the appropriate heap.
+    const auto FreePages = [&](u32 start_page, u32 num_pages) {
+        const u32 current_addr = m_start_addr + (start_page << Memory::CITRA_PAGE_BITS);
+        if (base_heap.Contains(current_addr)) {
+            base_heap.FreeBlock(current_addr, num_pages);
+        } else if (system_heap.Contains(current_addr)) {
+            system_heap.FreeBlock(current_addr, num_pages);
+        } else {
+            application_heap.FreeBlock(current_addr, num_pages);
+        }
+        // Update kernel memory usage if requested.
+        if (True(op & MemoryOperation::Kernel)) {
+            m_kernel_memory_usage -= num_pages << Memory::CITRA_PAGE_BITS;
+        }
+    };
+
+    // Iterate over the range of pages to free.
+    u32 start_free_page = 0;
+    u32 num_pages_to_free = 0;
+    for (u32 page = page_start; page < page_end; page++) {
+        const u32 new_count = --m_page_ref_counts[page];
+        if (new_count) {
+            // Nothing to free, continue to next page.
+            if (num_pages_to_free <= 0) {
+                continue;
+            }
+            // Free accumulated pages and reset.
+            FreePages(start_free_page, num_pages_to_free);
+            num_pages_to_free = 0;
+        } else if (num_pages_to_free <= 0) {
+            start_free_page = page;
+            num_pages_to_free = 1;
+        } else {
+            // Advance number of pages to free.
+            num_pages_to_free++;
+        }
+    }
+
+    // Free any remaining pages.
+    if (num_pages_to_free > 0) {
+        FreePages(start_free_page, num_pages_to_free);
+    }
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_manager.h b/src/core/hle/kernel/k_page_manager.h
new file mode 100644
index 000000000..21854bf4c
--- /dev/null
+++ b/src/core/hle/kernel/k_page_manager.h
@@ -0,0 +1,63 @@
+// Copyright 2023 Citra Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+ +#pragma once + +#include + +#include "common/common_funcs.h" +#include "common/common_types.h" + +namespace Memory { +class MemorySystem; +} + +namespace Kernel { + +enum class MemoryOperation : u32 { + None = 0x0, + RegionApplication = 0x100, + RegionSystem = 0x200, + RegionBase = 0x300, + Kernel = 0x80000000, + RegionBaseKernel = Kernel | RegionBase, + Free = 0x1, + Reserve = 0x2, + Alloc = 0x3, + Map = 0x4, + Unmap = 0x5, + Prot = 0x6, + OpMask = 0xFF, + RegionMask = 0xF00, + LinearFlag = 0x10000, +}; +DECLARE_ENUM_FLAG_OPERATORS(MemoryOperation) + +class KMemoryManager; + +class KPageManager { +public: + explicit KPageManager(Memory::MemorySystem& memory, KMemoryManager* memory_manager) + : m_memory{memory}, m_memory_manager{memory_manager} {} + ~KPageManager() = default; + + std::atomic& GetKernelMemoryUsage() noexcept { + return m_kernel_memory_usage; + } + + void Initialize(u32 start_addr, u32 num_pages); + void IncrefPages(u32 addr, u32 num_pages); + void FreeContiguous(u32 data, u32 num_pages, MemoryOperation op); + +private: + Memory::MemorySystem& m_memory; + KMemoryManager* m_memory_manager{}; + u32 m_start_addr{}; + u32 m_num_pages{}; + u32* m_page_ref_counts{}; + std::atomic m_kernel_memory_usage{}; + // KLightMutex m_mutex; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp new file mode 100644 index 000000000..4175bb14c --- /dev/null +++ b/src/core/hle/kernel/k_page_table.cpp @@ -0,0 +1,1511 @@ +// Copyright 2023 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/page_table.h" +#include "core/hle/kernel/errors.h" +#include "core/hle/kernel/k_page_group.h" +#include "core/hle/kernel/k_page_manager.h" +#include "core/hle/kernel/k_page_table.h" +#include "core/hle/result.h" + +namespace Kernel { + +using TraversalEntry = Common::PageTable::TraversalEntry; +using TraversalContext = Common::PageTable::TraversalContext; + +ResultCode KPageTable::CheckAndUpdateAddrRangeMaskedStateAndPerms( + u32 addr, u32 num_pages, KMemoryState state_mask, KMemoryState expected_state, + KMemoryPermission min_perms, KMemoryState new_state, KMemoryPermission new_perms) { + // Ensure the provided region has the expected state and permissions. + R_TRY(this->CheckAddrRangeMaskedStateAndPerms(addr, num_pages << Memory::CITRA_PAGE_BITS, + state_mask, expected_state, min_perms)); + + // Change the memory state. + R_RETURN(this->Operate(addr, num_pages, 0, new_state, new_perms, + KMemoryUpdateFlags::StateAndPerms, MemoryOperation::RegionBase)); +} + +ResultCode KPageTable::CheckAddressRangeSizeAndState(u32 addr, u32 size, KMemoryState state) { + // Verify that we can query the address. + KMemoryInfo info; + u32 page_info; + R_TRY(this->QueryInfo(std::addressof(info), std::addressof(page_info), addr)); + + // Validate the states match expectation. + R_UNLESS(info.GetState() == state, ERR_INVALID_ADDRESS_STATE); + R_UNLESS(info.GetEndAddress() >= addr + size, ERR_INVALID_ADDRESS_STATE); + R_SUCCEED(); +} + +ResultCode KPageTable::CheckAddressRangeSizeAndStateFlags(u32 addr, u32 size, + KMemoryState state_mask, + KMemoryState state) { + // Verify that we can query the address. + KMemoryInfo info; + std::array page_info; + R_TRY(this->QueryInfo(std::addressof(info), page_info.data(), addr)); + + // Validate the states match expectation. 
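+    // Only the bits selected by state_mask need to match (e.g. KMemoryState::KernelMask
+    // compares just the low state byte), and the block must cover [addr, addr + size).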
+ R_UNLESS((info.GetState() & state_mask) == state, ERR_INVALID_ADDRESS_STATE); + R_UNLESS(info.GetEndAddress() >= addr + size, ERR_INVALID_ADDRESS_STATE); + R_SUCCEED(); +} + +ResultCode KPageTable::CheckMemoryBlockAttributes(u32 addr, u32 size, KMemoryState state, + KMemoryPermission perms) { + // Verify that we can query the address. + KMemoryInfo info; + std::array page_info; + R_TRY(this->QueryInfo(std::addressof(info), page_info.data(), addr)); + + // Validate the states match expectation. + const VAddr end_addr = addr + size; + R_UNLESS(info.GetState() == state, ERR_INVALID_ADDRESS_STATE); + R_UNLESS((perms & ~info.GetPerms()) == KMemoryPermission::None, ERR_INVALID_ADDRESS_STATE); + R_UNLESS(!size || addr <= end_addr - 1, ERR_INVALID_ADDRESS_STATE); + R_UNLESS(end_addr <= info.GetEndAddress(), ERR_INVALID_ADDRESS_STATE); + R_SUCCEED(); +} + +ResultCode KPageTable::CheckAddrRangeMaskedStateAndPerms(u32 addr, u32 size, + KMemoryState state_mask, + KMemoryState state, + KMemoryPermission minPerms) { + // Validate provided address region. + R_UNLESS(!size || addr <= addr + size - 1, ERR_INVALID_ADDRESS_STATE); + + while (true) { + // Query the page table. + KMemoryInfo info; + std::array page_info; + R_TRY(this->QueryInfo(std::addressof(info), page_info.data(), addr)); + + // Validate the states match expectation. + R_UNLESS((info.GetState() & state_mask) == state, ERR_INVALID_ADDRESS_STATE); + R_UNLESS((minPerms & ~info.GetPerms()) == KMemoryPermission::None, + ERR_INVALID_ADDRESS_STATE); + + // If we reached the end, we are done. + if (info.GetEndAddress() >= addr + size) { + R_SUCCEED(); + } + + // Move to next block. + addr = info.GetEndAddress(); + size -= info.GetEndAddress() - addr; + } + + UNREACHABLE(); +} + +ResultCode KPageTable::MapL1Entries(u32 va, u32 pa, u32 numPages, u32* attribsPtr, bool isLarge) { + u32 v6; // r11 + unsigned int v7; // r10 + u32 v8; // r12 + u32 v9; // r4 + u32** p_L1Table; // r5 + u32 v11; // r6 + unsigned int v12; // r0 + int v13; // lr + u32 v14; // r8 + int v15; // lr + u32 v16; // r9 + u32 v19; // [sp+8h] [bp-40h] + unsigned int v20; // [sp+Ch] [bp-3Ch] + + v6 = 0; + if (isLarge) + v7 = 0x1000000; + else + v7 = 0x100000; + v8 = *attribsPtr & 0xC; + if (isLarge) + v8 |= 0x40002u; + if (!isLarge) + v8 |= 2u; + v19 = v8 | ((*attribsPtr & 0xFF0) << 6) | ((*attribsPtr & 1) << 4); + if (numPages) { + v20 = v7 >> 20; + do { + v9 = va >> 20; + v11 = pa | v19; + v12 = (v7 & 0x100000) != 0; + v13 = 0; + if (v12 == 1) { + v13 = 1; + m_l1_table[v9] = v11; + } + for (; v12 < v20; m_l1_table[v16] = v11) { + v14 = v9 + v13; + v15 = v13 + 1; + v16 = v9 + v15; + m_l1_table[v14] = v11; + v12 += 2; + v13 = v15 + 1; + } + v6 += v7 >> 12; + pa += v7; + va += v7; + } while (v6 < numPages); + } + KPageTable::CleanDataCacheRange((u32)&m_l1_table[va >> 20], 4 * (numPages >> 8)); + return RESULT_SUCCESS; +} + +ResultCode KPageTable::MapContiguousPhysicalAddressRange(u32 va, u32 pa, u32 numPages, + u32* mmuAttribs) { + u32 v5; // r5 + u32 v6; // r7 + u32 v9; // r4 + ResultCode result; // r0 + unsigned int v12; // r10 + unsigned int v13; // r2 + unsigned int v14; // r3 + ResultCode v15; // r0 + int v16; // r10 + ResultCode v17; // r0 + unsigned int v18; // r2 + bool v19; // zf + u32* attribsPtr; // [sp+0h] [bp-38h] + + v5 = va; + v6 = pa; + v9 = numPages; + if (m_use_small_pages || numPages < 0x10) { + result = KPageTable::MapL2Entries(va, pa, numPages, mmuAttribs, 0); + if (result.IsError()) { + return result; + } + } else { + u32 i{}; + for (i = 0x10000; i <= 0x1000000; i 
<<= 4) { + if ((v5 & (i - 1)) != (v6 & (i - 1))) { + break; + } + v12 = ((i - 1) & (i - v5)) >> 12; + if (v12 + (i >> 12) > v9) { + break; + } + if (v12) { + v13 = i >> 4; + v14 = ((i - 1) & (i - v5)) >> 12; + if (i >> 4 == 4096) { + v15 = KPageTable::MapL2Entries(v5, v6, v14, mmuAttribs, 0); + } else { + switch (v13) { + case 0x10000u: + v15 = KPageTable::MapL2Entries(v5, v6, v14, mmuAttribs, 1); + break; + case 0x100000u: + v15 = KPageTable::MapL1Entries(v5, v6, v14, mmuAttribs, 0); + break; + case 0x1000000u: + v15 = KPageTable::MapL1Entries(v5, v6, v14, mmuAttribs, 1); + break; + default: + v15.raw = -656406531; + break; + } + } + if (v15.IsError()) { + LABEL_28: + if (numPages != v9) { + KPageTable::UnmapEntries(va, numPages - v9, 0); + } + return v15; + } + v9 -= v12; + v5 += v12 << 12; + v6 += v12 << 12; + } + } + while (true) { + i >>= 4; + if (!v9 || i < 0x1000) + break; + v16 = v9 & ~((i >> 12) - 1); + if (v16) { + switch (i) { + case 0x1000u: + v17 = KPageTable::MapL2Entries(v5, v6, v16, mmuAttribs, 0); + break; + case 0x10000u: + v17 = KPageTable::MapL2Entries(v5, v6, v16, mmuAttribs, 1); + break; + case 0x100000u: + v17 = KPageTable::MapL1Entries(v5, v6, v16, mmuAttribs, 0); + break; + case 0x1000000u: + v17 = KPageTable::MapL1Entries(v5, v6, v9 & 0xFFFFF000, mmuAttribs, 1); + break; + default: + v17.raw = -656406531; + break; + } + if (v17.IsError()) { + goto LABEL_28; + } + v9 -= v16; + v5 += v16 << 12; + v6 += v16 << 12; + } + } + } + KPageTable::MergeContiguousEntries(va); + if (numPages > 1) { + KPageTable::MergeContiguousEntries(va + (numPages << 12) - 4096); + } + if (pa - 0x20000000 < 0x10000000) { + // KLightScopedMutex m{m_page_manager.GetMutex()}; + m_page_manager->IncrefPages(pa - 0x40000000, numPages); + } + return RESULT_SUCCESS; +} + +ResultCode KPageTable::MergeContiguousEntries(u32 va) { + int L1EntryType; // r0 + u32 v7; // r5 + unsigned int v8; // r0 + int v9; // r1 + bool v10; // zf + unsigned int v11; // r4 + int v12; // r1 + int v13; // r8 + int v14; // r2 + u32 v15; // r0 + int v16; // r1 + unsigned int v17; // r9 + int v18; // r1 + __int16 v19; // r0 + unsigned int v20; // r2 + bool v21; // zf + unsigned int v22; // r3 + u32 v23; // r0 + unsigned int v24; // r2 + bool v25; // zf + unsigned int v26; // r3 + KLevel2TranslationTable* rootNode; // r2 + u32 v28; // r2 + unsigned int v29; // r12 + int v30; // r0 + __int16 v31; // r0 + unsigned int v32; // r3 + u32 v33; // r0 + int v34; // r2 + unsigned int i; // r1 + u32 v36; // r5 + int v37; // r2 + u32 v38; // r6 + int v40; // [sp+0h] [bp-28h] + + ResultCode v4{0xE0A01BF5}; + if (m_use_small_pages) { + return ResultCode{0xE0A01BF5}; + } + + u32 L1Entry = m_l1_table[va >> 20]; + L1EntryType = L1Entry & 0x40003; + if ((L1Entry & 0x40003) != 1) { + if (L1EntryType == 2) { + LABEL_48: + v28 = va & 0xFF000000; + v29 = L1Entry & 0xFF000000; + v30 = 0; + while (((v29 + (v30 << 20)) | (L1Entry & 0xFFFFF)) == + m_l1_table[(v28 + (v30 << 20)) >> 20]) { + if ((unsigned int)++v30 >= 0x10) { + v31 = (L1Entry & 0xC) | ((u16)(L1Entry & 0x7000) >> 6) | + ((L1Entry & 0x38C00) >> 6) | ((unsigned __int8)(L1Entry & 0x10) >> 4); + v32 = + v29 | (v31 & 0xC) | 0x40002 | + (((((u16)(L1Entry & 0x7000) >> 6) & 0xFF0) | + (((L1Entry & 0x38C00) >> 6) & 0xFF0) | ((u8)(L1Entry & 0x10) >> 4) & 0xF0) + << 6) | + (16 * (v31 & 1)); + v33 = v28 >> 20; + v34 = 0; + for (i = 0; i < 0x10; i += 2) { + v36 = v33 + v34; + v37 = v34 + 1; + v38 = v33 + v37; + m_l1_table[v36] = v32; + v34 = v37 + 1; + m_l1_table[v38] = v32; + } + 
KPageTable::CleanDataCacheRange((u32)&m_l1_table[v33], 0x40u); + return 0; + } + } + return v4; + } + if (L1EntryType != 262145) + return v4; + } + v7 = (L1Entry >> 10 << 10) + 0xC0000000; + v8 = *(u32*)(v7 + ((va & 0xFF000) >> 10)); + v9 = v8 & 3; + if (v9 == 1) { + LABEL_16: + va = va >> 20 << 20; + v17 = v8 >> 20 << 20; + v18 = 0; + while (((v17 + (v18 << 16)) | (unsigned __int16)v8) == + *(_DWORD*)(v7 + (((va + (v18 << 16)) & 0xFF000) >> 10))) { + if ((unsigned int)++v18 >= 0x10) { + v19 = v8 & 0xE3C | ((unsigned __int16)(v8 & 0x7000) >> 6) | + ((unsigned __int16)(v8 & 0x8000) >> 15); + v40 = v19 & 0xC | 2 | ((v19 & 0xFF0) << 6) | (16 * (v19 & 1)); + this->L1Table[va >> 20] = v17 | v40; + KPageTable::CleanDataCacheRange((u32) & this->L1Table[va >> 20], 4u); + KPageTable::InvalidateAllTlbEntries(this); + if (v7 + 0x20000000 < 0x10000000) { + while (1) { + // KScopedLightMutex m{g_level2TtblAllocator.mutex}; + v23 = (v7 - g_level2TtblAllocator.baseRegionStart) >> 10; + if (!--g_level2TtblAllocator.refcounts[v23]) + break; + } + + // KScopedLightMutex m{g_level2TtblAllocator.mutex}; + + if (v7) { + *(u32*)v7 = 0; + *(u32*)(v7 + 4) = 0; + } + rootNode = g_level2TtblAllocator.rootNode; + if (g_level2TtblAllocator.rootNode) { + *(_DWORD*)(v7 + 4) = g_level2TtblAllocator.rootNode; + rootNode->next->prev = (KLevel2TranslationTable*)v7; + *(_DWORD*)v7 = rootNode->next; + rootNode->next = (KLevel2TranslationTable*)v7; + } else { + *(_DWORD*)(v7 + 4) = v7; + *(_DWORD*)v7 = v7; + } + g_level2TtblAllocator.rootNode = (KLevel2TranslationTable*)v7; + ++g_level2TtblAllocator.numAvailable; + } + v4 = 0; + L1Entry = v17 | v40; + goto LABEL_48; + } + } + } else { + v10 = v9 == 2; + if (v9 != 2) + v10 = v9 == 3; + if (v10) { + va = HIWORD(va) << 16; + v11 = HIWORD(v8) << 16; + v12 = 0; + while (((v11 + (v12 << 12)) | v8 & 0xFFF) == + *(_DWORD*)(v7 + (((va + (v12 << 12)) & 0xFF000) >> 10))) { + if ((unsigned int)++v12 >= 0x10) { + v13 = v8 & 0xC | v8 & 0xE30 | 1 | ((v8 & 1) << 15) | ((v8 & 0x1C0) << 6); + v14 = v11 | v13; + v15 = v7 + ((va & 0xFF000) >> 10) - 4; + v16 = 8; + do { + *(_DWORD*)(v15 + 4) = v14; + --v16; + *(_DWORD*)(v15 + 8) = v14; + v15 += 8; + } while (v16); + KPageTable::CleanDataCacheRange(v7 + ((va & 0xFF000) >> 10), 0x40u); + v8 = v11 | v13; + v4 = 0; + goto LABEL_16; + } + } + } + } + return 0; +} + +ResultCode KPageTable::MapNewlyAllocatedPhysicalAddressRange(KPageTable* this, u32 va, u32 pa, + u32 numPages, u32* mmuAttrbis) { + Result result; // r0 + u32 v6; // r2 + unsigned int v7; // r0 + u32 v8; // r8 + int v9; // r11 + unsigned int v10; // r2 + u32 v11; // r0 + int v12; // r10 + unsigned int v13; // r5 + u32 v14; // r1 + unsigned int v15; // r6 + u32 v16; // r9 + unsigned int v17; // r7 + int v18; // r0 + unsigned int v19; // r0 + u32 v20; // r4 + u32 v21; // r2 + int v22; // r0 + unsigned int v23; // r12 + Result v24; // r0 + unsigned int v25; // r1 + u32 v26; // r8 + int v27; // r11 + int v28; // r10 + unsigned int i; // r5 + unsigned int v30; // r6 + u32 v31; // r9 + unsigned int v32; // r7 + unsigned int v33; // r1 + bool v34; // zf + unsigned int v35; // r3 + int v36; // r0 + unsigned int v37; // r0 + u32 v38; // r4 + int v39; // r0 + int v40; // [sp+0h] [bp-68h] + int vaa; // [sp+4h] [bp-64h] + u32 v44; // [sp+Ch] [bp-5Ch] + int v45; // [sp+10h] [bp-58h] + int v46; // [sp+14h] [bp-54h] + u32 v47; // [sp+14h] [bp-54h] + u32 v48; // [sp+18h] [bp-50h] + u32 v49; // [sp+1Ch] [bp-4Ch] + u32 v50; // [sp+20h] [bp-48h] + u32 v51; // [sp+24h] [bp-44h] + u32 v52; // [sp+28h] [bp-40h] 
+ Result v53; // [sp+2Ch] [bp-3Ch] + + if (this->onlyUseSmallPages || + numPages < 0x10) { // most complicated mem managment function so far + result = KPageTable::MapL2Entries(this, va, pa, numPages, mmuAttrbis, 0); + if (result < 0) + return result; + goto LABEL_54; + } + v6 = pa >> 12; + v7 = 4096; + v8 = v6 + numPages; + v52 = v6; + while (((v6 + v7 - 1) & ~(v7 - 1)) >= (v8 & ~(v7 - 1))) + v7 >>= 4; + v50 = v6; + v9 = (v6 + v7 - 1) & ~(v7 - 1); + v10 = v7 << 12; + v11 = va >> 12; + v12 = v9; + v13 = v10 >> 12; + v14 = numPages + (va >> 12); + v51 = v11; + while (((v11 + v13 - 1) & ~(v13 - 1)) >= (v14 & ~(v13 - 1))) + v13 >>= 4; + v48 = v11; + v45 = (v11 + v13 - 1) & ~(v13 - 1); + v15 = v13 & 0xFFFFF; + v49 = v14; + v46 = v45; + while (1) { + LABEL_12: + if (v49 - v45 >= v13) { + v16 = v45 << 12; + v17 = (v49 & ~(v13 - 1)) - v45; + v45 = v49 & ~(v13 - 1); + goto LABEL_14; + } + if (v46 - v48 >= v13) + break; + LABEL_18: + v19 = v13 << 12; + if (v13 << 12 == 4096) { + LABEL_54: + KPageTable::MergeContiguousEntries(this, va); + if (numPages > 1) + KPageTable::MergeContiguousEntries(this, va + (numPages << 12) - 4096); + if (pa - 0x20000000 < 0x10000000) { + v33 = __ldrex((unsigned int*)&g_memoryManager.pgMgr.mutex); + v34 = v33 == 0; + if (v33) + v35 = __strex(v33, (unsigned int*)&g_memoryManager.pgMgr.mutex); + else + v35 = __strex((unsigned int)current.clc.current.thread, + (unsigned int*)&g_memoryManager.pgMgr.mutex); + if (!v33) + v34 = v35 == 0; + if (!v34) + KLightMutex::LockImpl(&g_memoryManager.pgMgr.mutex); + __mcr(15, 0, 0, 7, 10, 5); + KPageManager::IncrefPages(&g_memoryManager.pgMgr, pa + 0xC0000000, numPages); + __mcr(15, 0, 0, 7, 10, 5); + g_memoryManager.pgMgr.mutex.lockingThread = 0; + if (g_memoryManager.pgMgr.mutex.numWaiters > 0) + KLightMutex::UnlockImpl(&g_memoryManager.pgMgr.mutex); + } + return 0; + } + v13 = HIWORD(v19); + v15 = HIWORD(v19); + } + v18 = (v48 + v13 - 1) & ~(v13 - 1); + v17 = v46 - v18; + v46 = v18; + v16 = v18 << 12; +LABEL_14: + if (!v17) + goto LABEL_18; + while (1) { + v20 = v17; + if (v15 > v8 - v9) { + if (v15 > v12 - v50) + goto LABEL_48; + v22 = (v50 + v15 - 1) & ~(v15 - 1); + if (v17 > v12 - v22) + v20 = v12 - v22; + v12 -= v20; + v21 = v12 << 12; + } else { + v21 = v9 << 12; + if (v17 > (v8 & ~(v15 - 1)) - v9) + v20 = (v8 & ~(v15 - 1)) - v9; + v9 += v20; + } + if (!v20) { + LABEL_48: + v13 = v15 << 12 >> 16; + v15 = v13; + goto LABEL_49; + } + v23 = v15 << 12; + if (v15 << 12 == 4096) { + v24 = KPageTable::MapL2Entries(this, v16, v21, v20, mmuAttrbis, 0); + } else if (v23 == 0x10000) { + v24 = KPageTable::MapL2Entries(this, v16, v21, v20, mmuAttrbis, 1); + } else if (v23 == 0x100000) { + v24 = KPageTable::MapL1Entries(this, v16, v21, v20, mmuAttrbis, 0); + } else { + v24 = v23 == 0x1000000 ? 
KPageTable::MapL1Entries(this, v16, v21, v20, mmuAttrbis, 1) + : -656406531; + } + v53 = v24; + if (v24 < 0) + break; + v16 += v20 << 12; + v17 -= v20; + LABEL_49: + if (!v17) + goto LABEL_12; + } + v25 = 4096; + v26 = v52 + numPages; + while (((v52 + v25 - 1) & ~(v25 - 1)) >= (v26 & ~(v25 - 1))) + v25 >>= 4; + v27 = (v52 + v25 - 1) & ~(v25 - 1); + v28 = v27; + for (i = v25 & 0xFFFFF; ((v51 + i - 1) & ~(i - 1)) >= ((v51 + numPages) & ~(i - 1)); i >>= 4) + ; + v40 = (v51 + i - 1) & ~(i - 1); + v30 = i & 0xFFFFF; + v44 = v51 + numPages; + vaa = v40; + v47 = v16; + while (2) { + while (2) { + if (v44 - v40 >= i) { + v31 = v40 << 12; + v32 = (v44 & ~(i - 1)) - v40; + v40 = v44 & ~(i - 1); + goto LABEL_46; + } + if (vaa - v51 >= i) { + v36 = (v51 + i - 1) & ~(i - 1); + v32 = vaa - v36; + vaa = v36; + v31 = v36 << 12; + LABEL_46: + if (v32) { + LABEL_72: + v38 = v32; + if (v30 <= v26 - v27) { + if (v32 > (v26 & ~(v30 - 1)) - v27) + v38 = (v26 & ~(v30 - 1)) - v27; + v27 += v38; + goto LABEL_80; + } + if (v30 > v28 - v52) + goto LABEL_85; + v39 = (v52 + v30 - 1) & ~(v30 - 1); + if (v32 > v28 - v39) + v38 = v28 - v39; + v28 -= v38; + LABEL_80: + if (v38) { + if (v47 == v31) + return v53; + KPageTable::UnmapEntries(this, v31, v38, 0); + v31 += v38 << 12; + v32 -= v38; + } else { + LABEL_85: + i = v30 << 12 >> 16; + v30 = i; + } + if (!v32) + continue; + goto LABEL_72; + } + } + break; + } + v37 = i << 12; + if (i << 12 != 4096) { + i = HIWORD(v37); + v30 = HIWORD(v37); + continue; + } + return v53; + } +} + +ResultCode KPageTable::RemapMemoryInterprocess(KPageTable* dstPgTbl, KPageTable* srcPgTbl, + u32 dstAddr, u32 srcAddr, u32 numPages, + KMemoryState dstMemState, + KMemoryPermission dstMemPerms) { + // Create a page group from the source address space. + KPageGroup group{m_kernel, m_page_manager}; + R_TRY(srcPgTbl->MakePageGroup(group, srcAddr, numPages)); + + // Map to the destination address space. 
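+    // OperateOnGroup replays every block captured from the source table into the
+    // destination table with the requested state and permissions.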
+ R_RETURN(dstPgTbl->OperateOnGroup(dstAddr, std::addressof(group), dstMemState, dstMemPerms, + KMemoryUpdateFlags::None)); +} + +struct KTranslationTableTraversalContext { + u32* L1Entry; + u32* L2Entry; +}; + +struct KTranslationTableIterator { + u32 pa; + u32 size; +}; + +ResultCode KPageTable::ChangePageAttributes(u32 addr, u32 size, u32* mmuAttribs) { + bool v8; // zf + bool v9; // zf + u32 v10; // r1 + u32 v11; // r12 + u32 v12; // r3 + u32 v13; // r0 + u32 v14; // r1 + int v15; // r2 + u32 v16; // r9 + int v17; // r2 + u32 v18; // r10 + u32 v19; // r0 + u32 v20; // r9 + u32 v21; // r8 + u32 v22; // r0 + u32 v23; // r2 + u32 v24; // r1 + unsigned int v25; // r0 + u32 i; // r1 + bool v27; // zf + u32 v28; // r1 + KTranslationTableIterator v30; // [sp+0h] [bp-30h] BYREF + KTranslationTableTraversalContext traversalContext; // [sp+8h] [bp-28h] BYREF + + u32 v6 = addr; + KTranslationTableIterator::CreateFromEntry(&this->L1Table, &v30, &traversalContext, addr); + if ((v30.pa & (v30.size - 1)) != 0) { + KPageTable::SplitContiguousEntries(this, v6, v30.pa & -v30.pa); + KTranslationTableIterator::CreateFromEntry(&this->L1Table, &v30, &traversalContext, v6); + } + while (size) { + if (!v30.pa) + kernelpanic(); + if (v30.size > size << 12) { // split if resizing + KPageTable::SplitContiguousEntries(this, v6, size << 12); + KTranslationTableIterator::CreateFromEntry(&this->L1Table, &v30, &traversalContext, v6); + } + v8 = v30.size == 0x1000; + if (v30.size != 0x1000) + v8 = v30.size == 0x10000; + if (v8) { + v20 = v6; + v21 = (this->L1Table[v6 >> 20] >> 10 << 10) - 0x40000000; + while (1) { + if (v30.size == 0x10000) + v22 = *mmuAttribs & 0xE30 | 1 | *mmuAttribs & 0xC | ((*mmuAttribs & 1) << 15) | + ((*mmuAttribs & 0x1C0) << 6); + else + v22 = *mmuAttribs | 2; + v23 = v30.pa | v22; + v24 = v30.size >> 12; + if (v30.size >> 12) { + v25 = v21 + ((v6 & 0xFF000) >> 10) - 4; + if ((v30.size & 0x1000) != 0) { + *(_DWORD*)(v21 + ((v6 & 0xFF000) >> 10)) = v23; + v25 = v21 + ((v6 & 0xFF000) >> 10); + } + for (i = v24 >> 1; i; v25 += 8) { + *(_DWORD*)(v25 + 4) = v23; + --i; + *(_DWORD*)(v25 + 8) = v23; + } + } + if (KPageTable::MergeContiguousEntries(this, v6) >= 0) { + KTranslationTableIterator::CreateFromEntry(&this->L1Table, &v30, + &traversalContext, v6); + v28 = v30.pa & (v30.size - 1); + v6 += v30.size - v28; + if (size > (v30.size - v28) >> 12) + size -= (v30.size - v28) >> 12; + else + size = 0; + } else { + v6 += v30.size; + size -= v30.size >> 12; + } + v27 = v6 << 12 == 0; + if (v6 << 12) + v27 = size == 0; + if (v27) + break; + KTranslationTableIterator::Advance(&this->L1Table, &v30, &traversalContext); + if (!v30.pa) + kernelpanic(); + if (v30.size > size << 12) { + KPageTable::SplitContiguousEntries(this, v6, size << 12); + KTranslationTableIterator::CreateFromEntry(&this->L1Table, &v30, + &traversalContext, v6); + } + } + KPageTable::CleanDataCacheRange(v21 + ((v20 & 0xFF000) >> 10), 4 * ((v6 - v20) >> 12)); + } else { + v9 = v30.size == 0x100000; + if (v30.size != 0x100000) + v9 = v30.size == 0x1000000; + if (v9) { + v10 = *mmuAttribs & 0xC; + if (v30.size == 0x1000000) + v10 |= 0x40000u; + v11 = v30.pa | v10 | 2 | ((*mmuAttribs & 0xFF0) << 6) | (16 * (*mmuAttribs & 1)); + v12 = v30.size >> 20; + v13 = v6 >> 20; + if (v30.size >> 20) { + v14 = (v30.size & 0x100000) != 0; + v15 = 0; + if (v14 == 1) { + v15 = 1; + this->L1Table[v13] = v11; + } + for (; v14 < v12; this->L1Table[v18] = v11) { + v16 = v13 + v15; + v17 = v15 + 1; + v18 = v13 + v17; + this->L1Table[v16] = v11; + v14 += 2; + v15 
= v17 + 1; + } + } + KPageTable::CleanDataCacheRange((u32) & this->L1Table[v13], 4 * (v30.size >> 20)); + if (KPageTable::MergeContiguousEntries(this, v6) >= 0) { + KTranslationTableIterator::CreateFromEntry(&this->L1Table, &v30, + &traversalContext, v6); + v19 = v30.size - (v30.pa & (v30.size - 1)); + v6 += v19; + if (size > v19 >> 12) + size -= v19 >> 12; + else + size = 0; + } else { + v6 += v30.size; + size -= v30.size >> 12; + } + } + } + KTranslationTableIterator::Advance(&this->L1Table, &v30, &traversalContext); + } + return 0; +} + +ResultCode KPageTable::CheckAndUnmapPageGroup(u32 addr, KPageGroup* pgGroup) { + // Make a new page group for the region. + KPageGroup group{m_kernel, m_page_manager}; + R_TRY(this->MakePageGroup(group, addr, pgGroup->GetTotalNumPages())); + + // Ensure the new group is equivalent to the provided one. + R_UNLESS(pgGroup->IsEquivalentTo(group), ResultCode{0xE0A01835}); + + // Unmap the pages in the group. + R_RETURN(this->OperateOnGroup(addr, pgGroup, KMemoryState::Free, KMemoryPermission::None, + KMemoryUpdateFlags::None)); +} + +ResultCode KPageTable::CreateAlias(u32 src_addr, u32 dst_addr, u32 num_pages, + KMemoryState expected_state_src, + KMemoryPermission expected_min_perms_src, + KMemoryState new_state_src, KMemoryPermission new_perms_src, + KMemoryState new_state_dst, KMemoryPermission new_perms_dst) { + // Check the source memory block attributes match expected values. + R_TRY(this->CheckMemoryBlockAttributes(src_addr, num_pages << Memory::CITRA_PAGE_BITS, + expected_state_src, expected_min_perms_src)); + + // Check the destination memory block attributes match expected values. + R_TRY(this->CheckMemoryBlockAttributes(dst_addr, num_pages << Memory::CITRA_PAGE_BITS, + KMemoryState::Free, KMemoryPermission::None)); + + // Create a page group with the pages of the source range to alias. + KPageGroup group{m_kernel, m_page_manager}; + R_TRY(this->MakePageGroup(group, src_addr, num_pages)); + + // Update the source and destination region attributes. + R_ASSERT(this->OperateOnGroup(src_addr, std::addressof(group), new_state_src, new_perms_src, + KMemoryUpdateFlags::StateAndPerms)); + R_ASSERT(this->OperateOnGroup(dst_addr, std::addressof(group), new_state_dst, new_perms_dst, + KMemoryUpdateFlags::None)); + R_SUCCEED(); +} + +ResultCode KPageTable::DestroyAlias(u32 src_addr, u32 dst_addr, u32 num_pages, + KMemoryState expected_state_src, + KMemoryPermission expected_min_perms_src, + KMemoryState expected_state_dst, + KMemoryPermission expected_min_perms_dst, + KMemoryState new_state_src, KMemoryPermission new_perms_src) { + // Check the source memory block attributes match expected values. + R_TRY(this->CheckMemoryBlockAttributes(src_addr, num_pages << Memory::CITRA_PAGE_BITS, + expected_state_src, expected_min_perms_src)); + + // Check the destination memory block attributes match expected values. + R_TRY(this->CheckMemoryBlockAttributes(dst_addr, num_pages << Memory::CITRA_PAGE_BITS, + expected_state_dst, expected_min_perms_dst)); + + // Create a page group with the pages of the source range. + KPageGroup dst_group{m_kernel, m_page_manager}; + R_TRY(this->MakePageGroup(dst_group, dst_addr, num_pages)); + + // Create a page group with the pages of the destination range. + KPageGroup src_group{m_kernel, m_page_manager}; + R_TRY(this->MakePageGroup(src_group, src_addr, num_pages)); + + // Ensure ranges are equivalent. + R_UNLESS(src_group.IsEquivalentTo(dst_group), ResultCode{0xD8E007F5}); + + // Mark the aliased range as free. 
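+    // The destination view is unmapped first; the source range is then restored to its
+    // new state and permissions below.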
+ R_ASSERT(this->OperateOnGroup(dst_addr, std::addressof(dst_group), KMemoryState::Free, + KMemoryPermission::None, KMemoryUpdateFlags::None)); + + // Update state and permissions of the source range. + R_ASSERT(this->OperateOnGroup(src_addr, std::addressof(src_group), new_state_src, new_perms_src, + KMemoryUpdateFlags::StateAndPerms)); + + R_SUCCEED(); +} + +void KPageTable::Unmap(VAddr addr, u32 num_pages) { + // Create a page group with the pages to unmap. + KPageGroup group{m_kernel, m_page_manager}; + this->UnmapEntries(addr, num_pages, std::addressof(group)); + + // Invalidate TLB(?) + if (this->m_is_kernel && num_pages == 1) { + this->InvalidateTlbEntryByMva(addr); + } else { + this->InvalidateAllTlbEntries(); + } + + // Iterate over the blocks and free them. + for (const auto& block : group) { + // KLightScopedMutex m{m_mutex}; + m_page_manager->FreeContiguous(block.GetAddress(), block.GetNumPages(), + MemoryOperation::None); + } +} + +ResultCode KPageTable::OperateOnGroup(VAddr addr, KPageGroup* group, KMemoryState state, + KMemoryPermission perms, KMemoryUpdateFlags update_flags) { + // Ensure provided page group is not empty. + R_UNLESS(!group->empty(), ResultCode{0x82007FF}); + + // Iterate the blocks and operate on them. + for (auto& block : *group) { + R_TRY(this->Operate(addr, block.GetNumPages(), block.GetAddress() - 0xC0000000, state, + perms, update_flags, MemoryOperation::RegionBase)); + addr += block.GetSize(); + } + + R_SUCCEED(); +} + +ResultCode KPageTable::OperateOnAnyFreeBlockInRegionWithGuardPage( + u32* outAddr, u32 blockNumPages, u32 regionStart, u32 regionNumPages, u32 pa, + KMemoryState state, KMemoryPermission perms, KMemoryUpdateFlags updateFlags, + MemoryOperation region) { + u32 block_addr = regionStart; + const u32 block_size = blockNumPages << Memory::CITRA_PAGE_BITS; + while (true) { + // KLightScopedMutex m{m_mutex}; + KMemoryBlock* free_block = m_memory_block_manager.FindFreeBlockInRegion( + regionStart, regionNumPages, blockNumPages + 1); + if (!free_block) { + break; + } + const KMemoryInfo info = free_block->GetInfo(); + block_addr = std::max(info.GetAddress(), block_addr); + // m.Unlock(); + if (info.GetLastAddress() > block_addr + block_size + Memory::CITRA_PAGE_SIZE) { + const ResultCode result = + this->Operate(block_addr + Memory::CITRA_PAGE_SIZE, blockNumPages, pa, state, perms, + updateFlags, region); + if (result.IsSuccess()) { + *outAddr = block_addr + Memory::CITRA_PAGE_SIZE; + return result; + } + if (result != 0xD900060C) + kernelpanic(); + blkAddr = regionStart; + } + } + return ERR_OUT_OF_MEMORY; +} + +ResultCode KPageTable::Operate(VAddr va, u32 numPages, PAddr pa, KMemoryState state, + KMemoryPermission perms, KMemoryUpdateFlags updateFlags, + u32 region) { + u32 v10; // r7 + u32 baseAddress; // r10 + KMemoryUpdateFlags v12; // r0 + int v13; // r5 + __int32 v14; // r8 + __int32 v15; // r2 + __int32 v16; // r1 + bool v17; // zf + int v18; // r0 + Result result; // r0 + BOOL v20; // r0 + bool v21; // zf + bool v22; // zf + __int32 v23; // r0 + bool v24; // zf + unsigned int v25; // r1 + bool v26; // zf + unsigned int v27; // r2 + KMemoryState v28; // r5 + u32 endArgVa; // r9 + KMemoryBlock* v30; // r1 + Result v31; // r11 + KPageTable* v32; // r0 + bool v33; // zf + Result v34; // r0 + Result v35; // r0 + KPageTable* v36; // r0 + u32 v37; // r5 + u32 currentAllocVa; // r10 + unsigned int v39; // r2 + bool v40; // zf + unsigned int v41; // r2 + u32 v42; // r0 + KPageHeapBlock* Backwards; // r5 + u32 remainingAllocBase; // r9 + u32 v45; 
// r11 + u32 v46; // r7 + u32 v47; // r0 + u32 v48; // r6 + u32 v49; // r3 + u32 v50; // r12 + u32 v51; // r12 + Result v52; // r0 + KMemoryBlock* MemoryBlockContainingAddr; // r1 + KMemoryPermission size[2]; // [sp+0h] [bp-58h] BYREF + u32 mmuAttrbis[2]; // [sp+8h] [bp-50h] BYREF + KMemoryInfo info; // [sp+10h] [bp-48h] BYREF + + v10 = (char)updateFlags; + baseAddress = g_permsToMmuAttribLut[perms & 3 | ((unsigned __int8)(perms & 0x18) >> 1)]; + mmuAttrbis[0] = 0; + v12 = updateFlags << 16; + if (this->isKernel) + v13 = 0; + else + v13 = 0x800; + v14 = v12 >> 24; + if (v12 >> 24 && !(_BYTE)updateFlags) { + R_TRY(this->QueryInfo(std::addressof(info), (u32*)size, va)); + R_UNLESS(True(info.m_state & KMemoryState::FlagDeallocatable), ResultCode{0xD8E007F5}); + state = info.m_state; + v20 = (perms & 0x24) == 0; + goto LABEL_21; + } + v15 = state - KMEMSTATE_LINEAR; + if (state == KMEMSTATE_LINEAR) + goto LABEL_50; + if (state > KMEMSTATE_LINEAR) // Anyway it's just a switch(memstate) + { + if (state == KMEMSTATE_PRIVATE_DATA) + goto LABEL_50; + if (v15 > 0x81FE) { + v24 = state == KMEMSTATE_PRIVATE_CODE; + if (state != KMEMSTATE_PRIVATE_CODE) + v24 = v15 == 0x8303; + if (!v24) + return 0xF8C007F4; + } else { + v23 = state - 0x3A08; + v22 = v15 == 0x101; + if (state != KMEMSTATE_ALIASED) { + v23 = state - 0x5806; + v22 = v15 == 0x1EFF; + } + if (v22) + goto LABEL_50; + if (v23 != 17412) + return 0xF8C007F4; + } + v20 = (((unsigned int)perms >> 2) & 1) == 0; + LABEL_21: + v18 = v20 | baseAddress | v13 | 0x584; // code + goto LABEL_39; + } + v16 = state - 0x1003; + if (state == 0x1003) + goto LABEL_50; + if (state > 0x1003) { + v21 = v16 == 2566; + if (state != 0x1A09) + v21 = v16 == 0x2808; + if (!v21) + return 0xF8C007F4; + LABEL_50: + v18 = baseAddress | v13 | 0x585; // data + goto LABEL_39; + } + v17 = state == MEMSTATE_FREE; + if (state) + v17 = state == MEMSTATE_RESERVED; + if (v17) { + perms = KMEMPERM_NONE; + goto LABEL_40; + } + if (state != KMEMSTATE_IO) + return 0xF8C007F4; + v18 = baseAddress | v13 | 0x405; // io +LABEL_39: + mmuAttrbis[0] = v18; +LABEL_40: + // KLightScopedMutex m{m_mutex}; + v28 = 0xE0E01BF5; + endArgVa = va + (numPages << 12); + if (!mmuAttrbis[0]) { + if (state == MEMSTATE_FREE) { + KPageTable::Unmap(this, va, numPages); + goto LABEL_146; + } + MemoryBlockContainingAddr = + KMemoryBlockManager::GetMemoryBlockContainingAddr(&this->memoryBlockMgr, va); + if (MemoryBlockContainingAddr) { + KMemoryBlock::GetInfo(&info, MemoryBlockContainingAddr); + v14 = info.state; + v28 = MEMSTATE_FREE; + region = info.baseAddress; + v10 = info.size; + } else { + KMemoryBlockManager::DumpInfoMaybe_stubbed(&this->memoryBlockMgr); + } + if (v28 >= MEMSTATE_FREE) { + v28 = 0xE0A01BF5; + if (v14) { + return v28; + } + if (region + v10 < endArgVa) { + return v28; + } + goto LABEL_146; + } + return v28; + } + if (!(_BYTE)updateFlags) { + v30 = KMemoryBlockManager::GetMemoryBlockContainingAddr(&this->memoryBlockMgr, va); + if (v30) { + KMemoryBlock::GetInfo(&info, v30); + baseAddress = info.baseAddress; + v28 = info.state; + v31 = 0; + size[0] = (KMemoryPermission)info.size; + } else { + KMemoryBlockManager::DumpInfoMaybe_stubbed(&this->memoryBlockMgr); + v31 = 0xE0E01BF5; + } + if (v31 >= 0) { + v31 = 0xD900060C; + v33 = v28 == MEMSTATE_FREE; + if (v28) + v33 = v28 == MEMSTATE_RESERVED; + if (!v33) { + if (!v14) { + v32 = this; + __mcr(15, 0, 0, 7, 10, 5); + this->mutex.lockingThread = 0; + if (this->mutex.numWaiters <= 0) + return v31; + goto LABEL_56; + } + state = v28; + } + if 
(size[0] + baseAddress - 1 >= endArgVa - 1) + goto LABEL_68; + } + return v31; + } +LABEL_68: + if (v14) { + v34 = KPageTable::ChangePageAttributes(this, va, numPages, mmuAttrbis); + if (v34 < 0) { + v28 = v34; + return v28; + } + goto LABEL_131; + } + if ((_BYTE)updateFlags) + goto LABEL_131; + if (!pa) { + if (state == KMEMSTATE_LINEAR) { // linear alloc can't be done by this function directly + v28 = 0xD90007EE; + v36 = this; + return v28; + } + v37 = numPages; + currentAllocVa = va; + + // KLightScopedMutex m{m_page_manager.GetMutex()}; + if ((region & 0x80000000) != 0) { + m_page_manager->GetKernelMemoryUsage() += (v37 << 12); + } + v42 = region & 0xF00; + switch (v42) { + case MEMOP_REGION_APPLICATION: + Backwards = KPageHeap::AllocateBackwards(&g_memoryManager.applicationHeap, v37); + __mcr(15, 0, 0, 7, 10, 5); + g_memoryManager.pgMgr.mutex.lockingThread = 0; + if (g_memoryManager.pgMgr.mutex.numWaiters <= 0) + goto LABEL_99; + break; + case MEMOP_REGION_SYSTEM: + Backwards = KPageHeap::AllocateBackwards(&g_memoryManager.systemHeap, v37); + __mcr(15, 0, 0, 7, 10, 5); + g_memoryManager.pgMgr.mutex.lockingThread = 0; + if (g_memoryManager.pgMgr.mutex.numWaiters <= 0) + goto LABEL_99; + break; + case MEMOP_REGION_BASE: + Backwards = KPageHeap::AllocateBackwards(&g_memoryManager.baseHeap, v37); + __mcr(15, 0, 0, 7, 10, 5); + g_memoryManager.pgMgr.mutex.lockingThread = 0; + if (g_memoryManager.pgMgr.mutex.numWaiters <= 0) { + LABEL_99: + if (Backwards) { + remainingAllocBase = numPages; + v45 = region & 0xF00; + while (1) { + v46 = Backwards->numPages; + v47 = (u32)Backwards; + v48 = (u32)Backwards; + Backwards = Backwards->link.next; + switch (v45) { + case MEMOP_REGION_APPLICATION: + v49 = v47 + (v46 << 12); + if (v46 >= g_memoryManager.applicationHeap.regionSize >> 12 || + v47 < g_memoryManager.applicationHeap.regionStart || v49 < v47 || + v49 > g_memoryManager.applicationHeap.regionStart + + g_memoryManager.applicationHeap.regionSize) { + LABEL_122: + kernelpanic(); + } + break; + case MEMOP_REGION_SYSTEM: + v50 = v47 + (v46 << 12); + if (v46 >= g_memoryManager.systemHeap.regionSize >> 12 || + v47 < g_memoryManager.systemHeap.regionStart || v50 < v47 || + v50 > g_memoryManager.systemHeap.regionStart + + g_memoryManager.systemHeap.regionSize) { + goto LABEL_122; + } + break; + case MEMOP_REGION_BASE: + v51 = v47 + (v46 << 12); + if (v46 >= g_memoryManager.baseHeap.regionSize >> 12 || + v47 < g_memoryManager.baseHeap.regionStart || v51 < v47 || + v51 > g_memoryManager.baseHeap.regionStart + + g_memoryManager.baseHeap.regionSize) { + goto LABEL_122; + } + break; + default: + goto LABEL_122; + } + size[0] = v46 << 12; + memset((void*)v48, 0, v46 << 12); + if (v45 != MEMOP_REGION_BASE) + KPageTable::CleanInvalidateDataCacheRange( + v48, + size[0]); // don't clean-invalidate cache for base memregion... why? + v52 = KPageTable::MapNewlyAllocatedPhysicalAddressRange( + this, currentAllocVa, + v48 - 0xC0000000, // pardon me? 
+ v46, mmuAttrbis); + if (v52 < 0) + break; + currentAllocVa += v46 << 12; + remainingAllocBase -= v46; + if (!Backwards) { + if (remainingAllocBase) + kernelpanic(); + goto LABEL_131; + } + } + v28 = v52; + v36 = this; + __mcr(15, 0, 0, 7, 10, 5); + this->mutex.lockingThread = 0; + if (this->mutex.numWaiters <= 0) + return v28; + } else { + v28 = 0xD86007F3; + } + return v28; + } + break; + default: + kernelpanic(); + } + KLightMutex::UnlockImpl(&g_memoryManager.pgMgr.mutex); + goto LABEL_99; + } // mapping pa + v35 = KPageTable::MapContiguousPhysicalAddressRange(this, va, pa, numPages, mmuAttrbis); + if (v35 < 0) { + v28 = v35; + v36 = this; + return v28; + } +LABEL_131: + KPageTable::InvalidateAllTlbEntries(this); +LABEL_146: + KMemoryBlockManager::MutateRange(&this->memoryBlockMgr, va, numPages, state, perms, 0); + return 0; +} + +ResultCode KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, u32 num_pages) { + // KLightScopedMutex m{m_mutex}; + + // Begin traversal. + TraversalContext context; + TraversalEntry next_entry; + bool traverse_valid = + m_impl->BeginTraversal(std::addressof(next_entry), std::addressof(context), addr); + R_UNLESS(traverse_valid, ResultCode{0xE0A01BF5}); + + // Prepare tracking variables. + const size_t size = num_pages * Memory::CITRA_PAGE_SIZE; + PAddr cur_addr = next_entry.phys_addr; + size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1)); + size_t tot_size = cur_size; + + const auto IsFcramPhysicalAddress = [](PAddr addr) { + return addr >= Memory::FCRAM_PADDR && addr < Memory::FCRAM_N3DS_PADDR_END; + }; + + // Iterate, adding to group as we go. + while (tot_size < size) { + R_UNLESS(m_impl->ContinueTraversal(std::addressof(next_entry), std::addressof(context)), + ResultCode{0xE0A01BF5}); + + if (next_entry.phys_addr != (cur_addr + cur_size)) { + const size_t cur_pages = cur_size / Memory::CITRA_PAGE_SIZE; + + R_UNLESS(IsFcramPhysicalAddress(cur_addr), + ResultCode{0xE0A01BF5}); // "oops not in fcram" + pg.AddRange(cur_addr + 0xC0000000, cur_pages); + + cur_addr = next_entry.phys_addr; + cur_size = next_entry.block_size; + } else { + cur_size += next_entry.block_size; + } + + tot_size += next_entry.block_size; + } + + // Ensure we add the right amount for the last block. + if (tot_size > size) { + cur_size -= (tot_size - size); + } + + // Add the last block. + const size_t cur_pages = cur_size / Memory::CITRA_PAGE_SIZE; + R_UNLESS(IsFcramPhysicalAddress(cur_addr), ResultCode{0xE0A01BF5}); + pg.AddRange(cur_addr + 0xC0000000, cur_pages); + + R_SUCCEED(); +} + +ResultCode KPageTable::QueryInfo(KMemoryInfo* out_info, u32* pageInfo, u32 addr) { + // KLightScopedMutex m{m_mutex}; + + // Find the block that contains the provided address. + KMemoryBlock* block = m_memory_block_manager.GetMemoryBlockContainingAddr(addr); + R_UNLESS(out_info && block, ERR_INVALID_ADDRESS); + + // Copy the block information to the output. + const KMemoryInfo info = block->GetInfo(); + std::memcpy(out_info, &info, sizeof(KMemoryInfo)); + + // We are finished. + *pageInfo = 0; + R_SUCCEED(); +} + +ResultCode KPageTable::CopyMemoryInterprocessForIpc(VAddr dstAddr, KPageTable* srcPgTbl, + VAddr srcAddr, u32 size) { + // Define a helper type. + struct CopyMemoryForIpcHelper { + public: + KPageTable& m_pt; + TraversalContext m_context; + TraversalEntry m_entry; + PAddr m_phys_addr; + size_t m_size; + + public: + CopyMemoryForIpcHelper(KPageTable& pt, VAddr address) : m_pt(pt) { + // Begin a traversal. 
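+            // The traversal resolves the first mapped physical span covering `address`; the
+            // tracking fields below then record how much physically contiguous memory is
+            // available starting at that address.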
+ ASSERT(m_pt.GetImpl().BeginTraversal(std::addressof(m_entry), std::addressof(m_context), + address)); + + // Setup tracking fields. + m_phys_addr = m_entry.phys_addr; + m_size = m_entry.block_size - (m_phys_addr & (m_entry.block_size - 1)); + + // Consume the whole contiguous block. + this->DetermineContiguousBlockExtents(); + } + + bool IsValid() const noexcept { + return m_phys_addr == 0; + } + + void ContinueTraversal() { + // Update our remaining size. + m_remaining_size = m_remaining_size - m_cur_size; + + // Update our tracking fields. + if (m_remaining_size > 0) { + m_phys_addr = m_entry.phys_addr; + m_cur_size = std::min(m_remaining_size, m_entry.block_size); + + // Consume the whole contiguous block. + this->DetermineContiguousBlockExtents(); + } + } + + private: + }; + + // Create helpers for both tables. + CopyMemoryForIpcHelper src_helper(*srcPgTbl, srcAddr); + CopyMemoryForIpcHelper dst_helper(*this, dstAddr); + + // Validate address ranges. + const bool helper_invalid = !src_helper.IsValid() || !dst_helper.IsValid(); + R_SUCCEED_IF(helper_invalid && !size); + R_UNLESS(!helper_invalid, ResultCode{0xE0A01BF5}); + + // Traverse ranges. + while (true) { + if (dstHelper.size > srcHelper.size) + p_srcHelper = &srcHelper; + else + p_srcHelper = &dstHelper; + + if (dstHelper.size <= srcHelper.size) + srcAddr = (u32)&srcHelper; + if (dstHelper.size > srcHelper.size) + srcAddr = (u32)&dstHelper; + if (p_srcHelper->size + v7 >= size) + break; + KTranslationTableIterator::Advance(&p_srcHelper->pgTblPtr->L1Table, &ttblIterator, + &p_srcHelper->traversalContext); + v11 = p_srcHelper->size; + if (p_srcHelper->pa + v11 == ttblIterator.pa) { + p_srcHelper->size = ttblIterator.size + v11; + } else { + memcpy((void*)(dstHelper.pa + 0xC0000000), (const void*)(srcHelper.pa + 0xC0000000), + v11); + v12 = p_srcHelper->size; + *(_DWORD*)(srcAddr + 12) += v12; + v7 += v12; + *(_DWORD*)(srcAddr + 16) -= p_srcHelper->size; + *(KTranslationTableIterator*)&p_srcHelper->pa = ttblIterator; + } + } + memcpy((void*)(dstHelper.pa + 0xC0000000), (const void*)(srcHelper.pa + 0xC0000000), size - v7); + return 0; + + int v7; // r6 + bool v8; // zf + KCopyMemoryForIpcHelper* p_srcHelper; // r4 + u32 v11; // r2 + u32 v12; // r0 + KTranslationTableIterator v13; // [sp+0h] [bp-50h] BYREF + KCopyMemoryForIpcHelper srcHelper; // [sp+Ch] [bp-44h] BYREF + KTranslationTableIterator ttblIterator; // [sp+20h] [bp-30h] BYREF + KCopyMemoryForIpcHelper dstHelper; // [sp+28h] [bp-28h] BYREF + + dstHelper.pgTblPtr = this; + KTranslationTableIterator::CreateFromEntry(&this->L1Table, &v13, &dstHelper.traversalContext, + dstAddr); + dstHelper.pa = v13.pa; + dstHelper.size = v13.size - ((v13.size - 1) & v13.pa); + srcHelper.pgTblPtr = srcPgTbl; + KTranslationTableIterator::CreateFromEntry(&srcPgTbl->L1Table, &v13, + &srcHelper.traversalContext, srcAddr); + v7 = 0; + srcHelper.pa = v13.pa; + srcHelper.size = v13.size - (v13.pa & (v13.size - 1)); + v8 = dstHelper.pa == 0; + if (!v8) + v8 = srcHelper.pa == 0; + if (v8) { + if (size) + return 0xE0A01BF5; + else + return 0; + } else { + while (1) { + if (dstHelper.size > srcHelper.size) + p_srcHelper = &srcHelper; + else + p_srcHelper = &dstHelper; + if (dstHelper.size <= srcHelper.size) + srcAddr = (u32)&srcHelper; + if (dstHelper.size > srcHelper.size) + srcAddr = (u32)&dstHelper; + if (p_srcHelper->size + v7 >= size) + break; + KTranslationTableIterator::Advance(&p_srcHelper->pgTblPtr->L1Table, &ttblIterator, + &p_srcHelper->traversalContext); + v11 = p_srcHelper->size; + if 
(p_srcHelper->pa + v11 == ttblIterator.pa) { + p_srcHelper->size = ttblIterator.size + v11; + } else { + memcpy((void*)(dstHelper.pa + 0xC0000000), (const void*)(srcHelper.pa + 0xC0000000), + v11); + v12 = p_srcHelper->size; + *(_DWORD*)(srcAddr + 12) += v12; + v7 += v12; + *(_DWORD*)(srcAddr + 16) -= p_srcHelper->size; + *(KTranslationTableIterator*)&p_srcHelper->pa = ttblIterator; + } + } + memcpy((void*)(dstHelper.pa + 0xC0000000), (const void*)(srcHelper.pa + 0xC0000000), + size - v7); + return 0; + } +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h new file mode 100644 index 000000000..381ef32c8 --- /dev/null +++ b/src/core/hle/kernel/k_page_table.h @@ -0,0 +1,134 @@ +// Copyright 2023 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "common/common_funcs.h" +#include "core/hle/kernel/k_memory_block_manager.h" +#include "core/hle/result.h" + +namespace Common { +class PageTable; +} + +namespace Kernel { + +enum class KMemoryUpdateFlags { + None = 0x0, + State = 0x1, + Perms = 0x100, + StateAndPerms = State | Perms, +}; +DECLARE_ENUM_FLAG_OPERATORS(KMemoryUpdateFlags) + +enum class MemoryOperation : u32; + +class KPageGroup; +class KPageManager; + +class KPageTable { +public: + explicit KPageTable(KernelSystem& kernel, KPageManager* page_manager) + : m_kernel{kernel}, m_page_manager{page_manager}, m_memory_block_manager{kernel} {} + ~KPageTable() = default; + + Common::PageTable& GetImpl() { + return *m_impl; + } + + void InitizalizeL1Table(u32** outL1TablePtr, u32* L1Table); + + ResultCode CheckAndUpdateAddrRangeMaskedStateAndPerms( + u32 addr, u32 num_pages, KMemoryState state_mask, KMemoryState expected_state, + KMemoryPermission min_perms, KMemoryState new_state, KMemoryPermission new_perms); + ResultCode CheckAddressRangeSizeAndState(u32 addr, u32 size, KMemoryState state); + ResultCode CheckAddressRangeSizeAndStateFlags(u32 addr, u32 size, KMemoryState stateMask, + KMemoryState expectedStateFlags); + ResultCode CheckMemoryBlockAttributes(u32 addr, u32 size, KMemoryState state, + KMemoryPermission perms); + ResultCode CheckAddrRangeMaskedStateAndPerms(u32 addr, u32 size, KMemoryState stateMask, + KMemoryState expectedState, + KMemoryPermission minPerms); + ResultCode CheckAndChangeGroupStateAndPerms(u32 addr, KPageGroup* pgGroup, + KMemoryState stateMask, KMemoryState expectedState, + KMemoryPermission minPerms, KMemoryState newState, + KMemoryPermission newPerms); + + ResultCode MapL2Entries(u32 va, u32 pa, u32 numPages_reused, u32* attribsPtr, bool isLarge); + ResultCode MapL1Entries(u32 va, u32 pa, u32 numPages, u32* attribsPtr, bool isLarge); + ResultCode MapContiguousPhysicalAddressRange(u32 va, u32 pa, u32 numPages, u32* mmuAttribs); + ResultCode MergeContiguousEntries(u32 va); + ResultCode MapNewlyAllocatedPhysicalAddressRange(u32 va, u32 pa, u32 numPages, u32* mmuAttrbis); + + ResultCode RemapMemoryInterprocess(KPageTable* dstPgTbl, KPageTable* srcPgTbl, u32 dstAddr, + u32 srcAddr, u32 numPages, KMemoryState dstMemState, + KMemoryPermission dstMemPerms); + + ResultCode ChangePageAttributes(u32 addr, u32 size, u32* mmuAttribs); + ResultCode CheckAndUnmapPageGroup(u32 addr, KPageGroup* pgGroup); + + ResultCode CreateAlias(u32 srcAddr, u32 dstAddr, u32 numPages, KMemoryState expectedStateSrc, + KMemoryPermission expectedMinPermsSrc, KMemoryState newStateSrc, + KMemoryPermission newPermsSrc, KMemoryState newStateDst, + 
KMemoryPermission newPermsDst); + ResultCode DestroyAlias(u32 srcAddr, u32 dstAddr, u32 numPages, KMemoryState expectedStateSrc, + KMemoryPermission expectedMinPermsSrc, KMemoryState expectedStateDst, + KMemoryPermission expectedMinPermsDst, KMemoryState newStateSrc, + KMemoryPermission newPermsSrc); + + void Unmap(u32 addr, u32 numPages); + void UnmapEntries(u32 currentVa, u32 numPages, KPageGroup* outPgGroupUnmapped); + + ResultCode OperateOnGroup(u32 addr, KPageGroup* pgGroup, KMemoryState state, + KMemoryPermission perms, KMemoryUpdateFlags updateFlags); + ResultCode OperateOnAnyFreeBlockInRegionWithGuardPage(u32* outAddr, u32 blockNumPages, + u32 regionStart, u32 regionNumPages, + u32 pa, KMemoryState state, + KMemoryPermission perms, + KMemoryUpdateFlags updateFlags, + MemoryOperation region); + ResultCode Operate(u32 va, u32 numPages, u32 pa, KMemoryState state, KMemoryPermission perms, + KMemoryUpdateFlags updateFlags, MemoryOperation region); + + ResultCode MakePageGroup(KPageGroup& pg, VAddr addr, u32 num_pages); + ResultCode QueryInfo(KMemoryInfo* outMemoryInfo, u32* pageInfo, u32 addr); + ResultCode CopyMemoryInterprocessForIpc(u32 dstAddr, KPageTable* srcPgTbl, u32 srcAddr, + u32 size); + ResultCode SplitContiguousEntries(u32 va, u32 size); + + u32 ConvertVaToPa(u32** L1TablePtr, u32 va); + + void InvalidateAllTlbEntries(); + void InvalidateEntireInstructionCache(); + void InvalidateEntireInstructionCacheLocal(); + void InvalidateTlbEntryByMva(u32 addr); + void InvalidateDataCacheRange(u32 addr, u32 size); + void InvalidateDataCacheRangeLocal(u32 addr, u32 size); + + void CleanInvalidateEntireDataCacheLocal(); + void CleanInvalidateDataCacheRangeLocal(u32 addr, u32 size); + void CleanInvalidateDataCacheRange(u32 addr, u32 size); + void CleanInvalidateInstructionCacheRange(u32 addr, u32 size); + void CleanInvalidateEntireDataCache(); + void CleanDataCacheRange(u32 addr, u32 size); + +private: + KernelSystem& m_kernel; + KPageManager* m_page_manager; + // KLightMutex mutex; + std::unique_ptr m_impl{}; + std::array m_tlb_needs_invalidating{}; + KMemoryBlockManager m_memory_block_manager; + u32 m_translation_table_base{}; + u8 m_asid{}; + bool m_is_kernel{}; + bool m_use_small_pages{}; + u32 m_address_space_start{}; + u32 m_address_space_end{}; + u32 m_linear_address_range_start{}; + u32 m_translation_table_size{}; + u32* m_l1_table{}; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h new file mode 100644 index 000000000..15e4a2d88 --- /dev/null +++ b/src/core/hle/kernel/k_slab_heap.h @@ -0,0 +1,191 @@ +// Copyright 2023 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
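+
+// A free-list slab allocator for fixed-size kernel objects: KSlabHeapImpl threads an
+// intrusive singly linked list of free nodes through the backing storage, KSlabHeapBase
+// layers object-size, range, and peak-usage bookkeeping on top, and KSlabHeap<T> is the
+// typed front end reached through KernelSystem::SlabHeap<T>().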
+ +#pragma once + +#include + +#include "common/assert.h" +#include "common/atomic_ops.h" +#include "common/common_funcs.h" +#include "common/common_types.h" + +namespace Kernel { + +class KernelSystem; + +namespace impl { + +class KSlabHeapImpl { + CITRA_NON_COPYABLE(KSlabHeapImpl); + CITRA_NON_MOVEABLE(KSlabHeapImpl); + +public: + struct Node { + Node* next{}; + }; + +public: + constexpr KSlabHeapImpl() = default; + + void Initialize() { + ASSERT(m_head == nullptr); + } + + Node* GetHead() const { + return m_head; + } + + void* Allocate() { + Node* ret = m_head; + if (ret != nullptr) [[likely]] { + m_head = ret->next; + } + return ret; + } + + void Free(void* obj) { + Node* node = static_cast(obj); + node->next = m_head; + m_head = node; + } + +private: + std::atomic m_head{}; +}; + +} // namespace impl + +class KSlabHeapBase : protected impl::KSlabHeapImpl { + CITRA_NON_COPYABLE(KSlabHeapBase); + CITRA_NON_MOVEABLE(KSlabHeapBase); + +private: + size_t m_obj_size{}; + uintptr_t m_peak{}; + uintptr_t m_start{}; + uintptr_t m_end{}; + +private: + void UpdatePeakImpl(uintptr_t obj) { + const uintptr_t alloc_peak = obj + this->GetObjectSize(); + uintptr_t cur_peak = m_peak; + do { + if (alloc_peak <= cur_peak) { + break; + } + } while ( + !Common::AtomicCompareAndSwap(std::addressof(m_peak), alloc_peak, cur_peak, cur_peak)); + } + +public: + constexpr KSlabHeapBase() = default; + + bool Contains(uintptr_t address) const { + return m_start <= address && address < m_end; + } + + void Initialize(size_t obj_size, void* memory, size_t memory_size) { + // Ensure we don't initialize a slab using null memory. + ASSERT(memory != nullptr); + + // Set our object size. + m_obj_size = obj_size; + + // Initialize the base allocator. + KSlabHeapImpl::Initialize(); + + // Set our tracking variables. + const size_t num_obj = (memory_size / obj_size); + m_start = reinterpret_cast(memory); + m_end = m_start + num_obj * obj_size; + m_peak = m_start; + + // Free the objects. + u8* cur = reinterpret_cast(m_end); + + for (size_t i = 0; i < num_obj; i++) { + cur -= obj_size; + KSlabHeapImpl::Free(cur); + } + } + + size_t GetSlabHeapSize() const { + return (m_end - m_start) / this->GetObjectSize(); + } + + size_t GetObjectSize() const { + return m_obj_size; + } + + void* Allocate() { + void* obj = KSlabHeapImpl::Allocate(); + return obj; + } + + void Free(void* obj) { + // Don't allow freeing an object that wasn't allocated from this heap. + const bool contained = this->Contains(reinterpret_cast(obj)); + ASSERT(contained); + KSlabHeapImpl::Free(obj); + } + + size_t GetObjectIndex(const void* obj) const { + return (reinterpret_cast(obj) - m_start) / this->GetObjectSize(); + } + + size_t GetPeakIndex() const { + return this->GetObjectIndex(reinterpret_cast(m_peak)); + } + + uintptr_t GetSlabHeapAddress() const { + return m_start; + } + + size_t GetNumRemaining() const { + // Only calculate the number of remaining objects under debug configuration. 
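+        // As written this is a stub that always reports zero; an actual count would presumably
+        // have to walk the free list kept by KSlabHeapImpl.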
+ return 0; + } +}; + +template +class KSlabHeap final : public KSlabHeapBase { +private: + using BaseHeap = KSlabHeapBase; + +public: + constexpr KSlabHeap() = default; + + void Initialize(void* memory, size_t memory_size) { + BaseHeap::Initialize(sizeof(T), memory, memory_size); + } + + T* Allocate() { + T* obj = static_cast(BaseHeap::Allocate()); + + if (obj != nullptr) [[likely]] { + std::construct_at(obj); + } + return obj; + } + + T* Allocate(KernelSystem& kernel) { + T* obj = static_cast(BaseHeap::Allocate()); + + if (obj != nullptr) [[likely]] { + std::construct_at(obj, kernel); + } + return obj; + } + + void Free(T* obj) { + BaseHeap::Free(obj); + } + + size_t GetObjectIndex(const T* obj) const { + return BaseHeap::GetObjectIndex(obj); + } +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index d5cbf0fd6..d940b4f3b 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -8,9 +8,11 @@ #include "common/archives.h" #include "common/serialization/atomic.h" #include "core/hle/kernel/client_port.h" -#include "core/hle/kernel/config_mem.h" -#include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/ipc_debugger/recorder.h" +#include "core/hle/kernel/k_linked_list.h" +#include "core/hle/kernel/k_memory_block.h" +#include "core/hle/kernel/k_page_group.h" +#include "core/hle/kernel/k_slab_heap.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/memory.h" #include "core/hle/kernel/process.h" @@ -29,6 +31,7 @@ KernelSystem::KernelSystem(Memory::MemorySystem& memory, Core::Timing& timing, : memory(memory), timing(timing), prepare_reschedule_callback(std::move(prepare_reschedule_callback)), memory_mode(memory_mode), n3ds_hw_caps(n3ds_hw_caps) { + slab_heap_container = std::make_unique(); std::generate(memory_regions.begin(), memory_regions.end(), [] { return std::make_shared(); }); MemoryInit(memory_mode, n3ds_hw_caps.memory_mode, override_init_time); @@ -192,6 +195,27 @@ void KernelSystem::serialize(Archive& ar, const unsigned int file_version) { } } +struct KernelSystem::SlabHeapContainer { + KSlabHeap linked_list_node; + KSlabHeap block_info; + KSlabHeap memory_block; +}; + +template +KSlabHeap& KernelSystem::SlabHeap() { + if constexpr (std::is_same_v) { + return slab_heap_container->linked_list_node; + } else if constexpr (std::is_same_v) { + return slab_heap_container->block_info; + } else if constexpr (std::is_same_v) { + return slab_heap_container->memory_block; + } +} + +template KSlabHeap& KernelSystem::SlabHeap(); +template KSlabHeap& KernelSystem::SlabHeap(); +template KSlabHeap& KernelSystem::SlabHeap(); + SERIALIZE_IMPL(KernelSystem) } // namespace Kernel diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index c381e97cb..9535ec9f1 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -130,6 +130,9 @@ private: friend class boost::serialization::access; }; +template +class KSlabHeap; + class KernelSystem { public: explicit KernelSystem(Memory::MemorySystem& memory, Core::Timing& timing, @@ -260,6 +263,10 @@ public: MemoryPermission other_permissions, std::string name = "Unknown Applet"); + /// Gets the slab heap for the specified kernel object type. + template + KSlabHeap& SlabHeap(); + u32 GenerateObjectID(); /// Retrieves a process from the current list of processes. 
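
The SlabHeap<T>() accessor declared just above is the single entry point to the typed slabs
owned by the kernel. A minimal usage sketch (a hypothetical call site; it assumes, as the
container members in kernel.cpp suggest, that KMemoryBlock is one of the slab-backed types and
can be constructed from a KernelSystem&):

    // Allocate a KMemoryBlock from its dedicated slab, then return it to the same slab.
    KMemoryBlock* block = kernel.SlabHeap<KMemoryBlock>().Allocate(kernel);
    if (block != nullptr) {
        // ... initialize and use the block ...
        kernel.SlabHeap<KMemoryBlock>().Free(block);
    }

The KSlabAllocated<Derived> helper in slab_helpers.h wraps this same pattern behind static
Allocate/Free members.
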
@@ -369,6 +376,10 @@ private: MemoryMode memory_mode; New3dsHwCapabilities n3ds_hw_caps; + /// Helper to encapsulate all slab heaps in a single heap allocated container + struct SlabHeapContainer; + std::unique_ptr slab_heap_container; + friend class boost::serialization::access; template void serialize(Archive& ar, const unsigned int file_version); diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h new file mode 100644 index 000000000..3821d8126 --- /dev/null +++ b/src/core/hle/kernel/slab_helpers.h @@ -0,0 +1,130 @@ +// Copyright 2023 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "core/hle/kernel/k_auto_object.h" +#include "core/hle/kernel/kernel.h" + +namespace Kernel { + +template +class KSlabAllocated { +public: + constexpr KSlabAllocated() = default; + + size_t GetSlabIndex(KernelSystem& kernel) const { + return kernel.SlabHeap().GetIndex(static_cast(this)); + } + +public: + static void InitializeSlabHeap(KernelSystem& kernel, void* memory, size_t memory_size) { + kernel.SlabHeap().Initialize(memory, memory_size); + } + + static Derived* Allocate(KernelSystem& kernel) { + return kernel.SlabHeap().Allocate(kernel); + } + + static void Free(KernelSystem& kernel, Derived* obj) { + kernel.SlabHeap().Free(obj); + } + + static size_t GetObjectSize(KernelSystem& kernel) { + return kernel.SlabHeap().GetObjectSize(); + } + + static size_t GetSlabHeapSize(KernelSystem& kernel) { + return kernel.SlabHeap().GetSlabHeapSize(); + } + + static size_t GetPeakIndex(KernelSystem& kernel) { + return kernel.SlabHeap().GetPeakIndex(); + } + + static uintptr_t GetSlabHeapAddress(KernelSystem& kernel) { + return kernel.SlabHeap().GetSlabHeapAddress(); + } + + static size_t GetNumRemaining(KernelSystem& kernel) { + return kernel.SlabHeap().GetNumRemaining(); + } +}; + +template +class KAutoObjectWithSlabHeap : public Base { + static_assert(std::is_base_of::value); + +private: + static Derived* Allocate(KernelSystem& kernel) { + return kernel.SlabHeap().Allocate(kernel); + } + + static void Free(KernelSystem& kernel, Derived* obj) { + kernel.SlabHeap().Free(obj); + } + +public: + explicit KAutoObjectWithSlabHeap(KernelSystem& kernel) : Base(kernel) {} + virtual ~KAutoObjectWithSlabHeap() = default; + + virtual void Destroy() override { + const bool is_initialized = this->IsInitialized(); + uintptr_t arg = 0; + if (is_initialized) { + arg = this->GetPostDestroyArgument(); + this->Finalize(); + } + Free(Base::m_kernel, static_cast(this)); + if (is_initialized) { + Derived::PostDestroy(arg); + } + } + + virtual bool IsInitialized() const { + return true; + } + virtual uintptr_t GetPostDestroyArgument() const { + return 0; + } + + size_t GetSlabIndex() const { + return SlabHeap(Base::m_kernel).GetObjectIndex(static_cast(this)); + } + +public: + static void InitializeSlabHeap(KernelSystem& kernel, void* memory, size_t memory_size) { + kernel.SlabHeap().Initialize(memory, memory_size); + } + + static Derived* Create(KernelSystem& kernel) { + Derived* obj = Allocate(kernel); + if (obj != nullptr) { + KAutoObject::Create(obj); + } + return obj; + } + + static size_t GetObjectSize(KernelSystem& kernel) { + return kernel.SlabHeap().GetObjectSize(); + } + + static size_t GetSlabHeapSize(KernelSystem& kernel) { + return kernel.SlabHeap().GetSlabHeapSize(); + } + + static size_t GetPeakIndex(KernelSystem& kernel) { + return kernel.SlabHeap().GetPeakIndex(); + } + + static uintptr_t 
GetSlabHeapAddress(KernelSystem& kernel) {
+        return kernel.SlabHeap<Derived>().GetSlabHeapAddress();
+    }
+
+    static size_t GetNumRemaining(KernelSystem& kernel) {
+        return kernel.SlabHeap<Derived>().GetNumRemaining();
+    }
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/result.h b/src/core/hle/result.h
index a1adaeeaa..115c766ea 100644
--- a/src/core/hle/result.h
+++ b/src/core/hle/result.h
@@ -408,3 +408,130 @@ private:
     auto CONCAT2(check_result_L, __LINE__) = source;                                              \
     if (CONCAT2(check_result_L, __LINE__).IsError())                                              \
     return CONCAT2(check_result_L, __LINE__);
+
+#define R_SUCCEEDED(res) (static_cast<ResultCode>(res).IsSuccess())
+#define R_FAILED(res) (!static_cast<ResultCode>(res).IsSuccess())
+
+namespace ResultImpl {
+template <auto EvaluateResult, class F>
+class ScopedResultGuard {
+private:
+    ResultCode& m_ref;
+    F m_f;
+
+public:
+    constexpr ScopedResultGuard(ResultCode& ref, F f) : m_ref(ref), m_f(std::move(f)) {}
+    constexpr ~ScopedResultGuard() {
+        if (EvaluateResult(m_ref)) {
+            m_f();
+        }
+    }
+};
+
+template <auto EvaluateResult>
+class ResultReferenceForScopedResultGuard {
+private:
+    ResultCode& m_ref;
+
+public:
+    constexpr ResultReferenceForScopedResultGuard(ResultCode& r) : m_ref(r) {}
+    constexpr operator ResultCode&() const {
+        return m_ref;
+    }
+};
+
+template <auto EvaluateResult, class F>
+constexpr ScopedResultGuard<EvaluateResult, F> operator+(
+    ResultReferenceForScopedResultGuard<EvaluateResult> ref, F&& f) {
+    return ScopedResultGuard<EvaluateResult, F>(static_cast<ResultCode&>(ref), std::forward<F>(f));
+}
+
+constexpr bool EvaluateResultSuccess(const ResultCode& r) {
+    return R_SUCCEEDED(r);
+}
+constexpr bool EvaluateResultFailure(const ResultCode& r) {
+    return R_FAILED(r);
+}
+
+template <typename T>
+constexpr void UpdateCurrentResultReference(T result_reference, ResultCode result) = delete;
+// Intentionally not defined
+
+template <>
+constexpr void UpdateCurrentResultReference<ResultCode&>(ResultCode& result_reference,
+                                                         ResultCode result) {
+    result_reference = result;
+}
+
+template <>
+constexpr void UpdateCurrentResultReference<const ResultCode>(ResultCode result_reference,
+                                                              ResultCode result) {}
+} // namespace ResultImpl
+
+#define DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(COUNTER_VALUE)                               \
+    [[maybe_unused]] constexpr bool CONCAT2(HasPrevRef_, COUNTER_VALUE) =                          \
+        std::same_as<decltype(__TmpCurrentResultReference), ResultCode&>;                          \
+    [[maybe_unused]] ResultCode CONCAT2(PrevRef_, COUNTER_VALUE) = __TmpCurrentResultReference;    \
+    [[maybe_unused]] ResultCode CONCAT2(__tmp_result_, COUNTER_VALUE) = RESULT_SUCCESS;            \
+    ResultCode& __TmpCurrentResultReference = CONCAT2(HasPrevRef_, COUNTER_VALUE)                  \
+                                                  ? CONCAT2(PrevRef_, COUNTER_VALUE)               \
+                                                  : CONCAT2(__tmp_result_, COUNTER_VALUE)
+
+#define ON_RESULT_RETURN_IMPL(...)                                                                 \
+    static_assert(std::same_as<decltype(__TmpCurrentResultReference), ResultCode&>);               \
+    auto CONCAT2(RESULT_GUARD_STATE_, __COUNTER__) =                                               \
+        ResultImpl::ResultReferenceForScopedResultGuard<__VA_ARGS__>(                              \
+            __TmpCurrentResultReference) +                                                         \
+        [&]()
+
+#define ON_RESULT_FAILURE_2 ON_RESULT_RETURN_IMPL(ResultImpl::EvaluateResultFailure)
+
+#define ON_RESULT_FAILURE                                                                          \
+    DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(__COUNTER__);                                     \
+    ON_RESULT_FAILURE_2
+
+#define ON_RESULT_SUCCESS_2 ON_RESULT_RETURN_IMPL(ResultImpl::EvaluateResultSuccess)
+
+#define ON_RESULT_SUCCESS                                                                          \
+    DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(__COUNTER__);                                     \
+    ON_RESULT_SUCCESS_2
+
+constexpr inline ResultCode __TmpCurrentResultReference = RESULT_SUCCESS;
+
+/// Returns a result.
+#define R_RETURN(res_expr)                                                                         \
+    {                                                                                              \
+        const ResultCode _tmp_r_throw_rc = (res_expr);                                             \
+        ResultImpl::UpdateCurrentResultReference<decltype(__TmpCurrentResultReference)>(           \
+            __TmpCurrentResultReference, _tmp_r_throw_rc);                                         \
+        return _tmp_r_throw_rc;                                                                    \
+    }
+
+/// Returns RESULT_SUCCESS.
+#define R_SUCCEED() R_RETURN(RESULT_SUCCESS)
+
+/// Throws a result.
+#define R_THROW(res_expr) R_RETURN(res_expr)
+
+/// Evaluates a boolean expression, and returns a result unless that expression is true.
+#define R_UNLESS(expr, res)                                                                        \
+    {                                                                                              \
+        if (!(expr)) {                                                                             \
+            R_THROW(res);                                                                          \
+        }                                                                                          \
+    }
+
+/// Evaluates an expression that returns a result, and returns the result if it would fail.
+#define R_TRY(res_expr)                                                                            \
+    {                                                                                              \
+        const auto _tmp_r_try_rc = (res_expr);                                                     \
+        if (R_FAILED(_tmp_r_try_rc)) {                                                             \
+            R_THROW(_tmp_r_try_rc);                                                                \
+        }                                                                                          \
+    }
+
+/// Evaluates a boolean expression, and succeeds if that expression is true.
+#define R_SUCCEED_IF(expr) R_UNLESS(!(expr), RESULT_SUCCESS)
+
+/// Evaluates a boolean expression, and asserts if that expression is false.
+#define R_ASSERT(expr) ASSERT(R_SUCCEEDED(expr))
diff --git a/src/core/memory.h b/src/core/memory.h
index 69a499434..64bad5592 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -329,6 +329,11 @@ public:
      */
     u8* GetPointer(VAddr vaddr);
 
+    template <typename T>
+    T* GetPointer(VAddr vaddr) {
+        return reinterpret_cast<T*>(GetPointer(vaddr));
+    }
+
     /**
      * Gets a pointer to the given address.
      *