Compare commits


16 Commits

Author SHA1 Message Date
2e963dee02 core: Ignore GCC warning 2023-12-13 21:48:14 +02:00
b62278d16e gsp_gpu: Restore comment 2023-12-13 12:21:58 +02:00
3f55c6b813 bit_set: Use <bit> and concepts 2023-12-13 12:21:58 +02:00
6c55ebac77 ring_buffer: Use feature macro 2023-12-13 12:21:58 +02:00
5c2a0c4e37 bit_set: Make constexpr 2023-12-13 12:21:58 +02:00
0afaa31df5 common: Remove linear disk cache
* Has never been used?
2023-12-13 12:21:58 +02:00
bd1ffc34ef common: use SetThreadDescription API for thread names 2023-12-13 12:21:58 +02:00
d487afd43c common: Remove misc.cpp
* GetLastErrorMsg has been in error.h for a while and also helps remove a dependency from a hot header like common_funcs
2023-12-13 12:21:58 +02:00
d4c26a6d95 common/swap: Make use of std::endian
Allows removing a bunch of defines in favor of a two liner.
2023-12-13 12:21:58 +02:00
2db9328087 android: Upgrade to NDK 26
* Allows access to newer libc++
2023-12-13 12:21:58 +02:00
6b4ff943da code: Remove some old msvc workarounds 2023-12-13 12:21:51 +02:00
9a6d15ab74 ci: Only use Linux clang for app image build. (#7244)
* ci: Only use Linux clang for app image build.

* build: Re-add -Wno-attributes for GCC 11.
2023-12-12 09:48:06 -08:00
60584e861d fs: Stub ControlArchive. (#7237) 2023-12-08 23:35:01 -08:00
070853b465 apt: Stub ReplySleepQuery and ReplySleepNotificationComplete. (#7236) 2023-12-08 23:34:54 -08:00
24b5ffbfca boss: Implement Spotpass service (part 1) (#7232)
* boss: Implement Spotpass service (part 1)

* boss: Fix save state (de)serialization.

* boss: Fix casing of SpotPass in log messages.

* boss: Minor logging improvements.

* common: Add boost serialization support for std::variant.

---------

Co-authored-by: Rokkubro <lachlanb03@gmail.com>
Co-authored-by: FearlessTobi <thm.frey@gmail.com>
2023-12-08 23:34:44 -08:00
4d9eedd0d8 video_core/vulkan: Add debug object names (#7233)
* vk_platform: Add `SetObjectName`

Creates a name-info struct and automatically deduces the object handle type using vulkan-hpp's handle trait data.
Supports `string_view` and `fmt` arguments.

* vk_texture_runtime: Use `SetObjectName` for surface handles

Names both the image handle and the image-view.

* vk_stream_buffer: Add debug object names

Names the buffer and its device memory based on its size and type.

* vk_swapchain: Set swapchain handle debug names

Identifies the swapchain images themselves as well as the semaphores

* vk_present_window: Set handle debug names

* vk_resource_pool: Set debug handle names

* vk_blit_helper: Set debug handle names

* vk_platform: Use `VulkanHandleType` concept

Use a new `concept`-type rather than `enable_if`-patterns to restrict
this function to Vulkan handle-types only.
2023-12-08 06:58:47 +02:00
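
A rough idea of what a trait-based naming helper like this can look like. This is a sketch only: the vk::isVulkanHandleType trait, the handle's objectType/CType members and the dispatcher setup are assumptions based on vulkan-hpp conventions, not the code from this PR.

#include <cstdint>
#include <string>
#include <string_view>
#include <utility>
#include <fmt/format.h>
#include <vulkan/vulkan.hpp>

// Accept only vulkan-hpp handle wrappers, replacing an enable_if-style constraint.
template <typename T>
concept VulkanHandleType = vk::isVulkanHandleType<T>::value;

template <VulkanHandleType HandleType>
void SetObjectName(vk::Device device, const HandleType& handle, std::string_view name) {
    const std::string name_str{name}; // pObjectName must be null-terminated
    vk::DebugUtilsObjectNameInfoEXT name_info{};
    name_info.objectType = HandleType::objectType; // deduced from the handle's trait data
    name_info.objectHandle = (std::uint64_t)(typename HandleType::CType)handle; // raw Vulkan handle
    name_info.pObjectName = name_str.c_str();
    // Requires VK_EXT_debug_utils and a dispatcher that loaded the extension entry point.
    device.setDebugUtilsObjectNameEXT(name_info);
}

// fmt-style overload so call sites can compose names such as "Framebuffer 640x480".
template <VulkanHandleType HandleType, typename... Args>
void SetObjectName(vk::Device device, const HandleType& handle,
                   fmt::format_string<Args...> format, Args&&... args) {
    const std::string name = fmt::format(format, std::forward<Args>(args)...);
    SetObjectName(device, handle, std::string_view{name});
}
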
146 changed files with 5616 additions and 7784 deletions

@@ -1,13 +1,15 @@
-#!/bin/sh -ex
+#!/bin/bash -ex
+
+if [ "$TARGET" = "appimage" ]; then
+    export COMPILER_FLAGS=(-DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang -DCMAKE_LINKER=/etc/bin/ld.lld)
+fi
 mkdir build && cd build
 cmake .. -G Ninja \
     -DCMAKE_BUILD_TYPE=Release \
     -DCMAKE_C_COMPILER_LAUNCHER=ccache \
     -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
-    -DCMAKE_CXX_COMPILER=clang++ \
-    -DCMAKE_C_COMPILER=clang \
-    -DCMAKE_LINKER=/etc/bin/ld.lld \
+    "${COMPILER_FLAGS[@]}" \
     -DENABLE_QT_TRANSLATION=ON \
     -DCITRA_ENABLE_COMPATIBILITY_REPORTING=ON \
     -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON \

@@ -124,6 +124,13 @@ else()
     add_compile_options("-stdlib=libc++")
 endif()
+if (CMAKE_CXX_COMPILER_ID STREQUAL GNU)
+    # GCC may warn when it ignores attributes like maybe_unused,
+    # which is a problem for older versions (e.g. GCC 11).
+    add_compile_options("-Wno-attributes")
+    add_compile_options("-Wno-interference-size")
+endif()
 if (MINGW)
     add_definitions(-DMINGW_HAS_SECURE_API)
     if (COMPILE_WITH_DWARF)
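
For context, a hedged illustration (not taken from the repository) of the class of code that trips the interference-size diagnostic; the ring_buffer change further down uses the same constant.

#include <atomic>
#include <cstddef>
#include <new>

// GCC 12 and newer emit -Winterference-size when this constant is used, because
// its value depends on the tuning flags the standard library was built with;
// the build flag above silences the warning instead of hard-coding a cache-line size.
struct alignas(std::hardware_destructive_interference_size) PaddedCounter {
    std::atomic<std::size_t> value{0};
};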

@@ -29,7 +29,7 @@ android {
     namespace = "org.citra.citra_emu"
     compileSdkVersion = "android-34"
-    ndkVersion = "25.2.9519653"
+    ndkVersion = "26.1.10909125"
 
     compileOptions {
         sourceCompatibility = JavaVersion.VERSION_17

@@ -83,14 +83,6 @@ class GMainWindow : public QMainWindow {
     /// Max number of recently loaded items to keep track of
     static const int max_recent_files_item = 10;
 
-    // TODO: Make use of this!
-    enum {
-        UI_IDLE,
-        UI_EMU_BOOTING,
-        UI_EMU_RUNNING,
-        UI_EMU_STOPPING,
-    };
-
 public:
     void filterBarSetChecked(bool state);
     void UpdateUITheme();

@@ -90,8 +90,6 @@ add_library(citra_common STATIC
     file_util.cpp
     file_util.h
     hash.h
-    intrusive_list.h
-    linear_disk_cache.h
     literals.h
     logging/backend.cpp
     logging/backend.h
@@ -111,10 +109,8 @@ add_library(citra_common STATIC
     microprofile.cpp
     microprofile.h
     microprofileui.h
-    misc.cpp
     param_package.cpp
     param_package.h
-    parent_of_member.h
     polyfill_thread.h
     precompiled_headers.h
     quaternion.h
@@ -129,6 +125,7 @@ add_library(citra_common STATIC
     serialization/boost_discrete_interval.hpp
     serialization/boost_flat_set.h
     serialization/boost_small_vector.hpp
+    serialization/boost_std_variant.hpp
     serialization/boost_vector.hpp
     static_lru_cache.h
     string_literal.h

@@ -21,10 +21,4 @@ template <typename T>
     return static_cast<T>(value - value % size);
 }
 
-template <typename T>
-    requires std::is_unsigned_v<T>
-[[nodiscard]] constexpr bool Is4KBAligned(T value) {
-    return (value & 0xFFF) == 0;
-}
-
 } // namespace Common

@@ -2,78 +2,14 @@
 
 #pragma once
 
+#include <bit>
 #include <cstddef>
-#ifdef _WIN32
-#include <intrin.h>
-#endif
 #include <initializer_list>
-#include <new>
 #include <type_traits>
 #include "common/common_types.h"
 
-// namespace avoids conflict with OS X Carbon; don't use BitSet<T> directly
 namespace Common {
 
-// Helper functions:
-#ifdef _MSC_VER
-template <typename T>
-static inline int CountSetBits(T v) {
-    // from https://graphics.stanford.edu/~seander/bithacks.html
-    // GCC has this built in, but MSVC's intrinsic will only emit the actual
-    // POPCNT instruction, which we're not depending on
-    v = v - ((v >> 1) & (T) ~(T)0 / 3);
-    v = (v & (T) ~(T)0 / 15 * 3) + ((v >> 2) & (T) ~(T)0 / 15 * 3);
-    v = (v + (v >> 4)) & (T) ~(T)0 / 255 * 15;
-    return (T)(v * ((T) ~(T)0 / 255)) >> (sizeof(T) - 1) * 8;
-}
-static inline int LeastSignificantSetBit(u8 val) {
-    unsigned long index;
-    _BitScanForward(&index, val);
-    return (int)index;
-}
-static inline int LeastSignificantSetBit(u16 val) {
-    unsigned long index;
-    _BitScanForward(&index, val);
-    return (int)index;
-}
-static inline int LeastSignificantSetBit(u32 val) {
-    unsigned long index;
-    _BitScanForward(&index, val);
-    return (int)index;
-}
-static inline int LeastSignificantSetBit(u64 val) {
-    unsigned long index;
-    _BitScanForward64(&index, val);
-    return (int)index;
-}
-#else
-static inline int CountSetBits(u8 val) {
-    return __builtin_popcount(val);
-}
-static inline int CountSetBits(u16 val) {
-    return __builtin_popcount(val);
-}
-static inline int CountSetBits(u32 val) {
-    return __builtin_popcount(val);
-}
-static inline int CountSetBits(u64 val) {
-    return __builtin_popcountll(val);
-}
-static inline int LeastSignificantSetBit(u8 val) {
-    return __builtin_ctz(val);
-}
-static inline int LeastSignificantSetBit(u16 val) {
-    return __builtin_ctz(val);
-}
-static inline int LeastSignificantSetBit(u32 val) {
-    return __builtin_ctz(val);
-}
-static inline int LeastSignificantSetBit(u64 val) {
-    return __builtin_ctzll(val);
-}
-#endif
-
 // Similar to std::bitset, this is a class which encapsulates a bitset, i.e.
 // using the set bits of an integer to represent a set of integers. Like that
 // class, it acts like an array of bools:
@@ -92,22 +28,19 @@ static inline int LeastSignificantSetBit(u64 val) {
 // operation.)
 // - Counting set bits using .Count() - see comment on that method.
 
-// TODO: use constexpr when MSVC gets out of the Dark Ages
 template <typename IntTy>
+    requires std::is_unsigned_v<IntTy>
 class BitSet {
-    static_assert(!std::is_signed_v<IntTy>, "BitSet should not be used with signed types");
 public:
     // A reference to a particular bit, returned from operator[].
     class Ref {
     public:
-        Ref(Ref&& other) : m_bs(other.m_bs), m_mask(other.m_mask) {}
-        Ref(BitSet* bs, IntTy mask) : m_bs(bs), m_mask(mask) {}
-        operator bool() const {
+        constexpr Ref(Ref&& other) : m_bs(other.m_bs), m_mask(other.m_mask) {}
+        constexpr Ref(BitSet* bs, IntTy mask) : m_bs(bs), m_mask(mask) {}
+        constexpr operator bool() const {
             return (m_bs->m_val & m_mask) != 0;
         }
-        bool operator=(bool set) {
+        constexpr bool operator=(bool set) {
            m_bs->m_val = (m_bs->m_val & ~m_mask) | (set ? m_mask : 0);
            return set;
        }
@@ -120,26 +53,26 @@ public:
     // A STL-like iterator is required to be able to use range-based for loops.
     class Iterator {
     public:
-        Iterator(const Iterator& other) : m_val(other.m_val) {}
-        Iterator(IntTy val) : m_val(val) {}
-        int operator*() {
+        constexpr Iterator(const Iterator& other) : m_val(other.m_val) {}
+        constexpr Iterator(IntTy val) : m_val(val) {}
+        constexpr int operator*() {
             // This will never be called when m_val == 0, because that would be the end() iterator
-            return LeastSignificantSetBit(m_val);
+            return std::countr_zero(m_val);
         }
-        Iterator& operator++() {
+        constexpr Iterator& operator++() {
             // Unset least significant set bit
             m_val &= m_val - IntTy(1);
             return *this;
         }
-        Iterator operator++(int _) {
+        constexpr Iterator operator++(int) {
             Iterator other(*this);
             ++*this;
             return other;
         }
-        bool operator==(Iterator other) const {
+        constexpr bool operator==(Iterator other) const {
             return m_val == other.m_val;
         }
-        bool operator!=(Iterator other) const {
+        constexpr bool operator!=(Iterator other) const {
             return m_val != other.m_val;
         }
@@ -147,74 +80,69 @@ public:
         IntTy m_val;
     };
 
-    BitSet() : m_val(0) {}
-    explicit BitSet(IntTy val) : m_val(val) {}
-    BitSet(std::initializer_list<int> init) {
+    constexpr BitSet() : m_val(0) {}
+    constexpr explicit BitSet(IntTy val) : m_val(val) {}
+    constexpr BitSet(std::initializer_list<int> init) {
         m_val = 0;
         for (int bit : init)
             m_val |= (IntTy)1 << bit;
     }
 
-    static BitSet AllTrue(std::size_t count) {
+    constexpr static BitSet AllTrue(std::size_t count) {
         return BitSet(count == sizeof(IntTy) * 8 ? ~(IntTy)0 : (((IntTy)1 << count) - 1));
     }
 
-    Ref operator[](std::size_t bit) {
+    constexpr Ref operator[](std::size_t bit) {
         return Ref(this, (IntTy)1 << bit);
     }
 
-    const Ref operator[](std::size_t bit) const {
+    constexpr const Ref operator[](std::size_t bit) const {
        return (*const_cast<BitSet*>(this))[bit];
     }
 
-    bool operator==(BitSet other) const {
+    constexpr bool operator==(BitSet other) const {
         return m_val == other.m_val;
     }
 
-    bool operator!=(BitSet other) const {
+    constexpr bool operator!=(BitSet other) const {
         return m_val != other.m_val;
     }
 
-    bool operator<(BitSet other) const {
+    constexpr bool operator<(BitSet other) const {
         return m_val < other.m_val;
     }
 
-    bool operator>(BitSet other) const {
+    constexpr bool operator>(BitSet other) const {
         return m_val > other.m_val;
     }
 
-    BitSet operator|(BitSet other) const {
+    constexpr BitSet operator|(BitSet other) const {
         return BitSet(m_val | other.m_val);
     }
 
-    BitSet operator&(BitSet other) const {
+    constexpr BitSet operator&(BitSet other) const {
         return BitSet(m_val & other.m_val);
     }
 
-    BitSet operator^(BitSet other) const {
+    constexpr BitSet operator^(BitSet other) const {
         return BitSet(m_val ^ other.m_val);
     }
 
-    BitSet operator~() const {
+    constexpr BitSet operator~() const {
         return BitSet(~m_val);
     }
 
-    BitSet& operator|=(BitSet other) {
+    constexpr BitSet& operator|=(BitSet other) {
         return *this = *this | other;
     }
 
-    BitSet& operator&=(BitSet other) {
+    constexpr BitSet& operator&=(BitSet other) {
         return *this = *this & other;
     }
 
-    BitSet& operator^=(BitSet other) {
+    constexpr BitSet& operator^=(BitSet other) {
         return *this = *this ^ other;
     }
 
     operator u32() = delete;
-    operator bool() {
+    constexpr operator bool() {
         return m_val != 0;
     }
 
-    // Warning: Even though on modern CPUs this is a single fast instruction,
-    // Dolphin's official builds do not currently assume POPCNT support on x86,
-    // so slower explicit bit twiddling is generated. Still should generally
-    // be faster than a loop.
-    unsigned int Count() const {
-        return CountSetBits(m_val);
+    constexpr u32 Count() const {
+        return std::popcount(m_val);
     }
 
-    Iterator begin() const {
+    constexpr Iterator begin() const {
         return Iterator(m_val);
     }
 
-    Iterator end() const {
+    constexpr Iterator end() const {
         return Iterator(0);
     }
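
The deleted helpers map directly onto <bit>; a small illustration (the commented BitSet usage is an assumption based on the new header, not code from the repository):

#include <bit>
#include <cstdint>

// std::popcount and std::countr_zero are constexpr and portable, which is why the
// MSVC/GCC-specific CountSetBits/LeastSignificantSetBit helpers could be dropped.
static_assert(std::popcount(0b1011'0000u) == 3);    // replaces CountSetBits()
static_assert(std::countr_zero(0b1011'0000u) == 4); // replaces LeastSignificantSetBit()

// Because every member is now constexpr, a BitSet can live in a constant expression:
// constexpr Common::BitSet<std::uint32_t> mask{0, 5, 9};
// static_assert(mask.Count() == 3);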

@@ -4,7 +4,6 @@
 
 #pragma once
 
-#include <string>
 #include "common/common_types.h"
 
 /// Textually concatenates two tokens. The double-expansion is required by the C preprocessor.
@@ -46,14 +45,8 @@ __declspec(dllimport) void __stdcall DebugBreak(void);
 #endif
 
 #ifdef _MSC_VER
-#if (_MSC_VER < 1900)
-// Function Cross-Compatibility
-#define snprintf _snprintf
-#endif
 // Locale Cross-Compatibility
 #define locale_t _locale_t
 #endif // _MSC_VER
 
 #define DECLARE_ENUM_FLAG_OPERATORS(type) \
@@ -109,17 +102,3 @@ __declspec(dllimport) void __stdcall DebugBreak(void);
     using T = std::underlying_type_t<type>; \
     return static_cast<T>(key) == 0; \
     }
-
-#define CITRA_NON_COPYABLE(cls) \
-    cls(const cls&) = delete; \
-    cls& operator=(const cls&) = delete
-
-#define CITRA_NON_MOVEABLE(cls) \
-    cls(cls&&) = delete; \
-    cls& operator=(cls&&) = delete
-
-// Generic function to get last error message.
-// Call directly after the command or use the error num.
-// This function might change the error code.
-// Defined in Misc.cpp.
-[[nodiscard]] std::string GetLastErrorMsg();

@@ -14,6 +14,7 @@
 #include "common/assert.h"
 #include "common/common_funcs.h"
 #include "common/common_paths.h"
+#include "common/error.h"
 #include "common/file_util.h"
 #include "common/logging/log.h"
 #include "common/scope_exit.h"
@@ -90,6 +91,8 @@
 // REMEMBER: strdup considered harmful!
 namespace FileUtil {
 
+using Common::GetLastErrorMsg;
+
 // Remove any ending forward slashes from directory paths
 // Modifies argument.
 static void StripTailDirSlashes(std::string& fname) {
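
Call sites can now pull the declaration from the small common/error.h header instead of common_funcs.h; a hedged usage sketch (the surrounding function is illustrative only, not repository code):

#include <cstdio>

#include "common/error.h"
#include "common/logging/log.h"

// Illustrative only: log the OS error for a failed open without touching common_funcs.h.
static void LogOpenFailure(const char* path) {
    if (std::FILE* f = std::fopen(path, "rb")) {
        std::fclose(f);
        return;
    }
    LOG_ERROR(Common_Filesystem, "Error opening {}: {}", path, Common::GetLastErrorMsg());
}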

@@ -1,631 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/common_funcs.h"
#include "common/parent_of_member.h"
namespace Common {
// Forward declare implementation class for Node.
namespace impl {
class IntrusiveListImpl;
}
class IntrusiveListNode {
CITRA_NON_COPYABLE(IntrusiveListNode);
private:
friend class impl::IntrusiveListImpl;
IntrusiveListNode* m_prev;
IntrusiveListNode* m_next;
public:
constexpr IntrusiveListNode() : m_prev(this), m_next(this) {}
constexpr bool IsLinked() const {
return m_next != this;
}
private:
constexpr void LinkPrev(IntrusiveListNode* node) {
// We can't link an already linked node.
ASSERT(!node->IsLinked());
this->SplicePrev(node, node);
}
constexpr void SplicePrev(IntrusiveListNode* first, IntrusiveListNode* last) {
// Splice a range into the list.
auto last_prev = last->m_prev;
first->m_prev = m_prev;
last_prev->m_next = this;
m_prev->m_next = first;
m_prev = last_prev;
}
constexpr void LinkNext(IntrusiveListNode* node) {
// We can't link an already linked node.
ASSERT(!node->IsLinked());
return this->SpliceNext(node, node);
}
constexpr void SpliceNext(IntrusiveListNode* first, IntrusiveListNode* last) {
// Splice a range into the list.
auto last_prev = last->m_prev;
first->m_prev = this;
last_prev->m_next = m_next;
m_next->m_prev = last_prev;
m_next = first;
}
constexpr void Unlink() {
this->Unlink(m_next);
}
constexpr void Unlink(IntrusiveListNode* last) {
// Unlink a node from a next node.
auto last_prev = last->m_prev;
m_prev->m_next = last;
last->m_prev = m_prev;
last_prev->m_next = this;
m_prev = last_prev;
}
constexpr IntrusiveListNode* GetPrev() {
return m_prev;
}
constexpr const IntrusiveListNode* GetPrev() const {
return m_prev;
}
constexpr IntrusiveListNode* GetNext() {
return m_next;
}
constexpr const IntrusiveListNode* GetNext() const {
return m_next;
}
};
// DEPRECATED: static_assert(std::is_literal_type<IntrusiveListNode>::value);
namespace impl {
class IntrusiveListImpl {
CITRA_NON_COPYABLE(IntrusiveListImpl);
private:
IntrusiveListNode m_root_node;
public:
template <bool Const>
class Iterator;
using value_type = IntrusiveListNode;
using size_type = size_t;
using difference_type = ptrdiff_t;
using pointer = value_type*;
using const_pointer = const value_type*;
using reference = value_type&;
using const_reference = const value_type&;
using iterator = Iterator<false>;
using const_iterator = Iterator<true>;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
template <bool Const>
class Iterator {
public:
using iterator_category = std::bidirectional_iterator_tag;
using value_type = typename IntrusiveListImpl::value_type;
using difference_type = typename IntrusiveListImpl::difference_type;
using pointer =
std::conditional_t<Const, IntrusiveListImpl::const_pointer, IntrusiveListImpl::pointer>;
using reference = std::conditional_t<Const, IntrusiveListImpl::const_reference,
IntrusiveListImpl::reference>;
private:
pointer m_node;
public:
constexpr explicit Iterator(pointer n) : m_node(n) {}
constexpr bool operator==(const Iterator& rhs) const {
return m_node == rhs.m_node;
}
constexpr pointer operator->() const {
return m_node;
}
constexpr reference operator*() const {
return *m_node;
}
constexpr Iterator& operator++() {
m_node = m_node->m_next;
return *this;
}
constexpr Iterator& operator--() {
m_node = m_node->m_prev;
return *this;
}
constexpr Iterator operator++(int) {
const Iterator it{*this};
++(*this);
return it;
}
constexpr Iterator operator--(int) {
const Iterator it{*this};
--(*this);
return it;
}
constexpr operator Iterator<true>() const {
return Iterator<true>(m_node);
}
constexpr Iterator<false> GetNonConstIterator() const {
return Iterator<false>(const_cast<IntrusiveListImpl::pointer>(m_node));
}
};
public:
constexpr IntrusiveListImpl() : m_root_node() {}
// Iterator accessors.
constexpr iterator begin() {
return iterator(m_root_node.GetNext());
}
constexpr const_iterator begin() const {
return const_iterator(m_root_node.GetNext());
}
constexpr iterator end() {
return iterator(std::addressof(m_root_node));
}
constexpr const_iterator end() const {
return const_iterator(std::addressof(m_root_node));
}
constexpr iterator iterator_to(reference v) {
// Only allow iterator_to for values in lists.
ASSERT(v.IsLinked());
return iterator(std::addressof(v));
}
constexpr const_iterator iterator_to(const_reference v) const {
// Only allow iterator_to for values in lists.
ASSERT(v.IsLinked());
return const_iterator(std::addressof(v));
}
// Content management.
constexpr bool empty() const {
return !m_root_node.IsLinked();
}
constexpr size_type size() const {
return static_cast<size_type>(std::distance(this->begin(), this->end()));
}
constexpr reference back() {
return *m_root_node.GetPrev();
}
constexpr const_reference back() const {
return *m_root_node.GetPrev();
}
constexpr reference front() {
return *m_root_node.GetNext();
}
constexpr const_reference front() const {
return *m_root_node.GetNext();
}
constexpr void push_back(reference node) {
m_root_node.LinkPrev(std::addressof(node));
}
constexpr void push_front(reference node) {
m_root_node.LinkNext(std::addressof(node));
}
constexpr void pop_back() {
m_root_node.GetPrev()->Unlink();
}
constexpr void pop_front() {
m_root_node.GetNext()->Unlink();
}
constexpr iterator insert(const_iterator pos, reference node) {
pos.GetNonConstIterator()->LinkPrev(std::addressof(node));
return iterator(std::addressof(node));
}
constexpr void splice(const_iterator pos, IntrusiveListImpl& o) {
splice_impl(pos, o.begin(), o.end());
}
constexpr void splice(const_iterator pos, IntrusiveListImpl&, const_iterator first) {
const_iterator last(first);
std::advance(last, 1);
splice_impl(pos, first, last);
}
constexpr void splice(const_iterator pos, IntrusiveListImpl&, const_iterator first,
const_iterator last) {
splice_impl(pos, first, last);
}
constexpr iterator erase(const_iterator pos) {
if (pos == this->end()) {
return this->end();
}
iterator it(pos.GetNonConstIterator());
(it++)->Unlink();
return it;
}
constexpr void clear() {
while (!this->empty()) {
this->pop_front();
}
}
private:
constexpr void splice_impl(const_iterator _pos, const_iterator _first, const_iterator _last) {
if (_first == _last) {
return;
}
iterator pos(_pos.GetNonConstIterator());
iterator first(_first.GetNonConstIterator());
iterator last(_last.GetNonConstIterator());
first->Unlink(std::addressof(*last));
pos->SplicePrev(std::addressof(*first), std::addressof(*first));
}
};
} // namespace impl
template <class T, class Traits>
class IntrusiveList {
CITRA_NON_COPYABLE(IntrusiveList);
private:
impl::IntrusiveListImpl m_impl;
public:
template <bool Const>
class Iterator;
using value_type = T;
using size_type = size_t;
using difference_type = ptrdiff_t;
using pointer = value_type*;
using const_pointer = const value_type*;
using reference = value_type&;
using const_reference = const value_type&;
using iterator = Iterator<false>;
using const_iterator = Iterator<true>;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
template <bool Const>
class Iterator {
public:
friend class Common::IntrusiveList<T, Traits>;
using ImplIterator =
std::conditional_t<Const, Common::impl::IntrusiveListImpl::const_iterator,
Common::impl::IntrusiveListImpl::iterator>;
using iterator_category = std::bidirectional_iterator_tag;
using value_type = typename IntrusiveList::value_type;
using difference_type = typename IntrusiveList::difference_type;
using pointer =
std::conditional_t<Const, IntrusiveList::const_pointer, IntrusiveList::pointer>;
using reference =
std::conditional_t<Const, IntrusiveList::const_reference, IntrusiveList::reference>;
private:
ImplIterator m_iterator;
private:
constexpr explicit Iterator(ImplIterator it) : m_iterator(it) {}
constexpr ImplIterator GetImplIterator() const {
return m_iterator;
}
public:
constexpr bool operator==(const Iterator& rhs) const {
return m_iterator == rhs.m_iterator;
}
constexpr pointer operator->() const {
return std::addressof(Traits::GetParent(*m_iterator));
}
constexpr reference operator*() const {
return Traits::GetParent(*m_iterator);
}
constexpr Iterator& operator++() {
++m_iterator;
return *this;
}
constexpr Iterator& operator--() {
--m_iterator;
return *this;
}
constexpr Iterator operator++(int) {
const Iterator it{*this};
++m_iterator;
return it;
}
constexpr Iterator operator--(int) {
const Iterator it{*this};
--m_iterator;
return it;
}
constexpr operator Iterator<true>() const {
return Iterator<true>(m_iterator);
}
};
private:
static constexpr IntrusiveListNode& GetNode(reference ref) {
return Traits::GetNode(ref);
}
static constexpr IntrusiveListNode const& GetNode(const_reference ref) {
return Traits::GetNode(ref);
}
static constexpr reference GetParent(IntrusiveListNode& node) {
return Traits::GetParent(node);
}
static constexpr const_reference GetParent(IntrusiveListNode const& node) {
return Traits::GetParent(node);
}
public:
constexpr IntrusiveList() : m_impl() {}
// Iterator accessors.
constexpr iterator begin() {
return iterator(m_impl.begin());
}
constexpr const_iterator begin() const {
return const_iterator(m_impl.begin());
}
constexpr iterator end() {
return iterator(m_impl.end());
}
constexpr const_iterator end() const {
return const_iterator(m_impl.end());
}
constexpr const_iterator cbegin() const {
return this->begin();
}
constexpr const_iterator cend() const {
return this->end();
}
constexpr reverse_iterator rbegin() {
return reverse_iterator(this->end());
}
constexpr const_reverse_iterator rbegin() const {
return const_reverse_iterator(this->end());
}
constexpr reverse_iterator rend() {
return reverse_iterator(this->begin());
}
constexpr const_reverse_iterator rend() const {
return const_reverse_iterator(this->begin());
}
constexpr const_reverse_iterator crbegin() const {
return this->rbegin();
}
constexpr const_reverse_iterator crend() const {
return this->rend();
}
constexpr iterator iterator_to(reference v) {
return iterator(m_impl.iterator_to(GetNode(v)));
}
constexpr const_iterator iterator_to(const_reference v) const {
return const_iterator(m_impl.iterator_to(GetNode(v)));
}
// Content management.
constexpr bool empty() const {
return m_impl.empty();
}
constexpr size_type size() const {
return m_impl.size();
}
constexpr reference back() {
return GetParent(m_impl.back());
}
constexpr const_reference back() const {
return GetParent(m_impl.back());
}
constexpr reference front() {
return GetParent(m_impl.front());
}
constexpr const_reference front() const {
return GetParent(m_impl.front());
}
constexpr void push_back(reference ref) {
m_impl.push_back(GetNode(ref));
}
constexpr void push_front(reference ref) {
m_impl.push_front(GetNode(ref));
}
constexpr void pop_back() {
m_impl.pop_back();
}
constexpr void pop_front() {
m_impl.pop_front();
}
constexpr iterator insert(const_iterator pos, reference ref) {
return iterator(m_impl.insert(pos.GetImplIterator(), GetNode(ref)));
}
constexpr void splice(const_iterator pos, IntrusiveList& o) {
m_impl.splice(pos.GetImplIterator(), o.m_impl);
}
constexpr void splice(const_iterator pos, IntrusiveList& o, const_iterator first) {
m_impl.splice(pos.GetImplIterator(), o.m_impl, first.GetImplIterator());
}
constexpr void splice(const_iterator pos, IntrusiveList& o, const_iterator first,
const_iterator last) {
m_impl.splice(pos.GetImplIterator(), o.m_impl, first.GetImplIterator(),
last.GetImplIterator());
}
constexpr iterator erase(const_iterator pos) {
return iterator(m_impl.erase(pos.GetImplIterator()));
}
constexpr void clear() {
m_impl.clear();
}
};
template <auto T, class Derived = Common::impl::GetParentType<T>>
class IntrusiveListMemberTraits;
template <class Parent, IntrusiveListNode Parent::*Member, class Derived>
class IntrusiveListMemberTraits<Member, Derived> {
public:
using ListType = IntrusiveList<Derived, IntrusiveListMemberTraits>;
private:
friend class IntrusiveList<Derived, IntrusiveListMemberTraits>;
static constexpr IntrusiveListNode& GetNode(Derived& parent) {
return parent.*Member;
}
static constexpr IntrusiveListNode const& GetNode(Derived const& parent) {
return parent.*Member;
}
static Derived& GetParent(IntrusiveListNode& node) {
return Common::GetParentReference<Member, Derived>(std::addressof(node));
}
static Derived const& GetParent(IntrusiveListNode const& node) {
return Common::GetParentReference<Member, Derived>(std::addressof(node));
}
};
template <auto T, class Derived = Common::impl::GetParentType<T>>
class IntrusiveListMemberTraitsByNonConstexprOffsetOf;
template <class Parent, IntrusiveListNode Parent::*Member, class Derived>
class IntrusiveListMemberTraitsByNonConstexprOffsetOf<Member, Derived> {
public:
using ListType = IntrusiveList<Derived, IntrusiveListMemberTraitsByNonConstexprOffsetOf>;
private:
friend class IntrusiveList<Derived, IntrusiveListMemberTraitsByNonConstexprOffsetOf>;
static constexpr IntrusiveListNode& GetNode(Derived& parent) {
return parent.*Member;
}
static constexpr IntrusiveListNode const& GetNode(Derived const& parent) {
return parent.*Member;
}
static Derived& GetParent(IntrusiveListNode& node) {
return *reinterpret_cast<Derived*>(reinterpret_cast<char*>(std::addressof(node)) -
GetOffset());
}
static Derived const& GetParent(IntrusiveListNode const& node) {
return *reinterpret_cast<const Derived*>(
reinterpret_cast<const char*>(std::addressof(node)) - GetOffset());
}
static uintptr_t GetOffset() {
return reinterpret_cast<uintptr_t>(std::addressof(reinterpret_cast<Derived*>(0)->*Member));
}
};
template <class Derived>
class IntrusiveListBaseNode : public IntrusiveListNode {};
template <class Derived>
class IntrusiveListBaseTraits {
public:
using ListType = IntrusiveList<Derived, IntrusiveListBaseTraits>;
private:
friend class IntrusiveList<Derived, IntrusiveListBaseTraits>;
static constexpr IntrusiveListNode& GetNode(Derived& parent) {
return static_cast<IntrusiveListNode&>(
static_cast<IntrusiveListBaseNode<Derived>&>(parent));
}
static constexpr IntrusiveListNode const& GetNode(Derived const& parent) {
return static_cast<const IntrusiveListNode&>(
static_cast<const IntrusiveListBaseNode<Derived>&>(parent));
}
static constexpr Derived& GetParent(IntrusiveListNode& node) {
return static_cast<Derived&>(static_cast<IntrusiveListBaseNode<Derived>&>(node));
}
static constexpr Derived const& GetParent(IntrusiveListNode const& node) {
return static_cast<const Derived&>(
static_cast<const IntrusiveListBaseNode<Derived>&>(node));
}
};
} // namespace Common

@@ -1,168 +0,0 @@
// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <fstream>
#include <vector>
#include "common/common_types.h"
// defined in Version.cpp
extern const char* scm_rev_git_str;
// On disk format:
// header{
// u32 'DCAC';
// u32 version; // svn_rev
// u16 sizeof(key_type);
// u16 sizeof(value_type);
//}
// key_value_pair{
// u32 value_size;
// key_type key;
// value_type[value_size] value;
//}
template <typename K, typename V>
class LinearDiskCacheReader {
public:
virtual void Read(const K& key, const V* value, u32 value_size) = 0;
};
// Dead simple unsorted key-value store with append functionality.
// No random read functionality, all reading is done in OpenAndRead.
// Keys and values can contain any characters, including \0.
//
// Suitable for caching generated shader bytecode between executions.
// Not tuned for extreme performance but should be reasonably fast.
// Does not support keys or values larger than 2GB, which should be reasonable.
// Keys must have non-zero length; values can have zero length.
// K and V are some POD type
// K : the key type
// V : value array type
template <typename K, typename V>
class LinearDiskCache {
public:
// return number of read entries
u32 OpenAndRead(const char* filename, LinearDiskCacheReader<K, V>& reader) {
using std::ios_base;
// close any currently opened file
Close();
m_num_entries = 0;
// try opening for reading/writing
OpenFStream(m_file, filename, ios_base::in | ios_base::out | ios_base::binary);
m_file.seekg(0, std::ios::end);
std::fstream::pos_type end_pos = m_file.tellg();
m_file.seekg(0, std::ios::beg);
std::fstream::pos_type start_pos = m_file.tellg();
std::streamoff file_size = end_pos - start_pos;
if (m_file.is_open() && ValidateHeader()) {
// good header, read some key/value pairs
K key;
std::vector<V> value;
u32 value_size;
u32 entry_number;
std::fstream::pos_type last_pos = m_file.tellg();
while (Read(&value_size)) {
std::streamoff next_extent =
(last_pos - start_pos) + sizeof(value_size) + value_size;
if (next_extent > file_size)
break;
value.clear();
value.resize(value_size);
// read key/value and pass to reader
if (Read(&key) && Read(value.data(), value_size) && Read(&entry_number) &&
entry_number == m_num_entries + 1) {
reader.Read(key, value.data(), value_size);
} else {
break;
}
m_num_entries++;
last_pos = m_file.tellg();
}
m_file.seekp(last_pos);
m_file.clear();
value.clear();
return m_num_entries;
}
// failed to open file for reading or bad header
// close and recreate file
Close();
m_file.open(filename, ios_base::out | ios_base::trunc | ios_base::binary);
WriteHeader();
return 0;
}
void Sync() {
m_file.flush();
}
void Close() {
if (m_file.is_open())
m_file.close();
// clear any error flags
m_file.clear();
}
// Appends a key-value pair to the store.
void Append(const K& key, const V* value, u32 value_size) {
// TODO: Should do a check that we don't already have "key"? (I think each caller does that
// already.)
Write(&value_size);
Write(&key);
Write(value, value_size);
m_num_entries++;
Write(&m_num_entries);
}
private:
void WriteHeader() {
Write(&m_header);
}
bool ValidateHeader() {
char file_header[sizeof(Header)];
return (Read(file_header, sizeof(Header)) &&
!memcmp((const char*)&m_header, file_header, sizeof(Header)));
}
template <typename D>
bool Write(const D* data, u32 count = 1) {
return m_file.write((const char*)data, count * sizeof(D)).good();
}
template <typename D>
bool Read(const D* data, u32 count = 1) {
return m_file.read((char*)data, count * sizeof(D)).good();
}
struct Header {
Header() : id(*(u32*)"DCAC"), key_t_size(sizeof(K)), value_t_size(sizeof(V)) {
std::memcpy(ver, scm_rev_git_str, 40);
}
const u32 id;
const u16 key_t_size, value_t_size;
char ver[40];
} m_header;
std::fstream m_file;
u32 m_num_entries;
};

@@ -1,48 +0,0 @@
// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <cstddef>
#ifdef _WIN32
#include <windows.h>
#else
#include <cerrno>
#include <cstring>
#endif
#include "common/common_funcs.h"
// Generic function to get last error message.
// Call directly after the command or use the error num.
// This function might change the error code.
std::string GetLastErrorMsg() {
#ifdef _WIN32
LPSTR err_str;
DWORD res = FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER |
FORMAT_MESSAGE_IGNORE_INSERTS,
nullptr, GetLastError(), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
reinterpret_cast<LPSTR>(&err_str), 1, nullptr);
if (!res) {
return "(FormatMessageA failed to format error)";
}
std::string ret(err_str);
LocalFree(err_str);
return ret;
#else
char err_str[255];
#if (defined(__GLIBC__) || __ANDROID_API__ >= 23) && \
(_GNU_SOURCE || (_POSIX_C_SOURCE < 200112L && _XOPEN_SOURCE < 600))
// Thread safe (GNU-specific)
const char* str = strerror_r(errno, err_str, sizeof(err_str));
return std::string(str);
#else
// Thread safe (XSI-compliant)
int second_err = strerror_r(errno, err_str, sizeof(err_str));
if (second_err != 0) {
return "(strerror_r failed to format error)";
}
return std::string(err_str);
#endif // GLIBC etc.
#endif // _WIN32
}

@@ -1,190 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <type_traits>
#include "common/assert.h"
namespace Common {
namespace detail {
template <typename T, size_t Size, size_t Align>
struct TypedStorageImpl {
alignas(Align) u8 storage_[Size];
};
} // namespace detail
template <typename T>
using TypedStorage = detail::TypedStorageImpl<T, sizeof(T), alignof(T)>;
template <typename T>
static constexpr T* GetPointer(TypedStorage<T>& ts) {
return static_cast<T*>(static_cast<void*>(std::addressof(ts.storage_)));
}
template <typename T>
static constexpr const T* GetPointer(const TypedStorage<T>& ts) {
return static_cast<const T*>(static_cast<const void*>(std::addressof(ts.storage_)));
}
namespace impl {
template <size_t MaxDepth>
struct OffsetOfUnionHolder {
template <typename ParentType, typename MemberType, size_t Offset>
union UnionImpl {
using PaddingMember = char;
static constexpr size_t GetOffset() {
return Offset;
}
#pragma pack(push, 1)
struct {
PaddingMember padding[Offset];
MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1];
} data;
#pragma pack(pop)
UnionImpl<ParentType, MemberType, Offset + 1> next_union;
};
template <typename ParentType, typename MemberType>
union UnionImpl<ParentType, MemberType, 0> {
static constexpr size_t GetOffset() {
return 0;
}
struct {
MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1];
} data;
UnionImpl<ParentType, MemberType, 1> next_union;
};
template <typename ParentType, typename MemberType>
union UnionImpl<ParentType, MemberType, MaxDepth> {};
};
template <typename ParentType, typename MemberType>
struct OffsetOfCalculator {
using UnionHolder =
typename OffsetOfUnionHolder<sizeof(MemberType)>::template UnionImpl<ParentType, MemberType,
0>;
union Union {
char c{};
UnionHolder first_union;
TypedStorage<ParentType> parent;
constexpr Union() : c() {}
};
static constexpr Union U = {};
static constexpr const MemberType* GetNextAddress(const MemberType* start,
const MemberType* target) {
while (start < target) {
start++;
}
return start;
}
static constexpr std::ptrdiff_t GetDifference(const MemberType* start,
const MemberType* target) {
return (target - start) * sizeof(MemberType);
}
template <typename CurUnion>
static constexpr std::ptrdiff_t OffsetOfImpl(MemberType ParentType::*member,
CurUnion& cur_union) {
constexpr size_t Offset = CurUnion::GetOffset();
const auto target = std::addressof(GetPointer(U.parent)->*member);
const auto start = std::addressof(cur_union.data.members[0]);
const auto next = GetNextAddress(start, target);
if (next != target) {
if constexpr (Offset < sizeof(MemberType) - 1) {
return OffsetOfImpl(member, cur_union.next_union);
} else {
UNREACHABLE();
}
}
return static_cast<ptrdiff_t>(static_cast<size_t>(next - start) * sizeof(MemberType) +
Offset);
}
static constexpr std::ptrdiff_t OffsetOf(MemberType ParentType::*member) {
return OffsetOfImpl(member, U.first_union);
}
};
template <typename T>
struct GetMemberPointerTraits;
template <typename P, typename M>
struct GetMemberPointerTraits<M P::*> {
using Parent = P;
using Member = M;
};
template <auto MemberPtr>
using GetParentType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Parent;
template <auto MemberPtr>
using GetMemberType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Member;
template <auto MemberPtr, typename RealParentType = GetParentType<MemberPtr>>
constexpr std::ptrdiff_t OffsetOf() {
using DeducedParentType = GetParentType<MemberPtr>;
using MemberType = GetMemberType<MemberPtr>;
static_assert(std::is_base_of<DeducedParentType, RealParentType>::value ||
std::is_same<RealParentType, DeducedParentType>::value);
return OffsetOfCalculator<RealParentType, MemberType>::OffsetOf(MemberPtr);
};
} // namespace impl
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>* member) {
std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>();
return *static_cast<RealParentType*>(
static_cast<void*>(static_cast<uint8_t*>(static_cast<void*>(member)) - Offset));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const* member) {
std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>();
return *static_cast<const RealParentType*>(static_cast<const void*>(
static_cast<const uint8_t*>(static_cast<const void*>(member)) - Offset));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType* GetParentPointer(impl::GetMemberType<MemberPtr>* member) {
return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const* GetParentPointer(impl::GetMemberType<MemberPtr> const* member) {
return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>& member) {
return GetParentReference<MemberPtr, RealParentType>(std::addressof(member));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const& member) {
return GetParentReference<MemberPtr, RealParentType>(std::addressof(member));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType* GetParentPointer(impl::GetMemberType<MemberPtr>& member) {
return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const* GetParentPointer(impl::GetMemberType<MemberPtr> const& member) {
return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}
} // namespace Common

@@ -105,9 +105,7 @@ public:
 private:
     // It is important to separate the below atomics for performance reasons:
     // Having them on the same cache-line would result in false-sharing between them.
-    // TODO: Remove this ifdef whenever clang and GCC support
-    // std::hardware_destructive_interference_size.
-#if defined(_MSC_VER) && _MSC_VER >= 1911
+#ifdef __cpp_lib_hardware_interference_size
     static constexpr std::size_t padding_size =
         std::hardware_destructive_interference_size - sizeof(std::atomic_size_t);
 #else
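
The same feature-test pattern in a self-contained form (the 64-byte fallback is an assumption; the actual fallback branch is not shown in this hunk):

#include <atomic>
#include <cstddef>
#include <new>

// A feature-test macro replaces the compiler/version allow-list: any standard
// library that ships the constant also defines __cpp_lib_hardware_interference_size.
#ifdef __cpp_lib_hardware_interference_size
inline constexpr std::size_t padding_size =
    std::hardware_destructive_interference_size - sizeof(std::atomic_size_t);
#else
inline constexpr std::size_t padding_size = 64 - sizeof(std::atomic_size_t); // assumed cache line
#endif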

@@ -0,0 +1,204 @@
#ifndef BOOST_SERIALIZATION_STD_VARIANT_HPP
#define BOOST_SERIALIZATION_STD_VARIANT_HPP
// MS compatible compilers support #pragma once
#if defined(_MSC_VER)
# pragma once
#endif
/////////1/////////2/////////3/////////4/////////5/////////6/////////7/////////8
// variant.hpp - non-intrusive serialization of variant types
//
// copyright (c) 2019 Samuel Debionne, ESRF
//
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org for updates, documentation, and revision history.
//
// Widely inspired form boost::variant serialization
//
#include <boost/serialization/throw_exception.hpp>
#include <variant>
#include <boost/archive/archive_exception.hpp>
#include <boost/serialization/split_free.hpp>
#include <boost/serialization/serialization.hpp>
#include <boost/serialization/nvp.hpp>
namespace boost {
namespace serialization {
template<class Archive>
struct std_variant_save_visitor
{
std_variant_save_visitor(Archive& ar) :
m_ar(ar)
{}
template<class T>
void operator()(T const & value) const
{
m_ar << BOOST_SERIALIZATION_NVP(value);
}
private:
Archive & m_ar;
};
template<class Archive>
struct std_variant_load_visitor
{
std_variant_load_visitor(Archive& ar) :
m_ar(ar)
{}
template<class T>
void operator()(T & value) const
{
m_ar >> BOOST_SERIALIZATION_NVP(value);
}
private:
Archive & m_ar;
};
template<class Archive, class ...Types>
void save(
Archive & ar,
std::variant<Types...> const & v,
unsigned int /*version*/
){
const std::size_t which = v.index();
ar << BOOST_SERIALIZATION_NVP(which);
std_variant_save_visitor<Archive> visitor(ar);
std::visit(visitor, v);
}
// Minimalist metaprogramming for handling parameter pack
namespace mp {
namespace detail {
template <typename Seq>
struct front_impl;
template <template <typename...> class Seq, typename T, typename... Ts>
struct front_impl<Seq<T, Ts...>> {
using type = T;
};
template <typename Seq>
struct pop_front_impl;
template <template <typename...> class Seq, typename T, typename... Ts>
struct pop_front_impl<Seq<T, Ts...>> {
using type = Seq<Ts...>;
};
} //namespace detail
template <typename... Ts>
struct typelist {};
template <typename Seq>
using front = typename detail::front_impl<Seq>::type;
template <typename Seq>
using pop_front = typename detail::pop_front_impl<Seq>::type;
} // namespace mp
template<std::size_t N, class Seq>
struct variant_impl
{
template<class Archive, class V>
static void load (
Archive & ar,
std::size_t which,
V & v,
const unsigned int version
){
if(which == 0){
// note: A non-intrusive implementation (such as this one)
// necessary has to copy the value. This wouldn't be necessary
// with an implementation that de-serialized to the address of the
// aligned storage included in the variant.
using type = mp::front<Seq>;
type value;
ar >> BOOST_SERIALIZATION_NVP(value);
v = std::move(value);
type * new_address = & std::get<type>(v);
ar.reset_object_address(new_address, & value);
return;
}
//typedef typename mpl::pop_front<S>::type type;
using types = mp::pop_front<Seq>;
variant_impl<N - 1, types>::load(ar, which - 1, v, version);
}
};
template<class Seq>
struct variant_impl<0, Seq>
{
template<class Archive, class V>
static void load (
Archive & /*ar*/,
std::size_t /*which*/,
V & /*v*/,
const unsigned int /*version*/
){}
};
template<class Archive, class... Types>
void load(
Archive & ar,
std::variant<Types...>& v,
const unsigned int version
){
std::size_t which;
ar >> BOOST_SERIALIZATION_NVP(which);
if(which >= sizeof...(Types))
// this might happen if a type was removed from the list of variant types
boost::serialization::throw_exception(
boost::archive::archive_exception(
boost::archive::archive_exception::unsupported_version
)
);
variant_impl<sizeof...(Types), mp::typelist<Types...>>::load(ar, which, v, version);
}
template<class Archive,class... Types>
inline void serialize(
Archive & ar,
std::variant<Types...> & v,
const unsigned int file_version
){
split_free(ar,v,file_version);
}
// Specialization for std::monostate
template<class Archive>
void serialize(Archive &ar, std::monostate &, const unsigned int /*version*/)
{}
} // namespace serialization
} // namespace boost
//template<typename T0_, BOOST_VARIANT_ENUM_SHIFTED_PARAMS(typename T)>
#include <boost/serialization/tracking.hpp>
namespace boost {
namespace serialization {
template<class... Types>
struct tracking_level<
std::variant<Types...>
>{
typedef mpl::integral_c_tag tag;
typedef mpl::int_< ::boost::serialization::track_always> type;
BOOST_STATIC_CONSTANT(int, value = type::value);
};
} // namespace serialization
} // namespace boost
#endif //BOOST_SERIALIZATION_VARIANT_HPP
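
A usage sketch for the new header (the text archive types and the extra string serializer include are assumptions for the example; Citra's save states go through their own archive classes):

#include <sstream>
#include <string>
#include <variant>

#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/serialization/string.hpp>

#include "common/serialization/boost_std_variant.hpp"

// Round-trip a std::variant through a text archive using the non-intrusive
// save/load/serialize overloads defined above.
std::variant<int, std::string> RoundTrip(const std::variant<int, std::string>& in) {
    std::stringstream stream;
    {
        boost::archive::text_oarchive oa{stream};
        oa << in; // writes the active index, then the held value
    }
    std::variant<int, std::string> out;
    {
        boost::archive::text_iarchive ia{stream};
        ia >> out; // reads the index and reconstructs the matching alternative
    }
    return out;
}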

@@ -17,43 +17,14 @@
 
 #pragma once
 
-#include <type_traits>
 #if defined(_MSC_VER)
 #include <cstdlib>
 #endif
+#include <bit>
 #include <cstring>
+#include <type_traits>
 
 #include "common/common_types.h"
 
-// GCC
-#ifdef __GNUC__
-#if __BYTE_ORDER__ && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) && !defined(COMMON_LITTLE_ENDIAN)
-#define COMMON_LITTLE_ENDIAN 1
-#elif __BYTE_ORDER__ && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) && !defined(COMMON_BIG_ENDIAN)
-#define COMMON_BIG_ENDIAN 1
-#endif
-// LLVM/clang
-#elif defined(__clang__)
-#if __LITTLE_ENDIAN__ && !defined(COMMON_LITTLE_ENDIAN)
-#define COMMON_LITTLE_ENDIAN 1
-#elif __BIG_ENDIAN__ && !defined(COMMON_BIG_ENDIAN)
-#define COMMON_BIG_ENDIAN 1
-#endif
-// MSVC
-#elif defined(_MSC_VER) && !defined(COMMON_BIG_ENDIAN) && !defined(COMMON_LITTLE_ENDIAN)
-#define COMMON_LITTLE_ENDIAN 1
-#endif
-
-// Worst case, default to little endian.
-#if !COMMON_BIG_ENDIAN && !COMMON_LITTLE_ENDIAN
-#define COMMON_LITTLE_ENDIAN 1
-#endif
-
 namespace Common {
 
 #ifdef _MSC_VER
@@ -675,17 +646,8 @@ struct AddEndian<T, SwapTag> {
 };
 
 // Alias LETag/BETag as KeepTag/SwapTag depending on the system
-#if COMMON_LITTLE_ENDIAN
-using LETag = KeepTag;
-using BETag = SwapTag;
-#else
-using BETag = KeepTag;
-using LETag = SwapTag;
-#endif
+using LETag = std::conditional_t<std::endian::native == std::endian::little, KeepTag, SwapTag>;
+using BETag = std::conditional_t<std::endian::native == std::endian::big, KeepTag, SwapTag>;
 
 // Aliases for LE types
 using u16_le = AddEndian<u16, LETag>::type;
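
A self-contained illustration of the replacement logic, with stand-in tag types (assumed names matching the hunk above):

#include <bit>
#include <type_traits>

// Tag types picked at compile time from std::endian instead of per-compiler
// preprocessor checks.
struct KeepTag {};
struct SwapTag {};

using LETag = std::conditional_t<std::endian::native == std::endian::little, KeepTag, SwapTag>;
using BETag = std::conditional_t<std::endian::native == std::endian::big, KeepTag, SwapTag>;

// On a little-endian host, LE data is kept as-is and BE data is byte-swapped.
static_assert(std::endian::native != std::endian::little || std::is_same_v<LETag, KeepTag>);
static_assert(std::endian::native != std::endian::little || std::is_same_v<BETag, SwapTag>);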

@@ -11,6 +11,7 @@
 #include <mach/mach.h>
 #elif defined(_WIN32)
 #include <windows.h>
+#include "common/string_util.h"
 #else
 #if defined(__Bitrig__) || defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
 #include <pthread_np.h>
@@ -82,29 +83,8 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) {
 #ifdef _MSC_VER
 
 // Sets the debugger-visible name of the current thread.
-// Uses trick documented in:
-// https://docs.microsoft.com/en-us/visualstudio/debugger/how-to-set-a-thread-name-in-native-code
 void SetCurrentThreadName(const char* name) {
-    static const DWORD MS_VC_EXCEPTION = 0x406D1388;
-
-#pragma pack(push, 8)
-    struct THREADNAME_INFO {
-        DWORD dwType;     // must be 0x1000
-        LPCSTR szName;    // pointer to name (in user addr space)
-        DWORD dwThreadID; // thread ID (-1=caller thread)
-        DWORD dwFlags;    // reserved for future use, must be zero
-    } info;
-#pragma pack(pop)
-
-    info.dwType = 0x1000;
-    info.szName = name;
-    info.dwThreadID = std::numeric_limits<DWORD>::max();
-    info.dwFlags = 0;
-
-    __try {
-        RaiseException(MS_VC_EXCEPTION, 0, sizeof(info) / sizeof(ULONG_PTR), (ULONG_PTR*)&info);
-    } __except (EXCEPTION_CONTINUE_EXECUTION) {
-    }
+    SetThreadDescription(GetCurrentThread(), UTF8ToUTF16W(name).data());
 }
 
 #else // !MSVC_VER, so must be POSIX threads
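
A hedged standalone equivalent of the new Windows path, for readers without the Citra string helpers; the conversion routine here is a plain Win32 stand-in for Common::UTF8ToUTF16W.

#include <cstddef>
#include <string>

#include <windows.h>

// SetThreadDescription (Windows 10 1607+) takes a UTF-16 string, so the UTF-8
// name is converted first; unlike the old RaiseException trick, the name is
// also visible to ETW traces and crash dumps, not just an attached debugger.
static void SetCurrentThreadNameWin32(const char* name) {
    const int length = MultiByteToWideChar(CP_UTF8, 0, name, -1, nullptr, 0);
    std::wstring wide(static_cast<std::size_t>(length), L'\0');
    MultiByteToWideChar(CP_UTF8, 0, name, -1, wide.data(), length);
    SetThreadDescription(GetCurrentThread(), wide.c_str());
}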

@@ -128,73 +128,60 @@ add_library(citra_core STATIC
     hle/applets/swkbd.h
     hle/ipc.h
     hle/ipc_helpers.h
+    hle/kernel/address_arbiter.cpp
+    hle/kernel/address_arbiter.h
+    hle/kernel/client_port.cpp
+    hle/kernel/client_port.h
+    hle/kernel/client_session.cpp
+    hle/kernel/client_session.h
     hle/kernel/config_mem.cpp
     hle/kernel/config_mem.h
     hle/kernel/errors.h
+    hle/kernel/event.cpp
+    hle/kernel/event.h
+    hle/kernel/handle_table.cpp
+    hle/kernel/handle_table.h
     hle/kernel/hle_ipc.cpp
     hle/kernel/hle_ipc.h
     hle/kernel/ipc.cpp
     hle/kernel/ipc.h
     hle/kernel/ipc_debugger/recorder.cpp
     hle/kernel/ipc_debugger/recorder.h
-    hle/kernel/k_address_arbiter.cpp
-    hle/kernel/k_address_arbiter.h
-    hle/kernel/k_auto_object.cpp
-    hle/kernel/k_auto_object.h
-    hle/kernel/k_auto_object_container.cpp
-    hle/kernel/k_auto_object_container.h
-    hle/kernel/k_client_port.cpp
-    hle/kernel/k_client_port.h
-    hle/kernel/k_client_session.cpp
-    hle/kernel/k_client_session.h
-    hle/kernel/k_code_set.h
-    hle/kernel/k_event.cpp
-    hle/kernel/k_event.h
-    hle/kernel/k_handle_table.cpp
-    hle/kernel/k_handle_table.h
-    hle/kernel/k_linked_list.h
-    hle/kernel/k_mutex.cpp
-    hle/kernel/k_mutex.h
-    hle/kernel/k_object_name.cpp
-    hle/kernel/k_object_name.h
-    hle/kernel/k_port.cpp
-    hle/kernel/k_port.h
-    hle/kernel/k_resource_limit.cpp
-    hle/kernel/k_resource_limit.h
-    hle/kernel/k_scoped_resource_reservation.h
-    hle/kernel/k_semaphore.cpp
-    hle/kernel/k_semaphore.h
-    hle/kernel/k_server_port.cpp
-    hle/kernel/k_server_port.h
-    hle/kernel/k_server_session.cpp
-    hle/kernel/k_server_session.h
-    hle/kernel/k_session.cpp
-    hle/kernel/k_session.h
-    hle/kernel/k_shared_memory.cpp
-    hle/kernel/k_shared_memory.h
-    hle/kernel/k_slab_heap.h
-    hle/kernel/k_synchronization_object.cpp
-    hle/kernel/k_synchronization_object.h
-    hle/kernel/k_timer.cpp
-    hle/kernel/k_timer.h
     hle/kernel/kernel.cpp
     hle/kernel/kernel.h
     hle/kernel/memory.cpp
     hle/kernel/memory.h
+    hle/kernel/mutex.cpp
+    hle/kernel/mutex.h
     hle/kernel/object.cpp
     hle/kernel/object.h
     hle/kernel/process.cpp
     hle/kernel/process.h
+    hle/kernel/resource_limit.cpp
+    hle/kernel/resource_limit.h
+    hle/kernel/semaphore.cpp
+    hle/kernel/semaphore.h
+    hle/kernel/server_port.cpp
+    hle/kernel/server_port.h
+    hle/kernel/server_session.cpp
+    hle/kernel/server_session.h
+    hle/kernel/session.h
+    hle/kernel/session.cpp
+    hle/kernel/shared_memory.cpp
+    hle/kernel/shared_memory.h
     hle/kernel/shared_page.cpp
     hle/kernel/shared_page.h
-    hle/kernel/slab_helpers.h
     hle/kernel/svc.cpp
     hle/kernel/svc.h
     hle/kernel/svc_wrapper.h
     hle/kernel/thread.cpp
     hle/kernel/thread.h
+    hle/kernel/timer.cpp
+    hle/kernel/timer.h
     hle/kernel/vm_manager.cpp
     hle/kernel/vm_manager.h
+    hle/kernel/wait_object.cpp
+    hle/kernel/wait_object.h
     hle/mii.h
     hle/mii.cpp
     hle/result.h
@@ -247,6 +234,8 @@ add_library(citra_core STATIC
     hle/service/boss/boss_p.h
     hle/service/boss/boss_u.cpp
     hle/service/boss/boss_u.h
+    hle/service/boss/online_service.cpp
+    hle/service/boss/online_service.h
     hle/service/cam/cam.cpp
     hle/service/cam/cam.h
     hle/service/cam/cam_c.cpp
@@ -332,8 +321,6 @@ add_library(citra_core STATIC
     hle/service/ir/ir_u.h
     hle/service/ir/ir_user.cpp
     hle/service/ir/ir_user.h
-    hle/service/kernel_helpers.cpp
-    hle/service/kernel_helpers.h
     hle/service/ldr_ro/cro_helper.cpp
     hle/service/ldr_ro/cro_helper.h
     hle/service/ldr_ro/ldr_ro.cpp

@@ -5,6 +5,7 @@
 #include <algorithm>
 #include <memory>
 #include "common/archives.h"
+#include "common/error.h"
 #include "common/file_util.h"
 #include "common/logging/log.h"
 #include "common/settings.h"
@@ -103,7 +104,7 @@ ResultVal<std::unique_ptr<FileBackend>> SDMCArchive::OpenFileBase(const Path& pa
     FileUtil::IOFile file(full_path, mode.write_flag ? "r+b" : "rb");
     if (!file.IsOpen()) {
-        LOG_CRITICAL(Service_FS, "Error opening {}: {}", full_path, GetLastErrorMsg());
+        LOG_CRITICAL(Service_FS, "Error opening {}: {}", full_path, Common::GetLastErrorMsg());
         return ERROR_NOT_FOUND;
     }

View File

@ -26,24 +26,24 @@ static Core::TimingEventType* applet_update_event = nullptr;
/// The interval at which the Applet update callback will be called, 16.6ms /// The interval at which the Applet update callback will be called, 16.6ms
static const u64 applet_update_interval_us = 16666; static const u64 applet_update_interval_us = 16666;
ResultCode Applet::Create(Core::System& system, Service::APT::AppletId id, Service::APT::AppletId parent, bool preload, ResultCode Applet::Create(Service::APT::AppletId id, Service::APT::AppletId parent, bool preload,
const std::shared_ptr<Service::APT::AppletManager>& manager) { const std::shared_ptr<Service::APT::AppletManager>& manager) {
switch (id) { switch (id) {
case Service::APT::AppletId::SoftwareKeyboard1: case Service::APT::AppletId::SoftwareKeyboard1:
case Service::APT::AppletId::SoftwareKeyboard2: case Service::APT::AppletId::SoftwareKeyboard2:
applets[id] = std::make_shared<SoftwareKeyboard>(system, id, parent, preload, manager); applets[id] = std::make_shared<SoftwareKeyboard>(id, parent, preload, manager);
break; break;
case Service::APT::AppletId::Ed1: case Service::APT::AppletId::Ed1:
case Service::APT::AppletId::Ed2: case Service::APT::AppletId::Ed2:
applets[id] = std::make_shared<MiiSelector>(system, id, parent, preload, manager); applets[id] = std::make_shared<MiiSelector>(id, parent, preload, manager);
break; break;
case Service::APT::AppletId::Error: case Service::APT::AppletId::Error:
case Service::APT::AppletId::Error2: case Service::APT::AppletId::Error2:
applets[id] = std::make_shared<ErrEula>(system, id, parent, preload, manager); applets[id] = std::make_shared<ErrEula>(id, parent, preload, manager);
break; break;
case Service::APT::AppletId::Mint: case Service::APT::AppletId::Mint:
case Service::APT::AppletId::Mint2: case Service::APT::AppletId::Mint2:
applets[id] = std::make_shared<Mint>(system, id, parent, preload, manager); applets[id] = std::make_shared<Mint>(id, parent, preload, manager);
break; break;
default: default:
LOG_ERROR(Service_APT, "Could not create applet {}", id); LOG_ERROR(Service_APT, "Could not create applet {}", id);
@ -64,7 +64,7 @@ ResultCode Applet::Create(Core::System& system, Service::APT::AppletId id, Servi
} }
// Schedule the update event // Schedule the update event
system.CoreTiming().ScheduleEvent( Core::System::GetInstance().CoreTiming().ScheduleEvent(
usToCycles(applet_update_interval_us), applet_update_event, static_cast<u64>(id)); usToCycles(applet_update_interval_us), applet_update_event, static_cast<u64>(id));
return RESULT_SUCCESS; return RESULT_SUCCESS;
} }
@ -149,5 +149,4 @@ void Init() {
void Shutdown() { void Shutdown() {
Core::System::GetInstance().CoreTiming().RemoveEvent(applet_update_event); Core::System::GetInstance().CoreTiming().RemoveEvent(applet_update_event);
} }
} // namespace HLE::Applets } // namespace HLE::Applets
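Aside, not part of the diff: a minimal standalone sketch of the self-rescheduling pattern the applet update event relies on. TinyScheduler and the 0x401 applet id are made up for illustration; only the ScheduleEvent name and the 16666 microsecond interval are taken from the hunk above, and this is not Citra's real CoreTiming interface.

// Hypothetical TinyScheduler, standing in for the emulator's core timing.
#include <cstdint>
#include <functional>
#include <iostream>

using Callback = std::function<void(std::uint64_t user_data)>;

struct TinyScheduler {
    std::uint64_t now_us = 0;
    std::uint64_t next_due_us = 0;
    Callback pending;
    std::uint64_t pending_data = 0;

    void ScheduleEvent(std::uint64_t delay_us, Callback cb, std::uint64_t user_data) {
        next_due_us = now_us + delay_us;
        pending = std::move(cb);
        pending_data = user_data;
    }

    // Advance the clock to the next due event and fire its callback.
    void RunOne() {
        now_us = next_due_us;
        Callback cb = std::move(pending);
        cb(pending_data);
    }
};

int main() {
    constexpr std::uint64_t applet_update_interval_us = 16666; // 16.6 ms, as in the hunk above
    TinyScheduler timing;

    // The callback re-arms itself every time it runs, giving the applet a steady tick.
    Callback applet_update = [&](std::uint64_t id) {
        std::cout << "updating applet " << id << " at t=" << timing.now_us << "us\n";
        timing.ScheduleEvent(applet_update_interval_us, applet_update, id);
    };

    timing.ScheduleEvent(applet_update_interval_us, applet_update, 0x401); // 0x401: made-up applet id
    for (int i = 0; i < 3; ++i) {
        timing.RunOne();
    }
}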

View File

@ -8,10 +8,6 @@
#include "core/hle/result.h" #include "core/hle/result.h"
#include "core/hle/service/apt/applet_manager.h" #include "core/hle/service/apt/applet_manager.h"
namespace Core {
class System;
}
namespace HLE::Applets { namespace HLE::Applets {
class Applet { class Applet {
@ -26,7 +22,7 @@ public:
* @param preload Whether the applet is being preloaded. * @param preload Whether the applet is being preloaded.
* @returns ResultCode Whether the operation was successful or not. * @returns ResultCode Whether the operation was successful or not.
*/ */
static ResultCode Create(Core::System& system, Service::APT::AppletId id, Service::APT::AppletId parent, bool preload, static ResultCode Create(Service::APT::AppletId id, Service::APT::AppletId parent, bool preload,
const std::shared_ptr<Service::APT::AppletManager>& manager); const std::shared_ptr<Service::APT::AppletManager>& manager);
/** /**
@ -59,9 +55,9 @@ public:
virtual void Update() = 0; virtual void Update() = 0;
protected: protected:
Applet(Core::System& system_, Service::APT::AppletId id, Service::APT::AppletId parent, bool preload, Applet(Service::APT::AppletId id, Service::APT::AppletId parent, bool preload,
std::weak_ptr<Service::APT::AppletManager> manager) std::weak_ptr<Service::APT::AppletManager> manager)
: system(system_), id(id), parent(parent), preload(preload), service_context(system, "Applet"), manager(std::move(manager)) {} : id(id), parent(parent), preload(preload), manager(std::move(manager)) {}
/** /**
* Handles a parameter from the application. * Handles a parameter from the application.
@ -83,12 +79,10 @@ protected:
*/ */
virtual ResultCode Finalize() = 0; virtual ResultCode Finalize() = 0;
Core::System& system;
Service::APT::AppletId id; ///< Id of this Applet Service::APT::AppletId id; ///< Id of this Applet
Service::APT::AppletId parent; ///< Id of this Applet's parent Service::APT::AppletId parent; ///< Id of this Applet's parent
bool preload; ///< Whether the Applet is being preloaded. bool preload; ///< Whether the Applet is being preloaded.
std::shared_ptr<std::vector<u8>> heap_memory; ///< Heap memory for this Applet std::shared_ptr<std::vector<u8>> heap_memory; ///< Heap memory for this Applet
Service::KernelHelpers::ServiceContext service_context;
/// Whether this applet is running. /// Whether this applet is running.
bool is_running = true; bool is_running = true;

View File

@ -5,7 +5,6 @@
#include "common/string_util.h" #include "common/string_util.h"
#include "core/core.h" #include "core/core.h"
#include "core/hle/applets/erreula.h" #include "core/hle/applets/erreula.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/service/apt/apt.h" #include "core/hle/service/apt/apt.h"
namespace HLE::Applets { namespace HLE::Applets {
@ -29,8 +28,9 @@ ResultCode ErrEula::ReceiveParameterImpl(const Service::APT::MessageParameter& p
// TODO: allocated memory never released // TODO: allocated memory never released
using Kernel::MemoryPermission; using Kernel::MemoryPermission;
// Create a SharedMemory that directly points to this heap block. // Create a SharedMemory that directly points to this heap block.
framebuffer_memory = service_context.CreateSharedMemoryForApplet("ErrEula Memory", framebuffer_memory = Core::System::GetInstance().Kernel().CreateSharedMemoryForApplet(
0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite); 0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
"ErrEula Memory");
// Send the response message with the newly created SharedMemory // Send the response message with the newly created SharedMemory
SendParameter({ SendParameter({

View File

@ -5,18 +5,15 @@
#pragma once #pragma once
#include "core/hle/applets/applet.h" #include "core/hle/applets/applet.h"
#include "core/hle/kernel/shared_memory.h"
namespace Kernel {
class KSharedMemory;
}
namespace HLE::Applets { namespace HLE::Applets {
class ErrEula final : public Applet { class ErrEula final : public Applet {
public: public:
explicit ErrEula(Core::System& system, Service::APT::AppletId id, Service::APT::AppletId parent, bool preload, explicit ErrEula(Service::APT::AppletId id, Service::APT::AppletId parent, bool preload,
std::weak_ptr<Service::APT::AppletManager> manager) std::weak_ptr<Service::APT::AppletManager> manager)
: Applet(system, id, parent, preload, std::move(manager)) {} : Applet(id, parent, preload, std::move(manager)) {}
ResultCode ReceiveParameterImpl(const Service::APT::MessageParameter& parameter) override; ResultCode ReceiveParameterImpl(const Service::APT::MessageParameter& parameter) override;
ResultCode Start(const Service::APT::MessageParameter& parameter) override; ResultCode Start(const Service::APT::MessageParameter& parameter) override;
@ -27,7 +24,7 @@ private:
/// This SharedMemory will be created when we receive the LibAppJustStarted message. /// This SharedMemory will be created when we receive the LibAppJustStarted message.
/// It holds the framebuffer info retrieved by the application with /// It holds the framebuffer info retrieved by the application with
/// GSPGPU::ImportDisplayCaptureInfo /// GSPGPU::ImportDisplayCaptureInfo
Kernel::KSharedMemory* framebuffer_memory; std::shared_ptr<Kernel::SharedMemory> framebuffer_memory;
/// Parameter received by the applet on start. /// Parameter received by the applet on start.
std::vector<u8> startup_param; std::vector<u8> startup_param;

View File

@ -12,7 +12,7 @@
#include "core/frontend/applets/mii_selector.h" #include "core/frontend/applets/mii_selector.h"
#include "core/hle/applets/mii_selector.h" #include "core/hle/applets/mii_selector.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/k_shared_memory.h" #include "core/hle/kernel/shared_memory.h"
#include "core/hle/result.h" #include "core/hle/result.h"
namespace HLE::Applets { namespace HLE::Applets {
@ -35,8 +35,9 @@ ResultCode MiiSelector::ReceiveParameterImpl(const Service::APT::MessageParamete
using Kernel::MemoryPermission; using Kernel::MemoryPermission;
// Create a SharedMemory that directly points to this heap block. // Create a SharedMemory that directly points to this heap block.
framebuffer_memory = service_context.CreateSharedMemoryForApplet("MiiSelector Memory", framebuffer_memory = Core::System::GetInstance().Kernel().CreateSharedMemoryForApplet(
0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite); 0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
"MiiSelector Memory");
// Send the response message with the newly created SharedMemory // Send the response message with the newly created SharedMemory
SendParameter({ SendParameter({
@ -56,7 +57,7 @@ ResultCode MiiSelector::Start(const Service::APT::MessageParameter& parameter) {
std::memcpy(&config, parameter.buffer.data(), parameter.buffer.size()); std::memcpy(&config, parameter.buffer.data(), parameter.buffer.size());
using namespace Frontend; using namespace Frontend;
frontend_applet = system.GetMiiSelector(); frontend_applet = Core::System::GetInstance().GetMiiSelector();
ASSERT(frontend_applet); ASSERT(frontend_applet);
MiiSelectorConfig frontend_config = ToFrontendConfig(config); MiiSelectorConfig frontend_config = ToFrontendConfig(config);

View File

@ -8,6 +8,7 @@
#include "common/common_funcs.h" #include "common/common_funcs.h"
#include "common/common_types.h" #include "common/common_types.h"
#include "core/hle/applets/applet.h" #include "core/hle/applets/applet.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/mii.h" #include "core/hle/mii.h"
#include "core/hle/result.h" #include "core/hle/result.h"
#include "core/hle/service/apt/apt.h" #include "core/hle/service/apt/apt.h"
@ -17,10 +18,6 @@ class MiiSelector;
struct MiiSelectorConfig; struct MiiSelectorConfig;
} // namespace Frontend } // namespace Frontend
namespace Kernel {
class KSharedMemory;
}
namespace HLE::Applets { namespace HLE::Applets {
struct MiiConfig { struct MiiConfig {
@ -65,9 +62,9 @@ ASSERT_REG_POSITION(guest_mii_name, 0x6C);
class MiiSelector final : public Applet { class MiiSelector final : public Applet {
public: public:
MiiSelector(Core::System& system, Service::APT::AppletId id, Service::APT::AppletId parent, bool preload, MiiSelector(Service::APT::AppletId id, Service::APT::AppletId parent, bool preload,
std::weak_ptr<Service::APT::AppletManager> manager) std::weak_ptr<Service::APT::AppletManager> manager)
: Applet(system, id, parent, preload, std::move(manager)) {} : Applet(id, parent, preload, std::move(manager)) {}
ResultCode ReceiveParameterImpl(const Service::APT::MessageParameter& parameter) override; ResultCode ReceiveParameterImpl(const Service::APT::MessageParameter& parameter) override;
ResultCode Start(const Service::APT::MessageParameter& parameter) override; ResultCode Start(const Service::APT::MessageParameter& parameter) override;
@ -82,7 +79,7 @@ private:
/// This SharedMemory will be created when we receive the LibAppJustStarted message. /// This SharedMemory will be created when we receive the LibAppJustStarted message.
/// It holds the framebuffer info retrieved by the application with /// It holds the framebuffer info retrieved by the application with
/// GSPGPU::ImportDisplayCaptureInfo /// GSPGPU::ImportDisplayCaptureInfo
Kernel::KSharedMemory* framebuffer_memory; std::shared_ptr<Kernel::SharedMemory> framebuffer_memory;
MiiConfig config; MiiConfig config;

View File

@ -5,7 +5,6 @@
#include "common/string_util.h" #include "common/string_util.h"
#include "core/core.h" #include "core/core.h"
#include "core/hle/applets/mint.h" #include "core/hle/applets/mint.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/service/apt/apt.h" #include "core/hle/service/apt/apt.h"
namespace HLE::Applets { namespace HLE::Applets {
@ -29,8 +28,9 @@ ResultCode Mint::ReceiveParameterImpl(const Service::APT::MessageParameter& para
// TODO: allocated memory never released // TODO: allocated memory never released
using Kernel::MemoryPermission; using Kernel::MemoryPermission;
// Create a SharedMemory that directly points to this heap block. // Create a SharedMemory that directly points to this heap block.
framebuffer_memory = service_context.CreateSharedMemoryForApplet("Mint Memory", framebuffer_memory = Core::System::GetInstance().Kernel().CreateSharedMemoryForApplet(
0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite); 0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
"Mint Memory");
// Send the response message with the newly created SharedMemory // Send the response message with the newly created SharedMemory
SendParameter({ SendParameter({

View File

@ -5,18 +5,15 @@
#pragma once #pragma once
#include "core/hle/applets/applet.h" #include "core/hle/applets/applet.h"
#include "core/hle/kernel/shared_memory.h"
namespace Kernel {
class KSharedMemory;
}
namespace HLE::Applets { namespace HLE::Applets {
class Mint final : public Applet { class Mint final : public Applet {
public: public:
explicit Mint(Core::System& system, Service::APT::AppletId id, Service::APT::AppletId parent, bool preload, explicit Mint(Service::APT::AppletId id, Service::APT::AppletId parent, bool preload,
std::weak_ptr<Service::APT::AppletManager> manager) std::weak_ptr<Service::APT::AppletManager> manager)
: Applet(system, id, parent, preload, std::move(manager)) {} : Applet(id, parent, preload, std::move(manager)) {}
ResultCode ReceiveParameterImpl(const Service::APT::MessageParameter& parameter) override; ResultCode ReceiveParameterImpl(const Service::APT::MessageParameter& parameter) override;
ResultCode Start(const Service::APT::MessageParameter& parameter) override; ResultCode Start(const Service::APT::MessageParameter& parameter) override;
@ -27,7 +24,7 @@ private:
/// This SharedMemory will be created when we receive the Request message. /// This SharedMemory will be created when we receive the Request message.
/// It holds the framebuffer info retrieved by the application with /// It holds the framebuffer info retrieved by the application with
/// GSPGPU::ImportDisplayCaptureInfo /// GSPGPU::ImportDisplayCaptureInfo
Kernel::KSharedMemory* framebuffer_memory; std::shared_ptr<Kernel::SharedMemory> framebuffer_memory;
/// Parameter received by the applet on start. /// Parameter received by the applet on start.
std::vector<u8> startup_param; std::vector<u8> startup_param;

View File

@ -32,8 +32,9 @@ ResultCode SoftwareKeyboard::ReceiveParameterImpl(Service::APT::MessageParameter
using Kernel::MemoryPermission; using Kernel::MemoryPermission;
// Create a SharedMemory that directly points to this heap block. // Create a SharedMemory that directly points to this heap block.
framebuffer_memory = service_context.CreateSharedMemoryForApplet("SoftwareKeyboard Memory", framebuffer_memory = Core::System::GetInstance().Kernel().CreateSharedMemoryForApplet(
0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite); 0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
"SoftwareKeyboard Memory");
// Send the response message with the newly created SharedMemory // Send the response message with the newly created SharedMemory
SendParameter({ SendParameter({
@ -93,12 +94,12 @@ ResultCode SoftwareKeyboard::Start(Service::APT::MessageParameter const& paramet
"The size of the parameter (SoftwareKeyboardConfig) is wrong"); "The size of the parameter (SoftwareKeyboardConfig) is wrong");
std::memcpy(&config, parameter.buffer.data(), parameter.buffer.size()); std::memcpy(&config, parameter.buffer.data(), parameter.buffer.size());
text_memory = parameter.object->DynamicCast<Kernel::KSharedMemory*>(); text_memory = std::static_pointer_cast<Kernel::SharedMemory, Kernel::Object>(parameter.object);
DrawScreenKeyboard(); DrawScreenKeyboard();
using namespace Frontend; using namespace Frontend;
frontend_applet = system.GetSoftwareKeyboard(); frontend_applet = Core::System::GetInstance().GetSoftwareKeyboard();
ASSERT(frontend_applet); ASSERT(frontend_applet);
frontend_applet->Execute(ToFrontendConfig(config)); frontend_applet->Execute(ToFrontendConfig(config));

View File

@ -9,6 +9,7 @@
#include "common/common_types.h" #include "common/common_types.h"
#include "core/frontend/applets/swkbd.h" #include "core/frontend/applets/swkbd.h"
#include "core/hle/applets/applet.h" #include "core/hle/applets/applet.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/result.h" #include "core/hle/result.h"
#include "core/hle/service/apt/apt.h" #include "core/hle/service/apt/apt.h"
@ -174,9 +175,9 @@ static_assert(sizeof(SoftwareKeyboardConfig) == 0x400, "Software Keyboard Config
class SoftwareKeyboard final : public Applet { class SoftwareKeyboard final : public Applet {
public: public:
SoftwareKeyboard(Core::System& system, Service::APT::AppletId id, Service::APT::AppletId parent, bool preload, SoftwareKeyboard(Service::APT::AppletId id, Service::APT::AppletId parent, bool preload,
std::weak_ptr<Service::APT::AppletManager> manager) std::weak_ptr<Service::APT::AppletManager> manager)
: Applet(system, id, parent, preload, std::move(manager)) {} : Applet(id, parent, preload, std::move(manager)) {}
ResultCode ReceiveParameterImpl(const Service::APT::MessageParameter& parameter) override; ResultCode ReceiveParameterImpl(const Service::APT::MessageParameter& parameter) override;
ResultCode Start(const Service::APT::MessageParameter& parameter) override; ResultCode Start(const Service::APT::MessageParameter& parameter) override;
@ -194,10 +195,10 @@ private:
/// This SharedMemory will be created when we receive the LibAppJustStarted message. /// This SharedMemory will be created when we receive the LibAppJustStarted message.
/// It holds the framebuffer info retrieved by the application with /// It holds the framebuffer info retrieved by the application with
/// GSPGPU::ImportDisplayCaptureInfo /// GSPGPU::ImportDisplayCaptureInfo
Kernel::KSharedMemory* framebuffer_memory; std::shared_ptr<Kernel::SharedMemory> framebuffer_memory;
/// SharedMemory where the output text will be stored /// SharedMemory where the output text will be stored
Kernel::KSharedMemory* text_memory; std::shared_ptr<Kernel::SharedMemory> text_memory;
/// Configuration of this instance of the SoftwareKeyboard, as received from the application /// Configuration of this instance of the SoftwareKeyboard, as received from the application
SoftwareKeyboardConfig config; SoftwareKeyboardConfig config;

View File

@ -87,11 +87,11 @@ public:
void PushRaw(const T& value); void PushRaw(const T& value);
// TODO : ensure that translate params are added after all regular params // TODO : ensure that translate params are added after all regular params
template <typename... T> template <typename... O>
void PushCopyObjects(T... pointers); void PushCopyObjects(std::shared_ptr<O>... pointers);
template <typename... T> template <typename... O>
void PushMoveObjects(T... pointers); void PushMoveObjects(std::shared_ptr<O>... pointers);
void PushStaticBuffer(std::vector<u8> buffer, u8 buffer_id); void PushStaticBuffer(std::vector<u8> buffer, u8 buffer_id);
@ -183,14 +183,14 @@ inline void RequestBuilder::PushMoveHLEHandles(H... handles) {
Push(static_cast<u32>(handles)...); Push(static_cast<u32>(handles)...);
} }
template <typename... T> template <typename... O>
inline void RequestBuilder::PushCopyObjects(T... pointers) { inline void RequestBuilder::PushCopyObjects(std::shared_ptr<O>... pointers) {
PushCopyHLEHandles(context->AddOutgoingHandle(pointers)...); PushCopyHLEHandles(context->AddOutgoingHandle(std::move(pointers))...);
} }
template <typename... T> template <typename... O>
inline void RequestBuilder::PushMoveObjects(T... pointers) { inline void RequestBuilder::PushMoveObjects(std::shared_ptr<O>... pointers) {
PushMoveHLEHandles(context->AddOutgoingHandle(pointers)...); PushMoveHLEHandles(context->AddOutgoingHandle(std::move(pointers))...);
} }
inline void RequestBuilder::PushStaticBuffer(std::vector<u8> buffer, u8 buffer_id) { inline void RequestBuilder::PushStaticBuffer(std::vector<u8> buffer, u8 buffer_id) {
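Aside, not part of the diff: a self-contained model of the signature change above, where PushMoveObjects accepts std::shared_ptr<O>... explicitly and moves each pointer into the handle-translation step instead of deducing raw T... . Object, Event, Mutex, Context and the fake handle values are stand-ins; only the variadic shared_ptr signature and the AddOutgoingHandle call shape mirror the real code.

// Object, Event, Mutex and Context are stand-ins, not the real Kernel classes.
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

struct Object { virtual ~Object() = default; };
struct Event : Object {};
struct Mutex : Object {};

struct Context {
    std::vector<std::shared_ptr<Object>> outgoing;
    // Takes ownership of the object and hands back a fake handle value.
    std::uint32_t AddOutgoingHandle(std::shared_ptr<Object> object) {
        outgoing.push_back(std::move(object));
        return static_cast<std::uint32_t>(outgoing.size());
    }
};

struct RequestBuilder {
    Context* context;

    template <typename... H>
    void PushMoveHLEHandles(H... handles) {
        ((std::cout << "translated handle " << handles << '\n'), ...);
    }

    // Mirrors the new signature above: one shared_ptr per object, moved through.
    template <typename... O>
    void PushMoveObjects(std::shared_ptr<O>... pointers) {
        PushMoveHLEHandles(context->AddOutgoingHandle(std::move(pointers))...);
    }
};

int main() {
    Context ctx;
    RequestBuilder rb{&ctx};
    rb.PushMoveObjects(std::make_shared<Event>(), std::make_shared<Mutex>());
}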

View File

@ -1,31 +1,95 @@
// Copyright 2023 Citra Emulator Project // Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version // Licensed under GPLv2 or any later version
// Refer to the license.txt file included. // Refer to the license.txt file included.
#include <algorithm> #include <algorithm>
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h" #include "common/archives.h"
#include "common/common_types.h"
#include "common/logging/log.h" #include "common/logging/log.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/errors.h" #include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h" #include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/thread.h" #include "core/hle/kernel/thread.h"
#include "core/memory.h" #include "core/memory.h"
namespace Kernel { namespace Kernel {
class KAddressArbiter::Callback : public WakeupCallback { void AddressArbiter::WaitThread(std::shared_ptr<Thread> thread, VAddr wait_address) {
public: thread->wait_address = wait_address;
explicit Callback(KAddressArbiter& _parent) : parent(_parent) {} thread->status = ThreadStatus::WaitArb;
KAddressArbiter& parent; waiting_threads.emplace_back(std::move(thread));
}
void WakeUp(ThreadWakeupReason reason, Thread* thread, u64 AddressArbiter::ResumeAllThreads(VAddr address) {
KSynchronizationObject* object) override { // Determine which threads are waiting on this address, those should be woken up.
parent.WakeUp(reason, thread, object); auto itr = std::stable_partition(waiting_threads.begin(), waiting_threads.end(),
[address](const auto& thread) {
ASSERT_MSG(thread->status == ThreadStatus::WaitArb,
"Inconsistent AddressArbiter state");
return thread->wait_address != address;
});
// Wake up all the found threads
const u64 num_threads = std::distance(itr, waiting_threads.end());
std::for_each(itr, waiting_threads.end(), [](auto& thread) { thread->ResumeFromWait(); });
// Remove the woken up threads from the wait list.
waiting_threads.erase(itr, waiting_threads.end());
return num_threads;
}
bool AddressArbiter::ResumeHighestPriorityThread(VAddr address) {
// Determine which threads are waiting on this address, those should be considered for wakeup.
auto matches_start = std::stable_partition(
waiting_threads.begin(), waiting_threads.end(), [address](const auto& thread) {
ASSERT_MSG(thread->status == ThreadStatus::WaitArb,
"Inconsistent AddressArbiter state");
return thread->wait_address != address;
});
// Iterate through threads, find highest priority thread that is waiting to be arbitrated.
// Note: The real kernel will pick the first thread in the list if more than one have the
// same highest priority value. Lower priority values mean higher priority.
auto itr = std::min_element(matches_start, waiting_threads.end(),
[](const auto& lhs, const auto& rhs) {
return lhs->current_priority < rhs->current_priority;
});
if (itr == waiting_threads.end()) {
return false;
}
auto thread = *itr;
thread->ResumeFromWait();
waiting_threads.erase(itr);
return true;
}
AddressArbiter::AddressArbiter(KernelSystem& kernel)
: Object(kernel), kernel(kernel), timeout_callback(std::make_shared<Callback>(*this)) {}
AddressArbiter::~AddressArbiter() {
if (resource_limit) {
resource_limit->Release(ResourceLimitType::AddressArbiter, 1);
}
}
std::shared_ptr<AddressArbiter> KernelSystem::CreateAddressArbiter(std::string name) {
auto address_arbiter = std::make_shared<AddressArbiter>(*this);
address_arbiter->name = std::move(name);
return address_arbiter;
}
class AddressArbiter::Callback : public WakeupCallback {
public:
explicit Callback(AddressArbiter& _parent) : parent(_parent) {}
AddressArbiter& parent;
void WakeUp(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
std::shared_ptr<WaitObject> object) override {
parent.WakeUp(reason, std::move(thread), std::move(object));
} }
private: private:
@ -36,87 +100,16 @@ private:
friend class boost::serialization::access; friend class boost::serialization::access;
}; };
KAddressArbiter::KAddressArbiter(KernelSystem& kernel) void AddressArbiter::WakeUp(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
: KAutoObjectWithSlabHeapAndContainer{kernel}, std::shared_ptr<WaitObject> object) {
m_timeout_callback(std::make_shared<Callback>(*this)) {}
KAddressArbiter::~KAddressArbiter() = default;
void KAddressArbiter::Initialize(Process* owner) {
m_owner = owner;
m_owner->Open();
}
void KAddressArbiter::PostDestroy(uintptr_t arg) {
Process* owner = reinterpret_cast<Process*>(arg);
if (owner != nullptr) {
owner->ReleaseResource(ResourceLimitType::AddressArbiter, 1);
owner->Close();
}
}
void KAddressArbiter::WaitThread(Thread* thread, VAddr wait_address) {
thread->m_wait_address = wait_address;
thread->m_status = ThreadStatus::WaitArb;
m_waiting_threads.emplace_back(thread);
}
u64 KAddressArbiter::ResumeAllThreads(VAddr address) {
// Determine which threads are waiting on this address, those should be woken up.
auto itr = std::stable_partition(m_waiting_threads.begin(), m_waiting_threads.end(),
[address](const auto& thread) {
ASSERT_MSG(thread->status == ThreadStatus::WaitArb,
"Inconsistent AddressArbiter state");
return thread->wait_address != address;
});
// Wake up all the found threads
const u64 num_threads = std::distance(itr, m_waiting_threads.end());
std::for_each(itr, m_waiting_threads.end(), [](auto& thread) { thread->ResumeFromWait(); });
// Remove the woken up threads from the wait list.
m_waiting_threads.erase(itr, m_waiting_threads.end());
return num_threads;
}
bool KAddressArbiter::ResumeHighestPriorityThread(VAddr address) {
// Determine which threads are waiting on this address, those should be considered for wakeup.
auto matches_start = std::stable_partition(
m_waiting_threads.begin(), m_waiting_threads.end(), [address](const auto& thread) {
ASSERT_MSG(thread->status == ThreadStatus::WaitArb,
"Inconsistent AddressArbiter state");
return thread->wait_address != address;
});
// Iterate through threads, find highest priority thread that is waiting to be arbitrated.
// Note: The real kernel will pick the first thread in the list if more than one have the
// same highest priority value. Lower priority values mean higher priority.
auto itr = std::min_element(matches_start, m_waiting_threads.end(),
[](const auto lhs, const auto rhs) {
return lhs->GetCurrentPriority() < rhs->GetCurrentPriority();
});
if (itr == m_waiting_threads.end()) {
return false;
}
auto thread = *itr;
thread->ResumeFromWait();
m_waiting_threads.erase(itr);
return true;
}
void KAddressArbiter::WakeUp(ThreadWakeupReason reason, Thread* thread,
KSynchronizationObject* object) {
ASSERT(reason == ThreadWakeupReason::Timeout); ASSERT(reason == ThreadWakeupReason::Timeout);
// Remove the newly-awakened thread from the Arbiter's waiting list. // Remove the newly-awakened thread from the Arbiter's waiting list.
m_waiting_threads.erase(std::remove(m_waiting_threads.begin(), m_waiting_threads.end(), thread), waiting_threads.erase(std::remove(waiting_threads.begin(), waiting_threads.end(), thread),
m_waiting_threads.end()); waiting_threads.end());
}; };
ResultCode KAddressArbiter::ArbitrateAddress(Thread* thread, ArbitrationType type, VAddr address, ResultCode AddressArbiter::ArbitrateAddress(std::shared_ptr<Thread> thread, ArbitrationType type,
s32 value, u64 nanoseconds) { VAddr address, s32 value, u64 nanoseconds) {
switch (type) { switch (type) {
// Signal thread(s) waiting for arbitrate address... // Signal thread(s) waiting for arbitrate address...
@ -137,42 +130,41 @@ ResultCode KAddressArbiter::ArbitrateAddress(Thread* thread, ArbitrationType typ
// The tick count is taken directly from official HOS kernel. The priority value is one less // The tick count is taken directly from official HOS kernel. The priority value is one less
// than official kernel as the affected FMV threads don't meet the priority threshold of 50. // than official kernel as the affected FMV threads don't meet the priority threshold of 50.
// TODO: Revisit this when scheduler is rewritten and adjust if there isn't a problem there. // TODO: Revisit this when scheduler is rewritten and adjust if there isn't a problem there.
auto* core = m_kernel.current_cpu; if (num_threads == 0 && thread->current_priority >= 49) {
if (num_threads == 0 && core->GetID() == 0 && thread->GetCurrentPriority() >= 49) { kernel.current_cpu->GetTimer().AddTicks(1614u);
core->GetTimer().AddTicks(1614u);
} }
break; break;
} }
// Wait current thread (acquire the arbiter)... // Wait current thread (acquire the arbiter)...
case ArbitrationType::WaitIfLessThan: case ArbitrationType::WaitIfLessThan:
if ((s32)m_kernel.memory.Read32(address) < value) { if ((s32)kernel.memory.Read32(address) < value) {
WaitThread(thread, address); WaitThread(std::move(thread), address);
} }
break; break;
case ArbitrationType::WaitIfLessThanWithTimeout: case ArbitrationType::WaitIfLessThanWithTimeout:
if ((s32)m_kernel.memory.Read32(address) < value) { if ((s32)kernel.memory.Read32(address) < value) {
thread->SetWakeupCallback(m_timeout_callback); thread->wakeup_callback = timeout_callback;
thread->WakeAfterDelay(nanoseconds); thread->WakeAfterDelay(nanoseconds);
WaitThread(thread, address); WaitThread(std::move(thread), address);
} }
break; break;
case ArbitrationType::DecrementAndWaitIfLessThan: { case ArbitrationType::DecrementAndWaitIfLessThan: {
s32 memory_value = m_kernel.memory.Read32(address); s32 memory_value = kernel.memory.Read32(address);
if (memory_value < value) { if (memory_value < value) {
// Only change the memory value if the thread should wait // Only change the memory value if the thread should wait
m_kernel.memory.Write32(address, (s32)memory_value - 1); kernel.memory.Write32(address, (s32)memory_value - 1);
WaitThread(thread, address); WaitThread(std::move(thread), address);
} }
break; break;
} }
case ArbitrationType::DecrementAndWaitIfLessThanWithTimeout: { case ArbitrationType::DecrementAndWaitIfLessThanWithTimeout: {
s32 memory_value = m_kernel.memory.Read32(address); s32 memory_value = kernel.memory.Read32(address);
if (memory_value < value) { if (memory_value < value) {
// Only change the memory value if the thread should wait // Only change the memory value if the thread should wait
m_kernel.memory.Write32(address, (s32)memory_value - 1); kernel.memory.Write32(address, (s32)memory_value - 1);
thread->SetWakeupCallback(m_timeout_callback); thread->wakeup_callback = timeout_callback;
thread->WakeAfterDelay(nanoseconds); thread->WakeAfterDelay(nanoseconds);
WaitThread(thread, address); WaitThread(std::move(thread), address);
} }
break; break;
} }
@ -186,23 +178,30 @@ ResultCode KAddressArbiter::ArbitrateAddress(Thread* thread, ArbitrationType typ
// the thread to sleep // the thread to sleep
if (type == ArbitrationType::WaitIfLessThanWithTimeout || if (type == ArbitrationType::WaitIfLessThanWithTimeout ||
type == ArbitrationType::DecrementAndWaitIfLessThanWithTimeout) { type == ArbitrationType::DecrementAndWaitIfLessThanWithTimeout) {
return RESULT_TIMEOUT; return RESULT_TIMEOUT;
} }
return RESULT_SUCCESS;
R_SUCCEED();
} }
template <class Archive>
void KAddressArbiter::serialize(Archive& ar, const unsigned int file_version) {
ar& boost::serialization::base_object<KAutoObject>(*this);
ar& m_name;
ar& m_waiting_threads;
ar& m_timeout_callback;
}
SERIALIZE_IMPL(KAddressArbiter)
} // namespace Kernel } // namespace Kernel
namespace boost::serialization {
template <class Archive>
void save_construct_data(Archive& ar, const Kernel::AddressArbiter::Callback* t,
const unsigned int) {
ar << Kernel::SharedFrom(&t->parent);
}
template <class Archive>
void load_construct_data(Archive& ar, Kernel::AddressArbiter::Callback* t, const unsigned int) {
std::shared_ptr<Kernel::AddressArbiter> parent;
ar >> parent;
::new (t) Kernel::AddressArbiter::Callback(*parent);
}
} // namespace boost::serialization
SERIALIZE_EXPORT_IMPL(Kernel::AddressArbiter) SERIALIZE_EXPORT_IMPL(Kernel::AddressArbiter)
SERIALIZE_EXPORT_IMPL(Kernel::KAddressArbiter::Callback) SERIALIZE_EXPORT_IMPL(Kernel::AddressArbiter::Callback)
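Aside, not part of the diff: the wake-up selection above can be read in isolation. This standalone snippet reproduces the same std::stable_partition plus std::min_element logic on a fake thread list; FakeThread and the sample priorities are invented for illustration.

// FakeThread and the sample data are invented; the algorithm is the one above.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct FakeThread {
    std::uint32_t wait_address;
    std::uint32_t current_priority;
};

int main() {
    std::vector<FakeThread> waiting_threads{
        {0x1000, 40}, {0x2000, 24}, {0x1000, 24}, {0x1000, 31}};
    const std::uint32_t address = 0x1000;

    // Threads NOT waiting on 'address' stay at the front, matches go to the back.
    auto matches_start = std::stable_partition(
        waiting_threads.begin(), waiting_threads.end(),
        [address](const FakeThread& t) { return t.wait_address != address; });

    // Lower current_priority value means higher priority; ties keep list order,
    // because min_element returns the first of several equal minima.
    auto itr = std::min_element(matches_start, waiting_threads.end(),
                                [](const FakeThread& lhs, const FakeThread& rhs) {
                                    return lhs.current_priority < rhs.current_priority;
                                });
    if (itr != waiting_threads.end()) {
        std::cout << "resuming the waiter with priority " << itr->current_priority << '\n';
        waiting_threads.erase(itr);
    }
}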

View File

@ -0,0 +1,100 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <vector>
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/export.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>
#include <boost/serialization/version.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"
// Address arbiters are an underlying kernel synchronization object that can be created/used via
// supervisor calls (SVCs). They function as sort of a global lock. Typically, games/other CTR
// applications use them as an underlying mechanism to implement thread-safe barriers, events, and
// semaphores.
namespace Kernel {
class Thread;
class ResourceLimit;
enum class ArbitrationType : u32 {
Signal,
WaitIfLessThan,
DecrementAndWaitIfLessThan,
WaitIfLessThanWithTimeout,
DecrementAndWaitIfLessThanWithTimeout,
};
class AddressArbiter final : public Object, public WakeupCallback {
public:
explicit AddressArbiter(KernelSystem& kernel);
~AddressArbiter() override;
std::string GetTypeName() const override {
return "Arbiter";
}
std::string GetName() const override {
return name;
}
static constexpr HandleType HANDLE_TYPE = HandleType::AddressArbiter;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
std::shared_ptr<ResourceLimit> resource_limit;
std::string name; ///< Name of address arbiter object (optional)
ResultCode ArbitrateAddress(std::shared_ptr<Thread> thread, ArbitrationType type, VAddr address,
s32 value, u64 nanoseconds);
class Callback;
private:
KernelSystem& kernel;
/// Puts the thread to wait on the specified arbitration address under this address arbiter.
void WaitThread(std::shared_ptr<Thread> thread, VAddr wait_address);
/// Resume all threads found to be waiting on the address under this address arbiter
u64 ResumeAllThreads(VAddr address);
/// Resume one thread found to be waiting on the address under this address arbiter and return
/// the resumed thread.
bool ResumeHighestPriorityThread(VAddr address);
/// Threads waiting for the address arbiter to be signaled.
std::vector<std::shared_ptr<Thread>> waiting_threads;
std::shared_ptr<Callback> timeout_callback;
void WakeUp(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
std::shared_ptr<WaitObject> object) override;
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int file_version) {
ar& boost::serialization::base_object<Object>(*this);
ar& name;
ar& waiting_threads;
ar& timeout_callback;
ar& resource_limit;
}
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::AddressArbiter)
BOOST_CLASS_EXPORT_KEY(Kernel::AddressArbiter::Callback)
BOOST_CLASS_VERSION(Kernel::AddressArbiter, 2)
CONSTRUCT_KERNEL_OBJECT(Kernel::AddressArbiter)
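Aside, not part of the diff: a hedged sketch of how an emulated application might drive this object through an arbitration SVC to gate on a shared counter. The svcArbitrateAddress name, the stubbed body, the handle and address values, and the convention that a negative wake count resumes every waiter are assumptions modeled on common 3DS user-mode code; only the ArbitrationType ordering comes from the enum above.

// The SVC stub and all concrete values here are assumptions for illustration.
#include <cstdint>
#include <iostream>

using Handle = std::uint32_t;
using Result = std::int32_t;

// Values follow the ArbitrationType declaration order above (Signal = 0, ...).
enum GuestArbitrationType : std::int32_t {
    Signal = 0,
    WaitIfLessThan = 1,
    DecrementAndWaitIfLessThan = 2,
};

// Stub standing in for the real supervisor call so the sketch compiles; the
// kernel-side entry point is AddressArbiter::ArbitrateAddress() above.
Result svcArbitrateAddress(Handle arbiter, std::uint32_t addr, std::int32_t type,
                           std::int32_t value, std::int64_t nanoseconds) {
    std::cout << "arbiter=" << arbiter << " addr=0x" << std::hex << addr << std::dec
              << " type=" << type << " value=" << value << " ns=" << nanoseconds << '\n';
    return 0;
}

// A waiter parks itself while the 32-bit counter at 'addr' is below 'threshold'.
void WaitForCounter(Handle arbiter, std::uint32_t addr, std::int32_t threshold) {
    svcArbitrateAddress(arbiter, addr, WaitIfLessThan, threshold, 0);
}

// Another thread wakes the waiters on that address; a negative wake count is
// treated here as "resume every waiting thread".
void SignalWaiters(Handle arbiter, std::uint32_t addr) {
    svcArbitrateAddress(arbiter, addr, Signal, -1, 0);
}

int main() {
    constexpr Handle fake_arbiter = 1;                  // hypothetical handle value
    constexpr std::uint32_t counter_addr = 0x00100000;  // hypothetical guest address
    WaitForCounter(fake_arbiter, counter_addr, 1);
    SignalWaiters(fake_arbiter, counter_addr);
}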

View File

@ -0,0 +1,52 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/archives.h"
#include "common/assert.h"
#include "core/global.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/server_session.h"
SERIALIZE_EXPORT_IMPL(Kernel::ClientPort)
namespace Kernel {
ClientPort::ClientPort(KernelSystem& kernel) : Object(kernel), kernel(kernel) {}
ClientPort::~ClientPort() = default;
ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
// Note: Threads do not wait for the server endpoint to call
// AcceptSession before returning from this call.
if (active_sessions >= max_sessions) {
return ERR_MAX_CONNECTIONS_REACHED;
}
active_sessions++;
// Create a new session pair, let the created sessions inherit the parent port's HLE handler.
auto [server, client] = kernel.CreateSessionPair(server_port->GetName(), SharedFrom(this));
if (server_port->hle_handler)
server_port->hle_handler->ClientConnected(server);
else
server_port->pending_sessions.push_back(server);
// Wake the threads waiting on the ServerPort
server_port->WakeupAllWaitingThreads();
return client;
}
void ClientPort::ConnectionClosed() {
ASSERT(active_sessions > 0);
--active_sessions;
}
} // namespace Kernel
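Aside, not part of the diff: the session accounting in Connect() and ConnectionClosed() boils down to a bounded counter. This standalone model (PortModel and the numeric session ids are invented) shows a connection being refused once max_sessions is reached and the slot being reclaimed on close.

// PortModel and the numeric session ids are invented for illustration.
#include <cassert>
#include <cstdint>
#include <optional>

class PortModel {
public:
    explicit PortModel(std::uint32_t max_sessions) : max_sessions{max_sessions} {}

    // Returns a fake session id, or nothing where the real Connect() would
    // return ERR_MAX_CONNECTIONS_REACHED.
    std::optional<std::uint32_t> Connect() {
        if (active_sessions >= max_sessions) {
            return std::nullopt;
        }
        ++active_sessions;
        return next_session_id++;
    }

    void ConnectionClosed() {
        assert(active_sessions > 0);
        --active_sessions;
    }

private:
    std::uint32_t max_sessions;
    std::uint32_t active_sessions = 0;
    std::uint32_t next_session_id = 1;
};

int main() {
    PortModel port{1};
    auto first = port.Connect();  // succeeds
    auto second = port.Connect(); // refused: the only slot is taken
    assert(first && !second);
    port.ConnectionClosed();      // the emulated client closed its endpoint
    assert(port.Connect());       // the slot can be handed out again
}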

View File

@ -0,0 +1,81 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <boost/serialization/export.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/result.h"
namespace Kernel {
class ClientSession;
class ClientPort final : public Object {
public:
explicit ClientPort(KernelSystem& kernel);
~ClientPort() override;
friend class ServerPort;
std::string GetTypeName() const override {
return "ClientPort";
}
std::string GetName() const override {
return name;
}
static constexpr HandleType HANDLE_TYPE = HandleType::ClientPort;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
std::shared_ptr<ServerPort> GetServerPort() const {
return server_port;
}
/**
* Creates a new Session pair, adds the created ServerSession to the associated ServerPort's
* list of pending sessions, and signals the ServerPort, causing any threads
* waiting on it to awake.
* @returns ClientSession The client endpoint of the created Session pair, or error code.
*/
ResultVal<std::shared_ptr<ClientSession>> Connect();
/**
* Signifies that a previously active connection has been closed,
* decreasing the total number of active connections to this port.
*/
void ConnectionClosed();
private:
KernelSystem& kernel;
std::shared_ptr<ServerPort> server_port; ///< ServerPort associated with this client port.
u32 max_sessions = 0; ///< Maximum number of simultaneous sessions the port can have
u32 active_sessions = 0; ///< Number of currently open sessions to this port
std::string name; ///< Name of client port (optional)
friend class KernelSystem;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int file_version) {
ar& boost::serialization::base_object<Object>(*this);
ar& server_port;
ar& max_sessions;
ar& active_sessions;
ar& name;
}
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::ClientPort)
CONSTRUCT_KERNEL_OBJECT(Kernel::ClientPort)

View File

@ -0,0 +1,57 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::ClientSession)
namespace Kernel {
ClientSession::ClientSession(KernelSystem& kernel) : Object(kernel) {}
ClientSession::~ClientSession() {
// This destructor will be called automatically when the last ClientSession handle is closed by
// the emulated application.
// Local references to ServerSession and SessionRequestHandler are necessary to guarantee they
// will be kept alive until after ClientDisconnected() returns.
std::shared_ptr<ServerSession> server = SharedFrom(parent->server);
if (server) {
std::shared_ptr<SessionRequestHandler> hle_handler = server->hle_handler;
if (hle_handler)
hle_handler->ClientDisconnected(server);
// Clean up the list of client threads with pending requests, they are unneeded now that the
// client endpoint is closed.
server->pending_requesting_threads.clear();
server->currently_handling = nullptr;
}
parent->client = nullptr;
if (server) {
// Notify any threads waiting on the ServerSession that the endpoint has been closed. Note
// that this call has to happen after `Session::client` has been set to nullptr to let the
// ServerSession know that the client endpoint has been closed.
server->WakeupAllWaitingThreads();
}
}
ResultCode ClientSession::SendSyncRequest(std::shared_ptr<Thread> thread) {
// Keep ServerSession alive until we're done working with it.
std::shared_ptr<ServerSession> server = SharedFrom(parent->server);
if (server == nullptr)
return ERR_SESSION_CLOSED_BY_REMOTE;
// Signal the server session that new data is available
return server->HandleSyncRequest(std::move(thread));
}
} // namespace Kernel

View File

@ -0,0 +1,67 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/export.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/result.h"
namespace Kernel {
class Session;
class Thread;
class ClientSession final : public Object {
public:
explicit ClientSession(KernelSystem& kernel);
~ClientSession() override;
friend class KernelSystem;
std::string GetTypeName() const override {
return "ClientSession";
}
std::string GetName() const override {
return name;
}
static constexpr HandleType HANDLE_TYPE = HandleType::ClientSession;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
/**
* Sends an SyncRequest from the current emulated thread.
* @param thread Thread that initiated the request.
* @return ResultCode of the operation.
*/
ResultCode SendSyncRequest(std::shared_ptr<Thread> thread);
std::string name; ///< Name of client port (optional)
/// The parent session, which links to the server endpoint.
std::shared_ptr<Session> parent;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int file_version) {
ar& boost::serialization::base_object<Object>(*this);
ar& name;
ar& parent;
}
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::ClientSession)
CONSTRUCT_KERNEL_OBJECT(Kernel::ClientSession)

View File

@ -16,7 +16,6 @@ enum {
OutOfEvents = 15, OutOfEvents = 15,
OutOfTimers = 16, OutOfTimers = 16,
OutOfHandles = 19, OutOfHandles = 19,
ProcessNotFound = 24,
SessionClosedByRemote = 26, SessionClosedByRemote = 26,
PortNameTooLong = 30, PortNameTooLong = 30,
WrongLockingThread = 31, WrongLockingThread = 31,
@ -112,8 +111,5 @@ constexpr ResultCode RESULT_TIMEOUT(ErrorDescription::Timeout, ErrorModule::OS,
constexpr ResultCode ERR_NO_PENDING_SESSIONS(ErrCodes::NoPendingSessions, ErrorModule::OS, constexpr ResultCode ERR_NO_PENDING_SESSIONS(ErrCodes::NoPendingSessions, ErrorModule::OS,
ErrorSummary::WouldBlock, ErrorSummary::WouldBlock,
ErrorLevel::Permanent); // 0xD8401823 ErrorLevel::Permanent); // 0xD8401823
constexpr ResultCode ERR_PROCESS_NOT_FOUND(ErrCodes::ProcessNotFound, ErrorModule::OS,
ErrorSummary::WrongArgument,
ErrorLevel::Permanent); // 0xD9001818
} // namespace Kernel } // namespace Kernel

View File

@ -0,0 +1,59 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::Event)
namespace Kernel {
Event::Event(KernelSystem& kernel) : WaitObject(kernel) {}
Event::~Event() {
if (resource_limit) {
resource_limit->Release(ResourceLimitType::Event, 1);
}
}
std::shared_ptr<Event> KernelSystem::CreateEvent(ResetType reset_type, std::string name) {
auto event = std::make_shared<Event>(*this);
event->signaled = false;
event->reset_type = reset_type;
event->name = std::move(name);
return event;
}
bool Event::ShouldWait(const Thread* thread) const {
return !signaled;
}
void Event::Acquire(Thread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
if (reset_type == ResetType::OneShot)
signaled = false;
}
void Event::Signal() {
signaled = true;
WakeupAllWaitingThreads();
}
void Event::Clear() {
signaled = false;
}
void Event::WakeupAllWaitingThreads() {
WaitObject::WakeupAllWaitingThreads();
if (reset_type == ResetType::Pulse)
signaled = false;
}
} // namespace Kernel
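Aside, not part of the diff: a standalone model of the reset-type behaviour implemented above. The OneShot and Pulse handling mirrors this file; the third reset type, Sticky, is assumed from the rest of the kernel code and is not touched by this hunk.

// Standalone model; Sticky is assumed, only OneShot and Pulse appear above.
#include <cassert>

enum class ResetType { OneShot, Sticky, Pulse };

struct EventModel {
    ResetType reset_type;
    bool signaled = false;

    void Signal() { signaled = true; /* then wake all waiting threads */ }
    void Clear() { signaled = false; }
    bool ShouldWait() const { return !signaled; }

    // Mirrors Event::Acquire(): a one-shot event is consumed by the first waiter.
    void Acquire() {
        assert(!ShouldWait());
        if (reset_type == ResetType::OneShot) {
            signaled = false;
        }
    }

    // Mirrors Event::WakeupAllWaitingThreads(): a pulse only lasts for the wakeup.
    void AfterWakeup() {
        if (reset_type == ResetType::Pulse) {
            signaled = false;
        }
    }
};

int main() {
    EventModel oneshot{ResetType::OneShot};
    oneshot.Signal();
    oneshot.Acquire();            // the first waiter consumes the signal
    assert(oneshot.ShouldWait()); // later waiters block again

    EventModel sticky{ResetType::Sticky};
    sticky.Signal();
    sticky.Acquire();
    assert(!sticky.ShouldWait()); // stays signaled until Clear()

    EventModel pulse{ResetType::Pulse};
    pulse.Signal();
    pulse.AfterWakeup();          // the pulse ends as soon as waiters were woken
    assert(pulse.ShouldWait());
}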

View File

@ -0,0 +1,72 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/export.hpp>
#include <boost/serialization/string.hpp>
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/wait_object.h"
namespace Kernel {
class Event final : public WaitObject {
public:
explicit Event(KernelSystem& kernel);
~Event() override;
std::string GetTypeName() const override {
return "Event";
}
std::string GetName() const override {
return name;
}
void SetName(const std::string& name_) {
name = name_;
}
static constexpr HandleType HANDLE_TYPE = HandleType::Event;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
ResetType GetResetType() const {
return reset_type;
}
bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
void WakeupAllWaitingThreads() override;
void Signal();
void Clear();
std::shared_ptr<ResourceLimit> resource_limit;
private:
ResetType reset_type; ///< Current ResetType
bool signaled; ///< Whether the event has already been signaled
std::string name; ///< Name of event (optional)
friend class KernelSystem;
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int file_version) {
ar& boost::serialization::base_object<WaitObject>(*this);
ar& reset_type;
ar& signaled;
ar& name;
ar& resource_limit;
}
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::Event)
CONSTRUCT_KERNEL_OBJECT(Kernel::Event)

View File

@ -0,0 +1,105 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <utility>
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
namespace Kernel {
namespace {
constexpr u16 GetSlot(Handle handle) {
return handle >> 15;
}
constexpr u16 GetGeneration(Handle handle) {
return handle & 0x7FFF;
}
} // Anonymous namespace
HandleTable::HandleTable(KernelSystem& kernel) : kernel(kernel) {
next_generation = 1;
Clear();
}
HandleTable::~HandleTable() = default;
ResultVal<Handle> HandleTable::Create(std::shared_ptr<Object> obj) {
DEBUG_ASSERT(obj != nullptr);
u16 slot = next_free_slot;
if (slot >= generations.size()) {
LOG_ERROR(Kernel, "Unable to allocate Handle, too many slots in use.");
return ERR_OUT_OF_HANDLES;
}
next_free_slot = generations[slot];
u16 generation = next_generation++;
// Overflow count so it fits in the 15 bits dedicated to the generation in the handle.
// CTR-OS doesn't use generation 0, so skip straight to 1.
if (next_generation >= (1 << 15))
next_generation = 1;
generations[slot] = generation;
objects[slot] = std::move(obj);
Handle handle = generation | (slot << 15);
return handle;
}
ResultVal<Handle> HandleTable::Duplicate(Handle handle) {
std::shared_ptr<Object> object = GetGeneric(handle);
if (object == nullptr) {
LOG_ERROR(Kernel, "Tried to duplicate invalid handle: {:08X}", handle);
return ERR_INVALID_HANDLE;
}
return Create(std::move(object));
}
ResultCode HandleTable::Close(Handle handle) {
if (!IsValid(handle))
return ERR_INVALID_HANDLE;
u16 slot = GetSlot(handle);
objects[slot] = nullptr;
generations[slot] = next_free_slot;
next_free_slot = slot;
return RESULT_SUCCESS;
}
bool HandleTable::IsValid(Handle handle) const {
std::size_t slot = GetSlot(handle);
u16 generation = GetGeneration(handle);
return slot < MAX_COUNT && objects[slot] != nullptr && generations[slot] == generation;
}
std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
if (handle == CurrentThread) {
return SharedFrom(kernel.GetCurrentThreadManager().GetCurrentThread());
} else if (handle == CurrentProcess) {
return kernel.GetCurrentProcess();
}
if (!IsValid(handle)) {
return nullptr;
}
return objects[GetSlot(handle)];
}
void HandleTable::Clear() {
for (u16 i = 0; i < MAX_COUNT; ++i) {
generations[i] = i + 1;
objects[i] = nullptr;
}
next_free_slot = 0;
}
} // namespace Kernel
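Aside, not part of the diff: a compile-time check of the handle packing used by Create(), GetSlot() and GetGeneration() above. MakeHandle is a local helper mirroring the expression 'generation | (slot << 15)'; the slot and generation values are arbitrary.

// MakeHandle is a local helper; GetSlot/GetGeneration copy the logic above.
#include <cassert>
#include <cstdint>

using Handle = std::uint32_t;

constexpr std::uint16_t GetSlot(Handle handle) {
    return static_cast<std::uint16_t>(handle >> 15);
}
constexpr std::uint16_t GetGeneration(Handle handle) {
    return static_cast<std::uint16_t>(handle & 0x7FFF);
}
constexpr Handle MakeHandle(std::uint16_t slot, std::uint16_t generation) {
    // Same expression as Create(): generation | (slot << 15).
    return static_cast<Handle>(generation) | (static_cast<Handle>(slot) << 15);
}

int main() {
    constexpr Handle handle = MakeHandle(42, 7);
    static_assert(GetSlot(handle) == 42);
    static_assert(GetGeneration(handle) == 7);

    // Generations wrap back to 1 before exceeding 15 bits and never reuse 0,
    // matching 'if (next_generation >= (1 << 15)) next_generation = 1;'.
    std::uint16_t next_generation = 0x7FFF;
    ++next_generation;
    if (next_generation >= (1 << 15)) {
        next_generation = 1;
    }
    assert(next_generation == 1);
}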

View File

@ -0,0 +1,132 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <cstddef>
#include <memory>
#include <boost/serialization/array.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/result.h"
namespace Kernel {
enum KernelHandle : Handle {
CurrentThread = 0xFFFF8000,
CurrentProcess = 0xFFFF8001,
};
/**
* This class allows the creation of Handles, which are references to objects that can be tested
* for validity and looked up. Here they are used to pass references to kernel objects to/from the
 * emulated process. It has been designed so that it follows the same handle format and has
* approximately the same restrictions as the handle manager in the CTR-OS.
*
* Handles contain two sub-fields: a slot index (bits 31:15) and a generation value (bits 14:0).
* The slot index is used to index into the arrays in this class to access the data corresponding
* to the Handle.
*
* To prevent accidental use of a freed Handle whose slot has already been reused, a global counter
* is kept and incremented every time a Handle is created. This is the Handle's "generation". The
* value of the counter is stored into the Handle as well as in the handle table (in the
* "generations" array). When looking up a handle, the Handle's generation must match with the
* value stored on the class, otherwise the Handle is considered invalid.
*
* To find free slots when allocating a Handle without needing to scan the entire object array, the
* generations field of unallocated slots is re-purposed as a linked list of indices to free slots.
* When a Handle is created, an index is popped off the list and used for the new Handle. When it
* is destroyed, it is again pushed onto the list to be re-used by the next allocation. It is
* likely that this allocation strategy differs from the one used in CTR-OS, but this hasn't been
* verified and isn't likely to cause any problems.
*/
class HandleTable final : NonCopyable {
public:
explicit HandleTable(KernelSystem& kernel);
~HandleTable();
/**
* Allocates a handle for the given object.
* @return The created Handle or one of the following errors:
* - `ERR_OUT_OF_HANDLES`: the maximum number of handles has been exceeded.
*/
ResultVal<Handle> Create(std::shared_ptr<Object> obj);
/**
* Returns a new handle that points to the same object as the passed in handle.
* @return The duplicated Handle or one of the following errors:
* - `ERR_INVALID_HANDLE`: an invalid handle was passed in.
* - Any errors returned by `Create()`.
*/
ResultVal<Handle> Duplicate(Handle handle);
/**
* Closes a handle, removing it from the table and decreasing the object's ref-count.
* @return `RESULT_SUCCESS` or one of the following errors:
* - `ERR_INVALID_HANDLE`: an invalid handle was passed in.
*/
ResultCode Close(Handle handle);
/// Checks if a handle is valid and points to an existing object.
bool IsValid(Handle handle) const;
/**
* Looks up a handle.
* @return Pointer to the looked-up object, or `nullptr` if the handle is not valid.
*/
std::shared_ptr<Object> GetGeneric(Handle handle) const;
/**
* Looks up a handle while verifying its type.
* @return Pointer to the looked-up object, or `nullptr` if the handle is not valid or its
* type differs from the requested one.
*/
template <class T>
std::shared_ptr<T> Get(Handle handle) const {
return DynamicObjectCast<T>(GetGeneric(handle));
}
/// Closes all handles held in this table.
void Clear();
private:
/**
* This is the maximum limit of handles allowed per process in CTR-OS. It can be further
* reduced by ExHeader values, but this is not emulated here.
*/
static const std::size_t MAX_COUNT = 4096;
/// Stores the Object referenced by the handle or null if the slot is empty.
std::array<std::shared_ptr<Object>, MAX_COUNT> objects;
/**
* The value of `next_generation` when the handle was created, used to check for validity. For
* empty slots, contains the index of the next free slot in the list.
*/
std::array<u16, MAX_COUNT> generations;
/**
* Global counter of the number of created handles. Stored in `generations` when a handle is
* created, and wraps around to 1 when it hits 0x8000.
*/
u16 next_generation;
/// Head of the free slots linked list.
u16 next_free_slot;
KernelSystem& kernel;
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int file_version) {
ar& objects;
ar& generations;
ar& next_generation;
ar& next_free_slot;
}
};
} // namespace Kernel
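Aside, not part of the diff: a tiny model of the free-slot list the comment above describes, where the generations array doubles as a linked list of free indices. MAX_COUNT is shrunk to 8 and the functions are free functions purely to keep the sketch short.

// Shrunk-down model: MAX_COUNT is 8 here instead of 4096.
#include <array>
#include <cassert>
#include <cstdint>
#include <optional>

constexpr std::size_t MAX_COUNT = 8;

std::array<std::uint16_t, MAX_COUNT> generations;
std::uint16_t next_free_slot = 0;

void Clear() {
    for (std::uint16_t i = 0; i < MAX_COUNT; ++i) {
        generations[i] = i + 1; // each empty slot points at the next free one
    }
    next_free_slot = 0;
}

std::optional<std::uint16_t> AllocateSlot() {
    if (next_free_slot >= MAX_COUNT) {
        return std::nullopt; // the real table returns ERR_OUT_OF_HANDLES here
    }
    std::uint16_t slot = next_free_slot;
    next_free_slot = generations[slot]; // pop the head of the free list
    return slot;
}

void FreeSlot(std::uint16_t slot) {
    generations[slot] = next_free_slot; // push the slot back onto the list
    next_free_slot = slot;
}

int main() {
    Clear();
    auto a = AllocateSlot();
    auto b = AllocateSlot();
    assert(a == 0 && b == 1);
    FreeSlot(*a);
    assert(AllocateSlot() == 0); // the freed slot is handed out again first
}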

View File

@ -8,10 +8,10 @@
#include "common/assert.h" #include "common/assert.h"
#include "common/common_types.h" #include "common/common_types.h"
#include "core/core.h" #include "core/core.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h" #include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/ipc_debugger/recorder.h" #include "core/hle/kernel/ipc_debugger/recorder.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h" #include "core/hle/kernel/process.h"
@ -23,13 +23,15 @@ public:
ThreadCallback(std::shared_ptr<HLERequestContext> context_, ThreadCallback(std::shared_ptr<HLERequestContext> context_,
std::shared_ptr<HLERequestContext::WakeupCallback> callback_) std::shared_ptr<HLERequestContext::WakeupCallback> callback_)
: callback(std::move(callback_)), context(std::move(context_)) {} : callback(std::move(callback_)), context(std::move(context_)) {}
void WakeUp(ThreadWakeupReason reason, Thread* thread, KSynchronizationObject* object) { void WakeUp(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
ASSERT(thread->m_status == ThreadStatus::WaitHleEvent); std::shared_ptr<WaitObject> object) {
ASSERT(thread->status == ThreadStatus::WaitHleEvent);
if (callback) { if (callback) {
callback->WakeUp(thread, *context, reason); callback->WakeUp(thread, *context, reason);
} }
Process* process = thread->GetOwner(); auto process = thread->owner_process.lock();
ASSERT(process);
// We must copy the entire command buffer *plus* the entire static buffers area, since // We must copy the entire command buffer *plus* the entire static buffers area, since
// the translation might need to read from it in order to retrieve the StaticBuffer // the translation might need to read from it in order to retrieve the StaticBuffer
@ -58,16 +60,16 @@ private:
friend class boost::serialization::access; friend class boost::serialization::access;
}; };
SessionRequestHandler::SessionInfo::SessionInfo(KServerSession* session_, SessionRequestHandler::SessionInfo::SessionInfo(std::shared_ptr<ServerSession> session,
std::unique_ptr<SessionDataBase> data) std::unique_ptr<SessionDataBase> data)
: session(session_), data(std::move(data)) {} : session(std::move(session)), data(std::move(data)) {}
void SessionRequestHandler::ClientConnected(KServerSession* server_session) { void SessionRequestHandler::ClientConnected(std::shared_ptr<ServerSession> server_session) {
server_session->SetHleHandler(shared_from_this()); server_session->SetHleHandler(shared_from_this());
connected_sessions.emplace_back(server_session, MakeSessionData()); connected_sessions.emplace_back(std::move(server_session), MakeSessionData());
} }
void SessionRequestHandler::ClientDisconnected(KServerSession* server_session) { void SessionRequestHandler::ClientDisconnected(std::shared_ptr<ServerSession> server_session) {
server_session->SetHleHandler(nullptr); server_session->SetHleHandler(nullptr);
connected_sessions.erase( connected_sessions.erase(
std::remove_if(connected_sessions.begin(), connected_sessions.end(), std::remove_if(connected_sessions.begin(), connected_sessions.end(),
@ -75,40 +77,40 @@ void SessionRequestHandler::ClientDisconnected(KServerSession* server_session) {
connected_sessions.end()); connected_sessions.end());
} }
KEvent* HLERequestContext::SleepClientThread(const std::string& reason, std::shared_ptr<Event> HLERequestContext::SleepClientThread(
std::chrono::nanoseconds timeout, const std::string& reason, std::chrono::nanoseconds timeout,
std::shared_ptr<WakeupCallback> callback) { std::shared_ptr<WakeupCallback> callback) {
// Put the client thread to sleep until the wait event is signaled or the timeout expires. // Put the client thread to sleep until the wait event is signaled or the timeout expires.
thread->m_wakeup_callback = std::make_shared<ThreadCallback>(shared_from_this(), callback); thread->wakeup_callback = std::make_shared<ThreadCallback>(shared_from_this(), callback);
auto event = kernel.CreateEvent(Kernel::ResetType::OneShot, "HLE Pause Event: " + reason); auto event = kernel.CreateEvent(Kernel::ResetType::OneShot, "HLE Pause Event: " + reason);
thread->m_status = ThreadStatus::WaitHleEvent; thread->status = ThreadStatus::WaitHleEvent;
thread->m_wait_objects = {event}; thread->wait_objects = {event};
event->AddWaitingThread(thread); event->AddWaitingThread(thread);
if (timeout.count() > 0) { if (timeout.count() > 0)
thread->WakeAfterDelay(timeout.count()); thread->WakeAfterDelay(timeout.count());
}
return event; return event;
} }
HLERequestContext::HLERequestContext() : kernel(Core::Global<KernelSystem>()) {} HLERequestContext::HLERequestContext() : kernel(Core::Global<KernelSystem>()) {}
HLERequestContext::HLERequestContext(KernelSystem& kernel, KServerSession* session, Thread* thread) HLERequestContext::HLERequestContext(KernelSystem& kernel, std::shared_ptr<ServerSession> session,
: kernel(kernel), session(session), thread(thread) { std::shared_ptr<Thread> thread)
: kernel(kernel), session(std::move(session)), thread(thread) {
cmd_buf[0] = 0; cmd_buf[0] = 0;
} }
HLERequestContext::~HLERequestContext() = default; HLERequestContext::~HLERequestContext() = default;
KAutoObject* HLERequestContext::GetIncomingHandle(u32 id_from_cmdbuf) const { std::shared_ptr<Object> HLERequestContext::GetIncomingHandle(u32 id_from_cmdbuf) const {
ASSERT(id_from_cmdbuf < request_handles.size()); ASSERT(id_from_cmdbuf < request_handles.size());
return request_handles[id_from_cmdbuf]; return request_handles[id_from_cmdbuf];
} }
u32 HLERequestContext::AddOutgoingHandle(KAutoObject* object) { u32 HLERequestContext::AddOutgoingHandle(std::shared_ptr<Object> object) {
request_handles.push_back(object); request_handles.push_back(std::move(object));
return static_cast<u32>(request_handles.size() - 1); return static_cast<u32>(request_handles.size() - 1);
} }
@ -124,8 +126,9 @@ void HLERequestContext::AddStaticBuffer(u8 buffer_id, std::vector<u8> data) {
static_buffers[buffer_id] = std::move(data); static_buffers[buffer_id] = std::move(data);
} }
ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const u32_le* src_cmdbuf, ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(
Process* src_process) { const u32_le* src_cmdbuf, std::shared_ptr<Process> src_process_) {
auto& src_process = *src_process_;
IPC::Header header{src_cmdbuf[0]}; IPC::Header header{src_cmdbuf[0]};
std::size_t untranslated_size = 1u + header.normal_params_size; std::size_t untranslated_size = 1u + header.normal_params_size;
@ -149,32 +152,25 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const u32_le* sr
switch (IPC::GetDescriptorType(descriptor)) { switch (IPC::GetDescriptorType(descriptor)) {
case IPC::DescriptorType::CopyHandle: case IPC::DescriptorType::CopyHandle:
case IPC::DescriptorType::MoveHandle: { case IPC::DescriptorType::MoveHandle: {
const u32 num_handles = IPC::HandleNumberFromDesc(descriptor); u32 num_handles = IPC::HandleNumberFromDesc(descriptor);
auto& src_handle_table = src_process->handle_table;
ASSERT(i + num_handles <= command_size); // TODO(yuriks): Return error ASSERT(i + num_handles <= command_size); // TODO(yuriks): Return error
for (u32 j = 0; j < num_handles; ++j) { for (u32 j = 0; j < num_handles; ++j) {
const Handle handle = src_cmdbuf[i]; Handle handle = src_cmdbuf[i];
if (!handle) { std::shared_ptr<Object> object = nullptr;
cmd_buf[i++] = AddOutgoingHandle(nullptr); if (handle != 0) {
continue; object = src_process.handle_table.GetGeneric(handle);
ASSERT(object != nullptr); // TODO(yuriks): Return error
if (descriptor == IPC::DescriptorType::MoveHandle) {
src_process.handle_table.Close(handle);
}
} }
// Get object from the handle table. cmd_buf[i++] = AddOutgoingHandle(std::move(object));
KScopedAutoObject object =
src_handle_table.GetObjectForIpcWithoutPseudoHandle(handle);
ASSERT(object.IsNotNull());
// If we are moving, remove the old handle.
if (descriptor == IPC::DescriptorType::MoveHandle) {
src_handle_table.Remove(handle);
}
cmd_buf[i++] = AddOutgoingHandle(object.GetPointerUnsafe());
} }
break; break;
} }
case IPC::DescriptorType::CallingPid: { case IPC::DescriptorType::CallingPid: {
cmd_buf[i++] = src_process->process_id; cmd_buf[i++] = src_process.process_id;
break; break;
} }
case IPC::DescriptorType::StaticBuffer: { case IPC::DescriptorType::StaticBuffer: {
@ -183,7 +179,7 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const u32_le* sr
// Copy the input buffer into our own vector and store it. // Copy the input buffer into our own vector and store it.
std::vector<u8> data(buffer_info.size); std::vector<u8> data(buffer_info.size);
kernel.memory.ReadBlock(*src_process, source_address, data.data(), data.size()); kernel.memory.ReadBlock(src_process, source_address, data.data(), data.size());
AddStaticBuffer(buffer_info.buffer_id, std::move(data)); AddStaticBuffer(buffer_info.buffer_id, std::move(data));
cmd_buf[i++] = source_address; cmd_buf[i++] = source_address;
@ -191,7 +187,7 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const u32_le* sr
} }
case IPC::DescriptorType::MappedBuffer: { case IPC::DescriptorType::MappedBuffer: {
u32 next_id = static_cast<u32>(request_mapped_buffers.size()); u32 next_id = static_cast<u32>(request_mapped_buffers.size());
request_mapped_buffers.emplace_back(kernel.memory, src_process, descriptor, request_mapped_buffers.emplace_back(kernel.memory, src_process_, descriptor,
src_cmdbuf[i], next_id); src_cmdbuf[i], next_id);
cmd_buf[i++] = next_id; cmd_buf[i++] = next_id;
break; break;
@ -236,14 +232,14 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(u32_le* dst_cmdbuf,
case IPC::DescriptorType::CopyHandle: case IPC::DescriptorType::CopyHandle:
case IPC::DescriptorType::MoveHandle: { case IPC::DescriptorType::MoveHandle: {
// HLE services don't use handles, so we treat both CopyHandle and MoveHandle equally // HLE services don't use handles, so we treat both CopyHandle and MoveHandle equally
const u32 num_handles = IPC::HandleNumberFromDesc(descriptor); u32 num_handles = IPC::HandleNumberFromDesc(descriptor);
ASSERT(i + num_handles <= command_size); ASSERT(i + num_handles <= command_size);
for (u32 j = 0; j < num_handles; ++j) { for (u32 j = 0; j < num_handles; ++j) {
KAutoObject* object = GetIncomingHandle(cmd_buf[i]); std::shared_ptr<Object> object = GetIncomingHandle(cmd_buf[i]);
Handle handle = 0; Handle handle = 0;
if (object != nullptr) { if (object != nullptr) {
// TODO(yuriks): Figure out the proper error handling for if this fails // TODO(yuriks): Figure out the proper error handling for if this fails
dst_process.handle_table.Add(std::addressof(handle), object); handle = dst_process.handle_table.Create(object).Unwrap();
} }
dst_cmdbuf[i++] = handle; dst_cmdbuf[i++] = handle;
} }
@ -301,8 +297,8 @@ void HLERequestContext::ReportUnimplemented() const {
MappedBuffer::MappedBuffer() : memory(&Core::Global<Core::System>().Memory()) {} MappedBuffer::MappedBuffer() : memory(&Core::Global<Core::System>().Memory()) {}
MappedBuffer::MappedBuffer(Memory::MemorySystem& memory, Process* process, u32 descriptor, MappedBuffer::MappedBuffer(Memory::MemorySystem& memory, std::shared_ptr<Process> process,
VAddr address, u32 id) u32 descriptor, VAddr address, u32 id)
: memory(&memory), id(id), address(address), process(std::move(process)) { : memory(&memory), id(id), address(address), process(std::move(process)) {
IPC::MappedBufferDescInfo desc{descriptor}; IPC::MappedBufferDescInfo desc{descriptor};
size = desc.size; size = desc.size;
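
As a usage illustration for the SleepClientThread signature restored above, here is a hedged sketch of a handler deferring its reply. Only the WakeupCallback and SleepClientThread shapes come from this file; every name containing "Example", the timeout, and the free-function framing are assumptions.

#include <chrono>
#include <memory>

#include "core/hle/kernel/event.h"
#include "core/hle/kernel/hle_ipc.h"

// Illustrative sketch only; not an actual service in this codebase.
class ExampleWakeup final : public Kernel::HLERequestContext::WakeupCallback {
public:
    void WakeUp(std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx,
                Kernel::ThreadWakeupReason reason) override {
        // Runs on wakeup (event signaled or timeout reached); the deferred
        // response would be written into the context's command buffer here.
    }
};

void DoExampleAsyncCommand(Kernel::HLERequestContext& ctx) {
    std::shared_ptr<Kernel::Event> pause_event = ctx.SleepClientThread(
        "ExampleService::DoExampleAsyncCommand", std::chrono::nanoseconds{16'000'000},
        std::make_shared<ExampleWakeup>());
    // Kick off the asynchronous work elsewhere; calling pause_event->Signal()
    // later resumes the client thread and invokes ExampleWakeup::WakeUp.
}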

View File

@ -13,14 +13,15 @@
#include <vector> #include <vector>
#include <boost/container/small_vector.hpp> #include <boost/container/small_vector.hpp>
#include <boost/serialization/assume_abstract.hpp> #include <boost/serialization/assume_abstract.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/unique_ptr.hpp> #include <boost/serialization/unique_ptr.hpp>
#include <boost/serialization/vector.hpp> #include <boost/serialization/vector.hpp>
#include "common/common_types.h" #include "common/common_types.h"
#include "common/serialization/boost_small_vector.hpp" #include "common/serialization/boost_small_vector.hpp"
#include "common/swap.h" #include "common/swap.h"
#include "core/hle/ipc.h" #include "core/hle/ipc.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/object.h" #include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_session.h"
namespace Service { namespace Service {
class ServiceFrameworkBase; class ServiceFrameworkBase;
@ -35,7 +36,7 @@ namespace Kernel {
class HandleTable; class HandleTable;
class Process; class Process;
class Thread; class Thread;
class KEvent; class Event;
class HLERequestContext; class HLERequestContext;
class KernelSystem; class KernelSystem;
@ -60,14 +61,14 @@ public:
* associated ServerSession alive for the duration of the connection. * associated ServerSession alive for the duration of the connection.
* @param server_session Owning pointer to the ServerSession associated with the connection. * @param server_session Owning pointer to the ServerSession associated with the connection.
*/ */
virtual void ClientConnected(KServerSession* server_session); virtual void ClientConnected(std::shared_ptr<ServerSession> server_session);
/** /**
* Signals that a client has just disconnected from this HLE handler and releases the * Signals that a client has just disconnected from this HLE handler and releases the
* associated ServerSession. * associated ServerSession.
* @param server_session ServerSession associated with the connection. * @param server_session ServerSession associated with the connection.
*/ */
virtual void ClientDisconnected(KServerSession* server_session); virtual void ClientDisconnected(std::shared_ptr<ServerSession> server_session);
/// Empty placeholder structure for services with no per-session data. The session data classes /// Empty placeholder structure for services with no per-session data. The session data classes
/// in each service must inherit from this. /// in each service must inherit from this.
@ -76,7 +77,7 @@ public:
private: private:
template <class Archive> template <class Archive>
void serialize(Archive& ar, const u32 file_version) {} void serialize(Archive& ar, const unsigned int file_version) {}
friend class boost::serialization::access; friend class boost::serialization::access;
}; };
@ -86,7 +87,7 @@ protected:
/// Returns the session data associated with the server session. /// Returns the session data associated with the server session.
template <typename T> template <typename T>
T* GetSessionData(KServerSession* session) { T* GetSessionData(std::shared_ptr<ServerSession> session) {
static_assert(std::is_base_of<SessionDataBase, T>(), static_assert(std::is_base_of<SessionDataBase, T>(),
"T is not a subclass of SessionDataBase"); "T is not a subclass of SessionDataBase");
auto itr = std::find_if(connected_sessions.begin(), connected_sessions.end(), auto itr = std::find_if(connected_sessions.begin(), connected_sessions.end(),
@ -96,9 +97,9 @@ protected:
} }
struct SessionInfo { struct SessionInfo {
SessionInfo(KServerSession* session, std::unique_ptr<SessionDataBase> data); SessionInfo(std::shared_ptr<ServerSession> session, std::unique_ptr<SessionDataBase> data);
KServerSession* session; std::shared_ptr<ServerSession> session;
std::unique_ptr<SessionDataBase> data; std::unique_ptr<SessionDataBase> data;
private: private:
@ -126,8 +127,8 @@ private:
class MappedBuffer { class MappedBuffer {
public: public:
MappedBuffer(Memory::MemorySystem& memory, Process* process, u32 descriptor, VAddr address, MappedBuffer(Memory::MemorySystem& memory, std::shared_ptr<Process> process, u32 descriptor,
u32 id); VAddr address, u32 id);
// interface for service // interface for service
void Read(void* dest_buffer, std::size_t offset, std::size_t size); void Read(void* dest_buffer, std::size_t offset, std::size_t size);
@ -150,7 +151,7 @@ private:
Memory::MemorySystem* memory; Memory::MemorySystem* memory;
u32 id; u32 id;
VAddr address; VAddr address;
Process* process; std::shared_ptr<Process> process;
u32 size; u32 size;
IPC::MappedBufferPermissions perms; IPC::MappedBufferPermissions perms;
@ -198,7 +199,8 @@ private:
*/ */
class HLERequestContext : public std::enable_shared_from_this<HLERequestContext> { class HLERequestContext : public std::enable_shared_from_this<HLERequestContext> {
public: public:
explicit HLERequestContext(KernelSystem& kernel, KServerSession* session, Thread* thread); HLERequestContext(KernelSystem& kernel, std::shared_ptr<ServerSession> session,
std::shared_ptr<Thread> thread);
~HLERequestContext(); ~HLERequestContext();
/// Returns a pointer to the IPC command buffer for this request. /// Returns a pointer to the IPC command buffer for this request.
@ -215,21 +217,21 @@ public:
* Returns the session through which this request was made. This can be used as a map key to * Returns the session through which this request was made. This can be used as a map key to
* access per-client data on services. * access per-client data on services.
*/ */
KServerSession* Session() const { std::shared_ptr<ServerSession> Session() const {
return session; return session;
} }
/** /**
* Returns the client thread that made the service request. * Returns the client thread that made the service request.
*/ */
Thread* ClientThread() const { std::shared_ptr<Thread> ClientThread() const {
return thread; return thread;
} }
class WakeupCallback { class WakeupCallback {
public: public:
virtual ~WakeupCallback() = default; virtual ~WakeupCallback() = default;
virtual void WakeUp(Thread* thread, HLERequestContext& context, virtual void WakeUp(std::shared_ptr<Thread> thread, HLERequestContext& context,
ThreadWakeupReason reason) = 0; ThreadWakeupReason reason) = 0;
private: private:
@ -249,8 +251,9 @@ public:
* was called. * was called.
* @returns Event that when signaled will resume the thread and call the callback function. * @returns Event that when signaled will resume the thread and call the callback function.
*/ */
KEvent* SleepClientThread(const std::string& reason, std::chrono::nanoseconds timeout, std::shared_ptr<Event> SleepClientThread(const std::string& reason,
std::shared_ptr<WakeupCallback> callback); std::chrono::nanoseconds timeout,
std::shared_ptr<WakeupCallback> callback);
private: private:
template <typename ResultFunctor> template <typename ResultFunctor>
@ -261,7 +264,7 @@ private:
future = std::move(fut); future = std::move(fut);
} }
void WakeUp(Kernel::Thread* thread, Kernel::HLERequestContext& ctx, void WakeUp(std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx,
Kernel::ThreadWakeupReason reason) { Kernel::ThreadWakeupReason reason) {
functor(ctx); functor(ctx);
} }
@ -326,13 +329,13 @@ public:
* Resolves an object id from the request command buffer into a pointer to an object. See the * Resolves an object id from the request command buffer into a pointer to an object. See the
* "HLE handle protocol" section in the class documentation for more details. * "HLE handle protocol" section in the class documentation for more details.
*/ */
KAutoObject* GetIncomingHandle(u32 id_from_cmdbuf) const; std::shared_ptr<Object> GetIncomingHandle(u32 id_from_cmdbuf) const;
/** /**
* Adds an outgoing object to the response, returning the id which should be used to reference * Adds an outgoing object to the response, returning the id which should be used to reference
* it. See the "HLE handle protocol" section in the class documentation for more details. * it. See the "HLE handle protocol" section in the class documentation for more details.
*/ */
u32 AddOutgoingHandle(KAutoObject* object); u32 AddOutgoingHandle(std::shared_ptr<Object> object);
/** /**
* Discards all Objects from the context, invalidating all ids. This may be called after reading * Discards all Objects from the context, invalidating all ids. This may be called after reading
@ -360,8 +363,8 @@ public:
MappedBuffer& GetMappedBuffer(u32 id_from_cmdbuf); MappedBuffer& GetMappedBuffer(u32 id_from_cmdbuf);
/// Populates this context with data from the requesting process/thread. /// Populates this context with data from the requesting process/thread.
ResultCode PopulateFromIncomingCommandBuffer(const u32_le* src_cmdbuf, Process* src_process); ResultCode PopulateFromIncomingCommandBuffer(const u32_le* src_cmdbuf,
std::shared_ptr<Process> src_process);
/// Writes data from this context back to the requesting process/thread. /// Writes data from this context back to the requesting process/thread.
ResultCode WriteToOutgoingCommandBuffer(u32_le* dst_cmdbuf, Process& dst_process) const; ResultCode WriteToOutgoingCommandBuffer(u32_le* dst_cmdbuf, Process& dst_process) const;
@ -374,10 +377,10 @@ public:
private: private:
KernelSystem& kernel; KernelSystem& kernel;
std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf; std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
KServerSession* session; std::shared_ptr<ServerSession> session;
Thread* thread; std::shared_ptr<Thread> thread;
// TODO(yuriks): Check common usage of this and optimize size accordingly // TODO(yuriks): Check common usage of this and optimize size accordingly
boost::container::small_vector<KAutoObject*, 8> request_handles; boost::container::small_vector<std::shared_ptr<Object>, 8> request_handles;
// The static buffers will be created when the IPC request is translated. // The static buffers will be created when the IPC request is translated.
std::array<std::vector<u8>, IPC::MAX_STATIC_BUFFERS> static_buffers; std::array<std::vector<u8>, IPC::MAX_STATIC_BUFFERS> static_buffers;
// The mapped buffers will be created when the IPC request is translated // The mapped buffers will be created when the IPC request is translated
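
The SessionRequestHandler declarations above outline per-session state but show no service actually using it; a hedged sketch follows. The struct derives from SessionDataBase as the comment requires, while the handler type and the usage line are assumptions rather than code from this change:

// Sketch: per-connection state for a hypothetical service.
struct ExampleSessionData : Kernel::SessionRequestHandler::SessionDataBase {
    u32 requests_handled = 0; // state private to one client connection
};

// Inside a handler class derived from SessionRequestHandler, a request can
// then reach the state of its own connection via the protected helper:
//     auto* data = GetSessionData<ExampleSessionData>(ctx.Session());
//     data->requests_handled++;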

View File

@ -7,9 +7,9 @@
#include "common/memory_ref.h" #include "common/memory_ref.h"
#include "core/core.h" #include "core/core.h"
#include "core/hle/ipc.h" #include "core/hle/ipc.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/ipc.h" #include "core/hle/kernel/ipc.h"
#include "core/hle/kernel/ipc_debugger/recorder.h" #include "core/hle/kernel/ipc_debugger/recorder.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory.h" #include "core/hle/kernel/memory.h"
#include "core/hle/kernel/process.h" #include "core/hle/kernel/process.h"
@ -19,12 +19,13 @@
namespace Kernel { namespace Kernel {
ResultCode TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySystem& memory, ResultCode TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySystem& memory,
Thread* src_thread, Thread* dst_thread, VAddr src_address, std::shared_ptr<Thread> src_thread,
std::shared_ptr<Thread> dst_thread, VAddr src_address,
VAddr dst_address, VAddr dst_address,
std::vector<MappedBufferContext>& mapped_buffer_context, std::vector<MappedBufferContext>& mapped_buffer_context,
bool reply) { bool reply) {
auto src_process = src_thread->owner_process; auto src_process = src_thread->owner_process.lock();
auto dst_process = dst_thread->owner_process; auto dst_process = dst_thread->owner_process.lock();
ASSERT(src_process && dst_process); ASSERT(src_process && dst_process);
IPC::Header header; IPC::Header header;
@ -65,34 +66,30 @@ ResultCode TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySy
for (u32 j = 0; j < num_handles; ++j) { for (u32 j = 0; j < num_handles; ++j) {
Handle handle = cmd_buf[i]; Handle handle = cmd_buf[i];
std::shared_ptr<Object> object = nullptr;
// Perform pseudo-handle detection here because by the time this function is called, // Perform pseudo-handle detection here because by the time this function is called,
// the current thread and process are no longer the ones which created this IPC // the current thread and process are no longer the ones which created this IPC
// request, but the ones that are handling it. // request, but the ones that are handling it.
KScopedAutoObject object = [&]() -> KScopedAutoObject<KAutoObject> { if (handle == CurrentThread) {
if (handle == CurrentThread) { object = src_thread;
return src_thread; } else if (handle == CurrentProcess) {
} else if (handle == CurrentProcess) { object = src_process;
return src_process; } else if (handle != 0) {
} else if (handle != 0) { object = src_process->handle_table.GetGeneric(handle);
auto obj = src_process->handle_table.GetObject(handle); if (descriptor == IPC::DescriptorType::MoveHandle) {
if (descriptor == IPC::DescriptorType::MoveHandle) { src_process->handle_table.Close(handle);
src_process->handle_table.Remove(handle);
}
return obj;
} }
return nullptr; }
}();
if (object.IsNull()) { if (object == nullptr) {
// Note: The real kernel sets invalid translated handles to 0 in the target // Note: The real kernel sets invalid translated handles to 0 in the target
// command buffer. // command buffer.
cmd_buf[i++] = 0; cmd_buf[i++] = 0;
continue; continue;
} }
Handle dst_handle = 0; auto result = dst_process->handle_table.Create(std::move(object));
dst_process->handle_table.Add(&dst_handle, object.GetPointerUnsafe()); cmd_buf[i++] = result.ValueOr(0);
cmd_buf[i++] = dst_handle;
} }
break; break;
} }
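
A distilled sketch (not the emulator's function) of the per-handle rule applied in the translation loop above: pseudo-handles resolve to the sender's own thread or process instead of a table entry, MoveHandle additionally closes the handle in the sender's table while CopyHandle leaves it open, and anything unresolvable is written back to the command buffer as 0.

std::shared_ptr<Object> ResolveOneHandle(const std::shared_ptr<Thread>& src_thread,
                                         const std::shared_ptr<Process>& src_process,
                                         Handle handle, bool is_move) {
    if (handle == CurrentThread) {
        return src_thread;
    }
    if (handle == CurrentProcess) {
        return src_process;
    }
    if (handle == 0) {
        return nullptr;
    }
    auto object = src_process->handle_table.GetGeneric(handle);
    if (is_move) {
        // Moving transfers ownership, so the sender's entry is closed.
        src_process->handle_table.Close(handle);
    }
    return object; // may be nullptr; the caller writes 0 in that case
}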

View File

@ -4,6 +4,7 @@
#pragma once #pragma once
#include <memory>
#include <vector> #include <vector>
#include <boost/serialization/shared_ptr.hpp> #include <boost/serialization/shared_ptr.hpp>
#include "common/common_types.h" #include "common/common_types.h"
@ -40,7 +41,8 @@ private:
/// Performs IPC command buffer translation from one process to another. /// Performs IPC command buffer translation from one process to another.
ResultCode TranslateCommandBuffer(KernelSystem& system, Memory::MemorySystem& memory, ResultCode TranslateCommandBuffer(KernelSystem& system, Memory::MemorySystem& memory,
Thread* src_thread, Thread* dst_thread, VAddr src_address, std::shared_ptr<Thread> src_thread,
std::shared_ptr<Thread> dst_thread, VAddr src_address,
VAddr dst_address, VAddr dst_address,
std::vector<MappedBufferContext>& mapped_buffer_context, std::vector<MappedBufferContext>& mapped_buffer_context,
bool reply); bool reply);

View File

@ -4,23 +4,20 @@
#include "common/assert.h" #include "common/assert.h"
#include "common/logging/log.h" #include "common/logging/log.h"
#include "common/scope_exit.h" #include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/ipc_debugger/recorder.h" #include "core/hle/kernel/ipc_debugger/recorder.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/process.h" #include "core/hle/kernel/process.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/thread.h" #include "core/hle/kernel/thread.h"
#include "core/hle/service/service.h" #include "core/hle/service/service.h"
namespace IPCDebugger { namespace IPCDebugger {
namespace { namespace {
ObjectInfo GetObjectInfo(const Kernel::Object* object) {
ObjectInfo GetObjectInfo(const Kernel::KAutoObject* object) {
if (object == nullptr) { if (object == nullptr) {
return {}; return {};
} }
@ -40,33 +37,29 @@ ObjectInfo GetObjectInfo(const Kernel::Process* process) {
} }
return {process->GetTypeName(), process->GetName(), static_cast<int>(process->process_id)}; return {process->GetTypeName(), process->GetName(), static_cast<int>(process->process_id)};
} }
} // namespace
} // Anonymous namespace
Recorder::Recorder() = default; Recorder::Recorder() = default;
Recorder::~Recorder() = default; Recorder::~Recorder() = default;
bool Recorder::IsEnabled() const { bool Recorder::IsEnabled() const {
return enabled.load(std::memory_order_relaxed); return enabled.load(std::memory_order_relaxed);
} }
void Recorder::RegisterRequest(const Kernel::KClientSession* client_session, void Recorder::RegisterRequest(const std::shared_ptr<Kernel::ClientSession>& client_session,
const Kernel::Thread* client_thread) { const std::shared_ptr<Kernel::Thread>& client_thread) {
const u32 thread_id = client_thread->GetThreadId(); const u32 thread_id = client_thread->GetThreadId();
if (auto owner_process = client_thread->owner_process) { if (auto owner_process = client_thread->owner_process.lock()) {
RequestRecord record = { RequestRecord record = {/* id */ ++record_count,
.id = ++record_count, /* status */ RequestStatus::Sent,
.status = RequestStatus::Sent, /* client_process */ GetObjectInfo(owner_process.get()),
.client_process = GetObjectInfo(owner_process), /* client_thread */ GetObjectInfo(client_thread.get()),
.client_thread = GetObjectInfo(client_thread), /* client_session */ GetObjectInfo(client_session.get()),
.client_session = GetObjectInfo(client_session), /* client_port */ GetObjectInfo(client_session->parent->port.get()),
.client_port = GetObjectInfo(client_session->GetParent()->GetParent()), /* server_process */ {},
.server_process = {}, /* server_thread */ {},
.server_thread = {}, /* server_session */ GetObjectInfo(client_session->parent->server)};
.server_session = GetObjectInfo(&client_session->GetParent()->GetServerSession()),
};
record_map.insert_or_assign(thread_id, std::make_unique<RequestRecord>(record)); record_map.insert_or_assign(thread_id, std::make_unique<RequestRecord>(record));
client_session_map.insert_or_assign(thread_id, client_session); client_session_map.insert_or_assign(thread_id, client_session);
@ -74,10 +67,10 @@ void Recorder::RegisterRequest(const Kernel::KClientSession* client_session,
} }
} }
void Recorder::SetRequestInfo(const Kernel::Thread* client_thread, void Recorder::SetRequestInfo(const std::shared_ptr<Kernel::Thread>& client_thread,
std::vector<u32> untranslated_cmdbuf, std::vector<u32> untranslated_cmdbuf,
std::vector<u32> translated_cmdbuf, std::vector<u32> translated_cmdbuf,
const Kernel::Thread* server_thread) { const std::shared_ptr<Kernel::Thread>& server_thread) {
const u32 thread_id = client_thread->GetThreadId(); const u32 thread_id = client_thread->GetThreadId();
if (!record_map.count(thread_id)) { if (!record_map.count(thread_id)) {
// This is possible when the recorder is enabled after application started // This is possible when the recorder is enabled after application started
@ -91,36 +84,30 @@ void Recorder::SetRequestInfo(const Kernel::Thread* client_thread,
record.translated_request_cmdbuf = std::move(translated_cmdbuf); record.translated_request_cmdbuf = std::move(translated_cmdbuf);
if (server_thread) { if (server_thread) {
if (auto owner_process = server_thread->owner_process) { if (auto owner_process = server_thread->owner_process.lock()) {
record.server_process = GetObjectInfo(owner_process); record.server_process = GetObjectInfo(owner_process.get());
} }
record.server_thread = GetObjectInfo(server_thread); record.server_thread = GetObjectInfo(server_thread.get());
} else { } else {
record.is_hle = true; record.is_hle = true;
} }
// Function name // Function name
ASSERT_MSG(client_session_map.count(thread_id), "Client session is missing"); ASSERT_MSG(client_session_map.count(thread_id), "Client session is missing");
const auto client_session = client_session_map[thread_id]; const auto& client_session = client_session_map[thread_id];
if (client_session->parent->port &&
client_session->parent->port->GetServerPort()->hle_handler) {
SCOPE_EXIT({ record.function_name = std::dynamic_pointer_cast<Service::ServiceFrameworkBase>(
client_session_map.erase(thread_id); client_session->parent->port->GetServerPort()->hle_handler)
InvokeCallbacks(record);
});
auto port = client_session->GetParent()->GetParent();
if (!port) {
return;
}
auto hle_handler = port->GetParent()->GetServerPort().GetHleHandler();
if (hle_handler) {
record.function_name = std::dynamic_pointer_cast<Service::ServiceFrameworkBase>(hle_handler)
->GetFunctionName({record.untranslated_request_cmdbuf[0]}); ->GetFunctionName({record.untranslated_request_cmdbuf[0]});
} }
client_session_map.erase(thread_id);
InvokeCallbacks(record);
} }
void Recorder::SetReplyInfo(const Kernel::Thread* client_thread, void Recorder::SetReplyInfo(const std::shared_ptr<Kernel::Thread>& client_thread,
std::vector<u32> untranslated_cmdbuf, std::vector<u32> untranslated_cmdbuf,
std::vector<u32> translated_cmdbuf) { std::vector<u32> translated_cmdbuf) {
const u32 thread_id = client_thread->GetThreadId(); const u32 thread_id = client_thread->GetThreadId();
@ -142,7 +129,7 @@ void Recorder::SetReplyInfo(const Kernel::Thread* client_thread,
record_map.erase(thread_id); record_map.erase(thread_id);
} }
void Recorder::SetHLEUnimplemented(const Kernel::Thread* client_thread) { void Recorder::SetHLEUnimplemented(const std::shared_ptr<Kernel::Thread>& client_thread) {
const u32 thread_id = client_thread->GetThreadId(); const u32 thread_id = client_thread->GetThreadId();
if (!record_map.count(thread_id)) { if (!record_map.count(thread_id)) {
// This is possible when the recorder is enabled after application started // This is possible when the recorder is enabled after application started
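
The RequestRecord construction above moves from C++20 designated initializers to positional aggregate initialization with field-name comments. Both spell out the same record; a minimal comparison on a made-up aggregate:

struct ExampleRecord {
    int id;
    int status;
};

// Designated initializers (C++20) name each field explicitly:
ExampleRecord a{.id = 1, .status = 2};

// Positional initialization relies on member order, with comments as labels:
ExampleRecord b{/* id */ 1, /* status */ 2};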

View File

@ -15,9 +15,8 @@
#include "common/common_types.h" #include "common/common_types.h"
namespace Kernel { namespace Kernel {
class KClientSession; class ClientSession;
class Thread; class Thread;
enum class ClassTokenType : u32;
} // namespace Kernel } // namespace Kernel
namespace IPCDebugger { namespace IPCDebugger {
@ -28,7 +27,7 @@ namespace IPCDebugger {
struct ObjectInfo { struct ObjectInfo {
std::string type; std::string type;
std::string name; std::string name;
Kernel::ClassTokenType id; int id = -1;
}; };
/** /**
@ -81,28 +80,28 @@ public:
/** /**
* Registers a request into the recorder. The request is then associated with the client thread. * Registers a request into the recorder. The request is then associated with the client thread.
*/ */
void RegisterRequest(const Kernel::KClientSession* client_session, void RegisterRequest(const std::shared_ptr<Kernel::ClientSession>& client_session,
const Kernel::Thread* client_thread); const std::shared_ptr<Kernel::Thread>& client_thread);
/** /**
* Sets the request information of the request record associated with the client thread. * Sets the request information of the request record associated with the client thread.
* When the server thread is empty, the request will be considered HLE. * When the server thread is empty, the request will be considered HLE.
*/ */
void SetRequestInfo(const Kernel::Thread* client_thread, std::vector<u32> untranslated_cmdbuf, void SetRequestInfo(const std::shared_ptr<Kernel::Thread>& client_thread,
std::vector<u32> translated_cmdbuf, std::vector<u32> untranslated_cmdbuf, std::vector<u32> translated_cmdbuf,
const Kernel::Thread* server_thread = nullptr); const std::shared_ptr<Kernel::Thread>& server_thread = {});
/** /**
* Sets the reply information of the request record associated with the client thread. * Sets the reply information of the request record associated with the client thread.
* The request is then unlinked from the client thread. * The request is then unlinked from the client thread.
*/ */
void SetReplyInfo(const Kernel::Thread* client_thread, std::vector<u32> untranslated_cmdbuf, void SetReplyInfo(const std::shared_ptr<Kernel::Thread>& client_thread,
std::vector<u32> translated_cmdbuf); std::vector<u32> untranslated_cmdbuf, std::vector<u32> translated_cmdbuf);
/** /**
* Set the status of a record to HLEUnimplemented. * Set the status of a record to HLEUnimplemented.
*/ */
void SetHLEUnimplemented(const Kernel::Thread* client_thread); void SetHLEUnimplemented(const std::shared_ptr<Kernel::Thread>& client_thread);
/** /**
* Set the status of the debugger (enabled/disabled). * Set the status of the debugger (enabled/disabled).
@ -119,7 +118,7 @@ private:
int record_count{}; int record_count{};
// Temporary client session map for function name handling // Temporary client session map for function name handling
std::unordered_map<u32, Kernel::KClientSession*> client_session_map; std::unordered_map<u32, std::shared_ptr<Kernel::ClientSession>> client_session_map;
std::atomic_bool enabled{false}; std::atomic_bool enabled{false};

View File

@ -1,79 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <vector>
#include <boost/serialization/export.hpp>
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"
namespace Kernel {
class Thread;
enum class ArbitrationType : u32 {
Signal,
WaitIfLessThan,
DecrementAndWaitIfLessThan,
WaitIfLessThanWithTimeout,
DecrementAndWaitIfLessThanWithTimeout,
};
/**
* Address arbiters are underlying kernel synchronization objects that can be created and used via
* supervisor calls (SVCs). They function as a sort of global lock. Typically, games and other CTR
* applications use them as the underlying mechanism to implement thread-safe barriers, events,
* and semaphores.
**/
class KAddressArbiter final : public KAutoObjectWithSlabHeapAndContainer<KAddressArbiter>,
public WakeupCallback {
KERNEL_AUTOOBJECT_TRAITS(KAddressArbiter, KAutoObject);
public:
explicit KAddressArbiter(KernelSystem& kernel);
~KAddressArbiter() override;
void Initialize(Process* owner);
uintptr_t GetPostDestroyArgument() const override {
return reinterpret_cast<uintptr_t>(m_owner);
}
static void PostDestroy(uintptr_t arg);
Process* GetOwner() const override {
return m_owner;
}
ResultCode ArbitrateAddress(Thread* thread, ArbitrationType type, VAddr address, s32 value,
u64 nanoseconds);
private:
void WaitThread(Thread* thread, VAddr wait_address);
u64 ResumeAllThreads(VAddr address);
bool ResumeHighestPriorityThread(VAddr address);
void WakeUp(ThreadWakeupReason reason, Thread* thread, KSynchronizationObject* object) override;
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
public:
Process* m_owner{};
std::string m_name{};
std::vector<Thread*> m_waiting_threads;
class Callback;
std::shared_ptr<Callback> m_timeout_callback;
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KAddressArbiter)
BOOST_CLASS_EXPORT_KEY(Kernel::KAddressArbiter::Callback)
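
The class comment above says arbiters back user-mode barriers, events and semaphores but gives no usage shape. A hedged sketch in terms of the ArbitrateAddress declaration above; the counter layout, the thread arguments and the wake count are simplified assumptions:

// Sketch: a guest-style "wait until a counter becomes positive" pattern.
// Error handling and timeouts are omitted.
void WaitForWork(Kernel::KAddressArbiter* arbiter, Kernel::Thread* waiter, VAddr counter_addr) {
    // Blocks `waiter` while the s32 at counter_addr is less than 1.
    arbiter->ArbitrateAddress(waiter, Kernel::ArbitrationType::WaitIfLessThan, counter_addr,
                              /*value=*/1, /*nanoseconds=*/0);
}

void PostWork(Kernel::KAddressArbiter* arbiter, Kernel::Thread* poster, VAddr counter_addr) {
    // After the counter in guest memory has been incremented, wake a waiter.
    arbiter->ArbitrateAddress(poster, Kernel::ArbitrationType::Signal, counter_addr,
                              /*value=*/1, /*nanoseconds=*/0);
}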

View File

@ -1,23 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/kernel.h"
namespace Kernel {
KAutoObject* KAutoObject::Create(KAutoObject* obj) {
obj->m_ref_count = 1;
return obj;
}
void KAutoObject::RegisterWithKernel() {
m_kernel.RegisterKernelObject(this);
}
void KAutoObject::UnregisterWithKernel(KernelSystem& kernel, KAutoObject* self) {
kernel.UnregisterKernelObject(self);
}
} // namespace Kernel

View File

@ -1,296 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <atomic>
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
namespace Kernel {
class KernelSystem;
class Process;
using Handle = u32;
enum class ClassTokenType : u32 {
KAutoObject = 0,
KSynchronizationObject = 1,
KSemaphore = 27,
KEvent = 31,
KTimer = 53,
KMutex = 57,
Debug = 77,
KServerPort = 85,
DmaObject = 89,
KClientPort = 101,
CodeSet = 104,
KSession = 112,
Thread = 141,
KServerSession = 149,
KAddressArbiter = 152,
KClientSession = 165,
KPort = 168,
KSharedMemory = 176,
Process = 197,
KResourceLimit = 200,
};
DECLARE_ENUM_FLAG_OPERATORS(ClassTokenType)
#define KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, ATTRIBUTE) \
private: \
static constexpr inline const char* const TypeName = #CLASS; \
static constexpr inline auto ClassToken = ClassTokenType::CLASS; \
\
public: \
CITRA_NON_COPYABLE(CLASS); \
CITRA_NON_MOVEABLE(CLASS); \
\
using BaseClass = BASE_CLASS; \
static constexpr TypeObj GetStaticTypeObj() { return TypeObj(TypeName, ClassToken); } \
static constexpr const char* GetStaticTypeName() { return TypeName; } \
virtual TypeObj GetTypeObj() ATTRIBUTE { return GetStaticTypeObj(); } \
virtual const char* GetTypeName() ATTRIBUTE { return GetStaticTypeName(); } \
\
private: \
constexpr bool operator!=(const TypeObj& rhs)
#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, const override)
class KAutoObject {
protected:
class TypeObj {
public:
constexpr explicit TypeObj(const char* n, ClassTokenType tok)
: m_name(n), m_class_token(tok) {}
constexpr const char* GetName() const {
return m_name;
}
constexpr ClassTokenType GetClassToken() const {
return m_class_token;
}
constexpr bool operator==(const TypeObj& rhs) const {
return this->GetClassToken() == rhs.GetClassToken();
}
constexpr bool operator!=(const TypeObj& rhs) const {
return this->GetClassToken() != rhs.GetClassToken();
}
constexpr bool IsDerivedFrom(const TypeObj& rhs) const {
return (this->GetClassToken() | rhs.GetClassToken()) == this->GetClassToken();
}
private:
const char* m_name;
ClassTokenType m_class_token;
};
private:
KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const);
public:
explicit KAutoObject(KernelSystem& kernel) : m_kernel(kernel) {
RegisterWithKernel();
}
virtual ~KAutoObject() = default;
static KAutoObject* Create(KAutoObject* ptr);
// Destroy is responsible for destroying the auto object's resources when ref_count hits zero.
virtual void Destroy() {
UNIMPLEMENTED();
}
// Finalize is responsible for cleaning up resource, but does not destroy the object.
virtual void Finalize() {}
virtual Process* GetOwner() const {
return nullptr;
}
u32 GetReferenceCount() const {
return m_ref_count.load();
}
bool IsDerivedFrom(const TypeObj& rhs) const {
return this->GetTypeObj().IsDerivedFrom(rhs);
}
bool IsDerivedFrom(const KAutoObject& rhs) const {
return this->IsDerivedFrom(rhs.GetTypeObj());
}
template <typename Derived>
Derived DynamicCast() {
static_assert(std::is_pointer_v<Derived>);
using DerivedType = std::remove_pointer_t<Derived>;
if (this->IsDerivedFrom(DerivedType::GetStaticTypeObj())) {
return static_cast<Derived>(this);
} else {
return nullptr;
}
}
template <typename Derived>
const Derived DynamicCast() const {
static_assert(std::is_pointer_v<Derived>);
using DerivedType = std::remove_pointer_t<Derived>;
if (this->IsDerivedFrom(DerivedType::GetStaticTypeObj())) {
return static_cast<Derived>(this);
} else {
return nullptr;
}
}
bool Open() {
// Atomically increment the reference count, only if it's positive.
u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire);
do {
if (cur_ref_count == 0) {
return false;
}
ASSERT(cur_ref_count < cur_ref_count + 1);
} while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count + 1,
std::memory_order_relaxed));
return true;
}
void Close() {
// Atomically decrement the reference count, not allowing it to become negative.
u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire);
do {
ASSERT(cur_ref_count > 0);
} while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1,
std::memory_order_acq_rel));
// If ref count hits zero, destroy the object.
if (cur_ref_count - 1 == 0) {
KernelSystem& kernel = m_kernel;
this->Destroy();
KAutoObject::UnregisterWithKernel(kernel, this);
}
}
private:
void RegisterWithKernel();
static void UnregisterWithKernel(KernelSystem& kernel, KAutoObject* self);
protected:
KernelSystem& m_kernel;
private:
std::atomic<u32> m_ref_count{};
};
template <typename T>
class KScopedAutoObject {
public:
CITRA_NON_COPYABLE(KScopedAutoObject);
constexpr KScopedAutoObject() = default;
constexpr KScopedAutoObject(T* o) : m_obj(o) {
if (m_obj != nullptr) {
m_obj->Open();
}
}
~KScopedAutoObject() {
if (m_obj != nullptr) {
m_obj->Close();
}
m_obj = nullptr;
}
template <typename U>
requires(std::derived_from<T, U> || std::derived_from<U, T>)
constexpr KScopedAutoObject(KScopedAutoObject<U>&& rhs) {
if constexpr (std::derived_from<U, T>) {
// Upcast.
m_obj = rhs.m_obj;
rhs.m_obj = nullptr;
} else {
// Downcast.
T* derived = nullptr;
if (rhs.m_obj != nullptr) {
derived = rhs.m_obj->template DynamicCast<T*>();
if (derived == nullptr) {
rhs.m_obj->Close();
}
}
m_obj = derived;
rhs.m_obj = nullptr;
}
}
constexpr KScopedAutoObject<T>& operator=(KScopedAutoObject<T>&& rhs) {
rhs.Swap(*this);
return *this;
}
constexpr T* operator->() {
return m_obj;
}
constexpr T& operator*() {
return *m_obj;
}
constexpr void Reset(T* o) {
KScopedAutoObject(o).Swap(*this);
}
constexpr T* GetPointerUnsafe() {
return m_obj;
}
constexpr T* GetPointerUnsafe() const {
return m_obj;
}
constexpr T* ReleasePointerUnsafe() {
T* ret = m_obj;
m_obj = nullptr;
return ret;
}
constexpr bool IsNull() const {
return m_obj == nullptr;
}
constexpr bool IsNotNull() const {
return m_obj != nullptr;
}
private:
template <typename U>
friend class KScopedAutoObject;
private:
T* m_obj{};
private:
constexpr void Swap(KScopedAutoObject& rhs) noexcept {
std::swap(m_obj, rhs.m_obj);
}
};
} // namespace Kernel
#define CONSTRUCT_KERNEL_OBJECT(T) \
namespace boost::serialization { \
template <class Archive> \
void load_construct_data(Archive& ar, T* t, const unsigned int file_version) { \
::new (t) T(Core::Global<Kernel::KernelSystem>()); \
} \
}
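
The IsDerivedFrom check above works because each token is chosen so that a class's token contains all the bits of its ancestors' tokens; (this | parent) == this exactly when every parent bit is already set. A few worked cases using the ClassTokenType values listed above:

static_assert((31u | 0u) == 31u,  "KEvent (31) is derived from KAutoObject (0)");
static_assert((31u | 1u) == 31u,  "KEvent (31) is derived from KSynchronizationObject (1)");
static_assert((31u | 53u) != 31u, "KEvent (31) is not derived from KTimer (53)");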

View File

@ -1,31 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include "core/hle/kernel/k_auto_object_container.h"
namespace Kernel {
void KAutoObjectWithListContainer::Register(KAutoObject* obj) {
// KScopedLightMutex lk{m_mutex};
m_object_list.push_back(*obj);
}
void KAutoObjectWithListContainer::Unregister(KAutoObject* obj) {
// KScopedLightMutex lk{m_mutex};
for (auto it = m_object_list.begin(); it != m_object_list.end(); it++) {
if (std::addressof(*it) == obj) {
m_object_list.erase(it);
return;
}
}
}
size_t KAutoObjectWithListContainer::GetOwnedCount(Process* owner) {
// KScopedLightMutex lk{m_mutex};
return std::count_if(m_object_list.begin(), m_object_list.end(),
[&](const auto& obj) { return obj.GetOwner() == owner; });
}
} // namespace Kernel

View File

@ -1,37 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/common_funcs.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_linked_list.h"
namespace Kernel {
class KernelSystem;
class Process;
class KAutoObjectWithListContainer {
public:
CITRA_NON_COPYABLE(KAutoObjectWithListContainer);
CITRA_NON_MOVEABLE(KAutoObjectWithListContainer);
using ListType = KLinkedList<KAutoObject>;
KAutoObjectWithListContainer(KernelSystem& kernel) : m_object_list(kernel) {}
void Initialize() {}
void Finalize() {}
void Register(KAutoObject* obj);
void Unregister(KAutoObject* obj);
size_t GetOwnedCount(Process* owner);
private:
// KLightMutex m_mutex;
ListType m_object_list;
};
} // namespace Kernel

View File

@ -1,69 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/export.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
SERIALIZE_EXPORT_IMPL(Kernel::KClientPort)
namespace Kernel {
KClientPort::KClientPort(KernelSystem& kernel) : KAutoObject(kernel) {}
KClientPort::~KClientPort() = default;
void KClientPort::Initialize(KPort* parent, s32 max_sessions) {
// Set member variables.
m_parent = parent;
m_max_sessions = max_sessions;
}
ResultCode KClientPort::CreateSession(KClientSession** out) {
R_UNLESS(m_active_sessions < m_max_sessions, ERR_MAX_CONNECTIONS_REACHED);
m_active_sessions++;
// Allocate a new session.
KSession* session = KSession::Create(m_kernel);
// Initialize the session.
session->Initialize(this);
// Register the session.
KSession::Register(m_kernel, session);
// Wake the threads waiting on the ServerPort
m_server_port->WakeupAllWaitingThreads();
// We succeeded, so set the output.
*out = std::addressof(session->GetClientSession());
R_SUCCEED();
}
void KClientPort::ConnectionClosed() {
ASSERT(m_active_sessions > 0);
--m_active_sessions;
}
template <class Archive>
void KClientPort::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KAutoObject>(*this);
ar& m_server_port;
ar& m_max_sessions;
ar& m_active_sessions;
ar& m_name;
}
SERIALIZE_IMPL(KClientPort)
} // namespace Kernel
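
CreateSession above relies on the R_UNLESS and R_SUCCEED result macros; a simplified sketch of how such macros typically expand is shown below. This is behaviour only, not the project's actual definitions, which may carry extra bookkeeping:

#define R_UNLESS(expr, res)                                                                        \
    do {                                                                                           \
        if (!(expr)) {                                                                             \
            return (res); /* bail out early with the given ResultCode */                           \
        }                                                                                          \
    } while (0)

#define R_SUCCEED() return RESULT_SUCCESS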

View File

@ -1,52 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/result.h"
namespace Kernel {
class KClientSession;
class KClientPort final : public KAutoObject {
KERNEL_AUTOOBJECT_TRAITS(KClientPort, KAutoObject);
public:
explicit KClientPort(KernelSystem& kernel);
~KClientPort() override;
void Initialize(KPort* parent, s32 max_sessions);
const KPort* GetParent() const {
return m_parent;
}
KPort* GetParent() {
return m_parent;
}
ResultCode CreateSession(KClientSession** out);
void ConnectionClosed();
private:
KPort* m_parent{};
u32 m_max_sessions{};
u32 m_active_sessions{};
std::string m_name;
friend class KernelSystem;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KClientPort)
CONSTRUCT_KERNEL_OBJECT(Kernel::KClientPort)

View File

@ -1,40 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::ClientSession)
namespace Kernel {
KClientSession::KClientSession(KernelSystem& kernel) : KAutoObject(kernel) {}
KClientSession::~KClientSession() = default;
void KClientSession::Destroy() {
m_parent->OnClientClosed();
m_parent->Close();
}
void KClientSession::OnServerClosed() {}
ResultCode KClientSession::SendSyncRequest(Thread* thread) {
// Signal the server session that new data is available
return m_parent->GetServerSession().HandleSyncRequest(thread);
}
template <class Archive>
void KClientSession::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KAutoObject>(*this);
ar& m_parent;
}
} // namespace Kernel

View File

@ -1,49 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <boost/serialization/export.hpp>
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/result.h"
namespace Kernel {
class KSession;
class Thread;
class KClientSession final : public KAutoObject {
KERNEL_AUTOOBJECT_TRAITS(KClientSession, KAutoObject);
public:
explicit KClientSession(KernelSystem& kernel);
~KClientSession() override;
void Initialize(KSession* parent) {
// Set member variables.
m_parent = parent;
}
void Destroy() override;
KSession* GetParent() const {
return m_parent;
}
ResultCode SendSyncRequest(Thread* thread);
void OnServerClosed();
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
private:
KSession* m_parent{};
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KClientSession)

View File

@ -1,76 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <vector>
#include <boost/serialization/vector.hpp>
namespace Kernel {
class CodeSet {
public:
CodeSet() = default;
~CodeSet() = default;
struct Segment {
std::size_t offset = 0;
VAddr addr = 0;
u32 size = 0;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version) {
ar& offset;
ar& addr;
ar& size;
}
};
Segment& CodeSegment() {
return segments[0];
}
const Segment& CodeSegment() const {
return segments[0];
}
Segment& RODataSegment() {
return segments[1];
}
const Segment& RODataSegment() const {
return segments[1];
}
Segment& DataSegment() {
return segments[2];
}
const Segment& DataSegment() const {
return segments[2];
}
std::vector<u8> memory;
std::array<Segment, 3> segments;
VAddr entrypoint;
u64 program_id;
std::string name;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version) {
ar& memory;
ar& segments;
ar& entrypoint;
ar& program_id;
ar& name;
}
};
} // namespace Kernel

View File

@ -1,78 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::KEvent)
namespace Kernel {
KEvent::KEvent(KernelSystem& kernel) : KAutoObjectWithSlabHeapAndContainer(kernel) {}
KEvent::~KEvent() = default;
void KEvent::Initialize(Process* owner, ResetType reset_type) {
// Open a reference to the owner process.
if (owner) {
owner->Open();
m_owner = owner;
}
// Set member variables.
m_reset_type = reset_type;
}
void KEvent::PostDestroy(uintptr_t arg) {
Process* owner = reinterpret_cast<Process*>(arg);
if (owner != nullptr) {
owner->ReleaseResource(ResourceLimitType::Event, 1);
owner->Close();
}
}
bool KEvent::ShouldWait(const Thread* thread) const {
return !m_signaled;
}
void KEvent::Acquire(Thread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
if (m_reset_type == ResetType::OneShot) {
m_signaled = false;
}
}
void KEvent::Signal() {
m_signaled = true;
this->WakeupAllWaitingThreads();
}
void KEvent::Clear() {
m_signaled = false;
}
void KEvent::WakeupAllWaitingThreads() {
KSynchronizationObject::WakeupAllWaitingThreads();
if (m_reset_type == ResetType::Pulse) {
m_signaled = false;
}
}
template <class Archive>
void KEvent::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KSynchronizationObject>(*this);
ar& m_owner;
ar& m_reset_type;
ar& m_signaled;
}
SERIALIZE_IMPL(KEvent)
} // namespace Kernel
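
For readers mapping the reset types used above to behaviour, a hedged sketch against the public members in this file; the Sticky case is inferred from Acquire() clearing only OneShot events and is not shown explicitly in the excerpt:

void IllustrateResetTypes(Kernel::KEvent* one_shot, Kernel::KEvent* sticky, Kernel::KEvent* pulse) {
    one_shot->Signal(); // waiters wake; the next Acquire() clears it again
    sticky->Signal();   // waiters wake; stays signaled until an explicit Clear()
    pulse->Signal();    // threads already waiting wake; cleared right afterwards
    sticky->Clear();    // explicit reset, mainly meaningful for Sticky events
}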

View File

@ -1,67 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <boost/serialization/export.hpp>
#include "core/global.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KSynchronizationObject> {
KERNEL_AUTOOBJECT_TRAITS(KEvent, KSynchronizationObject);
public:
explicit KEvent(KernelSystem& kernel);
~KEvent() override;
std::string GetName() const {
return m_name;
}
void SetName(const std::string& name) {
m_name = name;
}
void Initialize(Process* owner, ResetType reset_type);
uintptr_t GetPostDestroyArgument() const override {
return reinterpret_cast<uintptr_t>(m_owner);
}
static void PostDestroy(uintptr_t arg);
Process* GetOwner() const override {
return m_owner;
}
ResetType GetResetType() const {
return m_reset_type;
}
bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
void WakeupAllWaitingThreads() override;
void Signal();
void Clear();
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
private:
Process* m_owner{};
ResetType m_reset_type{};
bool m_signaled{};
std::string m_name;
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KEvent)
CONSTRUCT_KERNEL_OBJECT(Kernel::KEvent)

View File

@ -1,107 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/array.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include "common/archives.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/process.h"
namespace Kernel {
ResultCode KHandleTable::Finalize() {
// Close and free all entries.
for (size_t i = 0; i < m_table_size; i++) {
if (KAutoObject* obj = m_objects[i]; obj != nullptr) {
obj->Close();
}
}
R_SUCCEED();
}
bool KHandleTable::Remove(Handle handle) {
// Don't allow removal of a pseudo-handle.
if (handle == KernelHandle::CurrentProcess || handle == KernelHandle::CurrentThread)
[[unlikely]] {
return false;
}
// Handles must not have reserved bits set.
const auto handle_pack = HandlePack(handle);
if (handle_pack.reserved != 0) [[unlikely]] {
return false;
}
// Find the object and free the entry.
KAutoObject* obj = nullptr;
{
// KScopedLightMutex lk{m_mutex};
if (this->IsValidHandle(handle)) [[likely]] {
const auto index = handle_pack.index;
obj = m_objects[index];
this->FreeEntry(index);
} else {
return false;
}
}
// Close the object.
obj->Close();
return true;
}
ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
// KScopedLightMutex lk{m_mutex};
// Never exceed our capacity.
R_UNLESS(m_count < m_table_size, ERR_OUT_OF_HANDLES);
// Allocate entry, set output handle.
const auto linear_id = this->AllocateLinearId();
const auto index = this->AllocateEntry();
m_entry_infos[index].linear_id = linear_id;
m_objects[index] = obj;
obj->Open();
*out_handle = EncodeHandle(static_cast<u16>(index), linear_id);
R_SUCCEED();
}
KScopedAutoObject<KAutoObject> KHandleTable::GetObjectForIpc(Handle handle,
Thread* cur_thread) const {
// Handle pseudo-handles.
ASSERT(cur_thread != nullptr);
if (handle == KernelHandle::CurrentProcess) {
auto* cur_process = cur_thread->GetOwner();
ASSERT(cur_process != nullptr);
return cur_process;
}
if (handle == KernelHandle::CurrentThread) {
return cur_thread;
}
return this->GetObjectForIpcWithoutPseudoHandle(handle);
}
template <class Archive>
void KHandleTable::serialize(Archive& ar, const u32 file_version) {
ar& m_entry_infos;
ar& m_objects;
ar& m_free_head_index;
ar& m_table_size;
ar& m_next_id;
ar& m_max_count;
ar& m_next_linear_id;
ar& m_count;
}
SERIALIZE_IMPL(KHandleTable)
} // namespace Kernel

View File

@ -1,279 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <algorithm>
#include <vector>
#include "common/common_types.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"
namespace Kernel {
enum KernelHandle : Handle {
CurrentThread = 0xFFFF8000,
CurrentProcess = 0xFFFF8001,
};
class KHandleTable {
CITRA_NON_COPYABLE(KHandleTable);
CITRA_NON_MOVEABLE(KHandleTable);
public:
static constexpr size_t MaxTableSize = 1024;
public:
explicit KHandleTable(KernelSystem& kernel) : m_kernel(kernel) {}
ResultCode Initialize(s32 size) {
// KScopedLightMutex lk{m_mutex};
// Initialize all fields.
m_max_count = 0;
m_table_size = static_cast<s16>((size <= 0) ? MaxTableSize : size);
m_next_linear_id = MinLinearId;
m_count = 0;
m_free_head_index = -1;
// Create the arrays
m_objects.resize(m_table_size);
m_entry_infos.resize(m_table_size);
// Free all entries.
for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) {
m_objects[i] = nullptr;
m_entry_infos[i].next_free_index = static_cast<s16>(i - 1);
m_free_head_index = i;
}
R_SUCCEED();
}
size_t GetTableSize() const {
return m_table_size;
}
size_t GetCount() const {
return m_count;
}
size_t GetMaxCount() const {
return m_max_count;
}
ResultCode Finalize();
bool Remove(Handle handle);
ResultCode Add(Handle* out_handle, KAutoObject* obj);
template <typename T = KAutoObject>
KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
// KScopedLightMutex lk{m_mutex};
if constexpr (std::is_same_v<T, KAutoObject>) {
return this->GetObjectImpl(handle);
} else {
if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) [[likely]] {
return obj->DynamicCast<T*>();
} else {
return nullptr;
}
}
}
template <typename T = KAutoObject>
KScopedAutoObject<T> GetObject(Handle handle) const {
// Handle pseudo-handles.
if constexpr (std::derived_from<Process, T>) {
if (handle == KernelHandle::CurrentProcess) {
auto* const cur_process = m_kernel.GetCurrentProcess();
ASSERT(cur_process != nullptr);
return cur_process;
}
} else if constexpr (std::derived_from<Thread, T>) {
if (handle == KernelHandle::CurrentThread) {
auto* const cur_thread = m_kernel.GetCurrentThreadManager().GetCurrentThread();
ASSERT(cur_thread != nullptr);
return cur_thread;
}
}
return this->template GetObjectWithoutPseudoHandle<T>(handle);
}
KScopedAutoObject<KAutoObject> GetObjectForIpcWithoutPseudoHandle(Handle handle) const {
return this->GetObjectImpl(handle);
}
KScopedAutoObject<KAutoObject> GetObjectForIpc(Handle handle, Thread* cur_thread) const;
template <typename T>
bool GetMultipleObjects(T** out, const Handle* handles, size_t num_handles) const {
// Try to convert and open all the handles.
size_t num_opened;
{
// KScopedLightMutex lk{m_mutex};
for (num_opened = 0; num_opened < num_handles; num_opened++) {
// Get the current handle.
const auto cur_handle = handles[num_opened];
// Get the object for the current handle.
KAutoObject* cur_object = this->GetObjectImpl(cur_handle);
if (cur_object == nullptr) [[unlikely]] {
break;
}
// Cast the current object to the desired type.
T* cur_t = cur_object->DynamicCast<T*>();
if (cur_t == nullptr) [[unlikely]] {
break;
}
// Open a reference to the current object.
cur_t->Open();
out[num_opened] = cur_t;
}
}
// If we converted every object, succeed.
if (num_opened == num_handles) [[likely]] {
return true;
}
// If we didn't convert every object, close the ones we opened.
for (size_t i = 0; i < num_opened; i++) {
out[i]->Close();
}
return false;
}
private:
s32 AllocateEntry() {
ASSERT(m_count < m_table_size);
const auto index = m_free_head_index;
m_free_head_index = m_entry_infos[index].GetNextFreeIndex();
m_max_count = std::max(m_max_count, ++m_count);
return index;
}
void FreeEntry(s32 index) {
ASSERT(m_count > 0);
m_objects[index] = nullptr;
m_entry_infos[index].next_free_index = static_cast<s16>(m_free_head_index);
m_free_head_index = index;
--m_count;
}
u16 AllocateLinearId() {
const u16 id = m_next_linear_id++;
if (m_next_linear_id > MaxLinearId) {
m_next_linear_id = MinLinearId;
}
return id;
}
bool IsValidHandle(Handle handle) const {
// Unpack the handle.
const auto handle_pack = HandlePack(handle);
const auto raw_value = handle_pack.raw;
const auto index = handle_pack.index;
const auto linear_id = handle_pack.linear_id;
const auto reserved = handle_pack.reserved;
ASSERT(reserved == 0);
// Validate our indexing information.
if (raw_value == 0) [[unlikely]] {
return false;
}
if (linear_id == 0) [[unlikely]] {
return false;
}
if (index >= m_table_size) [[unlikely]] {
return false;
}
// Check that there's an object, and our serial id is correct.
if (m_objects[index] == nullptr) [[unlikely]] {
return false;
}
if (m_entry_infos[index].GetLinearId() != linear_id) [[unlikely]] {
return false;
}
return true;
}
KAutoObject* GetObjectImpl(Handle handle) const {
// Handles must not have reserved bits set.
const auto handle_pack = HandlePack(handle);
if (handle_pack.reserved != 0) [[unlikely]] {
return nullptr;
}
if (this->IsValidHandle(handle)) [[likely]] {
return m_objects[handle_pack.index];
} else {
return nullptr;
}
}
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
private:
union HandlePack {
constexpr HandlePack() = default;
constexpr HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {}
u32 raw{};
BitField<0, 15, u32> index;
BitField<15, 15, u32> linear_id;
BitField<30, 2, u32> reserved;
};
static constexpr Handle EncodeHandle(u16 index, u16 linear_id) {
HandlePack handle{};
handle.index.Assign(index);
handle.linear_id.Assign(linear_id);
handle.reserved.Assign(0);
return handle.raw;
}
private:
static constexpr u16 MinLinearId = 1;
static constexpr u16 MaxLinearId = 0x7FFF;
union EntryInfo {
u16 linear_id;
s16 next_free_index;
constexpr u16 GetLinearId() const {
return linear_id;
}
constexpr s32 GetNextFreeIndex() const {
return next_free_index;
}
};
private:
KernelSystem& m_kernel;
std::vector<EntryInfo> m_entry_infos{};
std::vector<KAutoObject*> m_objects{};
s32 m_free_head_index{};
u16 m_table_size{};
u16 m_next_id{};
u16 m_max_count{};
u16 m_next_linear_id{};
u16 m_count{};
// KLightMutex mutex;
};
} // namespace Kernel
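A short usage sketch of the table above (illustrative only; assumes `kernel` is a KernelSystem& and `obj` a live KAutoObject*; the bit masks mirror the HandlePack layout):
KHandleTable table{kernel};
table.Initialize(0); // size <= 0 selects MaxTableSize (1024) entries

Handle handle{};
table.Add(std::addressof(handle), obj); // opens a reference and encodes index + linear id

// The handle packs the table index into bits 0-14 and the linear id into bits 15-29,
// exactly as the HandlePack bitfields describe.
const u32 index = handle & 0x7FFF;
const u32 linear_id = (handle >> 15) & 0x7FFF;

// GetObject() resolves pseudo-handles first, then validates index and linear id.
if (auto scoped = table.GetObject(handle); scoped.IsNotNull()) {
    // use the object; the scoped reference keeps it open
}
table.Remove(handle); // frees the entry and closes the table's reference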

View File

@ -1,237 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/intrusive_list.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
class KernelSystem;
class KLinkedListNode : public Common::IntrusiveListBaseNode<KLinkedListNode>,
public KSlabAllocated<KLinkedListNode> {
public:
explicit KLinkedListNode(KernelSystem&) {}
KLinkedListNode() = default;
void Initialize(void* it) {
m_item = it;
}
void* GetItem() const {
return m_item;
}
private:
void* m_item = nullptr;
};
template <typename T>
class KLinkedList : private Common::IntrusiveListBaseTraits<KLinkedListNode>::ListType {
private:
using BaseList = Common::IntrusiveListBaseTraits<KLinkedListNode>::ListType;
public:
template <bool Const>
class Iterator;
using value_type = T;
using size_type = size_t;
using difference_type = ptrdiff_t;
using pointer = value_type*;
using const_pointer = const value_type*;
using reference = value_type&;
using const_reference = const value_type&;
using iterator = Iterator<false>;
using const_iterator = Iterator<true>;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
template <bool Const>
class Iterator {
private:
using BaseIterator = BaseList::iterator;
friend class KLinkedList;
public:
using iterator_category = std::bidirectional_iterator_tag;
using value_type = typename KLinkedList::value_type;
using difference_type = typename KLinkedList::difference_type;
using pointer = std::conditional_t<Const, KLinkedList::const_pointer, KLinkedList::pointer>;
using reference =
std::conditional_t<Const, KLinkedList::const_reference, KLinkedList::reference>;
public:
explicit Iterator(BaseIterator it) : m_base_it(it) {}
pointer GetItem() const {
return static_cast<pointer>(m_base_it->GetItem());
}
bool operator==(const Iterator& rhs) const {
return m_base_it == rhs.m_base_it;
}
bool operator!=(const Iterator& rhs) const {
return !(*this == rhs);
}
pointer operator->() const {
return this->GetItem();
}
reference operator*() const {
return *this->GetItem();
}
Iterator& operator++() {
++m_base_it;
return *this;
}
Iterator& operator--() {
--m_base_it;
return *this;
}
Iterator operator++(int) {
const Iterator it{*this};
++(*this);
return it;
}
Iterator operator--(int) {
const Iterator it{*this};
--(*this);
return it;
}
operator Iterator<true>() const {
return Iterator<true>(m_base_it);
}
private:
BaseIterator m_base_it;
};
public:
constexpr KLinkedList(KernelSystem& kernel_) : BaseList(), kernel{kernel_} {}
~KLinkedList() {
// Erase all elements.
for (auto it = begin(); it != end(); it = erase(it)) {
}
// Ensure we succeeded.
ASSERT(this->empty());
}
// Iterator accessors.
iterator begin() {
return iterator(BaseList::begin());
}
const_iterator begin() const {
return const_iterator(BaseList::begin());
}
iterator end() {
return iterator(BaseList::end());
}
const_iterator end() const {
return const_iterator(BaseList::end());
}
const_iterator cbegin() const {
return this->begin();
}
const_iterator cend() const {
return this->end();
}
reverse_iterator rbegin() {
return reverse_iterator(this->end());
}
const_reverse_iterator rbegin() const {
return const_reverse_iterator(this->end());
}
reverse_iterator rend() {
return reverse_iterator(this->begin());
}
const_reverse_iterator rend() const {
return const_reverse_iterator(this->begin());
}
const_reverse_iterator crbegin() const {
return this->rbegin();
}
const_reverse_iterator crend() const {
return this->rend();
}
// Content management.
using BaseList::empty;
using BaseList::size;
reference back() {
return *(--this->end());
}
const_reference back() const {
return *(--this->end());
}
reference front() {
return *this->begin();
}
const_reference front() const {
return *this->begin();
}
iterator insert(const_iterator pos, reference ref) {
KLinkedListNode* new_node = KLinkedListNode::Allocate(kernel);
ASSERT(new_node != nullptr);
new_node->Initialize(std::addressof(ref));
return iterator(BaseList::insert(pos.m_base_it, *new_node));
}
void push_back(reference ref) {
this->insert(this->end(), ref);
}
void push_front(reference ref) {
this->insert(this->begin(), ref);
}
void pop_back() {
this->erase(--this->end());
}
void pop_front() {
this->erase(this->begin());
}
iterator erase(const iterator pos) {
KLinkedListNode* freed_node = std::addressof(*pos.m_base_it);
iterator ret = iterator(BaseList::erase(pos.m_base_it));
KLinkedListNode::Free(kernel, freed_node);
return ret;
}
private:
KernelSystem& kernel;
};
} // namespace Kernel

View File

@ -1,151 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_mutex.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::KMutex)
namespace Kernel {
void ReleaseThreadMutexes(Thread* thread) {
for (KMutex* mtx : thread->m_held_mutexes) {
mtx->m_lock_count = 0;
mtx->m_holding_thread = nullptr;
mtx->WakeupAllWaitingThreads();
}
thread->m_held_mutexes.clear();
}
KMutex::KMutex(KernelSystem& kernel) : KAutoObjectWithSlabHeapAndContainer(kernel) {}
KMutex::~KMutex() = default;
void KMutex::Initialize(Process* owner, bool initial_locked) {
// Open a reference to the owner process.
if (owner) {
owner->Open();
m_owner = owner;
}
// Set default priority
m_priority = ThreadPrioLowest;
// Acquire mutex with current thread if initialized as locked
if (initial_locked) {
Thread* thread = m_kernel.GetCurrentThreadManager().GetCurrentThread();
this->Acquire(thread);
}
}
void KMutex::PostDestroy(uintptr_t arg) {
Process* owner = reinterpret_cast<Process*>(arg);
if (owner != nullptr) {
owner->ReleaseResource(ResourceLimitType::Mutex, 1);
owner->Close();
}
}
bool KMutex::ShouldWait(const Thread* thread) const {
return m_lock_count > 0 && thread != m_holding_thread;
}
void KMutex::Acquire(Thread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
// Actually "acquire" the mutex only if we don't already have it
if (m_lock_count == 0) {
m_priority = thread->m_current_priority;
thread->m_held_mutexes.insert(this);
m_holding_thread = thread;
thread->UpdatePriority();
m_kernel.PrepareReschedule();
}
m_lock_count++;
}
ResultCode KMutex::Release(Thread* thread) {
// We can only release the mutex if it's held by the calling thread.
if (thread != m_holding_thread) {
if (m_holding_thread) {
LOG_ERROR(
Kernel,
"Tried to release a mutex (owned by thread id {}) from a different thread id {}",
m_holding_thread->m_thread_id, thread->m_thread_id);
}
return ResultCode(ErrCodes::WrongLockingThread, ErrorModule::Kernel,
ErrorSummary::InvalidArgument, ErrorLevel::Permanent);
}
// Note: a mutex should never end up with a holding thread while its lock count is zero.
// The real kernel still checks for this, so we do too.
if (m_lock_count <= 0) {
return ResultCode(ErrorDescription::InvalidResultValue, ErrorModule::Kernel,
ErrorSummary::InvalidState, ErrorLevel::Permanent);
}
m_lock_count--;
// Yield to the next thread only if we've fully released the mutex
if (m_lock_count == 0) {
m_holding_thread->m_held_mutexes.erase(this);
m_holding_thread->UpdatePriority();
m_holding_thread = nullptr;
WakeupAllWaitingThreads();
m_kernel.PrepareReschedule();
}
R_SUCCEED();
}
void KMutex::AddWaitingThread(Thread* thread) {
KSynchronizationObject::AddWaitingThread(thread);
thread->m_pending_mutexes.insert(this);
this->UpdatePriority();
}
void KMutex::RemoveWaitingThread(Thread* thread) {
KSynchronizationObject::RemoveWaitingThread(thread);
thread->m_pending_mutexes.erase(this);
this->UpdatePriority();
}
void KMutex::UpdatePriority() {
if (!m_holding_thread) {
return;
}
u32 best_priority = ThreadPrioLowest;
for (const Thread* waiter : GetWaitingThreads()) {
if (waiter->m_current_priority < best_priority) {
best_priority = waiter->m_current_priority;
}
}
if (best_priority != m_priority) {
m_priority = best_priority;
m_holding_thread->UpdatePriority();
}
}
template <class Archive>
void KMutex::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KSynchronizationObject>(*this);
ar& m_lock_count;
ar& m_priority;
ar& m_holding_thread;
}
SERIALIZE_IMPL(KMutex)
} // namespace Kernel

View File

@ -1,81 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <boost/serialization/export.hpp>
#include "core/global.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class Thread;
class KMutex final : public KAutoObjectWithSlabHeapAndContainer<KMutex, KSynchronizationObject> {
KERNEL_AUTOOBJECT_TRAITS(KMutex, KSynchronizationObject);
public:
explicit KMutex(KernelSystem& kernel);
~KMutex() override;
void Initialize(Process* owner, bool initial_locked);
uintptr_t GetPostDestroyArgument() const override {
return reinterpret_cast<uintptr_t>(m_owner);
}
static void PostDestroy(uintptr_t arg);
Process* GetOwner() const override {
return m_owner;
}
u32 GetPriority() const {
return m_priority;
}
bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
void AddWaitingThread(Thread* thread) override;
void RemoveWaitingThread(Thread* thread) override;
/**
* Elevate the mutex priority to the best priority
* among the priorities of all its waiting threads.
*/
void UpdatePriority();
/**
* Attempts to release the mutex from the specified thread.
* @param thread Thread that wants to release the mutex.
* @returns The result code of the operation.
*/
ResultCode Release(Thread* thread);
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
public:
Process* m_owner{};
int m_lock_count{};
u32 m_priority{};
Thread* m_holding_thread{};
};
/**
* Releases all the mutexes held by the specified thread
* @param thread Thread that is holding the mutexes
*/
void ReleaseThreadMutexes(Thread* thread);
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KMutex)
CONSTRUCT_KERNEL_OBJECT(Kernel::KMutex)
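An illustrative sketch of the lock/unlock flow documented above (not part of the diff; assumes `mutex` is an initialized KMutex* and `thread` the current, holding Thread*):
// The first acquire records the holding thread and seeds the mutex priority.
if (!mutex->ShouldWait(thread)) {
    mutex->Acquire(thread);
}
// Recursive acquires from the holder only bump the lock count.
mutex->Acquire(thread);

// While other threads wait, UpdatePriority() raises the mutex (and thus the holder's)
// priority to the best waiter priority.

// Release() must come from the holding thread; the last release wakes the waiters.
ResultCode result = mutex->Release(thread);
result = mutex->Release(thread);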

View File

@ -1,103 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/k_object_name.h"
namespace Kernel {
KObjectNameGlobalData::KObjectNameGlobalData(KernelSystem& kernel) {}
KObjectNameGlobalData::~KObjectNameGlobalData() = default;
void KObjectName::Initialize(KAutoObject* obj, const char* name) {
// Set member variables.
m_object = obj;
std::strncpy(m_name.data(), name, sizeof(m_name) - 1);
m_name[sizeof(m_name) - 1] = '\x00';
// Open a reference to the object we hold.
m_object->Open();
}
bool KObjectName::MatchesName(const char* name) const {
return std::strncmp(m_name.data(), name, sizeof(m_name)) == 0;
}
ResultCode KObjectName::NewFromName(KernelSystem& kernel, KAutoObject* obj, const char* name) {
// Create a new object name.
KObjectName* new_name = KObjectName::Allocate(kernel);
R_UNLESS(new_name != nullptr, ResultCode{0xD86007F3});
// Initialize the new name.
new_name->Initialize(obj, name);
// Check if there's an existing name.
{
// Get the global data.
KObjectNameGlobalData& gd{kernel.ObjectNameGlobalData()};
// Ensure we have exclusive access to the global list.
// KScopedLightMutex lk{gd.GetObjectListLock()};
// If the object doesn't exist, put it into the list.
KScopedAutoObject existing_object = FindImpl(kernel, name);
if (existing_object.IsNull()) {
gd.GetObjectList().push_back(*new_name);
R_SUCCEED();
}
}
// The object already exists, the kernel does not check for this.
UNREACHABLE();
}
ResultCode KObjectName::Delete(KernelSystem& kernel, KAutoObject* obj, const char* compare_name) {
// Get the global data.
KObjectNameGlobalData& gd{kernel.ObjectNameGlobalData()};
// Ensure we have exclusive access to the global list.
// KScopedLightMutex lk{gd.GetObjectListLock()};
// Find a matching entry in the list, and delete it.
for (auto& name : gd.GetObjectList()) {
if (name.MatchesName(compare_name) && obj == name.GetObject()) {
// We found a match, clean up its resources.
obj->Close();
gd.GetObjectList().erase(gd.GetObjectList().iterator_to(name));
KObjectName::Free(kernel, std::addressof(name));
R_SUCCEED();
}
}
// We didn't find the object in the list.
R_THROW(ERR_NOT_FOUND);
}
KScopedAutoObject<KAutoObject> KObjectName::Find(KernelSystem& kernel, const char* name) {
// Get the global data.
// KObjectNameGlobalData& gd{kernel.ObjectNameGlobalData()};
// Ensure we have exclusive access to the global list.
// KScopedLightMutex lk{gd.GetObjectListLock()};
return FindImpl(kernel, name);
}
KScopedAutoObject<KAutoObject> KObjectName::FindImpl(KernelSystem& kernel,
const char* compare_name) {
// Get the global data.
KObjectNameGlobalData& gd{kernel.ObjectNameGlobalData()};
// Try to find a matching object in the global list.
for (const auto& name : gd.GetObjectList()) {
if (name.MatchesName(compare_name)) {
return name.GetObject();
}
}
// There's no matching entry in the list.
return nullptr;
}
} // namespace Kernel

View File

@ -1,81 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include "common/intrusive_list.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
class KObjectNameGlobalData;
class KObjectName : public KSlabAllocated<KObjectName>,
public Common::IntrusiveListBaseNode<KObjectName> {
public:
explicit KObjectName(KernelSystem&) {}
virtual ~KObjectName() = default;
static constexpr size_t NameLengthMax = 12;
using List = Common::IntrusiveListBaseTraits<KObjectName>::ListType;
static ResultCode NewFromName(KernelSystem& kernel, KAutoObject* obj, const char* name);
static ResultCode Delete(KernelSystem& kernel, KAutoObject* obj, const char* name);
static KScopedAutoObject<KAutoObject> Find(KernelSystem& kernel, const char* name);
template <typename Derived>
static ResultCode Delete(KernelSystem& kernel, const char* name) {
// Find the object.
KScopedAutoObject obj = Find(kernel, name);
R_UNLESS(obj.IsNotNull(), ERR_NOT_FOUND);
// Cast the object to the desired type.
Derived* derived = obj->DynamicCast<Derived*>();
R_UNLESS(derived != nullptr, ERR_NOT_FOUND);
// Check that the object is closed.
R_UNLESS(derived->IsServerClosed(), ERR_INVALID_ADDRESS_STATE);
R_RETURN(Delete(kernel, obj.GetPointerUnsafe(), name));
}
template <typename Derived>
requires(std::derived_from<Derived, KAutoObject>)
static KScopedAutoObject<Derived> Find(KernelSystem& kernel, const char* name) {
return Find(kernel, name);
}
private:
static KScopedAutoObject<KAutoObject> FindImpl(KernelSystem& kernel, const char* name);
void Initialize(KAutoObject* obj, const char* name);
bool MatchesName(const char* name) const;
KAutoObject* GetObject() const {
return m_object;
}
private:
std::array<char, NameLengthMax> m_name{};
KAutoObject* m_object{};
};
class KObjectNameGlobalData {
public:
explicit KObjectNameGlobalData(KernelSystem& kernel);
~KObjectNameGlobalData();
KObjectName::List& GetObjectList() {
return m_object_list;
}
private:
// KMutex m_mutex;
KObjectName::List m_object_list;
};
} // namespace Kernel
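A usage sketch for the name registry above (illustrative only; assumes `kernel` is a KernelSystem& and `port` a live KAutoObject*; names longer than 11 characters are truncated by Initialize()):
// Register the object under a name (fails internally if the slab is exhausted).
const ResultCode register_result = KObjectName::NewFromName(kernel, port, "srv:");

// Look the object up again; the scoped reference keeps it open while in scope.
auto found = KObjectName::Find(kernel, "srv:");
if (found.IsNotNull()) {
    // use the object
}

// Remove the registration and drop the reference the registry held.
const ResultCode delete_result = KObjectName::Delete(kernel, port, "srv:");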

View File

@ -1,25 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/k_port.h"
namespace Kernel {
KPort::KPort(KernelSystem& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel}, m_server{kernel}, m_client{kernel} {}
KPort::~KPort() = default;
void KPort::Initialize(s32 max_sessions) {
// Open a new reference count to the initialized port.
this->Open();
// Create and initialize our server/client pair.
KAutoObject::Create(std::addressof(m_server));
KAutoObject::Create(std::addressof(m_client));
m_server.Initialize(this);
m_client.Initialize(this, max_sessions);
}
} // namespace Kernel

View File

@ -1,52 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/common_types.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class KServerSession;
class KPort final : public KAutoObjectWithSlabHeapAndContainer<KPort> {
KERNEL_AUTOOBJECT_TRAITS(KPort, KAutoObject);
public:
explicit KPort(KernelSystem& kernel);
~KPort() override;
static void PostDestroy(uintptr_t arg) {}
void Initialize(s32 max_sessions);
void OnClientClosed();
void OnServerClosed();
bool IsServerClosed() const;
ResultCode EnqueueSession(KServerSession* session);
KClientPort& GetClientPort() {
return m_client;
}
KServerPort& GetServerPort() {
return m_server;
}
const KClientPort& GetClientPort() const {
return m_client;
}
const KServerPort& GetServerPort() const {
return m_server;
}
private:
KServerPort m_server;
KClientPort m_client;
};
} // namespace Kernel

View File

@ -1,50 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/common_types.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/process.h"
namespace Kernel {
class KScopedResourceReservation {
public:
explicit KScopedResourceReservation(KResourceLimit* l, ResourceLimitType type, s32 amount = 1)
: m_limit(l), m_amount(amount), m_type(type) {
if (m_limit) {
m_succeeded = m_limit->Reserve(m_type, m_amount);
} else {
m_succeeded = true;
}
}
explicit KScopedResourceReservation(const Process* p, ResourceLimitType type, s32 amount = 1)
: KScopedResourceReservation(p->resource_limit, type, amount) {}
~KScopedResourceReservation() noexcept {
if (m_limit && m_succeeded) {
// Resource was not committed, release the reservation.
m_limit->Release(m_type, m_amount);
}
}
/// Commit the resource reservation, destruction of this object does not release the resource
void Commit() {
m_limit = nullptr;
}
bool Succeeded() const {
return m_succeeded;
}
private:
KResourceLimit* m_limit{};
s32 m_amount{};
ResourceLimitType m_type{};
bool m_succeeded{};
};
} // namespace Kernel
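A minimal sketch of the reserve/commit pattern this helper is built for (illustrative only; assumes `process` is the creating Process* and the object creation step is elided):
// Reserve one Mutex slot against the process' resource limit.
KScopedResourceReservation reservation(process, ResourceLimitType::Mutex);
if (reservation.Succeeded()) {
    // ... create the kernel object that consumes the slot ...
    // Commit() detaches the reservation so the destructor no longer releases it.
    reservation.Commit();
}
// If Succeeded() was false, or creation failed before Commit(), the destructor
// returns the reserved slot to the limit automatically.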

View File

@ -1,77 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/export.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_semaphore.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
SERIALIZE_EXPORT_IMPL(Kernel::KSemaphore)
namespace Kernel {
KSemaphore::KSemaphore(KernelSystem& kernel) : KAutoObjectWithSlabHeapAndContainer(kernel) {}
KSemaphore::~KSemaphore() = default;
void KSemaphore::Initialize(Process* owner, s32 initial_count, s32 max_count, std::string name) {
// Open a reference to the owner process.
if (owner) {
owner->Open();
m_owner = owner;
}
// Set member variables
m_available_count = initial_count;
m_max_count = max_count;
m_name = name;
}
void KSemaphore::PostDestroy(uintptr_t arg) {
Process* owner = reinterpret_cast<Process*>(arg);
if (owner != nullptr) {
owner->ReleaseResource(ResourceLimitType::Semaphore, 1);
owner->Close();
}
}
bool KSemaphore::ShouldWait(const Thread* thread) const {
return m_available_count <= 0;
}
void KSemaphore::Acquire(Thread* thread) {
if (m_available_count <= 0) {
return;
}
--m_available_count;
}
ResultCode KSemaphore::Release(s32* out_count, s32 release_count) {
R_UNLESS(release_count + m_available_count <= m_max_count, ERR_OUT_OF_RANGE_KERNEL);
// Update available count.
const s32 previous_count = m_available_count;
m_available_count += release_count;
// Wakeup waiting threads and return.
this->WakeupAllWaitingThreads();
*out_count = previous_count;
R_SUCCEED();
}
template <class Archive>
void KSemaphore::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KSynchronizationObject>(*this);
ar& m_max_count;
ar& m_available_count;
}
SERIALIZE_IMPL(KSemaphore)
} // namespace Kernel

View File

@ -1,59 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/global.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class ResourceLimit;
class KSemaphore final
: public KAutoObjectWithSlabHeapAndContainer<KSemaphore, KSynchronizationObject> {
KERNEL_AUTOOBJECT_TRAITS(KSemaphore, KSynchronizationObject);
public:
explicit KSemaphore(KernelSystem& kernel);
~KSemaphore() override;
void Initialize(Process* owner, s32 initial_count, s32 max_count, std::string name);
uintptr_t GetPostDestroyArgument() const override {
return reinterpret_cast<uintptr_t>(m_owner);
}
static void PostDestroy(uintptr_t arg);
Process* GetOwner() const override {
return m_owner;
}
bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
ResultCode Release(s32* out_count, s32 release_count);
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
private:
Process* m_owner{};
s32 m_max_count{};
s32 m_available_count{};
std::string m_name;
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KSemaphore)
CONSTRUCT_KERNEL_OBJECT(Kernel::KSemaphore)
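An illustrative sketch of the counting behaviour above (not part of the diff; assumes `semaphore` is a KSemaphore* created through the kernel, `owner` its owning Process* and `thread` the current Thread*):
// Start with no available slots and allow at most four.
semaphore->Initialize(owner, 0, 4, "Example");

// Release() adds to the count (capped at max_count) and reports the previous value.
s32 previous_count{};
const ResultCode release_result = semaphore->Release(std::addressof(previous_count), 2);

// A waiter acquires one slot once the count is positive.
if (!semaphore->ShouldWait(thread)) {
    semaphore->Acquire(thread); // decrements m_available_count
}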

View File

@ -1,63 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::KServerPort)
namespace Kernel {
KServerPort::KServerPort(KernelSystem& kernel) : KSynchronizationObject(kernel) {}
KServerPort::~KServerPort() = default;
void KServerPort::Initialize(KPort* parent, std::string name) {
m_parent = parent;
m_name = name;
}
void KServerPort::Destroy() {
// Close our reference to our parent.
m_parent->Close();
}
KServerSession* KServerPort::AcceptSession() {
// Return the first session in the list.
if (m_pending_sessions.empty()) {
return nullptr;
}
KServerSession* session = m_pending_sessions.back();
m_pending_sessions.pop_back();
return session;
}
bool KServerPort::ShouldWait(const Thread* thread) const {
// If there are no pending sessions, we wait until a new one is added.
return m_pending_sessions.size() == 0;
}
void KServerPort::Acquire(Thread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
}
template <class Archive>
void KServerPort::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KSynchronizationObject>(*this);
ar& m_name;
ar& m_pending_sessions;
ar& m_hle_handler;
}
SERIALIZE_IMPL(KServerPort)
} // namespace Kernel

View File

@ -1,61 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <boost/serialization/export.hpp>
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_synchronization_object.h"
namespace Kernel {
class KClientPort;
class KServerSession;
class KPort;
class SessionRequestHandler;
class KServerPort final : public KSynchronizationObject {
KERNEL_AUTOOBJECT_TRAITS(KServerPort, KSynchronizationObject);
public:
explicit KServerPort(KernelSystem& kernel);
~KServerPort() override;
void Initialize(KPort* parent, std::string name);
void Destroy() override;
void EnqueueSession(KServerSession* session);
KServerSession* AcceptSession();
bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
void SetHleHandler(std::shared_ptr<SessionRequestHandler> hle_handler_) {
m_hle_handler = std::move(hle_handler_);
}
std::shared_ptr<SessionRequestHandler> GetHleHandler() {
return m_hle_handler;
}
private:
KPort* m_parent{};
std::string m_name;
std::vector<KServerSession*> m_pending_sessions;
std::shared_ptr<SessionRequestHandler> m_hle_handler;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int file_version);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KServerPort)
CONSTRUCT_KERNEL_OBJECT(Kernel::KServerPort)

View File

@ -1,145 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <array>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::KServerSession)
namespace Kernel {
KServerSession::KServerSession(KernelSystem& kernel) : KSynchronizationObject(kernel) {}
KServerSession::~KServerSession() = default;
void KServerSession::Destroy() {
m_parent->OnServerClosed();
m_parent->Close();
}
bool KServerSession::ShouldWait(const Thread* thread) const {
// Closed sessions should never wait; an error will be returned from svcReplyAndReceive.
const auto state = m_parent->GetState();
if (state != KSessionState::Normal) {
return false;
}
// Wait if we have no pending requests, or if we're currently handling a request.
return pending_requesting_threads.empty() || currently_handling != nullptr;
}
void KServerSession::Acquire(Thread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
// If the client endpoint was closed, don't do anything. This KServerSession is now useless and
// will linger until its last handle is closed by the running application.
const auto state = m_parent->GetState();
if (state != KSessionState::Normal) {
return;
}
// We are now handling a request, pop it from the stack.
ASSERT(!pending_requesting_threads.empty());
currently_handling = pending_requesting_threads.back();
pending_requesting_threads.pop_back();
}
void KServerSession::OnClientClosed() {
// Notify HLE handler that client session has been disconnected.
if (hle_handler) {
hle_handler->ClientDisconnected(this);
}
// Clean up the list of client threads with pending requests; they are unneeded now
// that the client endpoint is closed.
pending_requesting_threads.clear();
currently_handling = nullptr;
// Notify any threads waiting on the KServerSession that the endpoint has been closed.
// Note that this call has to happen after the parent session's state has been updated,
// so that the KServerSession sees the client endpoint as closed.
this->WakeupAllWaitingThreads();
}
ResultCode KServerSession::HandleSyncRequest(Thread* thread) {
// The KServerSession received a sync request; this means there's new data available
// from its ClientSession, so wake up any threads that may be waiting on svcReplyAndReceive
// or similar.
// If this KServerSession has an associated HLE handler, forward the request to it.
if (hle_handler != nullptr) {
std::array<u32_le, IPC::COMMAND_BUFFER_LENGTH + 2 * IPC::MAX_STATIC_BUFFERS> cmd_buf;
auto current_process = thread->GetOwner();
ASSERT(current_process);
m_kernel.memory.ReadBlock(*current_process, thread->GetCommandBufferAddress(),
cmd_buf.data(), cmd_buf.size() * sizeof(u32));
auto context =
std::make_shared<Kernel::HLERequestContext>(m_kernel, SharedFrom(this), thread);
context->PopulateFromIncomingCommandBuffer(cmd_buf.data(), current_process);
hle_handler->HandleSyncRequest(*context);
ASSERT(thread->m_status == Kernel::ThreadStatus::Running ||
thread->m_status == Kernel::ThreadStatus::WaitHleEvent);
// Only write the response immediately if the thread is still running. If the HLE handler
// put the thread to sleep then the writing of the command buffer will be deferred to the
// wakeup callback.
if (thread->m_status == Kernel::ThreadStatus::Running) {
context->WriteToOutgoingCommandBuffer(cmd_buf.data(), *current_process);
m_kernel.memory.WriteBlock(*current_process, thread->GetCommandBufferAddress(),
cmd_buf.data(), cmd_buf.size() * sizeof(u32));
}
}
if (thread->m_status == ThreadStatus::Running) {
// Put the thread to sleep until the server replies; it will be awoken in
// svcReplyAndReceive for LLE servers.
thread->m_status = ThreadStatus::WaitIPC;
if (hle_handler != nullptr) {
// For HLE services, we put the request threads to sleep for a short duration to
// simulate IPC overhead, but only if the HLE handler didn't put the thread to sleep for
// other reasons like an async callback. The IPC overhead is needed to prevent
// starvation when a thread only does sync requests to HLE services while a
// lower-priority thread is waiting to run.
// This delay was approximated in a homebrew application by measuring the average time
// it takes for svcSendSyncRequest to return when performing the SetLcdForceBlack IPC
// request to the GSP:GPU service in a n3DS with firmware 11.6. The measured values have
// a high variance and vary between models.
static constexpr u64 IPCDelayNanoseconds = 39000;
thread->WakeAfterDelay(IPCDelayNanoseconds);
} else {
// Add the thread to the list of threads that have issued a sync request with this
// server.
pending_requesting_threads.push_back(thread);
}
}
// If this KServerSession does not have an HLE implementation,
// just wake up the threads waiting on it.
this->WakeupAllWaitingThreads();
R_SUCCEED();
}
template <class Archive>
void KServerSession::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KSynchronizationObject>(*this);
ar& m_name;
ar& m_parent;
ar& hle_handler;
ar& pending_requesting_threads;
ar& currently_handling;
ar& mapped_buffer_context;
}
SERIALIZE_IMPL(KServerSession)
} // namespace Kernel

View File

@ -1,80 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/ipc.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/result.h"
namespace Kernel {
class ClientSession;
class ClientPort;
class KSession;
class SessionRequestHandler;
class Thread;
class KServerSession final : public KSynchronizationObject {
KERNEL_AUTOOBJECT_TRAITS(KServerSession, KSynchronizationObject);
public:
~KServerSession() override;
explicit KServerSession(KernelSystem& kernel);
void Destroy() override;
void Initialize(KSession* parent) {
m_parent = parent;
}
KSession* GetParent() const {
return m_parent;
}
Thread* GetCurrent() {
return currently_handling;
}
std::vector<MappedBufferContext>& GetMappedBufferContext() {
return mapped_buffer_context;
}
void SetHleHandler(std::shared_ptr<SessionRequestHandler>&& hle_handler_) {
hle_handler = std::move(hle_handler_);
}
std::shared_ptr<SessionRequestHandler>& GetHleHandler() {
return hle_handler;
}
void OnClientClosed();
ResultCode HandleSyncRequest(Thread* thread);
bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
public:
std::string m_name;
KSession* m_parent{};
std::shared_ptr<SessionRequestHandler> hle_handler;
std::vector<Thread*> pending_requesting_threads;
Thread* currently_handling{};
std::vector<MappedBufferContext> mapped_buffer_context;
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KServerSession)
CONSTRUCT_KERNEL_OBJECT(Kernel::KServerSession)
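A sketch of the round trip HandleSyncRequest() performs for an HLE service (illustrative only; assumes `server` is a KServerSession*, `handler` a std::shared_ptr<SessionRequestHandler>, and `thread` the requesting Thread*):
server->SetHleHandler(std::move(handler));

// HandleSyncRequest():
//  1. reads the thread's IPC command buffer into an HLERequestContext,
//  2. forwards it to the handler's HandleSyncRequest(),
//  3. writes the response back if the thread is still running,
//  4. puts the thread to sleep for IPCDelayNanoseconds to model IPC overhead.
const ResultCode rc = server->HandleSyncRequest(thread);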

View File

@ -1,68 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
namespace Kernel {
KSession::KSession(KernelSystem& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel}, m_server{kernel}, m_client{kernel} {}
KSession::~KSession() = default;
void KSession::Initialize(KClientPort* client_port) {
// Increment reference count.
// Because reference count is one on creation, this will result
// in a reference count of two. Thus, when both server and client are closed
// this object will be destroyed.
this->Open();
// Create our sub sessions.
KAutoObject::Create(std::addressof(m_server));
KAutoObject::Create(std::addressof(m_client));
// Initialize our sub sessions.
m_state = KSessionState::Normal;
m_server.Initialize(this);
m_client.Initialize(this);
// Set our port.
m_port = client_port;
if (m_port != nullptr) {
m_port->Open();
}
// Hand the server endpoint to the parent port's server side: an HLE handler is
// notified directly, otherwise the session is queued until the server accepts it.
// (Assumes the client port exposes its parent KPort.)
if (m_port != nullptr) {
    auto& server_port = m_port->GetParent()->GetServerPort();
    if (auto hle_handler = server_port.GetHleHandler()) {
        hle_handler->ClientConnected(std::addressof(m_server));
    } else {
        server_port.EnqueueSession(std::addressof(m_server));
    }
}
// Mark initialized.
m_initialized = true;
}
void KSession::Finalize() {
if (m_port != nullptr) {
m_port->ConnectionClosed();
m_port->Close();
}
}
void KSession::OnServerClosed() {
if (m_state == KSessionState::Normal) {
m_state = KSessionState::ServerClosed;
m_client.OnServerClosed();
}
}
void KSession::OnClientClosed() {
if (m_state == KSessionState::Normal) {
m_state = KSessionState::ClientClosed;
m_server.OnClientClosed();
}
}
} // namespace Kernel

View File

@ -1,76 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
class KClientPort;
enum class KSessionState : u8 {
Invalid = 0,
Normal = 1,
ClientClosed = 2,
ServerClosed = 3,
};
class KSession final : public KAutoObjectWithSlabHeapAndContainer<KSession> {
KERNEL_AUTOOBJECT_TRAITS(KSession, KAutoObject);
public:
explicit KSession(KernelSystem& kernel);
~KSession() override;
void Initialize(KClientPort* port);
void Finalize() override;
bool IsInitialized() const override {
return m_initialized;
}
static void PostDestroy(uintptr_t arg) {}
void OnServerClosed();
void OnClientClosed();
KSessionState GetState() const {
return m_state;
}
KClientSession& GetClientSession() {
return m_client;
}
KServerSession& GetServerSession() {
return m_server;
}
const KClientSession& GetClientSession() const {
return m_client;
}
const KServerSession& GetServerSession() const {
return m_server;
}
KClientPort* GetParent() {
return m_port;
}
private:
KServerSession m_server;
KClientSession m_client;
KClientPort* m_port{};
KSessionState m_state{};
bool m_initialized{};
};
} // namespace Kernel
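An illustrative sketch of the session state machine above (not part of the diff; assumes `session` is an initialized KSession*):
// A freshly initialized session is usable by both endpoints.
ASSERT(session->GetState() == KSessionState::Normal);

// Closing the client endpoint moves the state to ClientClosed and notifies the server
// side; svcReplyAndReceive on the server endpoint then errors out instead of waiting.
session->OnClientClosed();

// Once the state has left Normal, the opposite close is a no-op.
session->OnServerClosed();
ASSERT(session->GetState() == KSessionState::ClientClosed);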

View File

@ -1,238 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include "common/archives.h"
#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/memory.h"
SERIALIZE_EXPORT_IMPL(Kernel::KSharedMemory)
namespace Kernel {
KSharedMemory::KSharedMemory(KernelSystem& kernel) : KAutoObjectWithSlabHeapAndContainer(kernel) {}
KSharedMemory::~KSharedMemory() = default;
ResultCode KSharedMemory::Initialize(Process* owner, u32 size, MemoryPermission permissions,
MemoryPermission other_permissions, VAddr address,
MemoryRegion region) {
// Open a reference to our owner process.
if (owner) {
owner->Open();
m_owner = owner;
}
// Set member variables.
m_base_address = address;
m_size = size;
m_permissions = permissions;
m_other_permissions = other_permissions;
// Allocate the shared memory block.
if (address == 0) {
// We need to allocate a block from the Linear Heap ourselves.
// We'll manually allocate some memory from the linear heap in the specified region.
auto memory_region = m_kernel.GetMemoryRegion(region);
auto offset = memory_region->LinearAllocate(size);
ASSERT_MSG(offset, "Not enough space in region to allocate shared memory!");
// Store the backing blocks of allocated memory.
auto& memory = m_kernel.memory;
std::fill(memory.GetFCRAMPointer(*offset), memory.GetFCRAMPointer(*offset + size), 0);
m_backing_blocks = {{memory.GetFCRAMRef(*offset), size}};
m_holding_memory += MemoryRegionInfo::Interval(*offset, *offset + size);
m_linear_heap_phys_offset = *offset;
// Increase the amount of used linear heap memory for the owner process.
if (m_owner) {
m_owner->memory_used += size;
}
} else {
// The memory is already available and mapped in the owner process.
ASSERT(m_owner);
auto& vm_manager = m_owner->vm_manager;
R_TRY(vm_manager.ChangeMemoryState(address, size, MemoryState::Private,
VMAPermission::ReadWrite, MemoryState::Locked,
KSharedMemory::ConvertPermissions(permissions)));
// Should succeed after verifying memory state above.
auto backing_blocks = vm_manager.GetBackingBlocksForRange(address, size);
ASSERT(backing_blocks.Succeeded());
m_backing_blocks = std::move(backing_blocks).Unwrap();
}
R_SUCCEED();
}
void KSharedMemory::InitializeForApplet(u32 offset, u32 size, MemoryPermission permissions,
MemoryPermission other_permissions) {
// Allocate memory in heap
auto memory_region = m_kernel.GetMemoryRegion(MemoryRegion::SYSTEM);
auto backing_blocks = memory_region->HeapAllocate(size);
ASSERT_MSG(!backing_blocks.empty(), "Not enough space in region to allocate shared memory!");
// Set member variables
m_holding_memory = backing_blocks;
m_base_address = Memory::HEAP_VADDR + offset;
m_size = size;
m_permissions = permissions;
m_other_permissions = other_permissions;
// Initialize backing blocks
auto& memory = m_kernel.memory;
for (const auto& interval : backing_blocks) {
const VAddr addr = interval.lower();
const VAddr end = interval.upper();
m_backing_blocks.emplace_back(memory.GetFCRAMRef(addr), end - addr);
std::fill(memory.GetFCRAMPointer(addr), memory.GetFCRAMPointer(end), 0);
}
}
void KSharedMemory::Finalize() {
auto memory_region = m_kernel.GetMemoryRegion(MemoryRegion::SYSTEM);
for (const auto& interval : m_holding_memory) {
memory_region->Free(interval.lower(), interval.upper() - interval.lower());
}
if (m_owner) {
if (m_base_address != 0) {
m_owner->vm_manager.ChangeMemoryState(m_base_address, m_size, MemoryState::Locked,
VMAPermission::None, MemoryState::Private,
VMAPermission::ReadWrite);
} else {
m_owner->memory_used -= m_size;
}
}
}
void KSharedMemory::PostDestroy(uintptr_t arg) {
Process* owner = reinterpret_cast<Process*>(arg);
if (owner != nullptr) {
owner->ReleaseResource(ResourceLimitType::SharedMemory, 1);
owner->Close();
}
}
ResultCode KSharedMemory::Map(Process& target_process, VAddr address, MemoryPermission permissions,
MemoryPermission other_permissions) {
const MemoryPermission own_other_permissions =
&target_process == m_owner ? m_permissions : m_other_permissions;
// Automatically allocated memory blocks can only be mapped with other_permissions = DontCare
R_UNLESS(m_base_address != 0 || other_permissions == MemoryPermission::DontCare,
ERR_INVALID_COMBINATION);
// Heap-backed memory blocks can not be mapped with other_permissions = DontCare
R_UNLESS(m_base_address == 0 || other_permissions != MemoryPermission::DontCare,
ERR_INVALID_COMBINATION);
// Error out if the requested permissions don't match what the creator process allows.
if (static_cast<u32>(permissions) & ~static_cast<u32>(own_other_permissions)) {
LOG_ERROR(Kernel, "cannot map address={:#08X}, permissions don't match", address);
R_THROW(ERR_INVALID_COMBINATION);
}
// Error out if the provided permissions are not compatible with what the creator process needs.
if (other_permissions != MemoryPermission::DontCare &&
static_cast<u32>(m_permissions) & ~static_cast<u32>(other_permissions)) {
LOG_ERROR(Kernel, "cannot map address={:#08X}, permissions don't match", address);
R_THROW(ERR_WRONG_PERMISSION);
}
// TODO(Subv): Check for the Shared Device Mem flag in the creator process.
/*if (was_created_with_shared_device_mem && address != 0) {
return ResultCode(ErrorDescription::InvalidCombination, ErrorModule::OS,
ErrorSummary::InvalidArgument, ErrorLevel::Usage);
}*/
// TODO(Subv): The same process that created a SharedMemory object
// can not map it in its own address space unless it was created with addr=0, result 0xD900182C.
if (address != 0) {
if (address < Memory::HEAP_VADDR || address + m_size >= Memory::SHARED_MEMORY_VADDR_END) {
LOG_ERROR(Kernel, "cannot map address={:#08X}, invalid address", address);
R_THROW(ERR_INVALID_ADDRESS);
}
}
VAddr target_address = address;
if (m_base_address == 0 && target_address == 0) {
// Calculate the address at which to map the memory block.
// Note: even on new firmware versions, the target address is still in the old linear heap
// region. This exception is made to keep the shared font compatibility. See
// APT:GetSharedFont for detail.
target_address = m_linear_heap_phys_offset + Memory::LINEAR_HEAP_VADDR;
}
{
auto vma = target_process.vm_manager.FindVMA(target_address);
if (vma->second.type != VMAType::Free ||
vma->second.base + vma->second.size < target_address + m_size) {
LOG_ERROR(Kernel, "cannot map address={:#08X}, mapping to already allocated memory",
address);
R_THROW(ERR_INVALID_ADDRESS_STATE);
}
}
// Map the memory block into the target process
VAddr interval_target = target_address;
for (const auto& interval : m_backing_blocks) {
auto vma = target_process.vm_manager.MapBackingMemory(interval_target, interval.first,
interval.second, MemoryState::Shared);
ASSERT(vma.Succeeded());
target_process.vm_manager.Reprotect(vma.Unwrap(), ConvertPermissions(permissions));
interval_target += interval.second;
}
R_SUCCEED();
}
ResultCode KSharedMemory::Unmap(Process& target_process, VAddr address) {
// TODO(Subv): Verify what happens if the application tries to unmap an address that is not
// mapped to a SharedMemory.
return target_process.vm_manager.UnmapRange(address, m_size);
}
VMAPermission KSharedMemory::ConvertPermissions(MemoryPermission permission) {
u32 masked_permissions =
static_cast<u32>(permission) & static_cast<u32>(MemoryPermission::ReadWriteExecute);
return static_cast<VMAPermission>(masked_permissions);
}
u8* KSharedMemory::GetPointer(u32 offset) {
if (m_backing_blocks.size() != 1) {
LOG_WARNING(Kernel, "Unsafe GetPointer on discontinuous SharedMemory");
}
return m_backing_blocks[0].first + offset;
}
const u8* KSharedMemory::GetPointer(u32 offset) const {
if (m_backing_blocks.size() != 1) {
LOG_WARNING(Kernel, "Unsafe GetPointer on discontinuous SharedMemory");
}
return m_backing_blocks[0].first + offset;
}
template <class Archive>
void KSharedMemory::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KAutoObject>(*this);
ar& m_linear_heap_phys_offset;
ar& m_backing_blocks;
ar& m_size;
ar& m_permissions;
ar& m_other_permissions;
ar& m_owner;
ar& m_base_address;
ar& m_holding_memory;
}
SERIALIZE_IMPL(KSharedMemory)
} // namespace Kernel

View File

@ -1,107 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <utility>
#include <vector>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/global.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
enum class VMAPermission : u8;
class KSharedMemory final : public KAutoObjectWithSlabHeapAndContainer<KSharedMemory> {
KERNEL_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject);
public:
explicit KSharedMemory(KernelSystem& kernel);
~KSharedMemory() override;
ResultCode Initialize(Process* owner, u32 size, MemoryPermission permissions,
MemoryPermission other_permissions, VAddr address, MemoryRegion region);
void InitializeForApplet(u32 offset, u32 size, MemoryPermission permissions,
MemoryPermission other_permissions);
void Finalize() override;
uintptr_t GetPostDestroyArgument() const override {
return reinterpret_cast<uintptr_t>(m_owner);
}
static void PostDestroy(uintptr_t arg);
Process* GetOwner() const override {
return m_owner;
}
u64 GetSize() const {
return m_size;
}
u64 GetLinearHeapPhysicalOffset() const {
return m_linear_heap_phys_offset;
}
/**
* Converts the specified MemoryPermission into the equivalent VMAPermission.
* @param permission The MemoryPermission to convert.
*/
static VMAPermission ConvertPermissions(MemoryPermission permission);
/**
* Maps a shared memory block to an address in the target process' address space
* @param target_process Process on which to map the memory block.
* @param address Address in system memory to map shared memory block to
* @param permissions Memory block map permissions (specified by SVC field)
* @param other_permissions Memory block map other permissions (specified by SVC field)
*/
ResultCode Map(Process& target_process, VAddr address, MemoryPermission permissions,
MemoryPermission other_permissions);
/**
* Unmaps a shared memory block from the specified address in system memory
* @param target_process Process from which to unmap the memory block.
* @param address Address in system memory where the shared memory block is mapped
* @return Result code of the unmap operation
*/
ResultCode Unmap(Process& target_process, VAddr address);
/**
* Gets a pointer to the shared memory block
* @param offset Offset from the start of the shared memory block to get pointer
* @return A pointer to the shared memory block from the specified offset
*/
u8* GetPointer(u32 offset = 0);
/**
* Gets a constant pointer to the shared memory block
* @param offset Offset from the start of the shared memory block to get pointer
* @return A constant pointer to the shared memory block from the specified offset
*/
const u8* GetPointer(u32 offset = 0) const;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
private:
Process* m_owner{};
PAddr m_linear_heap_phys_offset{};
VAddr m_base_address{};
u32 m_size{};
MemoryPermission m_permissions{};
MemoryPermission m_other_permissions{};
std::vector<std::pair<MemoryRef, u32>> m_backing_blocks;
MemoryRegionInfo::IntervalSet m_holding_memory;
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KSharedMemory)
CONSTRUCT_KERNEL_OBJECT(Kernel::KSharedMemory)

View File

@ -1,191 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <atomic>
#include "common/assert.h"
#include "common/atomic_ops.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
namespace Kernel {
class KernelSystem;
namespace impl {
class KSlabHeapImpl {
CITRA_NON_COPYABLE(KSlabHeapImpl);
CITRA_NON_MOVEABLE(KSlabHeapImpl);
public:
struct Node {
Node* next{};
};
public:
constexpr KSlabHeapImpl() = default;
void Initialize() {
ASSERT(m_head == nullptr);
}
Node* GetHead() const {
return m_head;
}
void* Allocate() {
Node* ret = m_head;
if (ret != nullptr) [[likely]] {
m_head = ret->next;
}
return ret;
}
void Free(void* obj) {
Node* node = static_cast<Node*>(obj);
node->next = m_head;
m_head = node;
}
private:
std::atomic<Node*> m_head{};
};
} // namespace impl
class KSlabHeapBase : protected impl::KSlabHeapImpl {
CITRA_NON_COPYABLE(KSlabHeapBase);
CITRA_NON_MOVEABLE(KSlabHeapBase);
private:
size_t m_obj_size{};
uintptr_t m_peak{};
uintptr_t m_start{};
uintptr_t m_end{};
private:
void UpdatePeakImpl(uintptr_t obj) {
const uintptr_t alloc_peak = obj + this->GetObjectSize();
uintptr_t cur_peak = m_peak;
do {
if (alloc_peak <= cur_peak) {
break;
}
} while (
!Common::AtomicCompareAndSwap(std::addressof(m_peak), alloc_peak, cur_peak, cur_peak));
}
public:
constexpr KSlabHeapBase() = default;
bool Contains(uintptr_t address) const {
return m_start <= address && address < m_end;
}
void Initialize(size_t obj_size, void* memory, size_t memory_size) {
// Ensure we don't initialize a slab using null memory.
ASSERT(memory != nullptr);
// Set our object size.
m_obj_size = obj_size;
// Initialize the base allocator.
KSlabHeapImpl::Initialize();
// Set our tracking variables.
const size_t num_obj = (memory_size / obj_size);
m_start = reinterpret_cast<uintptr_t>(memory);
m_end = m_start + num_obj * obj_size;
m_peak = m_start;
// Free the objects.
u8* cur = reinterpret_cast<u8*>(m_end);
for (size_t i = 0; i < num_obj; i++) {
cur -= obj_size;
KSlabHeapImpl::Free(cur);
}
}
size_t GetSlabHeapSize() const {
return (m_end - m_start) / this->GetObjectSize();
}
size_t GetObjectSize() const {
return m_obj_size;
}
void* Allocate() {
void* obj = KSlabHeapImpl::Allocate();
return obj;
}
void Free(void* obj) {
// Don't allow freeing an object that wasn't allocated from this heap.
const bool contained = this->Contains(reinterpret_cast<uintptr_t>(obj));
ASSERT(contained);
KSlabHeapImpl::Free(obj);
}
size_t GetObjectIndex(const void* obj) const {
return (reinterpret_cast<uintptr_t>(obj) - m_start) / this->GetObjectSize();
}
size_t GetPeakIndex() const {
return this->GetObjectIndex(reinterpret_cast<const void*>(m_peak));
}
uintptr_t GetSlabHeapAddress() const {
return m_start;
}
size_t GetNumRemaining() const {
// Only calculate the number of remaining objects under debug configuration.
return 0;
}
};
template <typename T>
class KSlabHeap final : public KSlabHeapBase {
private:
using BaseHeap = KSlabHeapBase;
public:
constexpr KSlabHeap() = default;
void Initialize(void* memory, size_t memory_size) {
BaseHeap::Initialize(sizeof(T), memory, memory_size);
}
T* Allocate() {
T* obj = static_cast<T*>(BaseHeap::Allocate());
if (obj != nullptr) [[likely]] {
std::construct_at(obj);
}
return obj;
}
T* Allocate(KernelSystem& kernel) {
T* obj = static_cast<T*>(BaseHeap::Allocate());
if (obj != nullptr) [[likely]] {
std::construct_at(obj, kernel);
}
return obj;
}
void Free(T* obj) {
BaseHeap::Free(obj);
}
size_t GetObjectIndex(const T* obj) const {
return BaseHeap::GetObjectIndex(obj);
}
};
} // namespace Kernel
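KSlabHeapBase carves a fixed buffer into equally sized slots and threads a singly linked free list through the unused ones, so Allocate and Free are O(1) pointer swaps. Below is a simplified single-threaded sketch of the same technique in standard C++ only; the atomic head and peak tracking from the code above are deliberately omitted, and the names are hypothetical.

#include <cassert>
#include <cstddef>
#include <vector>

// Single-threaded sketch of the intrusive free list used by KSlabHeapImpl.
class SimpleSlab {
public:
    // Carve the buffer into fixed-size slots and push each one onto the free
    // list, mirroring KSlabHeapBase::Initialize.
    void Initialize(void* buffer, std::size_t buffer_size, std::size_t obj_size) {
        assert(obj_size >= sizeof(Node));
        auto* base = static_cast<unsigned char*>(buffer);
        const std::size_t count = buffer_size / obj_size;
        for (std::size_t i = 0; i < count; ++i) {
            Free(base + i * obj_size);
        }
    }

    // Pop the head of the free list; returns nullptr when the slab is exhausted.
    void* Allocate() {
        Node* node = head_;
        if (node != nullptr) {
            head_ = node->next;
        }
        return node;
    }

    // Push the slot back onto the free list.
    void Free(void* obj) {
        Node* node = static_cast<Node*>(obj);
        node->next = head_;
        head_ = node;
    }

private:
    struct Node {
        Node* next{};
    };
    Node* head_{};
};

int main() {
    // Backing storage; 64-byte slots keep every slot pointer-aligned.
    std::vector<long long> backing(128);
    SimpleSlab slab;
    slab.Initialize(backing.data(), backing.size() * sizeof(long long), 64);
    void* first = slab.Allocate();
    slab.Free(first);
}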

View File

@ -1,116 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <utility>
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/thread.h"
namespace Kernel {
KSynchronizationObject::KSynchronizationObject(KernelSystem& kernel) : KAutoObject(kernel) {}
KSynchronizationObject::~KSynchronizationObject() = default;
void KSynchronizationObject::AddWaitingThread(Thread* thread) {
auto it = std::ranges::find(waiting_threads, thread);
if (it == waiting_threads.end()) {
waiting_threads.push_back(thread);
}
}
void KSynchronizationObject::RemoveWaitingThread(Thread* thread) {
// If a thread passed multiple handles to the same object,
// the kernel might attempt to remove the thread from the object's
// waiting threads list multiple times.
auto it = std::ranges::find(waiting_threads, thread);
if (it != waiting_threads.end()) {
waiting_threads.erase(it);
}
}
Thread* KSynchronizationObject::GetHighestPriorityReadyThread() const {
Thread* candidate = nullptr;
u32 candidate_priority = ThreadPrioLowest + 1;
for (auto* thread : waiting_threads) {
// The list of waiting threads must not contain threads that are not waiting to be awakened.
ASSERT_MSG(thread->GetStatus() == ThreadStatus::WaitSynchAny ||
thread->GetStatus() == ThreadStatus::WaitSynchAll ||
thread->GetStatus() == ThreadStatus::WaitHleEvent,
"Inconsistent thread statuses in waiting_threads");
if (thread->GetCurrentPriority() >= candidate_priority || ShouldWait(thread)) {
continue;
}
// A thread is ready to run if it's either in ThreadStatus::WaitSynchAny or
// in ThreadStatus::WaitSynchAll and the rest of the objects it is waiting on are ready.
bool ready_to_run = true;
if (thread->GetStatus() == ThreadStatus::WaitSynchAll) {
ready_to_run = std::ranges::none_of(thread->m_wait_objects, [thread](const auto* object) {
return object->ShouldWait(thread);
});
}
if (ready_to_run) {
candidate = thread;
candidate_priority = thread->GetCurrentPriority();
}
}
return candidate;
}
void KSynchronizationObject::WakeupAllWaitingThreads() {
while (auto thread = GetHighestPriorityReadyThread()) {
if (!thread->IsSleepingOnWaitAll()) {
Acquire(thread);
} else {
for (auto& object : thread->m_wait_objects) {
object->Acquire(thread);
}
}
// Invoke the wakeup callback before clearing the wait objects
if (thread->m_wakeup_callback) {
thread->m_wakeup_callback->WakeUp(ThreadWakeupReason::Signal, thread, this);
}
for (auto& object : thread->m_wait_objects) {
object->RemoveWaitingThread(thread);
}
thread->m_wait_objects.clear();
thread->ResumeFromWait();
}
if (hle_notifier) {
hle_notifier();
}
}
const std::vector<Thread*>& KSynchronizationObject::GetWaitingThreads() const {
return waiting_threads;
}
void KSynchronizationObject::SetHLENotifier(std::function<void()> callback) {
hle_notifier = std::move(callback);
}
template <class Archive>
void KSynchronizationObject::serialize(Archive& ar, const unsigned int file_version) {
ar& boost::serialization::base_object<KAutoObject>(*this);
ar& waiting_threads;
// NB: hle_notifier *not* serialized since it's a callback!
// Fortunately it's only used in one place (DSP) so we can reconstruct it there
}
SERIALIZE_IMPL(KSynchronizationObject)
} // namespace Kernel
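GetHighestPriorityReadyThread above scans the waiters and keeps the thread with the lowest numeric priority value (on the 3DS a lower value means a higher priority) that is actually ready to run, i.e. not still blocked by this or another object. A standalone sketch of that selection rule, with hypothetical Waiter records standing in for Thread:

#include <cstdint>
#include <vector>

struct Waiter {
    std::uint32_t priority; // Lower value = higher priority, as on the 3DS.
    bool ready;             // True if every object the thread waits on is available.
};

// Returns the index of the best ready waiter, or -1 if none can run yet.
int PickHighestPriorityReady(const std::vector<Waiter>& waiters) {
    constexpr std::uint32_t kLowestPriority = 63; // ThreadPrioLowest on the 3DS.
    int best = -1;
    std::uint32_t best_priority = kLowestPriority + 1;
    for (int i = 0; i < static_cast<int>(waiters.size()); ++i) {
        if (!waiters[i].ready || waiters[i].priority >= best_priority) {
            continue;
        }
        best = i;
        best_priority = waiters[i].priority;
    }
    return best;
}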

View File

@ -1,74 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <functional>
#include <span>
#include <vector>
#include <boost/serialization/access.hpp>
#include "core/hle/kernel/k_auto_object.h"
namespace Kernel {
class Thread;
class KSynchronizationObject : public KAutoObject {
KERNEL_AUTOOBJECT_TRAITS(KSynchronizationObject, KAutoObject);
public:
explicit KSynchronizationObject(KernelSystem& kernel);
~KSynchronizationObject();
/**
* Check if the specified thread should wait until the object is available
* @param thread The thread about which we're deciding.
* @return True if the current thread should wait due to this object being unavailable
*/
virtual bool ShouldWait(const Thread* thread) const = 0;
/// Acquire/lock the object for the specified thread if it is available
virtual void Acquire(Thread* thread) = 0;
/**
* Add a thread to wait on this object
* @param thread Pointer to thread to add
*/
virtual void AddWaitingThread(Thread* thread);
/**
* Removes a thread from waiting on this object (e.g. if it was resumed already)
* @param thread Pointer to thread to remove
*/
virtual void RemoveWaitingThread(Thread* thread);
/**
* Wake up all threads waiting on this object that can be awoken, in priority order,
* and set the synchronization result and output of the thread.
*/
virtual void WakeupAllWaitingThreads();
/// Obtains the highest priority thread that is ready to run from this object's waiting list.
Thread* GetHighestPriorityReadyThread() const;
/// Get a const reference to the waiting threads list for debug use
const std::vector<Thread*>& GetWaitingThreads() const;
/// Sets a callback which is called when the object becomes available
void SetHLENotifier(std::function<void()> callback);
private:
/// Threads waiting for this object to become available
std::vector<Thread*> waiting_threads;
/// Function to call when this object becomes available
std::function<void()> hle_notifier;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int file_version);
};
} // namespace Kernel
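The pure virtuals ShouldWait and Acquire are the whole synchronization contract: an object reports whether a given thread must block, and it is asked to consume its signalled state when a thread is released. A minimal standalone sketch of an event-style object in that shape; this is plain C++, not the actual KEvent, and the Thread parameter is reduced to an id.

#include <cstdint>

// Simplified stand-in for the ShouldWait/Acquire contract of KSynchronizationObject.
class SketchEvent {
public:
    bool ShouldWait(std::uint32_t /*thread_id*/) const {
        return !signaled_; // Threads must wait while the event is not signaled.
    }

    void Acquire(std::uint32_t /*thread_id*/) {
        // One-shot behaviour: consuming the signal resets it, like ResetType::OneShot.
        signaled_ = false;
    }

    void Signal() {
        signaled_ = true;
        // The real object would now call WakeupAllWaitingThreads().
    }

private:
    bool signaled_ = false;
};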

View File

@ -1,119 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/archives.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/k_timer.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/k_resource_limit.h"
SERIALIZE_EXPORT_IMPL(Kernel::KTimer)
namespace Kernel {
KTimer::KTimer(KernelSystem& kernel)
: KAutoObjectWithSlabHeapAndContainer(kernel), m_timer_manager(kernel.GetTimerManager()) {}
KTimer::~KTimer() = default;
void KTimer::Initialize(Process* owner, ResetType reset_type) {
// Open a reference to the owner process.
owner->Open();
// Set member variables.
m_owner = owner;
m_reset_type = reset_type;
// Register to TimerManager
m_callback_id = m_timer_manager.GetNextCallbackId();
m_timer_manager.Register(m_callback_id, this);
}
void KTimer::Finalize() {
this->Cancel();
m_timer_manager.Unregister(m_callback_id);
}
void KTimer::PostDestroy(uintptr_t arg) {
// Release the session count resource the owner process holds.
Process* owner = reinterpret_cast<Process*>(arg);
owner->ReleaseResource(ResourceLimitType::Timer, 1);
owner->Close();
}
bool KTimer::ShouldWait(const Thread* thread) const {
return !m_signaled;
}
void KTimer::Acquire(Thread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
if (m_reset_type == ResetType::OneShot) {
m_signaled = false;
}
}
void KTimer::Set(s64 initial, s64 interval) {
// Ensure we get rid of any previous scheduled event
this->Cancel();
// Set member variables
m_initial_delay = initial;
m_interval_delay = interval;
if (initial == 0) {
// Immediately invoke the callback
this->Signal(0);
} else {
auto& timing = m_kernel.timing;
timing.ScheduleEvent(nsToCycles(initial), m_timer_manager.GetEventType(), m_callback_id);
}
}
void KTimer::Cancel() {
auto& timing = m_kernel.timing;
timing.UnscheduleEvent(m_timer_manager.GetEventType(), m_callback_id);
}
void KTimer::Clear() {
m_signaled = false;
}
void KTimer::WakeupAllWaitingThreads() {
KSynchronizationObject::WakeupAllWaitingThreads();
if (m_reset_type == ResetType::Pulse) {
m_signaled = false;
}
}
void KTimer::Signal(s64 cycles_late) {
LOG_TRACE(Kernel, "Timer {} fired", GetObjectId());
m_signaled = true;
// Resume all waiting threads
this->WakeupAllWaitingThreads();
// Reschedule the timer with the interval delay
if (m_interval_delay != 0) {
auto& timing = m_kernel.timing;
const s64 cycles_into_future = nsToCycles(m_interval_delay) - cycles_late;
timing.ScheduleEvent(cycles_into_future, m_timer_manager.GetEventType(), m_callback_id);
}
}
void TimerManager::TimerCallback(u64 callback_id, s64 cycles_late) {
KTimer* timer = m_timer_callback_table.at(callback_id);
ASSERT_MSG(timer, "Callback fired for invalid timer {:016x}", callback_id);
timer->Signal(cycles_late);
}
TimerManager::TimerManager(Core::Timing& timing) : m_timing(timing) {
m_timer_callback_event_type =
timing.RegisterEvent("TimerCallback", [this](u64 thread_id, s64 cycle_late) {
this->TimerCallback(thread_id, cycle_late);
});
}
} // namespace Kernel
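When an interval timer fires, Signal reschedules it nsToCycles(interval) - cycles_late cycles ahead, so any latency in delivering the current callback is subtracted from the next period instead of accumulating as drift. A small standalone sketch of that computation; the conversion helper is illustrative, using the ARM11 clock rate Citra is built around rather than the real nsToCycles implementation.

#include <cstdint>
#include <iostream>

// Stand-in for nsToCycles(); the real conversion depends on the emulated CPU clock.
std::int64_t NsToCycles(std::int64_t ns) {
    constexpr std::int64_t kClockHz = 268111856; // ARM11 clock rate used by Citra.
    return ns * kClockHz / 1000000000;
}

// Next-event delay as computed in KTimer::Signal: the lateness of the current
// callback is subtracted so the period does not drift.
std::int64_t NextEventDelay(std::int64_t interval_ns, std::int64_t cycles_late) {
    return NsToCycles(interval_ns) - cycles_late;
}

int main() {
    // Fire every 16.7 ms; this callback arrived 5000 cycles late.
    std::cout << NextEventDelay(16'700'000, 5000) << " cycles until the next signal\n";
}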

View File

@ -1,132 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <boost/serialization/string.hpp>
#include <boost/serialization/unordered_map.hpp>
#include "common/common_types.h"
#include "core/core_timing.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Core {
class Timing;
}
namespace Kernel {
class KTimer;
class TimerManager {
public:
explicit TimerManager(Core::Timing& timing);
~TimerManager();
u64 GetNextCallbackId() {
return ++m_next_timer_callback_id;
}
Core::TimingEventType* GetEventType() {
return m_timer_callback_event_type;
}
void Register(u64 callback_id, KTimer* timer) {
m_timer_callback_table[callback_id] = timer;
}
void Unregister(u64 callback_id) {
m_timer_callback_table.erase(callback_id);
}
private:
void TimerCallback(u64 callback_id, s64 cycles_late);
private:
Core::Timing& m_timing;
Core::TimingEventType* m_timer_callback_event_type{};
u64 m_next_timer_callback_id{};
std::unordered_map<u64, KTimer*> m_timer_callback_table;
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version) {
ar& m_next_timer_callback_id;
ar& m_timer_callback_table;
}
};
class ResourceLimit;
class KTimer final : public KAutoObjectWithSlabHeapAndContainer<KTimer, KSynchronizationObject> {
KERNEL_AUTOOBJECT_TRAITS(KTimer, KSynchronizationObject);
public:
explicit KTimer(KernelSystem& kernel);
~KTimer() override;
void Initialize(Process* owner, ResetType reset_type);
void Finalize() override;
uintptr_t GetPostDestroyArgument() const override {
return reinterpret_cast<uintptr_t>(m_owner);
}
static void PostDestroy(uintptr_t arg);
Process* GetOwner() const override {
return m_owner;
}
ResetType GetResetType() const {
return m_reset_type;
}
u64 GetInitialDelay() const {
return m_initial_delay;
}
u64 GetIntervalDelay() const {
return m_interval_delay;
}
void Set(s64 initial, s64 interval);
void Signal(s64 cycles_late);
void Cancel();
void Clear();
void WakeupAllWaitingThreads() override;
bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
private:
TimerManager& m_timer_manager;
Process* m_owner{};
ResetType m_reset_type{};
u64 m_initial_delay{};
u64 m_interval_delay{};
bool m_signaled{};
u64 m_callback_id{};
friend class KernelSystem;
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KSynchronizationObject>(*this);
ar& m_owner;
ar& m_reset_type;
ar& m_initial_delay;
ar& m_interval_delay;
ar& m_signaled;
ar& m_callback_id;
}
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KTimer)
CONSTRUCT_KERNEL_OBJECT(Kernel::KTimer)
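ResetType controls when m_signaled is cleared: OneShot clears it in Acquire as soon as one waiter consumes the signal, Sticky leaves it set until Clear is called, and Pulse clears it after WakeupAllWaitingThreads so only one wake-up round observes it. A compact sketch of those three behaviours, abstracted away from KTimer itself:

enum class ResetType { OneShot, Sticky, Pulse };

struct SignalState {
    ResetType reset_type;
    bool signaled = false;

    // Mirrors KTimer::Acquire: a one-shot signal is consumed by the first waiter.
    void OnAcquire() {
        if (reset_type == ResetType::OneShot) {
            signaled = false;
        }
    }

    // Mirrors KTimer::WakeupAllWaitingThreads: a pulse only lasts one wake-up round.
    void OnAllWaitersWoken() {
        if (reset_type == ResetType::Pulse) {
            signaled = false;
        }
    }

    // Mirrors KTimer::Clear: sticky signals stay set until cleared explicitly.
    void Clear() {
        signaled = false;
    }
};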

View File

@ -6,24 +6,18 @@
#include <boost/serialization/unordered_map.hpp> #include <boost/serialization/unordered_map.hpp>
#include <boost/serialization/vector.hpp> #include <boost/serialization/vector.hpp>
#include "common/archives.h" #include "common/archives.h"
#include "common/serialization/atomic.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/config_mem.h" #include "core/hle/kernel/config_mem.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/ipc_debugger/recorder.h" #include "core/hle/kernel/ipc_debugger/recorder.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory.h" #include "core/hle/kernel/memory.h"
#include "core/hle/kernel/process.h" #include "core/hle/kernel/process.h"
#include "core/hle/kernel/k_resource_limit.h" #include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/shared_page.h" #include "core/hle/kernel/shared_page.h"
#include "core/hle/kernel/thread.h" #include "core/hle/kernel/thread.h"
#include "core/hle/kernel/k_timer.h" #include "core/hle/kernel/timer.h"
#include "core/hle/kernel/k_object_name.h"
#include "core/hle/kernel/k_slab_heap.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_mutex.h"
#include "core/hle/kernel/k_semaphore.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_session.h"
namespace Kernel { namespace Kernel {
@ -35,7 +29,6 @@ KernelSystem::KernelSystem(Memory::MemorySystem& memory, Core::Timing& timing,
: memory(memory), timing(timing), : memory(memory), timing(timing),
prepare_reschedule_callback(std::move(prepare_reschedule_callback)), memory_mode(memory_mode), prepare_reschedule_callback(std::move(prepare_reschedule_callback)), memory_mode(memory_mode),
n3ds_hw_caps(n3ds_hw_caps) { n3ds_hw_caps(n3ds_hw_caps) {
slab_heap_container = std::make_unique<SlabHeapContainer>();
std::generate(memory_regions.begin(), memory_regions.end(), std::generate(memory_regions.begin(), memory_regions.end(),
[] { return std::make_shared<MemoryRegionInfo>(); }); [] { return std::make_shared<MemoryRegionInfo>(); });
MemoryInit(memory_mode, n3ds_hw_caps.memory_mode, override_init_time); MemoryInit(memory_mode, n3ds_hw_caps.memory_mode, override_init_time);
@ -68,16 +61,16 @@ u32 KernelSystem::GenerateObjectID() {
return next_object_id++; return next_object_id++;
} }
Process* KernelSystem::GetCurrentProcess() const { std::shared_ptr<Process> KernelSystem::GetCurrentProcess() const {
return current_process; return current_process;
} }
void KernelSystem::SetCurrentProcess(Process* process) { void KernelSystem::SetCurrentProcess(std::shared_ptr<Process> process) {
current_process = process; current_process = process;
SetCurrentMemoryPageTable(process->vm_manager.page_table); SetCurrentMemoryPageTable(process->vm_manager.page_table);
} }
void KernelSystem::SetCurrentProcessForCPU(Process* process, u32 core_id) { void KernelSystem::SetCurrentProcessForCPU(std::shared_ptr<Process> process, u32 core_id) {
if (current_cpu->GetID() == core_id) { if (current_cpu->GetID() == core_id) {
current_process = process; current_process = process;
SetCurrentMemoryPageTable(process->vm_manager.page_table); SetCurrentMemoryPageTable(process->vm_manager.page_table);
@ -156,12 +149,12 @@ const IPCDebugger::Recorder& KernelSystem::GetIPCRecorder() const {
return *ipc_recorder; return *ipc_recorder;
} }
u32 KernelSystem::NewThreadId() { void KernelSystem::AddNamedPort(std::string name, std::shared_ptr<ClientPort> port) {
return next_thread_id++; named_ports.emplace(std::move(name), std::move(port));
} }
u32 KernelSystem::NewProcessId() { u32 KernelSystem::NewThreadId() {
return ++next_process_id; return next_thread_id++;
} }
void KernelSystem::ResetThreadIDs() { void KernelSystem::ResetThreadIDs() {
@ -171,6 +164,7 @@ void KernelSystem::ResetThreadIDs() {
template <class Archive> template <class Archive>
void KernelSystem::serialize(Archive& ar, const unsigned int file_version) { void KernelSystem::serialize(Archive& ar, const unsigned int file_version) {
ar& memory_regions; ar& memory_regions;
ar& named_ports;
// current_cpu set externally // current_cpu set externally
// NB: subsystem references and prepare_reschedule_callback are constant // NB: subsystem references and prepare_reschedule_callback are constant
ar&* resource_limits.get(); ar&* resource_limits.get();
@ -201,71 +195,6 @@ void KernelSystem::serialize(Archive& ar, const unsigned int file_version) {
} }
} }
void KernelSystem::RegisterKernelObject(KAutoObject* object) {
registered_objects.insert(object);
}
void KernelSystem::UnregisterKernelObject(KAutoObject* object) {
registered_objects.erase(object);
}
struct KernelSystem::SlabHeapContainer {
KSlabHeap<KEvent> event;
KSlabHeap<KPort> port;
KSlabHeap<Process> process;
KSlabHeap<KResourceLimit> resource_limit;
KSlabHeap<KSession> session;
KSlabHeap<KSharedMemory> shared_memory;
KSlabHeap<Thread> thread;
KSlabHeap<KObjectName> object_name;
KSlabHeap<KAddressArbiter> address_arbiter;
KSlabHeap<KSemaphore> semaphore;
KSlabHeap<KMutex> mutex;
};
template <typename T>
KSlabHeap<T>& KernelSystem::SlabHeap() {
if constexpr (std::is_same_v<T, KEvent>) {
return slab_heap_container->event;
} else if constexpr (std::is_same_v<T, KPort>) {
return slab_heap_container->port;
} else if constexpr (std::is_same_v<T, Process>) {
return slab_heap_container->process;
} else if constexpr (std::is_same_v<T, KResourceLimit>) {
return slab_heap_container->resource_limit;
} else if constexpr (std::is_same_v<T, KSession>) {
return slab_heap_container->session;
} else if constexpr (std::is_same_v<T, KSharedMemory>) {
return slab_heap_container->shared_memory;
} else if constexpr (std::is_same_v<T, Thread>) {
return slab_heap_container->thread;
} else if constexpr (std::is_same_v<T, KAddressArbiter>) {
return slab_heap_container->address_arbiter;
} else if constexpr (std::is_same_v<T, KSemaphore>) {
return slab_heap_container->semaphore;
} else if constexpr (std::is_same_v<T, KMutex>) {
return slab_heap_container->mutex;
} else if constexpr (std::is_same_v<T, KObjectName>) {
return slab_heap_container->object_name;
}
}
KObjectNameGlobalData& KernelSystem::ObjectNameGlobalData() {
return *object_name_global_data;
}
template KSlabHeap<KEvent>& KernelSystem::SlabHeap();
template KSlabHeap<KPort>& KernelSystem::SlabHeap();
template KSlabHeap<Process>& KernelSystem::SlabHeap();
template KSlabHeap<KResourceLimit>& KernelSystem::SlabHeap();
template KSlabHeap<KSession>& KernelSystem::SlabHeap();
template KSlabHeap<KSharedMemory>& KernelSystem::SlabHeap();
template KSlabHeap<Thread>& KernelSystem::SlabHeap();
template KSlabHeap<KObjectName>& KernelSystem::SlabHeap();
template KSlabHeap<KAddressArbiter>& KernelSystem::SlabHeap();
template KSlabHeap<KSemaphore>& KernelSystem::SlabHeap();
template KSlabHeap<KMutex>& KernelSystem::SlabHeap();
SERIALIZE_IMPL(KernelSystem) SERIALIZE_IMPL(KernelSystem)
} // namespace Kernel } // namespace Kernel
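KernelSystem::SlabHeap<T> maps each kernel object type to its heap member with a chain of if constexpr branches resolved at compile time, then explicitly instantiates the template for every supported type. A reduced standalone sketch of that dispatch pattern, with hypothetical Foo/Bar types in place of the kernel objects:

#include <type_traits>
#include <vector>

struct Foo {};
struct Bar {};

struct Container {
    std::vector<Foo> foos;
    std::vector<Bar> bars;

    // Compile-time dispatch from the requested type to the matching member,
    // in the same style as KernelSystem::SlabHeap<T>.
    template <typename T>
    std::vector<T>& Pool() {
        if constexpr (std::is_same_v<T, Foo>) {
            return foos;
        } else if constexpr (std::is_same_v<T, Bar>) {
            return bars;
        }
    }
};

// Explicit instantiations keep the definition out of the header, as done above.
template std::vector<Foo>& Container::Pool();
template std::vector<Bar>& Container::Pool();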

View File

@ -9,11 +9,13 @@
#include <functional> #include <functional>
#include <memory> #include <memory>
#include <mutex> #include <mutex>
#include <unordered_set> #include <span>
#include <string>
#include <unordered_map>
#include <vector> #include <vector>
#include "common/bit_field.h"
#include "common/common_types.h" #include "common/common_types.h"
#include "core/hle/kernel/memory.h" #include "core/hle/kernel/memory.h"
#include "core/hle/result.h"
#include "core/memory.h" #include "core/memory.h"
namespace ConfigMem { namespace ConfigMem {
@ -39,20 +41,26 @@ class Recorder;
namespace Kernel { namespace Kernel {
class AddressArbiter;
class Event;
class Mutex;
class CodeSet; class CodeSet;
class Process; class Process;
class Thread; class Thread;
class Semaphore;
class Timer;
class ClientPort;
class ServerPort;
class ClientSession;
class ServerSession;
class ResourceLimitList; class ResourceLimitList;
class SharedMemory; class SharedMemory;
class ThreadManager; class ThreadManager;
class TimerManager; class TimerManager;
class VMManager; class VMManager;
class KAutoObject;
struct AddressMapping; struct AddressMapping;
class KObjectName;
class KObjectNameGlobalData;
enum class ResetType : u32 { enum class ResetType {
OneShot, OneShot,
Sticky, Sticky,
Pulse, Pulse,
@ -124,10 +132,6 @@ private:
friend class boost::serialization::access; friend class boost::serialization::access;
}; };
template <typename T>
class KSlabHeap;
class KAutoObjectWithListContainer;
class KernelSystem { class KernelSystem {
public: public:
explicit KernelSystem(Memory::MemorySystem& memory, Core::Timing& timing, explicit KernelSystem(Memory::MemorySystem& memory, Core::Timing& timing,
@ -136,45 +140,140 @@ public:
u64 override_init_time = 0); u64 override_init_time = 0);
~KernelSystem(); ~KernelSystem();
using PortPair = std::pair<std::shared_ptr<ServerPort>, std::shared_ptr<ClientPort>>;
using SessionPair = std::pair<std::shared_ptr<ServerSession>, std::shared_ptr<ClientSession>>;
/**
* Creates an address arbiter.
*
* @param name Optional name used for debugging.
* @returns The created AddressArbiter.
*/
std::shared_ptr<AddressArbiter> CreateAddressArbiter(std::string name = "Unknown");
/**
* Creates an event
* @param reset_type ResetType describing how to create event
* @param name Optional name of event
*/
std::shared_ptr<Event> CreateEvent(ResetType reset_type, std::string name = "Unknown");
/**
* Creates a mutex.
* @param initial_locked Specifies if the mutex should be locked initially
* @param name Optional name of mutex
* @return Pointer to new Mutex object
*/
std::shared_ptr<Mutex> CreateMutex(bool initial_locked, std::string name = "Unknown");
std::shared_ptr<CodeSet> CreateCodeSet(std::string name, u64 program_id);
std::shared_ptr<Process> CreateProcess(std::shared_ptr<CodeSet> code_set);
/** /**
* Terminates a process, killing its threads and removing it from the process list. * Terminates a process, killing its threads and removing it from the process list.
* @param process Process to terminate. * @param process Process to terminate.
*/ */
void TerminateProcess(Process* process); void TerminateProcess(std::shared_ptr<Process> process);
/**
* Creates and returns a new thread. The new thread is immediately scheduled
* @param name The friendly name desired for the thread
* @param entry_point The address at which the thread should start execution
* @param priority The thread's priority
* @param arg User data to pass to the thread
* @param processor_id The ID(s) of the processors on which the thread is desired to be run
* @param stack_top The address of the thread's stack top
* @param owner_process The parent process for the thread
* @return A shared pointer to the newly created thread
*/
ResultVal<std::shared_ptr<Thread>> CreateThread(std::string name, VAddr entry_point,
u32 priority, u32 arg, s32 processor_id,
VAddr stack_top,
std::shared_ptr<Process> owner_process);
/**
* Creates a semaphore.
* @param initial_count Number of slots reserved for other threads
* @param max_count Maximum number of slots the semaphore can have
* @param name Optional name of semaphore
* @return The created semaphore
*/
ResultVal<std::shared_ptr<Semaphore>> CreateSemaphore(s32 initial_count, s32 max_count,
std::string name = "Unknown");
/**
* Creates a timer
* @param reset_type ResetType describing how to create the timer
* @param name Optional name of timer
* @return The created Timer
*/
std::shared_ptr<Timer> CreateTimer(ResetType reset_type, std::string name = "Unknown");
/**
* Creates a pair of ServerPort and an associated ClientPort.
*
* @param max_sessions Maximum number of sessions to the port
* @param name Optional name of the ports
* @return The created port tuple
*/
PortPair CreatePortPair(u32 max_sessions, std::string name = "UnknownPort");
/**
* Creates a pair of ServerSession and an associated ClientSession.
* @param name Optional name of the ports.
* @param client_port Optional The ClientPort that spawned this session.
* @return The created session tuple
*/
SessionPair CreateSessionPair(const std::string& name = "Unknown",
std::shared_ptr<ClientPort> client_port = nullptr);
ResourceLimitList& ResourceLimit(); ResourceLimitList& ResourceLimit();
const ResourceLimitList& ResourceLimit() const; const ResourceLimitList& ResourceLimit() const;
/**
* Creates a shared memory object.
* @param owner_process Process that created this shared memory object.
* @param size Size of the memory block. Must be page-aligned.
* @param permissions Permission restrictions applied to the process which created the block.
* @param other_permissions Permission restrictions applied to other processes mapping the
* block.
* @param address The address from which to map the Shared Memory.
* @param region If the address is 0, the shared memory will be allocated in this region of the
* linear heap.
* @param name Optional object name, used for debugging purposes.
*/
ResultVal<std::shared_ptr<SharedMemory>> CreateSharedMemory(
std::shared_ptr<Process> owner_process, u32 size, MemoryPermission permissions,
MemoryPermission other_permissions, VAddr address = 0,
MemoryRegion region = MemoryRegion::BASE, std::string name = "Unknown");
/**
* Creates a shared memory object from a block of memory managed by an HLE applet.
* @param offset The offset into the heap block that the SharedMemory will map.
* @param size Size of the memory block. Must be page-aligned.
* @param permissions Permission restrictions applied to the process which created the block.
* @param other_permissions Permission restrictions applied to other processes mapping the
* block.
* @param name Optional object name, used for debugging purposes.
*/
std::shared_ptr<SharedMemory> CreateSharedMemoryForApplet(u32 offset, u32 size,
MemoryPermission permissions,
MemoryPermission other_permissions,
std::string name = "Unknown Applet");
u32 GenerateObjectID(); u32 GenerateObjectID();
/// Gets the slab heap for the specified kernel object type.
template <typename T>
KSlabHeap<T>& SlabHeap();
template <typename T>
KAutoObjectWithListContainer& ObjectListContainer();
/// Gets global data for KObjectName.
KObjectNameGlobalData& ObjectNameGlobalData();
/// Registers all kernel objects with the global emulation state, this is purely for tracking
/// leaks after emulation has been shutdown.
void RegisterKernelObject(KAutoObject* object);
/// Unregisters a kernel object previously registered with RegisterKernelObject when it was
/// destroyed during the current emulation session.
void UnregisterKernelObject(KAutoObject* object);
/// Retrieves a process from the current list of processes. /// Retrieves a process from the current list of processes.
Process* GetProcessById(u32 process_id) const; std::shared_ptr<Process> GetProcessById(u32 process_id) const;
const std::vector<Process*>& GetProcessList() const { std::span<const std::shared_ptr<Process>> GetProcessList() const {
return process_list; return process_list;
} }
Process* GetCurrentProcess() const; std::shared_ptr<Process> GetCurrentProcess() const;
void SetCurrentProcess(Process* process); void SetCurrentProcess(std::shared_ptr<Process> process);
void SetCurrentProcessForCPU(Process* process, u32 core_id); void SetCurrentProcessForCPU(std::shared_ptr<Process> process, u32 core_id);
void SetCurrentMemoryPageTable(std::shared_ptr<Memory::PageTable> page_table); void SetCurrentMemoryPageTable(std::shared_ptr<Memory::PageTable> page_table);
@ -207,12 +306,14 @@ public:
std::array<std::shared_ptr<MemoryRegionInfo>, 3> memory_regions{}; std::array<std::shared_ptr<MemoryRegionInfo>, 3> memory_regions{};
/// Adds a port to the named port table
void AddNamedPort(std::string name, std::shared_ptr<ClientPort> port);
void PrepareReschedule() { void PrepareReschedule() {
prepare_reschedule_callback(); prepare_reschedule_callback();
} }
u32 NewThreadId(); u32 NewThreadId();
u32 NewProcessId();
void ResetThreadIDs(); void ResetThreadIDs();
@ -228,15 +329,15 @@ public:
return hle_lock; return hle_lock;
} }
/// Map of named ports managed by the kernel, which can be retrieved using the ConnectToPort
std::unordered_map<std::string, std::shared_ptr<ClientPort>> named_ports;
Core::ARM_Interface* current_cpu = nullptr; Core::ARM_Interface* current_cpu = nullptr;
Memory::MemorySystem& memory; Memory::MemorySystem& memory;
Core::Timing& timing; Core::Timing& timing;
// Lists all processes that exist in the current session.
std::vector<Process*> process_list;
private: private:
void MemoryInit(MemoryMode memory_mode, New3dsMemoryMode n3ds_mode, u64 override_init_time); void MemoryInit(MemoryMode memory_mode, New3dsMemoryMode n3ds_mode, u64 override_init_time);
@ -258,8 +359,11 @@ private:
// reserved for low-level services // reserved for low-level services
u32 next_process_id = 10; u32 next_process_id = 10;
Process* current_process; // Lists all processes that exist in the current session.
std::vector<Process*> stored_processes; std::vector<std::shared_ptr<Process>> process_list;
std::shared_ptr<Process> current_process;
std::vector<std::shared_ptr<Process>> stored_processes;
std::vector<std::unique_ptr<ThreadManager>> thread_managers; std::vector<std::unique_ptr<ThreadManager>> thread_managers;
@ -273,14 +377,6 @@ private:
MemoryMode memory_mode; MemoryMode memory_mode;
New3dsHwCapabilities n3ds_hw_caps; New3dsHwCapabilities n3ds_hw_caps;
/// Helper to encapsulate all slab heaps in a single heap allocated container
struct SlabHeapContainer;
std::unique_ptr<SlabHeapContainer> slab_heap_container;
std::unique_ptr<KObjectNameGlobalData> object_name_global_data;
std::unordered_set<KAutoObject*> registered_objects;
/* /*
* Synchronizes access to the internal HLE kernel structures, it is acquired when a guest * Synchronizes access to the internal HLE kernel structures, it is acquired when a guest
* application thread performs a syscall. It should be acquired by any host threads that read or * application thread performs a syscall. It should be acquired by any host threads that read or

View File

@ -0,0 +1,130 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/archives.h"
#include "common/assert.h"
#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::Mutex)
namespace Kernel {
void ReleaseThreadMutexes(Thread* thread) {
for (auto& mtx : thread->held_mutexes) {
mtx->lock_count = 0;
mtx->holding_thread = nullptr;
mtx->WakeupAllWaitingThreads();
}
thread->held_mutexes.clear();
}
Mutex::Mutex(KernelSystem& kernel) : WaitObject(kernel), kernel(kernel) {}
Mutex::~Mutex() {
if (resource_limit) {
resource_limit->Release(ResourceLimitType::Mutex, 1);
}
}
std::shared_ptr<Mutex> KernelSystem::CreateMutex(bool initial_locked, std::string name) {
auto mutex = std::make_shared<Mutex>(*this);
mutex->lock_count = 0;
mutex->name = std::move(name);
mutex->holding_thread = nullptr;
// Acquire mutex with current thread if initialized as locked
if (initial_locked) {
mutex->Acquire(GetCurrentThreadManager().GetCurrentThread());
}
return mutex;
}
bool Mutex::ShouldWait(const Thread* thread) const {
return lock_count > 0 && thread != holding_thread.get();
}
void Mutex::Acquire(Thread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
// Actually "acquire" the mutex only if we don't already have it
if (lock_count == 0) {
priority = thread->current_priority;
thread->held_mutexes.insert(SharedFrom(this));
holding_thread = SharedFrom(thread);
thread->UpdatePriority();
kernel.PrepareReschedule();
}
lock_count++;
}
ResultCode Mutex::Release(Thread* thread) {
// We can only release the mutex if it's held by the calling thread.
if (thread != holding_thread.get()) {
if (holding_thread) {
LOG_ERROR(
Kernel,
"Tried to release a mutex (owned by thread id {}) from a different thread id {}",
holding_thread->thread_id, thread->thread_id);
}
return ResultCode(ErrCodes::WrongLockingThread, ErrorModule::Kernel,
ErrorSummary::InvalidArgument, ErrorLevel::Permanent);
}
// Note: It should not be possible for the situation where the mutex has a holding thread with a
// zero lock count to occur. The real kernel still checks for this, so we do too.
if (lock_count <= 0)
return ResultCode(ErrorDescription::InvalidResultValue, ErrorModule::Kernel,
ErrorSummary::InvalidState, ErrorLevel::Permanent);
lock_count--;
// Yield to the next thread only if we've fully released the mutex
if (lock_count == 0) {
holding_thread->held_mutexes.erase(SharedFrom(this));
holding_thread->UpdatePriority();
holding_thread = nullptr;
WakeupAllWaitingThreads();
kernel.PrepareReschedule();
}
return RESULT_SUCCESS;
}
void Mutex::AddWaitingThread(std::shared_ptr<Thread> thread) {
WaitObject::AddWaitingThread(thread);
thread->pending_mutexes.insert(SharedFrom(this));
UpdatePriority();
}
void Mutex::RemoveWaitingThread(Thread* thread) {
WaitObject::RemoveWaitingThread(thread);
thread->pending_mutexes.erase(SharedFrom(this));
UpdatePriority();
}
void Mutex::UpdatePriority() {
if (!holding_thread)
return;
u32 best_priority = ThreadPrioLowest;
for (auto& waiter : GetWaitingThreads()) {
if (waiter->current_priority < best_priority)
best_priority = waiter->current_priority;
}
if (best_priority != priority) {
priority = best_priority;
holding_thread->UpdatePriority();
}
}
} // namespace Kernel
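Mutex::UpdatePriority implements priority inheritance: the mutex takes on the best (numerically lowest) priority among its waiters, and the holding thread is then re-evaluated so it cannot be preempted by a thread it is blocking. A standalone sketch of that boost computation; the structs are hypothetical, and in the real code the holder side is handled by Thread::UpdatePriority walking held_mutexes.

#include <algorithm>
#include <cstdint>
#include <vector>

constexpr std::uint32_t kThreadPrioLowest = 63; // Lower value = higher priority.

struct WaiterInfo {
    std::uint32_t current_priority;
};

// Best priority among the waiters, as computed in Mutex::UpdatePriority.
std::uint32_t InheritedPriority(const std::vector<WaiterInfo>& waiters) {
    std::uint32_t best = kThreadPrioLowest;
    for (const auto& waiter : waiters) {
        best = std::min(best, waiter.current_priority);
    }
    return best;
}

// The holder runs at the better of its own priority and what the mutex inherited.
std::uint32_t EffectiveHolderPriority(std::uint32_t holder_base,
                                      std::uint32_t mutex_priority) {
    return std::min(holder_base, mutex_priority);
}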

View File

@ -0,0 +1,89 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/export.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/wait_object.h"
#include "core/hle/result.h"
namespace Kernel {
class Thread;
class Mutex final : public WaitObject {
public:
explicit Mutex(KernelSystem& kernel);
~Mutex() override;
std::string GetTypeName() const override {
return "Mutex";
}
std::string GetName() const override {
return name;
}
static constexpr HandleType HANDLE_TYPE = HandleType::Mutex;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
std::shared_ptr<ResourceLimit> resource_limit;
int lock_count; ///< Number of times the mutex has been acquired
u32 priority; ///< The priority of the mutex, used for priority inheritance.
std::string name; ///< Name of mutex (optional)
std::shared_ptr<Thread> holding_thread; ///< Thread that has acquired the mutex
/**
* Elevate the mutex priority to the best priority
* among the priorities of all its waiting threads.
*/
void UpdatePriority();
bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
void AddWaitingThread(std::shared_ptr<Thread> thread) override;
void RemoveWaitingThread(Thread* thread) override;
/**
* Attempts to release the mutex from the specified thread.
* @param thread Thread that wants to release the mutex.
* @returns The result code of the operation.
*/
ResultCode Release(Thread* thread);
private:
KernelSystem& kernel;
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int file_version) {
ar& boost::serialization::base_object<WaitObject>(*this);
ar& lock_count;
ar& priority;
ar& name;
ar& holding_thread;
ar& resource_limit;
}
};
/**
* Releases all the mutexes held by the specified thread
* @param thread Thread that is holding the mutexes
*/
void ReleaseThreadMutexes(Thread* thread);
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::Mutex)
CONSTRUCT_KERNEL_OBJECT(Kernel::Mutex)

View File

@ -9,14 +9,14 @@
#include <boost/serialization/shared_ptr.hpp> #include <boost/serialization/shared_ptr.hpp>
#include "common/archives.h" #include "common/archives.h"
#include "common/assert.h" #include "common/assert.h"
#include "common/common_funcs.h"
#include "common/logging/log.h" #include "common/logging/log.h"
#include "common/scope_exit.h" #include "common/serialization/boost_vector.hpp"
#include "core/core.h" #include "core/core.h"
#include "core/hle/kernel/errors.h" #include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/memory.h" #include "core/hle/kernel/memory.h"
#include "core/hle/kernel/process.h" #include "core/hle/kernel/process.h"
#include "core/hle/kernel/k_resource_limit.h" #include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/thread.h" #include "core/hle/kernel/thread.h"
#include "core/hle/kernel/vm_manager.h" #include "core/hle/kernel/vm_manager.h"
#include "core/hle/service/plgldr/plgldr.h" #include "core/hle/service/plgldr/plgldr.h"
@ -53,7 +53,33 @@ void Process::serialize(Archive& ar, const unsigned int file_version) {
SERIALIZE_IMPL(Process) SERIALIZE_IMPL(Process)
void KernelSystem::TerminateProcess(Process* process) { std::shared_ptr<CodeSet> KernelSystem::CreateCodeSet(std::string name, u64 program_id) {
auto codeset{std::make_shared<CodeSet>(*this)};
codeset->name = std::move(name);
codeset->program_id = program_id;
return codeset;
}
CodeSet::CodeSet(KernelSystem& kernel) : Object(kernel) {}
CodeSet::~CodeSet() {}
std::shared_ptr<Process> KernelSystem::CreateProcess(std::shared_ptr<CodeSet> code_set) {
auto process{std::make_shared<Process>(*this)};
process->codeset = std::move(code_set);
process->flags.raw = 0;
process->flags.memory_region.Assign(MemoryRegion::APPLICATION);
process->status = ProcessStatus::Created;
process->process_id = ++next_process_id;
process->creation_time_ticks = timing.GetTicks();
process_list.push_back(process);
return process;
}
void KernelSystem::TerminateProcess(std::shared_ptr<Process> process) {
LOG_INFO(Kernel_SVC, "Process {} exiting", process->process_id); LOG_INFO(Kernel_SVC, "Process {} exiting", process->process_id);
ASSERT_MSG(process->status == ProcessStatus::Running, "Process has already exited"); ASSERT_MSG(process->status == ProcessStatus::Running, "Process has already exited");
@ -138,8 +164,6 @@ void Process::ParseKernelCaps(const u32* kernel_caps, std::size_t len) {
LOG_ERROR(Loader, "Unhandled kernel caps descriptor: 0x{:08X}", descriptor); LOG_ERROR(Loader, "Unhandled kernel caps descriptor: 0x{:08X}", descriptor);
} }
} }
handle_table.Initialize(handle_table_size);
} }
void Process::Set3dsxKernelCaps() { void Process::Set3dsxKernelCaps() {
@ -161,17 +185,22 @@ void Process::Set3dsxKernelCaps() {
void Process::Run(s32 main_thread_priority, u32 stack_size) { void Process::Run(s32 main_thread_priority, u32 stack_size) {
memory_region = kernel.GetMemoryRegion(flags.memory_region); memory_region = kernel.GetMemoryRegion(flags.memory_region);
// Ensure we can reserve a thread. Real kernel returns 0xC860180C if this fails.
if (!resource_limit->Reserve(ResourceLimitType::Thread, 1)) {
return;
}
auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions, auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions,
MemoryState memory_state) { MemoryState memory_state) {
HeapAllocate(segment.addr, segment.size, permissions, memory_state, true); HeapAllocate(segment.addr, segment.size, permissions, memory_state, true);
kernel.memory.WriteBlock(*this, segment.addr, codeset.memory.data() + segment.offset, kernel.memory.WriteBlock(*this, segment.addr, codeset->memory.data() + segment.offset,
segment.size); segment.size);
}; };
// Map CodeSet segments // Map CodeSet segments
MapSegment(codeset.CodeSegment(), VMAPermission::ReadExecute, MemoryState::Code); MapSegment(codeset->CodeSegment(), VMAPermission::ReadExecute, MemoryState::Code);
MapSegment(codeset.RODataSegment(), VMAPermission::Read, MemoryState::Code); MapSegment(codeset->RODataSegment(), VMAPermission::Read, MemoryState::Code);
MapSegment(codeset.DataSegment(), VMAPermission::ReadWrite, MemoryState::Private); MapSegment(codeset->DataSegment(), VMAPermission::ReadWrite, MemoryState::Private);
// Allocate and map stack // Allocate and map stack
HeapAllocate(Memory::HEAP_VADDR_END - stack_size, stack_size, VMAPermission::ReadWrite, HeapAllocate(Memory::HEAP_VADDR_END - stack_size, stack_size, VMAPermission::ReadWrite,
@ -189,24 +218,9 @@ void Process::Run(s32 main_thread_priority, u32 stack_size) {
} }
status = ProcessStatus::Running; status = ProcessStatus::Running;
vm_manager.LogLayout(Common::Log::Level::Debug); vm_manager.LogLayout(Common::Log::Level::Debug);
Kernel::SetupMainThread(kernel, codeset->entrypoint, main_thread_priority, SharedFrom(this));
// Place a tentative reservation of a thread for this process.
KScopedResourceReservation thread_reservation(this, ResourceLimitType::Thread);
ASSERT(thread_reservation.Succeeded());
// Create a new thread for the process.
Thread* main_thread = Thread::Create(m_kernel);
ASSERT(main_thread != nullptr);
SCOPE_EXIT({ main_thread->Close(); });
// Initialize the thread.
main_thread->Initialize("", codeset.entrypoint, main_thread_priority, 0, ideal_processor,
Memory::HEAP_VADDR_END, this);
// Register the thread, and commit our reservation.
Thread::Register(m_kernel, main_thread);
thread_reservation.Commit();
} }
void Process::Exit() { void Process::Exit() {
@ -381,7 +395,7 @@ ResultCode Process::LinearFree(VAddr target, u32 size) {
return RESULT_SUCCESS; return RESULT_SUCCESS;
} }
ResultCode Process::AllocateThreadLocalStorage(VAddr* out_tls_addr) { ResultVal<VAddr> Process::AllocateThreadLocalStorage() {
std::size_t tls_page; std::size_t tls_page;
std::size_t tls_slot; std::size_t tls_slot;
bool needs_allocation = true; bool needs_allocation = true;
@ -448,8 +462,7 @@ ResultCode Process::AllocateThreadLocalStorage(VAddr* out_tls_addr) {
static_cast<VAddr>(tls_slot) * Memory::TLS_ENTRY_SIZE; static_cast<VAddr>(tls_slot) * Memory::TLS_ENTRY_SIZE;
kernel.memory.ZeroBlock(*this, tls_address, Memory::TLS_ENTRY_SIZE); kernel.memory.ZeroBlock(*this, tls_address, Memory::TLS_ENTRY_SIZE);
*out_tls_addr = tls_address; return tls_address;
R_SUCCEED();
} }
ResultCode Process::Map(VAddr target, VAddr source, u32 size, VMAPermission perms, ResultCode Process::Map(VAddr target, VAddr source, u32 size, VMAPermission perms,
@ -549,11 +562,6 @@ ResultCode Process::Unmap(VAddr target, VAddr source, u32 size, VMAPermission pe
return RESULT_SUCCESS; return RESULT_SUCCESS;
} }
void Process::ReleaseResource(ResourceLimitType type, s32 amount) {
ASSERT(resource_limit);
resource_limit->Release(type, amount);
}
void Process::FreeAllMemory() { void Process::FreeAllMemory() {
if (memory_region == nullptr || resource_limit == nullptr) { if (memory_region == nullptr || resource_limit == nullptr) {
return; return;
@ -591,35 +599,30 @@ void Process::FreeAllMemory() {
} }
Kernel::Process::Process(KernelSystem& kernel) Kernel::Process::Process(KernelSystem& kernel)
: KAutoObjectWithSlabHeapAndContainer(kernel), handle_table(kernel), : Object(kernel), handle_table(kernel), vm_manager(kernel.memory, *this), kernel(kernel) {
vm_manager(kernel.memory, *this), kernel(kernel) {
kernel.memory.RegisterPageTable(vm_manager.page_table); kernel.memory.RegisterPageTable(vm_manager.page_table);
} }
Kernel::Process::~Process() {
LOG_INFO(Kernel, "Cleaning up process {}", process_id);
Kernel::Process::~Process() = default; // Release all objects this process owns first so that their potential destructor can do clean
// up with this process before further destruction.
// TODO(wwylele): explicitly destroy or invalidate objects this process owns (threads, shared
// memory etc.) even if they are still referenced by other processes.
handle_table.Clear();
void Process::Initialize(CodeSet&& code_set) {
codeset = std::move(code_set);
flags.memory_region.Assign(MemoryRegion::APPLICATION);
status = ProcessStatus::Created;
process_id = m_kernel.NewProcessId();
creation_time_ticks = m_kernel.timing.GetTicks();
m_kernel.process_list.push_back(this);
}
void Process::Finalize() {
handle_table.Finalize();
FreeAllMemory(); FreeAllMemory();
kernel.memory.UnregisterPageTable(vm_manager.page_table); kernel.memory.UnregisterPageTable(vm_manager.page_table);
} }
Process* KernelSystem::GetProcessById(u32 process_id) const { std::shared_ptr<Process> KernelSystem::GetProcessById(u32 process_id) const {
auto it = std::ranges::find_if( auto itr = std::find_if(
process_list, [&](const auto process) { return process->process_id == process_id; }); process_list.begin(), process_list.end(),
[&](const std::shared_ptr<Process>& process) { return process->process_id == process_id; });
if (it == process_list.end()) { if (itr == process_list.end())
return nullptr; return nullptr;
}
return *it; return *itr;
} }
} // namespace Kernel } // namespace Kernel
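Process::Run on the left-hand side reserves the main thread through a KScopedResourceReservation before creating it and only commits the reservation once the thread is registered, so an early failure releases the limit automatically. Below is a minimal sketch of that RAII reserve/commit/rollback idea under assumed semantics; the real class takes the process and a ResourceLimitType, and LimitCounter here is a hypothetical stand-in for the resource-limit bookkeeping.

#include <cstdint>

// Hypothetical stand-in for the per-process resource limit bookkeeping.
struct LimitCounter {
    std::int32_t used = 0;
    std::int32_t max = 1;

    bool Reserve(std::int32_t n) {
        if (used + n > max) {
            return false;
        }
        used += n;
        return true;
    }
    void Release(std::int32_t n) {
        used -= n;
    }
};

// Sketch of the KScopedResourceReservation pattern: reserve up front, roll
// back in the destructor unless Commit() was called.
class ScopedReservation {
public:
    ScopedReservation(LimitCounter& limit, std::int32_t amount)
        : limit_(limit), amount_(amount), succeeded_(limit.Reserve(amount)) {}

    ~ScopedReservation() {
        if (succeeded_ && !committed_) {
            limit_.Release(amount_); // Automatic rollback on the error path.
        }
    }

    bool Succeeded() const {
        return succeeded_;
    }
    void Commit() {
        committed_ = true;
    }

private:
    LimitCounter& limit_;
    std::int32_t amount_;
    bool succeeded_;
    bool committed_ = false;
};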

View File

@ -4,9 +4,11 @@
#pragma once #pragma once
#include <array>
#include <bitset> #include <bitset>
#include <cstddef> #include <cstddef>
#include <memory> #include <memory>
#include <string>
#include <vector> #include <vector>
#include <boost/container/static_vector.hpp> #include <boost/container/static_vector.hpp>
#include <boost/serialization/array.hpp> #include <boost/serialization/array.hpp>
@ -15,9 +17,8 @@
#include <boost/serialization/vector.hpp> #include <boost/serialization/vector.hpp>
#include "common/bit_field.h" #include "common/bit_field.h"
#include "common/common_types.h" #include "common/common_types.h"
#include "core/hle/kernel/k_code_set.h" #include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_handle_table.h" #include "core/hle/kernel/object.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/vm_manager.h" #include "core/hle/kernel/vm_manager.h"
 namespace Kernel {
@@ -58,37 +59,121 @@ union ProcessFlags {
     BitField<12, 1, u16> loaded_high; ///< Application loaded high (not at 0x00100000).
 };
 
-enum class ProcessStatus {
-    Created,
-    Running,
-    Exited,
-};
+enum class ProcessStatus { Created, Running, Exited };
 
-class KResourceLimit;
-enum class ResourceLimitType : u32;
+class ResourceLimit;
 struct MemoryRegionInfo;
 
-class Process final : public KAutoObjectWithSlabHeapAndContainer<Process> {
-    KERNEL_AUTOOBJECT_TRAITS(Process, KAutoObject);
+class CodeSet final : public Object {
+public:
+    explicit CodeSet(KernelSystem& kernel);
+    ~CodeSet() override;
+
+    struct Segment {
+        std::size_t offset = 0;
+        VAddr addr = 0;
+        u32 size = 0;
+
+    private:
+        friend class boost::serialization::access;
+        template <class Archive>
+        void serialize(Archive& ar, const unsigned int file_version) {
+            ar& offset;
+            ar& addr;
+            ar& size;
+        }
+    };
+
+    std::string GetTypeName() const override {
+        return "CodeSet";
+    }
+    std::string GetName() const override {
+        return name;
+    }
+
+    static constexpr HandleType HANDLE_TYPE = HandleType::CodeSet;
+    HandleType GetHandleType() const override {
+        return HANDLE_TYPE;
+    }
+
+    Segment& CodeSegment() {
+        return segments[0];
+    }
+    const Segment& CodeSegment() const {
+        return segments[0];
+    }
+    Segment& RODataSegment() {
+        return segments[1];
+    }
+    const Segment& RODataSegment() const {
+        return segments[1];
+    }
+    Segment& DataSegment() {
+        return segments[2];
+    }
+    const Segment& DataSegment() const {
+        return segments[2];
+    }
+
+    std::vector<u8> memory;
+    std::array<Segment, 3> segments;
+    VAddr entrypoint;
+
+    /// Name of the process
+    std::string name;
+    /// Title ID corresponding to the process
+    u64 program_id;
+
+private:
+    friend class boost::serialization::access;
+    template <class Archive>
+    void serialize(Archive& ar, const unsigned int file_version) {
+        ar& boost::serialization::base_object<Object>(*this);
+        ar& memory;
+        ar& segments;
+        ar& entrypoint;
+        ar& name;
+        ar& program_id;
+    }
+};
 
+class Process final : public Object {
 public:
     explicit Process(Kernel::KernelSystem& kernel);
     ~Process() override;
 
-    KHandleTable handle_table;
-    CodeSet codeset;
+    std::string GetTypeName() const override {
+        return "Process";
+    }
+    std::string GetName() const override {
+        return codeset->name;
+    }
+
+    static constexpr HandleType HANDLE_TYPE = HandleType::Process;
+    HandleType GetHandleType() const override {
+        return HANDLE_TYPE;
+    }
+
+    HandleTable handle_table;
+    std::shared_ptr<CodeSet> codeset;
 
     /// Resource limit descriptor for this process
-    KResourceLimit* resource_limit;
+    std::shared_ptr<ResourceLimit> resource_limit;
 
     /// The process may only call SVCs which have the corresponding bit set.
     std::bitset<0x80> svc_access_mask;
     /// Maximum size of the handle table for the process.
-    u32 handle_table_size = 0x200;
+    unsigned int handle_table_size = 0x200;
     /// Special memory ranges mapped into this processes address space. This is used to give
     /// processes access to specific I/O regions and device memory.
     boost::container::static_vector<AddressMapping, 8> address_mappings;
-    ProcessFlags flags{};
+    ProcessFlags flags;
     bool no_thread_restrictions = false;
     /// Kernel compatibility version for this process
     u16 kernel_version = 0;
@@ -103,12 +188,6 @@ public:
     // Creation time in ticks of the process.
     u64 creation_time_ticks;
 
-    void Initialize(CodeSet&& code_set);
-
-    static void PostDestroy(uintptr_t arg) {}
-
-    void Finalize() override;
-
     /**
      * Parses a list of kernel capability descriptors (as found in the ExHeader) and applies them
      * to this process.
@@ -130,6 +209,9 @@ public:
      */
     void Exit();
 
+    ///////////////////////////////////////////////////////////////////////////////////////////////
+    // Memory Management
+
     VMManager vm_manager;
 
     u32 memory_used = 0;
@@ -157,15 +239,13 @@ public:
     ResultVal<VAddr> LinearAllocate(VAddr target, u32 size, VMAPermission perms);
     ResultCode LinearFree(VAddr target, u32 size);
 
-    ResultCode AllocateThreadLocalStorage(VAddr* out_tls);
+    ResultVal<VAddr> AllocateThreadLocalStorage();
 
     ResultCode Map(VAddr target, VAddr source, u32 size, VMAPermission perms,
                    bool privileged = false);
     ResultCode Unmap(VAddr target, VAddr source, u32 size, VMAPermission perms,
                      bool privileged = false);
 
-    void ReleaseResource(ResourceLimitType type, s32 amount);
-
 private:
     void FreeAllMemory();
@@ -178,5 +258,7 @@ private:
 
 } // namespace Kernel
 
+BOOST_CLASS_EXPORT_KEY(Kernel::CodeSet)
 BOOST_CLASS_EXPORT_KEY(Kernel::Process)
+CONSTRUCT_KERNEL_OBJECT(Kernel::CodeSet)
 CONSTRUCT_KERNEL_OBJECT(Kernel::Process)
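
Everything in this diff leans on Boost.Serialization's intrusive pattern: a private serialize member, a friend declaration for boost::serialization::access, and a BOOST_CLASS_EXPORT_KEY for polymorphic export. A rough standalone sketch of that pattern follows; the Segment type below only mirrors the shape of CodeSet::Segment and is not the emulator's code.

// Minimal sketch of the intrusive Boost.Serialization pattern used above.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <sstream>
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/serialization/access.hpp>

struct Segment {
    std::size_t offset = 0;
    std::uint32_t addr = 0;
    std::uint32_t size = 0;

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const unsigned int /*file_version*/) {
        ar& offset;
        ar& addr;
        ar& size;
    }
};

int main() {
    const Segment in{0x100, 0x00100000, 0x2000};
    std::stringstream ss;
    {
        boost::archive::text_oarchive oa{ss};
        oa << in; // writes the three fields through Segment::serialize
    }
    Segment out;
    {
        boost::archive::text_iarchive ia{ss};
        ia >> out; // reads them back through the same member function
    }
    std::cout << std::hex << out.addr << ' ' << out.size << '\n';
    return 0;
}
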
@@ -1,37 +1,42 @@
-// Copyright 2023 Citra Emulator Project
+// Copyright 2015 Citra Emulator Project
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
 #include "common/archives.h"
 #include "common/assert.h"
 #include "common/settings.h"
-#include "core/hle/kernel/k_resource_limit.h"
+#include "core/hle/kernel/resource_limit.h"
 
-SERIALIZE_EXPORT_IMPL(Kernel::KResourceLimit)
+SERIALIZE_EXPORT_IMPL(Kernel::ResourceLimit)
 
 namespace Kernel {
 
-KResourceLimit::KResourceLimit(KernelSystem& kernel)
-    : KAutoObjectWithSlabHeapAndContainer(kernel) {}
+ResourceLimit::ResourceLimit(KernelSystem& kernel) : Object(kernel) {}
 
-KResourceLimit::~KResourceLimit() = default;
+ResourceLimit::~ResourceLimit() = default;
 
-s32 KResourceLimit::GetCurrentValue(ResourceLimitType type) const {
+std::shared_ptr<ResourceLimit> ResourceLimit::Create(KernelSystem& kernel, std::string name) {
+    auto resource_limit = std::make_shared<ResourceLimit>(kernel);
+    resource_limit->m_name = std::move(name);
+    return resource_limit;
+}
+
+s32 ResourceLimit::GetCurrentValue(ResourceLimitType type) const {
     const auto index = static_cast<size_t>(type);
     return m_current_values[index];
 }
 
-s32 KResourceLimit::GetLimitValue(ResourceLimitType type) const {
+s32 ResourceLimit::GetLimitValue(ResourceLimitType type) const {
     const auto index = static_cast<size_t>(type);
     return m_limit_values[index];
 }
 
-void KResourceLimit::SetLimitValue(ResourceLimitType type, s32 value) {
+void ResourceLimit::SetLimitValue(ResourceLimitType type, s32 value) {
     const auto index = static_cast<size_t>(type);
     m_limit_values[index] = value;
 }
 
-bool KResourceLimit::Reserve(ResourceLimitType type, s32 amount) {
+bool ResourceLimit::Reserve(ResourceLimitType type, s32 amount) {
     const auto index = static_cast<size_t>(type);
     const s32 limit = m_limit_values[index];
     const s32 new_value = m_current_values[index] + amount;
@@ -44,7 +49,7 @@ bool KResourceLimit::Reserve(ResourceLimitType type, s32 amount) {
     return true;
 }
 
-bool KResourceLimit::Release(ResourceLimitType type, s32 amount) {
+bool ResourceLimit::Release(ResourceLimitType type, s32 amount) {
     const auto index = static_cast<size_t>(type);
     const s32 value = m_current_values[index];
     if (amount > value) {
@@ -62,15 +67,8 @@ ResourceLimitList::ResourceLimitList(KernelSystem& kernel) {
     const bool is_new_3ds = Settings::values.is_new_3ds.GetValue();
     const auto& appmemalloc = kernel.GetMemoryRegion(MemoryRegion::APPLICATION);
 
-    const auto CreateLimit = [&](std::string name) {
-        KResourceLimit* limit = KResourceLimit::Create(kernel);
-        limit->Initialize(name);
-        KResourceLimit::Register(kernel, limit);
-        return limit;
-    };
-
     // Create the Application resource limit
-    auto resource_limit = CreateLimit("Applications");
+    auto resource_limit = ResourceLimit::Create(kernel, "Applications");
     resource_limit->SetLimitValue(ResourceLimitType::Priority, 0x18);
     resource_limit->SetLimitValue(ResourceLimitType::Commit, appmemalloc->size);
     resource_limit->SetLimitValue(ResourceLimitType::Thread, 0x20);
@@ -84,7 +82,7 @@ ResourceLimitList::ResourceLimitList(KernelSystem& kernel) {
     resource_limits[static_cast<u8>(ResourceLimitCategory::Application)] = resource_limit;
 
     // Create the SysApplet resource limit
-    resource_limit = CreateLimit("System Applets");
+    resource_limit = ResourceLimit::Create(kernel, "System Applets");
     resource_limit->SetLimitValue(ResourceLimitType::Priority, 0x4);
     resource_limit->SetLimitValue(ResourceLimitType::Commit, is_new_3ds ? 0x5E06000 : 0x2606000);
     resource_limit->SetLimitValue(ResourceLimitType::Thread, is_new_3ds ? 0x1D : 0xE);
@@ -98,7 +96,7 @@ ResourceLimitList::ResourceLimitList(KernelSystem& kernel) {
     resource_limits[static_cast<u8>(ResourceLimitCategory::SysApplet)] = resource_limit;
 
     // Create the LibApplet resource limit
-    resource_limit = CreateLimit("Library Applets");
+    resource_limit = ResourceLimit::Create(kernel, "Library Applets");
     resource_limit->SetLimitValue(ResourceLimitType::Priority, 0x4);
     resource_limit->SetLimitValue(ResourceLimitType::Commit, 0x602000);
     resource_limit->SetLimitValue(ResourceLimitType::Thread, 0xE);
@@ -112,7 +110,7 @@ ResourceLimitList::ResourceLimitList(KernelSystem& kernel) {
     resource_limits[static_cast<u8>(ResourceLimitCategory::LibApplet)] = resource_limit;
 
     // Create the Other resource limit
-    resource_limit = CreateLimit("Others");
+    resource_limit = ResourceLimit::Create(kernel, "Others");
     resource_limit->SetLimitValue(ResourceLimitType::Priority, 0x4);
     resource_limit->SetLimitValue(ResourceLimitType::Commit, is_new_3ds ? 0x2182000 : 0x1682000);
     resource_limit->SetLimitValue(ResourceLimitType::Thread, is_new_3ds ? 0xE1 : 0xCA);
@@ -128,7 +126,7 @@ ResourceLimitList::ResourceLimitList(KernelSystem& kernel) {
 
 ResourceLimitList::~ResourceLimitList() = default;
 
-KResourceLimit* ResourceLimitList::GetForCategory(ResourceLimitCategory category) {
+std::shared_ptr<ResourceLimit> ResourceLimitList::GetForCategory(ResourceLimitCategory category) {
     switch (category) {
     case ResourceLimitCategory::Application:
     case ResourceLimitCategory::SysApplet:
@@ -1,15 +1,17 @@
-// Copyright 2023 Citra Emulator Project
+// Copyright 2015 Citra Emulator Project
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
 #pragma once
 
 #include <array>
+#include <memory>
 #include <boost/serialization/array.hpp>
 #include <boost/serialization/base_object.hpp>
+#include <boost/serialization/shared_ptr.hpp>
+#include <boost/serialization/string.hpp>
 #include "common/common_types.h"
-#include "core/global.h"
-#include "core/hle/kernel/slab_helpers.h"
+#include "core/hle/kernel/object.h"
 
 namespace Kernel {
@@ -34,14 +36,28 @@ enum class ResourceLimitType : u32 {
     Max = 10,
 };
 
-class KResourceLimit final : public KAutoObjectWithSlabHeapAndContainer<KResourceLimit> {
-    KERNEL_AUTOOBJECT_TRAITS(KResourceLimit, KAutoObject);
-
+class ResourceLimit final : public Object {
 public:
-    explicit KResourceLimit(KernelSystem& kernel);
-    ~KResourceLimit() override;
+    explicit ResourceLimit(KernelSystem& kernel);
+    ~ResourceLimit() override;
 
-    void Initialize(std::string name);
+    /**
+     * Creates a resource limit object.
+     */
+    static std::shared_ptr<ResourceLimit> Create(KernelSystem& kernel,
+                                                 std::string name = "Unknown");
+
+    std::string GetTypeName() const override {
+        return "ResourceLimit";
+    }
+    std::string GetName() const override {
+        return m_name;
+    }
+
+    static constexpr HandleType HANDLE_TYPE = HandleType::ResourceLimit;
+    HandleType GetHandleType() const override {
+        return HANDLE_TYPE;
+    }
 
     s32 GetCurrentValue(ResourceLimitType type) const;
     s32 GetLimitValue(ResourceLimitType type) const;
@@ -55,16 +71,16 @@ private:
     using ResourceArray = std::array<s32, static_cast<size_t>(ResourceLimitType::Max)>;
     ResourceArray m_limit_values{};
     ResourceArray m_current_values{};
-    std::string m_name{};
+    std::string m_name;
 
 private:
    friend class boost::serialization::access;
     template <class Archive>
     void serialize(Archive& ar, const unsigned int file_version) {
-        ar& boost::serialization::base_object<KAutoObject>(*this);
+        ar& boost::serialization::base_object<Object>(*this);
+        ar& m_name;
         ar& m_limit_values;
         ar& m_current_values;
-        ar& m_name;
     }
 };
@@ -78,10 +94,10 @@ public:
     * @param category The resource limit category
     * @returns The resource limit associated with the category
     */
-    KResourceLimit* GetForCategory(ResourceLimitCategory category);
+    std::shared_ptr<ResourceLimit> GetForCategory(ResourceLimitCategory category);
 
 private:
-    std::array<KResourceLimit*, 4> resource_limits;
+    std::array<std::shared_ptr<ResourceLimit>, 4> resource_limits;
 
     friend class boost::serialization::access;
     template <class Archive>
@@ -92,5 +108,5 @@ private:
 
 } // namespace Kernel
 
-BOOST_CLASS_EXPORT_KEY(Kernel::KResourceLimit)
-CONSTRUCT_KERNEL_OBJECT(Kernel::KResourceLimit)
+BOOST_CLASS_EXPORT_KEY(Kernel::ResourceLimit)
+CONSTRUCT_KERNEL_OBJECT(Kernel::ResourceLimit)
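
Read together, the two resource-limit files above reduce the limit object to per-type bookkeeping: Reserve fails once current + amount would exceed the configured limit, and Release refuses to drop the count below zero. A small self-contained sketch of that accounting, with an abbreviated enum rather than the full table above (the Sketch names are illustrative only):

// Standalone sketch of the Reserve/Release accounting shown above.
#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>

enum class ResourceLimitType : std::uint32_t { Priority, Commit, Thread, Semaphore, Max };

class ResourceLimitSketch {
public:
    void SetLimitValue(ResourceLimitType type, std::int32_t value) {
        limits[static_cast<std::size_t>(type)] = value;
    }

    // Returns false (and reserves nothing) if the request would exceed the limit.
    bool Reserve(ResourceLimitType type, std::int32_t amount) {
        const auto index = static_cast<std::size_t>(type);
        if (current[index] + amount > limits[index]) {
            return false;
        }
        current[index] += amount;
        return true;
    }

    // Returns false if more is released than is currently held.
    bool Release(ResourceLimitType type, std::int32_t amount) {
        const auto index = static_cast<std::size_t>(type);
        if (amount > current[index]) {
            return false;
        }
        current[index] -= amount;
        return true;
    }

private:
    std::array<std::int32_t, static_cast<std::size_t>(ResourceLimitType::Max)> limits{};
    std::array<std::int32_t, static_cast<std::size_t>(ResourceLimitType::Max)> current{};
};

int main() {
    ResourceLimitSketch limit;
    limit.SetLimitValue(ResourceLimitType::Thread, 0x20);               // 32 threads, as for "Applications"
    std::cout << limit.Reserve(ResourceLimitType::Thread, 30) << '\n';  // 1: fits
    std::cout << limit.Reserve(ResourceLimitType::Thread, 5) << '\n';   // 0: would exceed 32
    std::cout << limit.Release(ResourceLimitType::Thread, 30) << '\n';  // 1: gives the slots back
    return 0;
}
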
@@ -0,0 +1,64 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/archives.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/semaphore.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::Semaphore)
namespace Kernel {
Semaphore::Semaphore(KernelSystem& kernel) : WaitObject(kernel) {}
Semaphore::~Semaphore() {
if (resource_limit) {
resource_limit->Release(ResourceLimitType::Semaphore, 1);
}
}
ResultVal<std::shared_ptr<Semaphore>> KernelSystem::CreateSemaphore(s32 initial_count,
s32 max_count,
std::string name) {
if (initial_count > max_count) {
return ERR_INVALID_COMBINATION_KERNEL;
}
// When the semaphore is created, some slots are reserved for other threads,
// and the rest is reserved for the caller thread
auto semaphore = std::make_shared<Semaphore>(*this);
semaphore->max_count = max_count;
semaphore->available_count = initial_count;
semaphore->name = std::move(name);
return semaphore;
}
bool Semaphore::ShouldWait(const Thread* thread) const {
return available_count <= 0;
}
void Semaphore::Acquire(Thread* thread) {
if (available_count <= 0)
return;
--available_count;
}
ResultVal<s32> Semaphore::Release(s32 release_count) {
if (max_count - available_count < release_count)
return ERR_OUT_OF_RANGE_KERNEL;
s32 previous_count = available_count;
available_count += release_count;
WakeupAllWaitingThreads();
return previous_count;
}
} // namespace Kernel
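
The semaphore above is plain counting plus the kernel's wait-object machinery: Acquire consumes a slot when one is free, and Release adds release_count slots back, failing if that would exceed max_count, while reporting the count as it was before the call. A minimal sketch of just that arithmetic, with the wait/wakeup handling left out and std::nullopt standing in for ERR_OUT_OF_RANGE_KERNEL:

// Counting logic of the Semaphore above, without threads or wait objects.
#include <cstdint>
#include <iostream>
#include <optional>

struct SemaphoreSketch {
    std::int32_t max_count;
    std::int32_t available_count;

    bool ShouldWait() const {
        return available_count <= 0;
    }

    void Acquire() {
        if (available_count > 0) {
            --available_count;
        }
    }

    // Returns the count before the release, or std::nullopt on overflow.
    std::optional<std::int32_t> Release(std::int32_t release_count) {
        if (max_count - available_count < release_count) {
            return std::nullopt;
        }
        const std::int32_t previous = available_count;
        available_count += release_count;
        return previous;
    }
};

int main() {
    // CreateSemaphore(initial_count = 1, max_count = 3): one slot free for the caller,
    // the remaining slots handed out through later Release calls.
    SemaphoreSketch sem{3, 1};
    sem.Acquire();                                     // caller takes the free slot
    std::cout << sem.ShouldWait() << '\n';             // 1: no slots left, waiters block
    if (auto prev = sem.Release(2)) {
        std::cout << *prev << ' ' << sem.available_count << '\n'; // "0 2"
    }
    std::cout << sem.Release(5).has_value() << '\n';   // 0: would exceed max_count
    return 0;
}
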
@@ -0,0 +1,67 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/export.hpp>
#include <boost/serialization/string.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/wait_object.h"
#include "core/hle/result.h"
namespace Kernel {
class ResourceLimit;
class Semaphore final : public WaitObject {
public:
explicit Semaphore(KernelSystem& kernel);
~Semaphore() override;
std::string GetTypeName() const override {
return "Semaphore";
}
std::string GetName() const override {
return name;
}
static constexpr HandleType HANDLE_TYPE = HandleType::Semaphore;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
std::shared_ptr<ResourceLimit> resource_limit;
s32 max_count; ///< Maximum number of simultaneous holders the semaphore can have
s32 available_count; ///< Number of free slots left in the semaphore
std::string name; ///< Name of semaphore (optional)
bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
/**
* Releases a certain number of slots from a semaphore.
* @param release_count The number of slots to release
* @return The number of free slots the semaphore had before this call
*/
ResultVal<s32> Release(s32 release_count);
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int file_version) {
ar& boost::serialization::base_object<WaitObject>(*this);
ar& max_count;
ar& available_count;
ar& name;
ar& resource_limit;
}
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::Semaphore)
CONSTRUCT_KERNEL_OBJECT(Kernel::Semaphore)
@@ -0,0 +1,68 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <tuple>
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::ServerPort)
namespace Kernel {
ServerPort::ServerPort(KernelSystem& kernel) : WaitObject(kernel) {}
ServerPort::~ServerPort() {}
ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() {
if (pending_sessions.empty()) {
return ERR_NO_PENDING_SESSIONS;
}
auto session = std::move(pending_sessions.back());
pending_sessions.pop_back();
return session;
}
bool ServerPort::ShouldWait(const Thread* thread) const {
// If there are no pending sessions, we wait until a new one is added.
return pending_sessions.size() == 0;
}
void ServerPort::Acquire(Thread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
}
KernelSystem::PortPair KernelSystem::CreatePortPair(u32 max_sessions, std::string name) {
auto server_port{std::make_shared<ServerPort>(*this)};
auto client_port{std::make_shared<ClientPort>(*this)};
server_port->name = name + "_Server";
client_port->name = name + "_Client";
client_port->server_port = server_port;
client_port->max_sessions = max_sessions;
client_port->active_sessions = 0;
return std::make_pair(std::move(server_port), std::move(client_port));
}
template <class Archive>
void ServerPort::serialize(Archive& ar, const unsigned int file_version) {
ar& boost::serialization::base_object<WaitObject>(*this);
ar& name;
ar& pending_sessions;
ar& hle_handler;
}
SERIALIZE_IMPL(ServerPort)
} // namespace Kernel
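
CreatePortPair above wires a ServerPort and ClientPort together, and Accept hands back the most recently queued pending session (it pops from the back of the vector). A toy sketch of that queue behaviour, with hypothetical names and nullptr standing in for ERR_NO_PENDING_SESSIONS:

// Sketch of the pending-session queue behaviour of ServerPort::Accept above.
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct SessionSketch {
    explicit SessionSketch(std::string name_) : name(std::move(name_)) {}
    std::string name;
};

struct ServerPortSketch {
    std::vector<std::shared_ptr<SessionSketch>> pending_sessions;

    // Mirrors Accept(): an empty queue means there is nothing to hand out.
    std::shared_ptr<SessionSketch> Accept() {
        if (pending_sessions.empty()) {
            return nullptr;
        }
        auto session = std::move(pending_sessions.back());
        pending_sessions.pop_back();
        return session;
    }
};

int main() {
    ServerPortSketch port;
    port.pending_sessions.push_back(std::make_shared<SessionSketch>("srv:A_Server"));
    port.pending_sessions.push_back(std::make_shared<SessionSketch>("srv:B_Server"));

    // The most recently queued session comes out first (back of the vector).
    std::cout << port.Accept()->name << '\n';          // srv:B_Server
    std::cout << port.Accept()->name << '\n';          // srv:A_Server
    std::cout << (port.Accept() == nullptr) << '\n';   // 1: nothing pending
    return 0;
}
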
@@ -0,0 +1,75 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <tuple>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/wait_object.h"
#include "core/hle/result.h"
namespace Kernel {
class ClientPort;
class ServerSession;
class SessionRequestHandler;
class ServerPort final : public WaitObject {
public:
explicit ServerPort(KernelSystem& kernel);
~ServerPort() override;
std::string GetTypeName() const override {
return "ServerPort";
}
std::string GetName() const override {
return name;
}
static constexpr HandleType HANDLE_TYPE = HandleType::ServerPort;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
/**
* Accepts a pending incoming connection on this port. If there are no pending sessions, it will
* return ERR_NO_PENDING_SESSIONS.
*/
ResultVal<std::shared_ptr<ServerSession>> Accept();
/**
* Sets the HLE handler template for the port. ServerSessions created by connecting to this port
* will inherit a reference to this handler.
*/
void SetHleHandler(std::shared_ptr<SessionRequestHandler> hle_handler_) {
hle_handler = std::move(hle_handler_);
}
std::string name; ///< Name of port (optional)
/// ServerSessions waiting to be accepted by the port
std::vector<std::shared_ptr<ServerSession>> pending_sessions;
/// This session's HLE request handler template (optional)
/// ServerSessions created from this port inherit a reference to this handler.
std::shared_ptr<SessionRequestHandler> hle_handler;
bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int file_version);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::ServerPort)
CONSTRUCT_KERNEL_OBJECT(Kernel::ServerPort)
@@ -0,0 +1,159 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <tuple>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::ServerSession)
namespace Kernel {
template <class Archive>
void ServerSession::serialize(Archive& ar, const unsigned int file_version) {
ar& boost::serialization::base_object<WaitObject>(*this);
ar& name;
ar& parent;
ar& hle_handler;
ar& pending_requesting_threads;
ar& currently_handling;
ar& mapped_buffer_context;
}
SERIALIZE_IMPL(ServerSession)
ServerSession::ServerSession(KernelSystem& kernel) : WaitObject(kernel), kernel(kernel) {}
ServerSession::~ServerSession() {
// This destructor will be called automatically when the last ServerSession handle is closed by
// the emulated application.
// Decrease the port's connection count.
if (parent->port)
parent->port->ConnectionClosed();
// TODO(Subv): Wake up all the ClientSession's waiting threads and set
// the SendSyncRequest result to 0xC920181A.
parent->server = nullptr;
}
ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelSystem& kernel,
std::string name) {
auto server_session{std::make_shared<ServerSession>(kernel)};
server_session->name = std::move(name);
server_session->parent = nullptr;
return server_session;
}
bool ServerSession::ShouldWait(const Thread* thread) const {
// Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
if (parent->client == nullptr)
return false;
// Wait if we have no pending requests, or if we're currently handling a request.
return pending_requesting_threads.empty() || currently_handling != nullptr;
}
void ServerSession::Acquire(Thread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
// If the client endpoint was closed, don't do anything. This ServerSession is now useless and
// will linger until its last handle is closed by the running application.
if (parent->client == nullptr)
return;
// We are now handling a request, pop it from the stack.
ASSERT(!pending_requesting_threads.empty());
currently_handling = pending_requesting_threads.back();
pending_requesting_threads.pop_back();
}
ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread) {
// The ServerSession received a sync request, this means that there's new data available
// from its ClientSession, so wake up any threads that may be waiting on a svcReplyAndReceive or
// similar.
// If this ServerSession has an associated HLE handler, forward the request to it.
if (hle_handler != nullptr) {
std::array<u32_le, IPC::COMMAND_BUFFER_LENGTH + 2 * IPC::MAX_STATIC_BUFFERS> cmd_buf;
auto current_process = thread->owner_process.lock();
ASSERT(current_process);
kernel.memory.ReadBlock(*current_process, thread->GetCommandBufferAddress(), cmd_buf.data(),
cmd_buf.size() * sizeof(u32));
auto context =
std::make_shared<Kernel::HLERequestContext>(kernel, SharedFrom(this), thread);
context->PopulateFromIncomingCommandBuffer(cmd_buf.data(), current_process);
hle_handler->HandleSyncRequest(*context);
ASSERT(thread->status == Kernel::ThreadStatus::Running ||
thread->status == Kernel::ThreadStatus::WaitHleEvent);
// Only write the response immediately if the thread is still running. If the HLE handler
// put the thread to sleep then the writing of the command buffer will be deferred to the
// wakeup callback.
if (thread->status == Kernel::ThreadStatus::Running) {
context->WriteToOutgoingCommandBuffer(cmd_buf.data(), *current_process);
kernel.memory.WriteBlock(*current_process, thread->GetCommandBufferAddress(),
cmd_buf.data(), cmd_buf.size() * sizeof(u32));
}
}
if (thread->status == ThreadStatus::Running) {
// Put the thread to sleep until the server replies, it will be awoken in
// svcReplyAndReceive for LLE servers.
thread->status = ThreadStatus::WaitIPC;
if (hle_handler != nullptr) {
// For HLE services, we put the request threads to sleep for a short duration to
// simulate IPC overhead, but only if the HLE handler didn't put the thread to sleep for
// other reasons like an async callback. The IPC overhead is needed to prevent
// starvation when a thread only does sync requests to HLE services while a
// lower-priority thread is waiting to run.
// This delay was approximated in a homebrew application by measuring the average time
// it takes for svcSendSyncRequest to return when performing the SetLcdForceBlack IPC
// request to the GSP:GPU service in a n3DS with firmware 11.6. The measured values have
// a high variance and vary between models.
static constexpr u64 IPCDelayNanoseconds = 39000;
thread->WakeAfterDelay(IPCDelayNanoseconds);
} else {
// Add the thread to the list of threads that have issued a sync request with this
// server.
pending_requesting_threads.push_back(std::move(thread));
}
}
// If this ServerSession does not have an HLE implementation, just wake up the threads waiting
// on it.
WakeupAllWaitingThreads();
return RESULT_SUCCESS;
}
KernelSystem::SessionPair KernelSystem::CreateSessionPair(const std::string& name,
std::shared_ptr<ClientPort> port) {
auto server_session = ServerSession::Create(*this, name + "_Server").Unwrap();
auto client_session{std::make_shared<ClientSession>(*this)};
client_session->name = name + "_Client";
std::shared_ptr<Session> parent(new Session);
parent->client = client_session.get();
parent->server = server_session.get();
parent->port = port;
client_session->parent = parent;
server_session->parent = parent;
return std::make_pair(std::move(server_session), std::move(client_session));
}
} // namespace Kernel
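
The fixed 39,000 ns wakeup described in the comment inside HandleSyncRequest is easier to picture in CPU cycles; below is a throwaway conversion, taking the roughly 268 MHz ARM11 clock rate as an assumption rather than something stated in this diff:

// Back-of-the-envelope: what the 39 us HLE IPC delay above amounts to in ARM11 cycles.
#include <cstdint>
#include <iostream>

int main() {
    constexpr std::uint64_t ipc_delay_ns = 39000;
    constexpr std::uint64_t arm11_clock_hz = 268'111'856; // assumed ~268 MHz core clock
    constexpr std::uint64_t cycles = ipc_delay_ns * arm11_clock_hz / 1'000'000'000;
    std::cout << cycles << " cycles\n"; // roughly 10456 cycles per HLE sync request
    return 0;
}
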
@@ -0,0 +1,117 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <boost/serialization/export.hpp>
#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/ipc.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/wait_object.h"
#include "core/hle/result.h"
#include "core/memory.h"
namespace Kernel {
class ClientSession;
class ClientPort;
class ServerSession;
class Session;
class SessionRequestHandler;
class Thread;
/**
* Kernel object representing the server endpoint of an IPC session. Sessions are the basic CTR-OS
* primitive for communication between different processes, and are used to implement service calls
* to the various system services.
*
* To make a service call, the client must write the command header and parameters to the buffer
* located at offset 0x80 of the TLS (Thread-Local Storage) area, then execute a SendSyncRequest
* SVC call with its ClientSession handle. The kernel will read the command header, using it to
* marshall the parameters to the process at the server endpoint of the session.
* After the server replies to the request, the response is marshalled back to the caller's
* TLS buffer and control is transferred back to it.
*/
class ServerSession final : public WaitObject {
public:
~ServerSession() override;
explicit ServerSession(KernelSystem& kernel);
std::string GetName() const override {
return name;
}
std::string GetTypeName() const override {
return "ServerSession";
}
static constexpr HandleType HANDLE_TYPE = HandleType::ServerSession;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
/**
* Sets the HLE handler for the session. This handler will be called to service IPC requests
* instead of the regular IPC machinery. (The regular IPC machinery is currently not
* implemented.)
*/
void SetHleHandler(std::shared_ptr<SessionRequestHandler> hle_handler_) {
hle_handler = std::move(hle_handler_);
}
/**
* Handle a sync request from the emulated application.
* @param thread Thread that initiated the request.
* @returns ResultCode from the operation.
*/
ResultCode HandleSyncRequest(std::shared_ptr<Thread> thread);
bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
std::string name; ///< The name of this session (optional)
std::shared_ptr<Session> parent; ///< The parent session, which links to the client endpoint.
std::shared_ptr<SessionRequestHandler>
hle_handler; ///< This session's HLE request handler (optional)
/// List of threads that are pending a response after a sync request. This list is processed in
/// a LIFO manner, thus, the last request will be dispatched first.
/// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test.
std::vector<std::shared_ptr<Thread>> pending_requesting_threads;
/// Thread whose request is currently being handled. A request is considered "handled" when a
/// response is sent via svcReplyAndReceive.
/// TODO(Subv): Find a better name for this.
std::shared_ptr<Thread> currently_handling;
/// A temporary list holding mapped buffer info from the IPC request, used during the IPC reply
std::vector<MappedBufferContext> mapped_buffer_context;
private:
/**
* Creates a server session. The server session can have an optional HLE handler,
* which will be invoked to handle the IPC requests that this session receives.
* @param kernel The kernel instance to create the server session on
* @param name Optional name of the server session.
* @return The created server session
*/
static ResultVal<std::shared_ptr<ServerSession>> Create(KernelSystem& kernel,
std::string name = "Unknown");
friend class KernelSystem;
KernelSystem& kernel;
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int file_version);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::ServerSession)
CONSTRUCT_KERNEL_OBJECT(Kernel::ServerSession)
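
The class comment above describes the client half of the protocol: build a command buffer at offset 0x80 of the TLS area, then issue SendSyncRequest with the ClientSession handle. A small illustrative sketch of such a buffer follows; the header bit layout used here is the commonly documented 3DS IPC format and is assumed, not taken from this diff.

// Sketch of what a client writes at TLS+0x80 before SendSyncRequest.
#include <array>
#include <cstdint>
#include <cstdio>

// Assumed layout: command id in the upper half-word, parameter word counts in the low bits.
constexpr std::uint32_t MakeIpcHeader(std::uint16_t command_id, std::uint32_t normal_params,
                                      std::uint32_t translate_params) {
    return (static_cast<std::uint32_t>(command_id) << 16) | ((normal_params & 0x3F) << 6) |
           (translate_params & 0x3F);
}

int main() {
    // Hypothetical request: command 0x0001 with two normal parameter words.
    std::array<std::uint32_t, 64> cmd_buf{};   // the 0x100-byte command buffer in TLS
    cmd_buf[0] = MakeIpcHeader(0x0001, 2, 0);
    cmd_buf[1] = 0xDEADBEEF;                   // first normal parameter
    cmd_buf[2] = 0xCAFEBABE;                   // second normal parameter

    std::printf("header = 0x%08X\n", static_cast<unsigned>(cmd_buf[0])); // 0x00010080
    return 0;
}
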
Some files were not shown because too many files have changed in this diff.