kernel: Migrate to KAutoObject

This commit is contained in:
GPUCode 2023-12-05 22:25:50 +02:00
parent 8e2415f455
commit d02cb52b76
189 changed files with 7997 additions and 5297 deletions

View File

@ -6,13 +6,13 @@
#include "citra_qt/debugger/wait_tree.h"
#include "citra_qt/uisettings.h"
#include "common/assert.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/semaphore.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/timer.h"
#include "core/hle/kernel/wait_object.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_mutex.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_semaphore.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_timer.h"
namespace {
@ -98,7 +98,7 @@ QString WaitTreeText::GetText() const {
return text;
}
WaitTreeWaitObject::WaitTreeWaitObject(const Kernel::WaitObject& o) : object(o) {}
WaitTreeWaitObject::WaitTreeWaitObject(const Kernel::KSynchronizationObject& o) : object(o) {}
bool WaitTreeExpandableItem::IsExpandable() const {
return true;
@ -106,23 +106,24 @@ bool WaitTreeExpandableItem::IsExpandable() const {
QString WaitTreeWaitObject::GetText() const {
return tr("[%1]%2 %3")
.arg(object.GetObjectId())
.arg(/*object.GetObjectId()*/ 0)
.arg(QString::fromStdString(object.GetTypeName()),
QString::fromStdString(object.GetName()));
QString::fromStdString(/*object.GetName()*/ "name"));
}
std::unique_ptr<WaitTreeWaitObject> WaitTreeWaitObject::make(const Kernel::WaitObject& object) {
switch (object.GetHandleType()) {
case Kernel::HandleType::Event:
return std::make_unique<WaitTreeEvent>(static_cast<const Kernel::Event&>(object));
case Kernel::HandleType::Mutex:
return std::make_unique<WaitTreeMutex>(static_cast<const Kernel::Mutex&>(object));
case Kernel::HandleType::Semaphore:
return std::make_unique<WaitTreeSemaphore>(static_cast<const Kernel::Semaphore&>(object));
case Kernel::HandleType::Timer:
return std::make_unique<WaitTreeTimer>(static_cast<const Kernel::Timer&>(object));
case Kernel::HandleType::Thread:
return std::make_unique<WaitTreeThread>(static_cast<const Kernel::Thread&>(object));
std::unique_ptr<WaitTreeWaitObject> WaitTreeWaitObject::make(
const Kernel::KSynchronizationObject& object) {
switch (object.GetTypeObj().GetClassToken()) {
case Kernel::ClassTokenType::KEvent:
return std::make_unique<WaitTreeEvent>(static_cast<const Kernel::KEvent&>(object));
case Kernel::ClassTokenType::KMutex:
return std::make_unique<WaitTreeMutex>(static_cast<const Kernel::KMutex&>(object));
case Kernel::ClassTokenType::KSemaphore:
return std::make_unique<WaitTreeSemaphore>(static_cast<const Kernel::KSemaphore&>(object));
case Kernel::ClassTokenType::KTimer:
return std::make_unique<WaitTreeTimer>(static_cast<const Kernel::KTimer&>(object));
case Kernel::ClassTokenType::KThread:
return std::make_unique<WaitTreeThread>(static_cast<const Kernel::KThread&>(object));
default:
return std::make_unique<WaitTreeWaitObject>(object);
}
@ -153,7 +154,7 @@ QString WaitTreeWaitObject::GetResetTypeQString(Kernel::ResetType reset_type) {
return {};
}
WaitTreeObjectList::WaitTreeObjectList(const std::vector<std::shared_ptr<Kernel::WaitObject>>& list,
WaitTreeObjectList::WaitTreeObjectList(const std::vector<Kernel::KSynchronizationObject*>& list,
bool w_all)
: object_list(list), wait_all(w_all) {}
@ -170,12 +171,12 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeObjectList::GetChildren() con
return list;
}
WaitTreeThread::WaitTreeThread(const Kernel::Thread& thread) : WaitTreeWaitObject(thread) {}
WaitTreeThread::WaitTreeThread(const Kernel::KThread& thread) : WaitTreeWaitObject(thread) {}
QString WaitTreeThread::GetText() const {
const auto& thread = static_cast<const Kernel::Thread&>(object);
const auto& thread = static_cast<const Kernel::KThread&>(object);
QString status;
switch (thread.status) {
switch (thread.GetStatus()) {
case Kernel::ThreadStatus::Running:
status = tr("running");
break;
@ -183,7 +184,7 @@ QString WaitTreeThread::GetText() const {
status = tr("ready");
break;
case Kernel::ThreadStatus::WaitArb:
status = tr("waiting for address 0x%1").arg(thread.wait_address, 8, 16, QLatin1Char('0'));
status = tr("waiting for address 0x%1").arg(thread.m_wait_address, 8, 16, QLatin1Char('0'));
break;
case Kernel::ThreadStatus::WaitSleep:
status = tr("sleeping");
@ -205,17 +206,18 @@ QString WaitTreeThread::GetText() const {
status = tr("dead");
break;
}
const auto& context = thread.GetContext();
QString pc_info = tr(" PC = 0x%1 LR = 0x%2")
.arg(thread.context.GetProgramCounter(), 8, 16, QLatin1Char('0'))
.arg(thread.context.GetLinkRegister(), 8, 16, QLatin1Char('0'));
.arg(context.GetProgramCounter(), 8, 16, QLatin1Char('0'))
.arg(context.GetLinkRegister(), 8, 16, QLatin1Char('0'));
return QStringLiteral("%1%2 (%3) ").arg(WaitTreeWaitObject::GetText(), pc_info, status);
}
QColor WaitTreeThread::GetColor() const {
const std::size_t color_index = IsDarkTheme() ? 1 : 0;
const auto& thread = static_cast<const Kernel::Thread&>(object);
switch (thread.status) {
const auto& thread = static_cast<const Kernel::KThread&>(object);
switch (thread.GetStatus()) {
case Kernel::ThreadStatus::Running:
return QColor(WaitTreeColors[0][color_index]);
case Kernel::ThreadStatus::Ready:
@ -242,10 +244,10 @@ QColor WaitTreeThread::GetColor() const {
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren());
const auto& thread = static_cast<const Kernel::Thread&>(object);
const auto& thread = static_cast<const Kernel::KThread&>(object);
QString processor;
switch (thread.processor_id) {
switch (thread.m_processor_id) {
case Kernel::ThreadProcessorId::ThreadProcessorIdDefault:
processor = tr("default");
break;
@ -259,86 +261,88 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
processor = tr("SysCore");
break;
default:
processor = tr("Unknown processor %1").arg(thread.processor_id);
processor = tr("Unknown processor %1").arg(thread.m_processor_id);
break;
}
list.push_back(std::make_unique<WaitTreeText>(tr("object id = %1").arg(thread.GetObjectId())));
list.push_back(
std::make_unique<WaitTreeText>(tr("object id = %1").arg(/*thread.GetObjectId()*/ 1)));
list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor)));
list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadId())));
if (auto process = thread.owner_process.lock()) {
list.push_back(
std::make_unique<WaitTreeText>(tr("process = %1 (%2)")
.arg(QString::fromStdString(process->GetName()))
.arg(process->process_id)));
if (auto process = thread.GetOwner()) {
list.push_back(std::make_unique<WaitTreeText>(
tr("process = %1 (%2)")
.arg(QString::fromStdString(/*process->GetName()*/ ""))
.arg(process->process_id)));
}
list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)")
.arg(thread.current_priority)
.arg(thread.nominal_priority)));
.arg(thread.GetCurrentPriority())
.arg(thread.m_nominal_priority)));
list.push_back(std::make_unique<WaitTreeText>(
tr("last running ticks = %1").arg(thread.last_running_ticks)));
tr("last running ticks = %1").arg(thread.m_last_running_ticks)));
if (thread.held_mutexes.empty()) {
if (thread.m_held_mutexes.empty()) {
list.push_back(std::make_unique<WaitTreeText>(tr("not holding mutex")));
} else {
list.push_back(std::make_unique<WaitTreeMutexList>(thread.held_mutexes));
list.push_back(std::make_unique<WaitTreeMutexList>(thread.m_held_mutexes));
}
if (thread.status == Kernel::ThreadStatus::WaitSynchAny ||
thread.status == Kernel::ThreadStatus::WaitSynchAll ||
thread.status == Kernel::ThreadStatus::WaitHleEvent) {
list.push_back(std::make_unique<WaitTreeObjectList>(thread.wait_objects,
if (thread.GetStatus() == Kernel::ThreadStatus::WaitSynchAny ||
thread.GetStatus() == Kernel::ThreadStatus::WaitSynchAll ||
thread.GetStatus() == Kernel::ThreadStatus::WaitHleEvent) {
list.push_back(std::make_unique<WaitTreeObjectList>(thread.m_wait_objects,
thread.IsSleepingOnWaitAll()));
}
return list;
}
WaitTreeEvent::WaitTreeEvent(const Kernel::Event& object) : WaitTreeWaitObject(object) {}
WaitTreeEvent::WaitTreeEvent(const Kernel::KEvent& object) : WaitTreeWaitObject(object) {}
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeEvent::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren());
list.push_back(std::make_unique<WaitTreeText>(
tr("reset type = %1")
.arg(GetResetTypeQString(static_cast<const Kernel::Event&>(object).GetResetType()))));
.arg(GetResetTypeQString(static_cast<const Kernel::KEvent&>(object).GetResetType()))));
return list;
}
WaitTreeMutex::WaitTreeMutex(const Kernel::Mutex& object) : WaitTreeWaitObject(object) {}
WaitTreeMutex::WaitTreeMutex(const Kernel::KMutex& object) : WaitTreeWaitObject(object) {}
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeMutex::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren());
const auto& mutex = static_cast<const Kernel::Mutex&>(object);
if (mutex.lock_count) {
list.push_back(
std::make_unique<WaitTreeText>(tr("locked %1 times by thread:").arg(mutex.lock_count)));
list.push_back(std::make_unique<WaitTreeThread>(*mutex.holding_thread));
const auto& mutex = static_cast<const Kernel::KMutex&>(object);
if (mutex.m_lock_count) {
list.push_back(std::make_unique<WaitTreeText>(
tr("locked %1 times by thread:").arg(mutex.m_lock_count)));
list.push_back(std::make_unique<WaitTreeThread>(*mutex.m_holding_thread));
} else {
list.push_back(std::make_unique<WaitTreeText>(tr("free")));
}
return list;
}
WaitTreeSemaphore::WaitTreeSemaphore(const Kernel::Semaphore& object)
WaitTreeSemaphore::WaitTreeSemaphore(const Kernel::KSemaphore& object)
: WaitTreeWaitObject(object) {}
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeSemaphore::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren());
const auto& semaphore = static_cast<const Kernel::Semaphore&>(object);
const auto& semaphore = static_cast<const Kernel::KSemaphore&>(object);
list.push_back(std::make_unique<WaitTreeText>(
tr("available count = %1").arg(semaphore.GetAvailableCount())));
list.push_back(
std::make_unique<WaitTreeText>(tr("available count = %1").arg(semaphore.available_count)));
list.push_back(std::make_unique<WaitTreeText>(tr("max count = %1").arg(semaphore.max_count)));
std::make_unique<WaitTreeText>(tr("max count = %1").arg(semaphore.GetMaxCount())));
return list;
}
WaitTreeTimer::WaitTreeTimer(const Kernel::Timer& object) : WaitTreeWaitObject(object) {}
WaitTreeTimer::WaitTreeTimer(const Kernel::KTimer& object) : WaitTreeWaitObject(object) {}
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeTimer::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren());
const auto& timer = static_cast<const Kernel::Timer&>(object);
const auto& timer = static_cast<const Kernel::KTimer&>(object);
list.push_back(std::make_unique<WaitTreeText>(
tr("reset type = %1").arg(GetResetTypeQString(timer.GetResetType()))));
@ -349,8 +353,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeTimer::GetChildren() const {
return list;
}
WaitTreeMutexList::WaitTreeMutexList(
const boost::container::flat_set<std::shared_ptr<Kernel::Mutex>>& list)
WaitTreeMutexList::WaitTreeMutexList(const boost::container::flat_set<Kernel::KMutex*>& list)
: mutex_list(list) {}
QString WaitTreeMutexList::GetText() const {
@ -364,7 +367,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeMutexList::GetChildren() cons
return list;
}
WaitTreeThreadList::WaitTreeThreadList(const std::vector<std::shared_ptr<Kernel::Thread>>& list)
WaitTreeThreadList::WaitTreeThreadList(const std::vector<Kernel::KThread*>& list)
: thread_list(list) {}
QString WaitTreeThreadList::GetText() const {

View File

@ -10,17 +10,16 @@
#include <QTreeView>
#include <boost/container/flat_set.hpp>
#include "core/core.h"
#include "core/hle/kernel/object.h"
class EmuThread;
namespace Kernel {
class WaitObject;
class Event;
class Mutex;
class Semaphore;
class Thread;
class Timer;
class KSynchronizationObject;
class KEvent;
class KMutex;
class KSemaphore;
class KThread;
class KTimer;
} // namespace Kernel
namespace Core {
@ -73,13 +72,13 @@ public:
class WaitTreeWaitObject : public WaitTreeExpandableItem {
Q_OBJECT
public:
explicit WaitTreeWaitObject(const Kernel::WaitObject& object);
static std::unique_ptr<WaitTreeWaitObject> make(const Kernel::WaitObject& object);
explicit WaitTreeWaitObject(const Kernel::KSynchronizationObject& object);
static std::unique_ptr<WaitTreeWaitObject> make(const Kernel::KSynchronizationObject& object);
QString GetText() const override;
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
protected:
const Kernel::WaitObject& object;
const Kernel::KSynchronizationObject& object;
static QString GetResetTypeQString(Kernel::ResetType reset_type);
};
@ -87,19 +86,19 @@ protected:
class WaitTreeObjectList : public WaitTreeExpandableItem {
Q_OBJECT
public:
WaitTreeObjectList(const std::vector<std::shared_ptr<Kernel::WaitObject>>& list, bool wait_all);
WaitTreeObjectList(const std::vector<Kernel::KSynchronizationObject*>& list, bool wait_all);
QString GetText() const override;
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
private:
const std::vector<std::shared_ptr<Kernel::WaitObject>>& object_list;
const std::vector<Kernel::KSynchronizationObject*>& object_list;
bool wait_all;
};
class WaitTreeThread : public WaitTreeWaitObject {
Q_OBJECT
public:
explicit WaitTreeThread(const Kernel::Thread& thread);
explicit WaitTreeThread(const Kernel::KThread& thread);
QString GetText() const override;
QColor GetColor() const override;
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
@ -108,53 +107,52 @@ public:
class WaitTreeEvent : public WaitTreeWaitObject {
Q_OBJECT
public:
explicit WaitTreeEvent(const Kernel::Event& object);
explicit WaitTreeEvent(const Kernel::KEvent& object);
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
};
class WaitTreeMutex : public WaitTreeWaitObject {
Q_OBJECT
public:
explicit WaitTreeMutex(const Kernel::Mutex& object);
explicit WaitTreeMutex(const Kernel::KMutex& object);
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
};
class WaitTreeSemaphore : public WaitTreeWaitObject {
Q_OBJECT
public:
explicit WaitTreeSemaphore(const Kernel::Semaphore& object);
explicit WaitTreeSemaphore(const Kernel::KSemaphore& object);
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
};
class WaitTreeTimer : public WaitTreeWaitObject {
Q_OBJECT
public:
explicit WaitTreeTimer(const Kernel::Timer& object);
explicit WaitTreeTimer(const Kernel::KTimer& object);
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
};
class WaitTreeMutexList : public WaitTreeExpandableItem {
Q_OBJECT
public:
explicit WaitTreeMutexList(
const boost::container::flat_set<std::shared_ptr<Kernel::Mutex>>& list);
explicit WaitTreeMutexList(const boost::container::flat_set<Kernel::KMutex*>& list);
QString GetText() const override;
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
private:
const boost::container::flat_set<std::shared_ptr<Kernel::Mutex>>& mutex_list;
const boost::container::flat_set<Kernel::KMutex*>& mutex_list;
};
class WaitTreeThreadList : public WaitTreeExpandableItem {
Q_OBJECT
public:
explicit WaitTreeThreadList(const std::vector<std::shared_ptr<Kernel::Thread>>& list);
explicit WaitTreeThreadList(const std::vector<Kernel::KThread*>& list);
QString GetText() const override;
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
private:
const std::vector<std::shared_ptr<Kernel::Thread>>& thread_list;
const std::vector<Kernel::KThread*>& thread_list;
};
class WaitTreeModel : public QAbstractItemModel {

View File

@ -88,6 +88,7 @@ add_library(citra_common STATIC
file_util.cpp
file_util.h
hash.h
intrusive_list.h
literals.h
logging/backend.cpp
logging/backend.h
@ -109,6 +110,7 @@ add_library(citra_common STATIC
microprofileui.h
param_package.cpp
param_package.h
parent_of_member.h
polyfill_thread.h
precompiled_headers.h
quaternion.h

View File

@ -21,4 +21,10 @@ template <typename T>
return static_cast<T>(value - value % size);
}
// Returns true if value lies on a 4 KiB (0x1000-byte) boundary, i.e. its low
// 12 bits are all zero. Constrained to unsigned types so the mask is applied
// to a well-defined bit pattern.
template <typename T>
    requires std::is_unsigned_v<T>
[[nodiscard]] constexpr bool Is4KBAligned(T value) {
    return (value & 0xFFF) == 0;
}
} // namespace Common

View File

@ -49,6 +49,14 @@ __declspec(dllimport) void __stdcall DebugBreak(void);
#define locale_t _locale_t
#endif // _MSC_VER
// Deletes the copy constructor and copy-assignment operator of cls.
// Intended for use inside a class definition (see e.g. IntrusiveListNode).
#define CITRA_NON_COPYABLE(cls)                                                                    \
    cls(const cls&) = delete;                                                                      \
    cls& operator=(const cls&) = delete

// Deletes the move constructor and move-assignment operator of cls.
#define CITRA_NON_MOVEABLE(cls)                                                                    \
    cls(cls&&) = delete;                                                                           \
    cls& operator=(cls&&) = delete
#define DECLARE_ENUM_FLAG_OPERATORS(type) \
[[nodiscard]] constexpr type operator|(type a, type b) noexcept { \
using T = std::underlying_type_t<type>; \

631
src/common/intrusive_list.h Normal file
View File

@ -0,0 +1,631 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/common_funcs.h"
#include "common/parent_of_member.h"
namespace Common {
// Forward declare implementation class for Node.
namespace impl {
class IntrusiveListImpl;
}
// A node in a circular doubly-linked list. A default-constructed node points at
// itself in both directions, which doubles as the "not linked" state; the list
// implementation (impl::IntrusiveListImpl) uses one such node as its sentinel/root.
// All mutating operations are private and reachable only by the friend class.
class IntrusiveListNode {
    CITRA_NON_COPYABLE(IntrusiveListNode);

private:
    friend class impl::IntrusiveListImpl;

    IntrusiveListNode* m_prev;
    IntrusiveListNode* m_next;

public:
    // Self-referencing links mean "unlinked".
    constexpr IntrusiveListNode() : m_prev(this), m_next(this) {}

    // A node is linked iff its next pointer leads anywhere but itself.
    constexpr bool IsLinked() const {
        return m_next != this;
    }

private:
    // Inserts node immediately before this one.
    constexpr void LinkPrev(IntrusiveListNode* node) {
        // We can't link an already linked node.
        ASSERT(!node->IsLinked());
        this->SplicePrev(node, node);
    }

    // Inserts the circular range [first, ... , last->m_prev] before this node.
    // Note: because the range is circular, passing the same node as first and
    // last splices the entire range that node belongs to.
    constexpr void SplicePrev(IntrusiveListNode* first, IntrusiveListNode* last) {
        // Splice a range into the list.
        auto last_prev = last->m_prev;
        first->m_prev = m_prev;
        last_prev->m_next = this;
        m_prev->m_next = first;
        m_prev = last_prev;
    }

    // Inserts node immediately after this one.
    constexpr void LinkNext(IntrusiveListNode* node) {
        // We can't link an already linked node.
        ASSERT(!node->IsLinked());
        return this->SpliceNext(node, node);
    }

    // Inserts the circular range [first, ... , last->m_prev] after this node.
    constexpr void SpliceNext(IntrusiveListNode* first, IntrusiveListNode* last) {
        // Splice a range into the list.
        auto last_prev = last->m_prev;
        first->m_prev = this;
        last_prev->m_next = m_next;
        m_next->m_prev = last_prev;
        m_next = first;
    }

    // Removes this single node from its list (equivalent to Unlink(m_next)).
    constexpr void Unlink() {
        this->Unlink(m_next);
    }

    // Removes the range [this, last) from its list. The removed nodes are
    // re-stitched into their own circular list, so they can later be spliced
    // elsewhere as a unit (see IntrusiveListImpl::splice_impl).
    constexpr void Unlink(IntrusiveListNode* last) {
        // Unlink a node from a next node.
        auto last_prev = last->m_prev;
        m_prev->m_next = last;
        last->m_prev = m_prev;
        last_prev->m_next = this;
        m_prev = last_prev;
    }

    constexpr IntrusiveListNode* GetPrev() {
        return m_prev;
    }

    constexpr const IntrusiveListNode* GetPrev() const {
        return m_prev;
    }

    constexpr IntrusiveListNode* GetNext() {
        return m_next;
    }

    constexpr const IntrusiveListNode* GetNext() const {
        return m_next;
    }
};
// DEPRECATED: static_assert(std::is_literal_type<IntrusiveListNode>::value);
namespace impl {
// Type-erased core of the intrusive list: a circular doubly-linked list of raw
// IntrusiveListNode, anchored at an embedded sentinel (m_root_node). The typed
// IntrusiveList<T, Traits> facade below forwards to this class and converts
// nodes to/from their owning objects. Provides an STL-like container interface
// (iterators, push/pop, insert/erase, splice) but never owns its elements.
class IntrusiveListImpl {
    CITRA_NON_COPYABLE(IntrusiveListImpl);

private:
    // Sentinel node: begin() is its next, end() is the sentinel itself.
    IntrusiveListNode m_root_node;

public:
    template <bool Const>
    class Iterator;

    using value_type = IntrusiveListNode;
    using size_type = size_t;
    using difference_type = ptrdiff_t;
    using pointer = value_type*;
    using const_pointer = const value_type*;
    using reference = value_type&;
    using const_reference = const value_type&;
    using iterator = Iterator<false>;
    using const_iterator = Iterator<true>;
    using reverse_iterator = std::reverse_iterator<iterator>;
    using const_reverse_iterator = std::reverse_iterator<const_iterator>;

    // Bidirectional iterator over raw nodes; Const selects const/non-const
    // pointer and reference types.
    template <bool Const>
    class Iterator {
    public:
        using iterator_category = std::bidirectional_iterator_tag;
        using value_type = typename IntrusiveListImpl::value_type;
        using difference_type = typename IntrusiveListImpl::difference_type;
        using pointer =
            std::conditional_t<Const, IntrusiveListImpl::const_pointer, IntrusiveListImpl::pointer>;
        using reference = std::conditional_t<Const, IntrusiveListImpl::const_reference,
                                             IntrusiveListImpl::reference>;

    private:
        pointer m_node;

    public:
        constexpr explicit Iterator(pointer n) : m_node(n) {}

        constexpr bool operator==(const Iterator& rhs) const {
            return m_node == rhs.m_node;
        }

        constexpr pointer operator->() const {
            return m_node;
        }

        constexpr reference operator*() const {
            return *m_node;
        }

        constexpr Iterator& operator++() {
            m_node = m_node->m_next;
            return *this;
        }

        constexpr Iterator& operator--() {
            m_node = m_node->m_prev;
            return *this;
        }

        constexpr Iterator operator++(int) {
            const Iterator it{*this};
            ++(*this);
            return it;
        }

        constexpr Iterator operator--(int) {
            const Iterator it{*this};
            --(*this);
            return it;
        }

        // Implicit conversion from mutable to const iterator.
        constexpr operator Iterator<true>() const {
            return Iterator<true>(m_node);
        }

        // Used internally to implement const_iterator-taking mutators
        // (insert/erase/splice) on top of the non-const operations.
        constexpr Iterator<false> GetNonConstIterator() const {
            return Iterator<false>(const_cast<IntrusiveListImpl::pointer>(m_node));
        }
    };

public:
    constexpr IntrusiveListImpl() : m_root_node() {}

    // Iterator accessors.
    constexpr iterator begin() {
        return iterator(m_root_node.GetNext());
    }

    constexpr const_iterator begin() const {
        return const_iterator(m_root_node.GetNext());
    }

    constexpr iterator end() {
        return iterator(std::addressof(m_root_node));
    }

    constexpr const_iterator end() const {
        return const_iterator(std::addressof(m_root_node));
    }

    constexpr iterator iterator_to(reference v) {
        // Only allow iterator_to for values in lists.
        ASSERT(v.IsLinked());
        return iterator(std::addressof(v));
    }

    constexpr const_iterator iterator_to(const_reference v) const {
        // Only allow iterator_to for values in lists.
        ASSERT(v.IsLinked());
        return const_iterator(std::addressof(v));
    }

    // Content management.
    constexpr bool empty() const {
        return !m_root_node.IsLinked();
    }

    // O(n): walks the whole list.
    constexpr size_type size() const {
        return static_cast<size_type>(std::distance(this->begin(), this->end()));
    }

    // NOTE: back()/front() on an empty list return the sentinel node itself;
    // callers are expected to check empty() first.
    constexpr reference back() {
        return *m_root_node.GetPrev();
    }

    constexpr const_reference back() const {
        return *m_root_node.GetPrev();
    }

    constexpr reference front() {
        return *m_root_node.GetNext();
    }

    constexpr const_reference front() const {
        return *m_root_node.GetNext();
    }

    constexpr void push_back(reference node) {
        m_root_node.LinkPrev(std::addressof(node));
    }

    constexpr void push_front(reference node) {
        m_root_node.LinkNext(std::addressof(node));
    }

    constexpr void pop_back() {
        m_root_node.GetPrev()->Unlink();
    }

    constexpr void pop_front() {
        m_root_node.GetNext()->Unlink();
    }

    // Inserts node before pos; returns an iterator to the inserted node.
    constexpr iterator insert(const_iterator pos, reference node) {
        pos.GetNonConstIterator()->LinkPrev(std::addressof(node));
        return iterator(std::addressof(node));
    }

    // Moves all nodes of o in front of pos (o becomes empty).
    constexpr void splice(const_iterator pos, IntrusiveListImpl& o) {
        splice_impl(pos, o.begin(), o.end());
    }

    // Moves the single node at first in front of pos. The source list argument
    // is unused because nodes carry their own links.
    constexpr void splice(const_iterator pos, IntrusiveListImpl&, const_iterator first) {
        const_iterator last(first);
        std::advance(last, 1);
        splice_impl(pos, first, last);
    }

    // Moves the range [first, last) in front of pos.
    constexpr void splice(const_iterator pos, IntrusiveListImpl&, const_iterator first,
                          const_iterator last) {
        splice_impl(pos, first, last);
    }

    // Removes the node at pos; returns an iterator to the following node.
    // Erasing end() is a no-op that returns end().
    constexpr iterator erase(const_iterator pos) {
        if (pos == this->end()) {
            return this->end();
        }
        iterator it(pos.GetNonConstIterator());
        (it++)->Unlink();
        return it;
    }

    constexpr void clear() {
        while (!this->empty()) {
            this->pop_front();
        }
    }

private:
    constexpr void splice_impl(const_iterator _pos, const_iterator _first, const_iterator _last) {
        if (_first == _last) {
            return;
        }
        iterator pos(_pos.GetNonConstIterator());
        iterator first(_first.GetNonConstIterator());
        iterator last(_last.GetNonConstIterator());
        // Unlink leaves [first, last) stitched into its own circular list, so
        // splicing with first as both range arguments re-inserts the entire
        // detached range before pos (see IntrusiveListNode::SplicePrev).
        first->Unlink(std::addressof(*last));
        pos->SplicePrev(std::addressof(*first), std::addressof(*first));
    }
};
} // namespace impl
// Typed intrusive-list facade over impl::IntrusiveListImpl. Traits provides the
// mapping between an element T and its embedded IntrusiveListNode in both
// directions (GetNode / GetParent); see the *Traits classes at the bottom of
// this header. The list never owns its elements — callers manage element
// lifetime and must keep elements alive while linked.
template <class T, class Traits>
class IntrusiveList {
    CITRA_NON_COPYABLE(IntrusiveList);

private:
    impl::IntrusiveListImpl m_impl;

public:
    template <bool Const>
    class Iterator;

    using value_type = T;
    using size_type = size_t;
    using difference_type = ptrdiff_t;
    using pointer = value_type*;
    using const_pointer = const value_type*;
    using reference = value_type&;
    using const_reference = const value_type&;
    using iterator = Iterator<false>;
    using const_iterator = Iterator<true>;
    using reverse_iterator = std::reverse_iterator<iterator>;
    using const_reverse_iterator = std::reverse_iterator<const_iterator>;

    // Bidirectional iterator that wraps the node-level iterator and converts
    // each node to its owning T via Traits::GetParent on dereference.
    template <bool Const>
    class Iterator {
    public:
        friend class Common::IntrusiveList<T, Traits>;

        using ImplIterator =
            std::conditional_t<Const, Common::impl::IntrusiveListImpl::const_iterator,
                               Common::impl::IntrusiveListImpl::iterator>;

        using iterator_category = std::bidirectional_iterator_tag;
        using value_type = typename IntrusiveList::value_type;
        using difference_type = typename IntrusiveList::difference_type;
        using pointer =
            std::conditional_t<Const, IntrusiveList::const_pointer, IntrusiveList::pointer>;
        using reference =
            std::conditional_t<Const, IntrusiveList::const_reference, IntrusiveList::reference>;

    private:
        ImplIterator m_iterator;

    private:
        // Only the enclosing list may construct iterators from impl iterators.
        constexpr explicit Iterator(ImplIterator it) : m_iterator(it) {}

        constexpr ImplIterator GetImplIterator() const {
            return m_iterator;
        }

    public:
        constexpr bool operator==(const Iterator& rhs) const {
            return m_iterator == rhs.m_iterator;
        }

        constexpr pointer operator->() const {
            return std::addressof(Traits::GetParent(*m_iterator));
        }

        constexpr reference operator*() const {
            return Traits::GetParent(*m_iterator);
        }

        constexpr Iterator& operator++() {
            ++m_iterator;
            return *this;
        }

        constexpr Iterator& operator--() {
            --m_iterator;
            return *this;
        }

        constexpr Iterator operator++(int) {
            const Iterator it{*this};
            ++m_iterator;
            return it;
        }

        constexpr Iterator operator--(int) {
            const Iterator it{*this};
            --m_iterator;
            return it;
        }

        // Implicit conversion from mutable to const iterator.
        constexpr operator Iterator<true>() const {
            return Iterator<true>(m_iterator);
        }
    };

private:
    // Thin Traits shims, kept in one place so the public methods below read
    // uniformly.
    static constexpr IntrusiveListNode& GetNode(reference ref) {
        return Traits::GetNode(ref);
    }

    static constexpr IntrusiveListNode const& GetNode(const_reference ref) {
        return Traits::GetNode(ref);
    }

    static constexpr reference GetParent(IntrusiveListNode& node) {
        return Traits::GetParent(node);
    }

    static constexpr const_reference GetParent(IntrusiveListNode const& node) {
        return Traits::GetParent(node);
    }

public:
    constexpr IntrusiveList() : m_impl() {}

    // Iterator accessors.
    constexpr iterator begin() {
        return iterator(m_impl.begin());
    }

    constexpr const_iterator begin() const {
        return const_iterator(m_impl.begin());
    }

    constexpr iterator end() {
        return iterator(m_impl.end());
    }

    constexpr const_iterator end() const {
        return const_iterator(m_impl.end());
    }

    constexpr const_iterator cbegin() const {
        return this->begin();
    }

    constexpr const_iterator cend() const {
        return this->end();
    }

    constexpr reverse_iterator rbegin() {
        return reverse_iterator(this->end());
    }

    constexpr const_reverse_iterator rbegin() const {
        return const_reverse_iterator(this->end());
    }

    constexpr reverse_iterator rend() {
        return reverse_iterator(this->begin());
    }

    constexpr const_reverse_iterator rend() const {
        return const_reverse_iterator(this->begin());
    }

    constexpr const_reverse_iterator crbegin() const {
        return this->rbegin();
    }

    constexpr const_reverse_iterator crend() const {
        return this->rend();
    }

    // Returns an iterator to an element already linked into a list
    // (asserts otherwise — see IntrusiveListImpl::iterator_to).
    constexpr iterator iterator_to(reference v) {
        return iterator(m_impl.iterator_to(GetNode(v)));
    }

    constexpr const_iterator iterator_to(const_reference v) const {
        return const_iterator(m_impl.iterator_to(GetNode(v)));
    }

    // Content management.
    constexpr bool empty() const {
        return m_impl.empty();
    }

    // O(n) — forwarded to IntrusiveListImpl::size().
    constexpr size_type size() const {
        return m_impl.size();
    }

    constexpr reference back() {
        return GetParent(m_impl.back());
    }

    constexpr const_reference back() const {
        return GetParent(m_impl.back());
    }

    constexpr reference front() {
        return GetParent(m_impl.front());
    }

    constexpr const_reference front() const {
        return GetParent(m_impl.front());
    }

    constexpr void push_back(reference ref) {
        m_impl.push_back(GetNode(ref));
    }

    constexpr void push_front(reference ref) {
        m_impl.push_front(GetNode(ref));
    }

    constexpr void pop_back() {
        m_impl.pop_back();
    }

    constexpr void pop_front() {
        m_impl.pop_front();
    }

    // Inserts ref before pos; returns an iterator to the inserted element.
    constexpr iterator insert(const_iterator pos, reference ref) {
        return iterator(m_impl.insert(pos.GetImplIterator(), GetNode(ref)));
    }

    // Moves all elements of o in front of pos (o becomes empty).
    constexpr void splice(const_iterator pos, IntrusiveList& o) {
        m_impl.splice(pos.GetImplIterator(), o.m_impl);
    }

    // Moves the single element at first in front of pos.
    constexpr void splice(const_iterator pos, IntrusiveList& o, const_iterator first) {
        m_impl.splice(pos.GetImplIterator(), o.m_impl, first.GetImplIterator());
    }

    // Moves the range [first, last) in front of pos.
    constexpr void splice(const_iterator pos, IntrusiveList& o, const_iterator first,
                          const_iterator last) {
        m_impl.splice(pos.GetImplIterator(), o.m_impl, first.GetImplIterator(),
                      last.GetImplIterator());
    }

    // Unlinks the element at pos; returns an iterator to the following element.
    constexpr iterator erase(const_iterator pos) {
        return iterator(m_impl.erase(pos.GetImplIterator()));
    }

    constexpr void clear() {
        m_impl.clear();
    }
};
// Traits for a list whose node is a *member* of the element type, selected by
// a pointer-to-member. Usage:
//   using ListType = IntrusiveListMemberTraits<&MyType::m_list_node>::ListType;
// Derived defaults to the class that declares the member (via GetParentType).
template <auto T, class Derived = Common::impl::GetParentType<T>>
class IntrusiveListMemberTraits;

template <class Parent, IntrusiveListNode Parent::*Member, class Derived>
class IntrusiveListMemberTraits<Member, Derived> {
public:
    using ListType = IntrusiveList<Derived, IntrusiveListMemberTraits>;

private:
    friend class IntrusiveList<Derived, IntrusiveListMemberTraits>;

    // Element -> embedded node: plain member access.
    static constexpr IntrusiveListNode& GetNode(Derived& parent) {
        return parent.*Member;
    }

    static constexpr IntrusiveListNode const& GetNode(Derived const& parent) {
        return parent.*Member;
    }

    // Node -> owning element: offset-based recovery (see parent_of_member.h).
    // Not constexpr, hence the non-constexpr GetParent.
    static Derived& GetParent(IntrusiveListNode& node) {
        return Common::GetParentReference<Member, Derived>(std::addressof(node));
    }

    static Derived const& GetParent(IntrusiveListNode const& node) {
        return Common::GetParentReference<Member, Derived>(std::addressof(node));
    }
};
// Variant of IntrusiveListMemberTraits that recovers the parent object with a
// runtime offset computation instead of the constexpr-friendly machinery in
// parent_of_member.h.
template <auto T, class Derived = Common::impl::GetParentType<T>>
class IntrusiveListMemberTraitsByNonConstexprOffsetOf;

template <class Parent, IntrusiveListNode Parent::*Member, class Derived>
class IntrusiveListMemberTraitsByNonConstexprOffsetOf<Member, Derived> {
public:
    using ListType = IntrusiveList<Derived, IntrusiveListMemberTraitsByNonConstexprOffsetOf>;

private:
    friend class IntrusiveList<Derived, IntrusiveListMemberTraitsByNonConstexprOffsetOf>;

    static constexpr IntrusiveListNode& GetNode(Derived& parent) {
        return parent.*Member;
    }

    static constexpr IntrusiveListNode const& GetNode(Derived const& parent) {
        return parent.*Member;
    }

    // Subtract the member's byte offset from the node address to get back to
    // the enclosing Derived object.
    static Derived& GetParent(IntrusiveListNode& node) {
        return *reinterpret_cast<Derived*>(reinterpret_cast<char*>(std::addressof(node)) -
                                           GetOffset());
    }

    static Derived const& GetParent(IntrusiveListNode const& node) {
        return *reinterpret_cast<const Derived*>(
            reinterpret_cast<const char*>(std::addressof(node)) - GetOffset());
    }

    // NOTE(review): hand-rolled offsetof via a null-pointer member access —
    // formally undefined behavior, relied on to work on the supported
    // compilers (inherited from upstream). Do not make this constexpr.
    static uintptr_t GetOffset() {
        return reinterpret_cast<uintptr_t>(std::addressof(reinterpret_cast<Derived*>(0)->*Member));
    }
};
// CRTP base: derive from IntrusiveListBaseNode<MyType> to embed the list node
// as a base subobject instead of a data member (pairs with
// IntrusiveListBaseTraits below).
template <class Derived>
class IntrusiveListBaseNode : public IntrusiveListNode {};
// Traits for elements that inherit from IntrusiveListBaseNode<Derived>.
// Both directions are plain static up/down casts through the base subobject,
// so unlike the member-based traits everything here is constexpr.
template <class Derived>
class IntrusiveListBaseTraits {
public:
    using ListType = IntrusiveList<Derived, IntrusiveListBaseTraits>;

private:
    friend class IntrusiveList<Derived, IntrusiveListBaseTraits>;

    // Element -> node: upcast through the CRTP base.
    static constexpr IntrusiveListNode& GetNode(Derived& parent) {
        return static_cast<IntrusiveListNode&>(
            static_cast<IntrusiveListBaseNode<Derived>&>(parent));
    }

    static constexpr IntrusiveListNode const& GetNode(Derived const& parent) {
        return static_cast<const IntrusiveListNode&>(
            static_cast<const IntrusiveListBaseNode<Derived>&>(parent));
    }

    // Node -> element: downcast; valid because every linked node is known to
    // be the base subobject of a Derived.
    static constexpr Derived& GetParent(IntrusiveListNode& node) {
        return static_cast<Derived&>(static_cast<IntrusiveListBaseNode<Derived>&>(node));
    }

    static constexpr Derived const& GetParent(IntrusiveListNode const& node) {
        return static_cast<const Derived&>(
            static_cast<const IntrusiveListBaseNode<Derived>&>(node));
    }
};
} // namespace Common

View File

@ -0,0 +1,190 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <type_traits>
#include "common/assert.h"
namespace Common {
namespace detail {
// Raw byte buffer with the size and alignment of some T; reserves storage
// without constructing the object (see TypedStorage alias below).
template <typename T, size_t Size, size_t Align>
struct TypedStorageImpl {
    alignas(Align) u8 storage_[Size];
};
} // namespace detail
// Uninitialized storage sized and aligned for a T.
template <typename T>
using TypedStorage = detail::TypedStorageImpl<T, sizeof(T), alignof(T)>;
// Obtains a typed pointer to the object occupying a TypedStorage buffer.
// NOTE(review): presumably callers guarantee a T has been (or will be)
// constructed in the storage before dereferencing — confirm at call sites.
template <typename T>
static constexpr T* GetPointer(TypedStorage<T>& ts) {
    return static_cast<T*>(static_cast<void*>(std::addressof(ts.storage_)));
}

// Const overload of GetPointer.
template <typename T>
static constexpr const T* GetPointer(const TypedStorage<T>& ts) {
    return static_cast<const T*>(static_cast<const void*>(std::addressof(ts.storage_)));
}
namespace impl {
// Recursive union machinery used to compute a member's byte offset in a
// constexpr context (reinterpret_cast is not permitted there).
// UnionImpl<P, M, Offset> overlays an array of MemberType over the parent's
// storage shifted by `Offset` padding bytes; OffsetOfCalculator walks
// `next_union` until an overlaid array element aliases the target member.
template <size_t MaxDepth>
struct OffsetOfUnionHolder {
    template <typename ParentType, typename MemberType, size_t Offset>
    union UnionImpl {
        using PaddingMember = char;
        static constexpr size_t GetOffset() {
            return Offset;
        }

// pack(1) so `padding` shifts `members` by exactly Offset bytes.
#pragma pack(push, 1)
        struct {
            PaddingMember padding[Offset];
            MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1];
        } data;
#pragma pack(pop)
        UnionImpl<ParentType, MemberType, Offset + 1> next_union;
    };

    // Base case: no padding bytes before the overlay array.
    template <typename ParentType, typename MemberType>
    union UnionImpl<ParentType, MemberType, 0> {
        static constexpr size_t GetOffset() {
            return 0;
        }
        struct {
            MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1];
        } data;
        UnionImpl<ParentType, MemberType, 1> next_union;
    };

    // Recursion terminator: stops template instantiation at MaxDepth.
    template <typename ParentType, typename MemberType>
    union UnionImpl<ParentType, MemberType, MaxDepth> {};
};
// Computes the byte offset of a pointer-to-member within ParentType at compile
// time. It probes the overlay unions from OffsetOfUnionHolder at every byte
// shift in [0, sizeof(MemberType)) until an overlay element's address aliases
// the target member.
template <typename ParentType, typename MemberType>
struct OffsetOfCalculator {
    using UnionHolder =
        typename OffsetOfUnionHolder<sizeof(MemberType)>::template UnionImpl<ParentType, MemberType,
                                                                             0>;
    union Union {
        char c{};
        UnionHolder first_union;
        TypedStorage<ParentType> parent;
        // Initialize the char member so the union is usable in constant
        // expressions without constructing ParentType.
        constexpr Union() : c() {}
    };
    static constexpr Union U = {};

    // Advances `start` element-by-element until it reaches `target`; this
    // form of pointer walking is valid during constexpr evaluation.
    static constexpr const MemberType* GetNextAddress(const MemberType* start,
                                                      const MemberType* target) {
        while (start < target) {
            start++;
        }
        return start;
    }

    // Byte distance between two element pointers.
    static constexpr std::ptrdiff_t GetDifference(const MemberType* start,
                                                  const MemberType* target) {
        return (target - start) * sizeof(MemberType);
    }

    // Recursive search over byte shifts: if the member is not element-aligned
    // at this union's offset, retry with the next one; only
    // sizeof(MemberType) distinct shifts are possible.
    template <typename CurUnion>
    static constexpr std::ptrdiff_t OffsetOfImpl(MemberType ParentType::*member,
                                                 CurUnion& cur_union) {
        constexpr size_t Offset = CurUnion::GetOffset();
        const auto target = std::addressof(GetPointer(U.parent)->*member);
        const auto start = std::addressof(cur_union.data.members[0]);
        const auto next = GetNextAddress(start, target);

        if (next != target) {
            if constexpr (Offset < sizeof(MemberType) - 1) {
                return OffsetOfImpl(member, cur_union.next_union);
            } else {
                // Every shift exhausted without aliasing the member.
                UNREACHABLE();
            }
        }

        // Offset = element index * element size + residual byte shift.
        return static_cast<ptrdiff_t>(static_cast<size_t>(next - start) * sizeof(MemberType) +
                                      Offset);
    }

    // Entry point: offset in bytes of `member` within ParentType.
    static constexpr std::ptrdiff_t OffsetOf(MemberType ParentType::*member) {
        return OffsetOfImpl(member, U.first_union);
    }
};
// Deconstructs a pointer-to-member type `M P::*` into its class type (Parent)
// and member type (Member).
template <typename T>
struct GetMemberPointerTraits;

template <typename P, typename M>
struct GetMemberPointerTraits<M P::*> {
    using Parent = P;
    using Member = M;
};

// Convenience aliases extracting the class/member type from a member-pointer
// non-type template parameter.
template <auto MemberPtr>
using GetParentType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Parent;

template <auto MemberPtr>
using GetMemberType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Member;
// Byte offset of MemberPtr's member within RealParentType, computed at
// compile time. RealParentType may be a type derived from the class that
// declares the member; the static_assert rejects unrelated types.
template <auto MemberPtr, typename RealParentType = GetParentType<MemberPtr>>
constexpr std::ptrdiff_t OffsetOf() {
    using DeducedParentType = GetParentType<MemberPtr>;
    using MemberType = GetMemberType<MemberPtr>;
    // Use the C++17 _v variable templates (file already relies on C++17,
    // e.g. `if constexpr` in OffsetOfCalculator).
    static_assert(std::is_base_of_v<DeducedParentType, RealParentType> ||
                  std::is_same_v<RealParentType, DeducedParentType>);

    return OffsetOfCalculator<RealParentType, MemberType>::OffsetOf(MemberPtr);
} // Note: stray ';' after the function body removed (-Wextra-semi).
} // namespace impl
// Recovers a reference to the enclosing RealParentType object from a pointer
// to one of its members, by subtracting the member's compile-time offset.
// The pointer must genuinely point into a RealParentType instance.
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>* member) {
    std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>();
    return *static_cast<RealParentType*>(
        static_cast<void*>(static_cast<uint8_t*>(static_cast<void*>(member)) - Offset));
}

// Const overload.
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const* member) {
    std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>();
    return *static_cast<const RealParentType*>(static_cast<const void*>(
        static_cast<const uint8_t*>(static_cast<const void*>(member)) - Offset));
}
// Pointer-returning variants of GetParentReference (pointer-to-member input).
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType* GetParentPointer(impl::GetMemberType<MemberPtr>* member) {
    return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}

// Const overload.
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const* GetParentPointer(impl::GetMemberType<MemberPtr> const* member) {
    return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}
// Reference-taking conveniences: forward to the pointer overloads above.
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>& member) {
    return GetParentReference<MemberPtr, RealParentType>(std::addressof(member));
}

// Const overload.
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const& member) {
    return GetParentReference<MemberPtr, RealParentType>(std::addressof(member));
}
// Pointer-returning variants taking the member by reference.
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType* GetParentPointer(impl::GetMemberType<MemberPtr>& member) {
    return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}

// Const overload.
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const* GetParentPointer(impl::GetMemberType<MemberPtr> const& member) {
    return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}
} // namespace Common

View File

@ -128,60 +128,70 @@ add_library(citra_core STATIC
hle/applets/swkbd.h
hle/ipc.h
hle/ipc_helpers.h
hle/kernel/address_arbiter.cpp
hle/kernel/address_arbiter.h
hle/kernel/client_port.cpp
hle/kernel/client_port.h
hle/kernel/client_session.cpp
hle/kernel/client_session.h
hle/kernel/config_mem.cpp
hle/kernel/config_mem.h
hle/kernel/errors.h
hle/kernel/event.cpp
hle/kernel/event.h
hle/kernel/handle_table.cpp
hle/kernel/handle_table.h
hle/kernel/hle_ipc.cpp
hle/kernel/hle_ipc.h
hle/kernel/ipc.cpp
hle/kernel/ipc.h
hle/kernel/ipc_debugger/recorder.cpp
hle/kernel/ipc_debugger/recorder.h
hle/kernel/k_address_arbiter.cpp
hle/kernel/k_address_arbiter.h
hle/kernel/k_auto_object.cpp
hle/kernel/k_auto_object.h
hle/kernel/k_auto_object_container.cpp
hle/kernel/k_auto_object_container.h
hle/kernel/k_client_port.cpp
hle/kernel/k_client_port.h
hle/kernel/k_client_session.cpp
hle/kernel/k_client_session.h
hle/kernel/k_code_set.h
hle/kernel/k_event.cpp
hle/kernel/k_event.h
hle/kernel/k_handle_table.cpp
hle/kernel/k_handle_table.h
hle/kernel/k_linked_list.h
hle/kernel/k_mutex.cpp
hle/kernel/k_mutex.h
hle/kernel/k_object_name.cpp
hle/kernel/k_object_name.h
hle/kernel/k_port.cpp
hle/kernel/k_port.h
hle/kernel/k_process.cpp
hle/kernel/k_process.h
hle/kernel/k_resource_limit.cpp
hle/kernel/k_resource_limit.h
hle/kernel/k_scoped_resource_reservation.h
hle/kernel/k_semaphore.cpp
hle/kernel/k_semaphore.h
hle/kernel/k_server_port.cpp
hle/kernel/k_server_port.h
hle/kernel/k_server_session.cpp
hle/kernel/k_server_session.h
hle/kernel/k_session.cpp
hle/kernel/k_session.h
hle/kernel/k_shared_memory.cpp
hle/kernel/k_shared_memory.h
hle/kernel/k_slab_heap.h
hle/kernel/k_synchronization_object.cpp
hle/kernel/k_synchronization_object.h
hle/kernel/k_thread.cpp
hle/kernel/k_thread.h
hle/kernel/k_timer.cpp
hle/kernel/k_timer.h
hle/kernel/kernel.cpp
hle/kernel/kernel.h
hle/kernel/memory.cpp
hle/kernel/memory.h
hle/kernel/mutex.cpp
hle/kernel/mutex.h
hle/kernel/object.cpp
hle/kernel/object.h
hle/kernel/process.cpp
hle/kernel/process.h
hle/kernel/resource_limit.cpp
hle/kernel/resource_limit.h
hle/kernel/semaphore.cpp
hle/kernel/semaphore.h
hle/kernel/server_port.cpp
hle/kernel/server_port.h
hle/kernel/server_session.cpp
hle/kernel/server_session.h
hle/kernel/session.h
hle/kernel/session.cpp
hle/kernel/shared_memory.cpp
hle/kernel/shared_memory.h
hle/kernel/shared_page.cpp
hle/kernel/shared_page.h
hle/kernel/svc.cpp
hle/kernel/svc.h
hle/kernel/svc_wrapper.h
hle/kernel/thread.cpp
hle/kernel/thread.h
hle/kernel/timer.cpp
hle/kernel/timer.h
hle/kernel/vm_manager.cpp
hle/kernel/vm_manager.h
hle/kernel/wait_object.cpp
hle/kernel/wait_object.h
hle/mii.h
hle/mii.cpp
hle/result.h
@ -323,6 +333,8 @@ add_library(citra_core STATIC
hle/service/ir/ir_u.h
hle/service/ir/ir_user.cpp
hle/service/ir/ir_user.h
hle/service/kernel_helpers.cpp
hle/service/kernel_helpers.h
hle/service/ldr_ro/cro_helper.cpp
hle/service/ldr_ro/cro_helper.h
hle/service/ldr_ro/ldr_ro.cpp

View File

@ -292,8 +292,8 @@ void ARM_Dynarmic::SetPageTable(const std::shared_ptr<Memory::PageTable>& page_t
}
void ARM_Dynarmic::ServeBreak() {
Kernel::Thread* thread = system.Kernel().GetCurrentThreadManager().GetCurrentThread();
SaveContext(thread->context);
Kernel::KThread* thread = system.Kernel().GetCurrentThreadManager().GetCurrentThread();
SaveContext(thread->GetContext());
GDBStub::Break();
GDBStub::SendTrap(thread, 5);
}

View File

@ -609,8 +609,8 @@ void ARMul_State::ServeBreak() {
DEBUG_ASSERT(Reg[15] == last_bkpt.address);
}
Kernel::Thread* thread = system.Kernel().GetCurrentThreadManager().GetCurrentThread();
system.GetRunningCore().SaveContext(thread->context);
Kernel::KThread* thread = system.Kernel().GetCurrentThreadManager().GetCurrentThread();
system.GetRunningCore().SaveContext(thread->GetContext());
if (last_bkpt_hit || GDBStub::IsMemoryBreak() || GDBStub::GetCpuStepFlag()) {
last_bkpt_hit = false;

View File

@ -27,9 +27,9 @@
#include "core/frontend/image_interface.h"
#include "core/gdbstub/gdbstub.h"
#include "core/global.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/service/apt/applet_manager.h"
#include "core/hle/service/apt/apt.h"
#include "core/hle/service/cam/cam.h"
@ -83,9 +83,9 @@ System::ResultStatus System::RunLoop(bool tight_loop) {
}
if (GDBStub::IsServerEnabled()) {
Kernel::Thread* thread = kernel->GetCurrentThreadManager().GetCurrentThread();
Kernel::KThread* thread = kernel->GetCurrentThreadManager().GetCurrentThread();
if (thread && running_core) {
running_core->SaveContext(thread->context);
running_core->SaveContext(thread->GetContext());
}
GDBStub::HandlePacket(*this);
@ -311,8 +311,8 @@ System::ResultStatus System::Load(Frontend::EmuWindow& emu_window, const std::st
}
telemetry_session->AddInitialInfo(*app_loader);
std::shared_ptr<Kernel::Process> process;
const Loader::ResultStatus load_result{app_loader->Load(process)};
Kernel::Process* process;
const Loader::ResultStatus load_result{app_loader->Load(std::addressof(process))};
if (Loader::ResultStatus::Success != load_result) {
LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result);
System::Shutdown();

View File

@ -7,7 +7,7 @@
#include "common/archives.h"
#include "core/file_sys/archive_other_savedata.h"
#include "core/file_sys/errors.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/service/fs/archive.h"
SERIALIZE_EXPORT_IMPL(FileSys::ArchiveFactory_OtherSaveDataPermitted)

View File

@ -6,7 +6,7 @@
#include "common/archives.h"
#include "core/core.h"
#include "core/file_sys/archive_savedata.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/k_process.h"
SERIALIZE_EXPORT_IMPL(FileSys::ArchiveFactory_SaveData)

View File

@ -11,7 +11,7 @@
#include "core/file_sys/archive_selfncch.h"
#include "core/file_sys/errors.h"
#include "core/file_sys/ivfc_archive.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/k_process.h"
SERIALIZE_EXPORT_IMPL(FileSys::ArchiveFactory_SelfNCCH)

View File

@ -121,7 +121,7 @@ Loader::ResultStatus FileSys::Plugin3GXLoader::Load(
if (!compatible_TID.empty() &&
std::find(compatible_TID.begin(), compatible_TID.end(),
static_cast<u32>(process.codeset->program_id)) == compatible_TID.end()) {
static_cast<u32>(process.codeset.program_id)) == compatible_TID.end()) {
LOG_ERROR(Service_PLGLDR,
"Failed to load 3GX plugin. Not compatible with loaded process: {}",
plg_context.plugin_path);
@ -291,7 +291,7 @@ void FileSys::Plugin3GXLoader::MapBootloader(Kernel::Process& process, Kernel::K
u32 exe_checksum, bool no_flash) {
u32_le game_instructions[2];
kernel.memory.ReadBlock(process, process.codeset->CodeSegment().addr, game_instructions,
kernel.memory.ReadBlock(process, process.codeset.CodeSegment().addr, game_instructions,
sizeof(u32) * 2);
std::array<u32_le, g_plugin_loader_bootloader.size() / sizeof(u32)> bootloader;
@ -307,7 +307,7 @@ void FileSys::Plugin3GXLoader::MapBootloader(Kernel::Process& process, Kernel::K
*it = game_instructions[1];
} break;
case 0xDEAD0002: {
*it = process.codeset->CodeSegment().addr;
*it = process.codeset.CodeSegment().addr;
} break;
case 0xDEAD0003: {
for (u32 i = 0;
@ -361,6 +361,6 @@ void FileSys::Plugin3GXLoader::MapBootloader(Kernel::Process& process, Kernel::K
game_instructions[0] = 0xE51FF004; // ldr pc, [pc, #-4]
game_instructions[1] = _3GX_exe_load_addr - bootloader_memory_size;
kernel.memory.WriteBlock(process, process.codeset->CodeSegment().addr, game_instructions,
kernel.memory.WriteBlock(process, process.codeset.CodeSegment().addr, game_instructions,
sizeof(u32) * 2);
}

View File

@ -25,7 +25,7 @@
#include "common/common_types.h"
#include "common/swap.h"
#include "core/file_sys/archive_backend.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/service/plgldr/plgldr.h"
namespace Loader {

View File

@ -35,7 +35,7 @@
#include "core/core.h"
#include "core/gdbstub/gdbstub.h"
#include "core/gdbstub/hio.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/k_process.h"
#include "core/memory.h"
namespace GDBStub {
@ -128,7 +128,7 @@ u32 command_length;
u32 latest_signal = 0;
bool memory_break = false;
static Kernel::Thread* current_thread = nullptr;
static Kernel::KThread* current_thread = nullptr;
// Binding to a port within the reserved ports range (0-1023) requires root permissions,
// so default to a port outside of that range.
@ -159,72 +159,76 @@ BreakpointMap breakpoints_read;
BreakpointMap breakpoints_write;
} // Anonymous namespace
static Kernel::Thread* FindThreadById(int id) {
static Kernel::KThread* FindThreadById(int id) {
u32 num_cores = Core::GetNumCores();
for (u32 i = 0; i < num_cores; ++i) {
const auto& threads =
Core::System::GetInstance().Kernel().GetThreadManager(i).GetThreadList();
for (auto& thread : threads) {
if (thread->GetThreadId() == static_cast<u32>(id)) {
return thread.get();
return thread;
}
}
}
return nullptr;
}
static u32 RegRead(std::size_t id, Kernel::Thread* thread = nullptr) {
static u32 RegRead(std::size_t id, Kernel::KThread* thread = nullptr) {
if (!thread) {
return 0;
}
const auto& context = thread->GetContext();
if (id <= PC_REGISTER) {
return thread->context.cpu_registers[id];
return context.cpu_registers[id];
} else if (id == CPSR_REGISTER) {
return thread->context.cpsr;
return context.cpsr;
} else {
return 0;
}
}
static void RegWrite(std::size_t id, u32 val, Kernel::Thread* thread = nullptr) {
static void RegWrite(std::size_t id, u32 val, Kernel::KThread* thread = nullptr) {
if (!thread) {
return;
}
auto& context = thread->GetContext();
if (id <= PC_REGISTER) {
thread->context.cpu_registers[id] = val;
context.cpu_registers[id] = val;
} else if (id == CPSR_REGISTER) {
thread->context.cpsr = val;
context.cpsr = val;
}
}
static u64 FpuRead(std::size_t id, Kernel::Thread* thread = nullptr) {
static u64 FpuRead(std::size_t id, Kernel::KThread* thread = nullptr) {
if (!thread) {
return 0;
}
const auto& context = thread->GetContext();
if (id >= D0_REGISTER && id < FPSCR_REGISTER) {
u64 ret = thread->context.fpu_registers[2 * (id - D0_REGISTER)];
ret |= static_cast<u64>(thread->context.fpu_registers[2 * (id - D0_REGISTER) + 1]) << 32;
u64 ret = context.fpu_registers[2 * (id - D0_REGISTER)];
ret |= static_cast<u64>(context.fpu_registers[2 * (id - D0_REGISTER) + 1]) << 32;
return ret;
} else if (id == FPSCR_REGISTER) {
return thread->context.fpscr;
return context.fpscr;
} else {
return 0;
}
}
static void FpuWrite(std::size_t id, u64 val, Kernel::Thread* thread = nullptr) {
static void FpuWrite(std::size_t id, u64 val, Kernel::KThread* thread = nullptr) {
if (!thread) {
return;
}
auto& context = thread->GetContext();
if (id >= D0_REGISTER && id < FPSCR_REGISTER) {
thread->context.fpu_registers[2 * (id - D0_REGISTER)] = static_cast<u32>(val);
thread->context.fpu_registers[2 * (id - D0_REGISTER) + 1] = static_cast<u32>(val >> 32);
context.fpu_registers[2 * (id - D0_REGISTER)] = static_cast<u32>(val);
context.fpu_registers[2 * (id - D0_REGISTER) + 1] = static_cast<u32>(val >> 32);
} else if (id == FPSCR_REGISTER) {
thread->context.fpscr = static_cast<u32>(val);
context.fpscr = static_cast<u32>(val);
}
}
@ -606,7 +610,7 @@ static void HandleThreadAlive() {
*
* @param signal Signal to be sent to client.
*/
static void SendSignal(Kernel::Thread* thread, u32 signal, bool full = true) {
static void SendSignal(Kernel::KThread* thread, u32 signal, bool full = true) {
if (gdbserver_socket == -1) {
return;
}
@ -785,7 +789,7 @@ static void WriteRegister() {
return SendReply("E01");
}
Core::GetRunningCore().LoadContext(current_thread->context);
Core::GetRunningCore().LoadContext(current_thread->GetContext());
SendReply("OK");
}
@ -815,7 +819,7 @@ static void WriteRegisters() {
}
}
Core::GetRunningCore().LoadContext(current_thread->context);
Core::GetRunningCore().LoadContext(current_thread->GetContext());
SendReply("OK");
}
@ -890,7 +894,7 @@ void Break(bool is_memory_break) {
static void Step() {
if (command_length > 1) {
RegWrite(PC_REGISTER, GdbHexToInt(command_buffer + 1), current_thread);
Core::GetRunningCore().LoadContext(current_thread->context);
Core::GetRunningCore().LoadContext(current_thread->GetContext());
}
step_loop = true;
halt_loop = true;
@ -1266,7 +1270,7 @@ void SetCpuStepFlag(bool is_step) {
step_loop = is_step;
}
void SendTrap(Kernel::Thread* thread, int trap) {
void SendTrap(Kernel::KThread* thread, int trap) {
if (!send_trap) {
return;
}

View File

@ -8,7 +8,7 @@
#include <span>
#include "common/common_types.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/k_thread.h"
namespace Core {
class System;
@ -118,7 +118,7 @@ void SetCpuStepFlag(bool is_step);
* @param thread Sending thread.
* @param trap Trap no.
*/
void SendTrap(Kernel::Thread* thread, int trap);
void SendTrap(Kernel::KThread* thread, int trap);
/**
* Send reply to gdb client.

View File

@ -47,10 +47,10 @@ void Applet::SendParameter(const Service::APT::MessageParameter& parameter) {
}
}
void Applet::CloseApplet(std::shared_ptr<Kernel::Object> object, const std::vector<u8>& buffer) {
void Applet::CloseApplet(Kernel::KAutoObject* object, const std::vector<u8>& buffer) {
if (auto locked = manager.lock()) {
locked->PrepareToCloseLibraryApplet(true, false, false);
locked->CloseLibraryApplet(std::move(object), buffer);
locked->CloseLibraryApplet(object, buffer);
} else {
LOG_ERROR(Service_APT, "called after destructing applet manager");
}

View File

@ -8,6 +8,10 @@
#include "core/hle/result.h"
#include "core/hle/service/apt/applet_manager.h"
namespace Core {
class System;
}
namespace HLE::Applets {
class Applet {
@ -39,7 +43,8 @@ public:
protected:
Applet(Core::System& system, Service::APT::AppletId id, Service::APT::AppletId parent,
bool preload, std::weak_ptr<Service::APT::AppletManager> manager)
: system(system), id(id), parent(parent), preload(preload), manager(std::move(manager)) {}
: system(system), id(id), parent(parent), preload(preload), service_context(system),
manager(std::move(manager)) {}
/**
* Handles a parameter from the application.
@ -62,11 +67,11 @@ protected:
virtual Result Finalize() = 0;
Core::System& system;
Service::APT::AppletId id; ///< Id of this Applet
Service::APT::AppletId parent; ///< Id of this Applet's parent
bool preload; ///< Whether the Applet is being preloaded.
std::shared_ptr<std::vector<u8>> heap_memory; ///< Heap memory for this Applet
Service::KernelHelpers::ServiceContext service_context;
/// Whether this applet is running.
bool is_running = true;
@ -75,7 +80,7 @@ protected:
bool is_active = false;
void SendParameter(const Service::APT::MessageParameter& parameter);
void CloseApplet(std::shared_ptr<Kernel::Object> object, const std::vector<u8>& buffer);
void CloseApplet(Kernel::KAutoObject* object, const std::vector<u8>& buffer);
private:
std::weak_ptr<Service::APT::AppletManager> manager;

View File

@ -5,6 +5,7 @@
#include "common/string_util.h"
#include "core/core.h"
#include "core/hle/applets/erreula.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/service/apt/apt.h"
namespace HLE::Applets {
@ -28,7 +29,7 @@ Result ErrEula::ReceiveParameterImpl(const Service::APT::MessageParameter& param
// TODO: allocated memory never released
using Kernel::MemoryPermission;
// Create a SharedMemory that directly points to this heap block.
framebuffer_memory = system.Kernel().CreateSharedMemoryForApplet(
framebuffer_memory = service_context.CreateSharedMemoryForApplet(
0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
"ErrEula Memory");

View File

@ -5,7 +5,10 @@
#pragma once
#include "core/hle/applets/applet.h"
#include "core/hle/kernel/shared_memory.h"
namespace Kernel {
class KSharedMemory;
}
namespace HLE::Applets {
@ -24,7 +27,7 @@ private:
/// This SharedMemory will be created when we receive the LibAppJustStarted message.
/// It holds the framebuffer info retrieved by the application with
/// GSPGPU::ImportDisplayCaptureInfo
std::shared_ptr<Kernel::SharedMemory> framebuffer_memory;
Kernel::KSharedMemory* framebuffer_memory;
/// Parameter received by the applet on start.
std::vector<u8> startup_param;

View File

@ -11,8 +11,8 @@
#include "core/core.h"
#include "core/frontend/applets/mii_selector.h"
#include "core/hle/applets/mii_selector.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/result.h"
namespace HLE::Applets {
@ -35,7 +35,7 @@ Result MiiSelector::ReceiveParameterImpl(const Service::APT::MessageParameter& p
using Kernel::MemoryPermission;
// Create a SharedMemory that directly points to this heap block.
framebuffer_memory = system.Kernel().CreateSharedMemoryForApplet(
framebuffer_memory = service_context.CreateSharedMemoryForApplet(
0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
"MiiSelector Memory");

View File

@ -8,7 +8,6 @@
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/applets/applet.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/mii.h"
#include "core/hle/result.h"
#include "core/hle/service/apt/apt.h"
@ -18,6 +17,10 @@ class MiiSelector;
struct MiiSelectorConfig;
} // namespace Frontend
namespace Kernel {
class KSharedMemory;
}
namespace HLE::Applets {
struct MiiConfig {
@ -79,7 +82,7 @@ private:
/// This SharedMemory will be created when we receive the LibAppJustStarted message.
/// It holds the framebuffer info retrieved by the application with
/// GSPGPU::ImportDisplayCaptureInfo
std::shared_ptr<Kernel::SharedMemory> framebuffer_memory;
Kernel::KSharedMemory* framebuffer_memory;
MiiConfig config;

View File

@ -2,10 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/string_util.h"
#include "core/core.h"
#include "core/hle/applets/mint.h"
#include "core/hle/service/apt/apt.h"
#include "core/hle/kernel/k_shared_memory.h"
namespace HLE::Applets {
@ -28,7 +26,7 @@ Result Mint::ReceiveParameterImpl(const Service::APT::MessageParameter& paramete
// TODO: allocated memory never released
using Kernel::MemoryPermission;
// Create a SharedMemory that directly points to this heap block.
framebuffer_memory = system.Kernel().CreateSharedMemoryForApplet(
framebuffer_memory = service_context.CreateSharedMemoryForApplet(
0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
"Mint Memory");

View File

@ -5,7 +5,10 @@
#pragma once
#include "core/hle/applets/applet.h"
#include "core/hle/kernel/shared_memory.h"
namespace Kernel {
class KSharedMemory;
}
namespace HLE::Applets {
@ -24,7 +27,7 @@ private:
/// This SharedMemory will be created when we receive the Request message.
/// It holds the framebuffer info retrieved by the application with
/// GSPGPU::ImportDisplayCaptureInfo
std::shared_ptr<Kernel::SharedMemory> framebuffer_memory;
Kernel::KSharedMemory* framebuffer_memory;
/// Parameter received by the applet on start.
std::vector<u8> startup_param;

View File

@ -10,12 +10,9 @@
#include "common/string_util.h"
#include "core/core.h"
#include "core/hle/applets/swkbd.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/result.h"
#include "core/hle/service/gsp/gsp.h"
#include "core/hle/service/hid/hid.h"
#include "core/memory.h"
namespace HLE::Applets {
@ -32,7 +29,7 @@ Result SoftwareKeyboard::ReceiveParameterImpl(Service::APT::MessageParameter con
using Kernel::MemoryPermission;
// Create a SharedMemory that directly points to this heap block.
framebuffer_memory = system.Kernel().CreateSharedMemoryForApplet(
framebuffer_memory = service_context.CreateSharedMemoryForApplet(
0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
"SoftwareKeyboard Memory");
@ -94,7 +91,7 @@ Result SoftwareKeyboard::Start(Service::APT::MessageParameter const& parameter)
"The size of the parameter (SoftwareKeyboardConfig) is wrong");
std::memcpy(&config, parameter.buffer.data(), parameter.buffer.size());
text_memory = std::static_pointer_cast<Kernel::SharedMemory, Kernel::Object>(parameter.object);
text_memory = parameter.object->DynamicCast<Kernel::KSharedMemory*>();
DrawScreenKeyboard();

View File

@ -9,7 +9,6 @@
#include "common/common_types.h"
#include "core/frontend/applets/swkbd.h"
#include "core/hle/applets/applet.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/result.h"
#include "core/hle/service/apt/apt.h"
@ -195,10 +194,10 @@ private:
/// This SharedMemory will be created when we receive the LibAppJustStarted message.
/// It holds the framebuffer info retrieved by the application with
/// GSPGPU::ImportDisplayCaptureInfo
std::shared_ptr<Kernel::SharedMemory> framebuffer_memory;
Kernel::KSharedMemory* framebuffer_memory;
/// SharedMemory where the output text will be stored
std::shared_ptr<Kernel::SharedMemory> text_memory;
Kernel::KSharedMemory* text_memory;
/// Configuration of this instance of the SoftwareKeyboard, as received from the application
SoftwareKeyboardConfig config;

View File

@ -6,8 +6,6 @@
#include "common/common_types.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/thread.h"
#include "core/memory.h"
namespace IPC {

View File

@ -87,11 +87,11 @@ public:
void PushRaw(const T& value);
// TODO : ensure that translate params are added after all regular params
template <typename... O>
void PushCopyObjects(std::shared_ptr<O>... pointers);
template <typename... T>
void PushCopyObjects(T*... pointers);
template <typename... O>
void PushMoveObjects(std::shared_ptr<O>... pointers);
template <typename... T>
void PushMoveObjects(T*... pointers);
void PushStaticBuffer(std::vector<u8> buffer, u8 buffer_id);
@ -183,14 +183,14 @@ inline void RequestBuilder::PushMoveHLEHandles(H... handles) {
Push(static_cast<u32>(handles)...);
}
template <typename... O>
inline void RequestBuilder::PushCopyObjects(std::shared_ptr<O>... pointers) {
PushCopyHLEHandles(context->AddOutgoingHandle(std::move(pointers))...);
template <typename... T>
inline void RequestBuilder::PushCopyObjects(T*... pointers) {
PushCopyHLEHandles(context->AddOutgoingHandle(pointers)...);
}
template <typename... O>
inline void RequestBuilder::PushMoveObjects(std::shared_ptr<O>... pointers) {
PushMoveHLEHandles(context->AddOutgoingHandle(std::move(pointers))...);
template <typename... T>
inline void RequestBuilder::PushMoveObjects(T*... pointers) {
PushMoveHLEHandles(context->AddOutgoingHandle(pointers)...);
}
inline void RequestBuilder::PushStaticBuffer(std::vector<u8> buffer, u8 buffer_id) {
@ -241,11 +241,11 @@ public:
}
/// Equivalent to calling `PopGenericObjects<1>()[0]`.
std::shared_ptr<Kernel::Object> PopGenericObject();
Kernel::KAutoObject* PopGenericObject();
/// Equivalent to calling `std::get<0>(PopObjects<T>())`.
template <typename T>
std::shared_ptr<T> PopObject();
T* PopObject();
/**
* Pop a descriptor containing `N` handles and resolves them to Kernel::Object pointers. If a
@ -255,7 +255,7 @@ public:
* call to read 2 single-handle descriptors.
*/
template <unsigned int N>
std::array<std::shared_ptr<Kernel::Object>, N> PopGenericObjects();
std::array<Kernel::KAutoObject*, N> PopGenericObjects();
/**
* Resolves handles to Kernel::Objects as in PopGenericsObjects(), but then also casts them to
@ -263,11 +263,11 @@ public:
* not match, null is returned instead.
*/
template <typename... T>
std::tuple<std::shared_ptr<T>...> PopObjects();
std::tuple<T*...> PopObjects();
/// Convenience wrapper around PopObjects() which assigns the handles to the passed references.
template <typename... T>
void PopObjects(std::shared_ptr<T>&... pointers) {
void PopObjects(T**... pointers) {
std::tie(pointers...) = PopObjects<T...>();
}
@ -401,20 +401,20 @@ std::array<u32, N> RequestParser::PopHLEHandles() {
return handles;
}
inline std::shared_ptr<Kernel::Object> RequestParser::PopGenericObject() {
inline Kernel::KAutoObject* RequestParser::PopGenericObject() {
auto [handle] = PopHLEHandles<1>();
return context->GetIncomingHandle(handle);
}
template <typename T>
std::shared_ptr<T> RequestParser::PopObject() {
return Kernel::DynamicObjectCast<T>(PopGenericObject());
T* RequestParser::PopObject() {
return PopGenericObject()->DynamicCast<T*>();
}
template <unsigned int N>
inline std::array<std::shared_ptr<Kernel::Object>, N> RequestParser::PopGenericObjects() {
template <u32 N>
inline std::array<Kernel::KAutoObject*, N> RequestParser::PopGenericObjects() {
std::array<u32, N> handles = PopHLEHandles<N>();
std::array<std::shared_ptr<Kernel::Object>, N> pointers;
std::array<Kernel::KAutoObject*, N> pointers;
for (int i = 0; i < N; ++i) {
pointers[i] = context->GetIncomingHandle(handles[i]);
}
@ -423,15 +423,14 @@ inline std::array<std::shared_ptr<Kernel::Object>, N> RequestParser::PopGenericO
namespace detail {
template <typename... T, std::size_t... I>
std::tuple<std::shared_ptr<T>...> PopObjectsHelper(
std::array<std::shared_ptr<Kernel::Object>, sizeof...(T)>&& pointers,
std::index_sequence<I...>) {
return std::make_tuple(Kernel::DynamicObjectCast<T>(std::move(pointers[I]))...);
std::tuple<T*...> PopObjectsHelper(std::array<Kernel::KAutoObject*, sizeof...(T)>& pointers,
std::index_sequence<I...>) {
return std::make_tuple((pointers[I]->template DynamicCast<T*>())...);
}
} // namespace detail
template <typename... T>
inline std::tuple<std::shared_ptr<T>...> RequestParser::PopObjects() {
inline std::tuple<T*...> RequestParser::PopObjects() {
return detail::PopObjectsHelper<T...>(PopGenericObjects<sizeof...(T)>(),
std::index_sequence_for<T...>{});
}

View File

@ -1,220 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/thread.h"
#include "core/memory.h"
SERIALIZE_EXPORT_IMPL(Kernel::AddressArbiter)
SERIALIZE_EXPORT_IMPL(Kernel::AddressArbiter::Callback)
namespace Kernel {
/// Parks `thread` on `wait_address` until this arbiter signals that address.
void AddressArbiter::WaitThread(std::shared_ptr<Thread> thread, VAddr wait_address) {
    // Record what the thread is blocked on before queueing it.
    thread->wait_address = wait_address;
    thread->status = ThreadStatus::WaitArb;
    waiting_threads.push_back(std::move(thread));
}
/// Wakes every thread waiting on `address`; returns how many were woken.
u64 AddressArbiter::ResumeAllThreads(VAddr address) {
    // Stable-partition the wait list so non-matching threads come first; the
    // tail [first_match, end) then holds exactly the threads to wake, in their
    // original relative order.
    const auto first_match = std::stable_partition(
        waiting_threads.begin(), waiting_threads.end(), [address](const auto& waiter) {
            ASSERT_MSG(waiter->status == ThreadStatus::WaitArb,
                       "Inconsistent AddressArbiter state");
            return waiter->wait_address != address;
        });

    // Wake each matching thread, then drop them from the wait list.
    const u64 resumed_count =
        static_cast<u64>(std::distance(first_match, waiting_threads.end()));
    for (auto it = first_match; it != waiting_threads.end(); ++it) {
        (*it)->ResumeFromWait();
    }
    waiting_threads.erase(first_match, waiting_threads.end());
    return resumed_count;
}
/// Wakes the single highest-priority thread waiting on `address`.
/// Returns true if a thread was resumed, false if none was waiting.
bool AddressArbiter::ResumeHighestPriorityThread(VAddr address) {
    // Determine which threads are waiting on this address, those should be considered for wakeup.
    auto matches_start = std::stable_partition(
        waiting_threads.begin(), waiting_threads.end(), [address](const auto& thread) {
            ASSERT_MSG(thread->status == ThreadStatus::WaitArb,
                       "Inconsistent AddressArbiter state");
            return thread->wait_address != address;
        });

    // Iterate through threads, find highest priority thread that is waiting to be arbitrated.
    // Note: The real kernel will pick the first thread in the list if more than one have the
    // same highest priority value. Lower priority values mean higher priority.
    // (min_element returns the first minimum, which preserves that tie-break.)
    auto itr = std::min_element(matches_start, waiting_threads.end(),
                                [](const auto& lhs, const auto& rhs) {
                                    return lhs->current_priority < rhs->current_priority;
                                });

    if (itr == waiting_threads.end()) {
        return false;
    }

    auto thread = *itr;
    thread->ResumeFromWait();

    waiting_threads.erase(itr);
    return true;
}
// The timeout callback is shared so threads can hold a reference to it while asleep.
AddressArbiter::AddressArbiter(KernelSystem& kernel)
    : Object(kernel), kernel(kernel), timeout_callback(std::make_shared<Callback>(*this)) {}

AddressArbiter::~AddressArbiter() {
    // Give back the arbiter slot this object consumed from its resource limit, if any.
    if (resource_limit) {
        resource_limit->Release(ResourceLimitType::AddressArbiter, 1);
    }
}
/// Factory: creates a named address arbiter owned by this kernel instance.
std::shared_ptr<AddressArbiter> KernelSystem::CreateAddressArbiter(std::string name) {
    auto address_arbiter = std::make_shared<AddressArbiter>(*this);
    address_arbiter->name = std::move(name);
    return address_arbiter;
}
/// Wakeup callback installed on threads that wait with a timeout; forwards the
/// wakeup to the owning arbiter so it can drop the thread from its wait list.
class AddressArbiter::Callback : public WakeupCallback {
public:
    explicit Callback(AddressArbiter& _parent) : parent(_parent) {}
    AddressArbiter& parent;

    void WakeUp(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
                std::shared_ptr<WaitObject> object) override {
        parent.WakeUp(reason, std::move(thread), std::move(object));
    }

private:
    // Serialized via save/load_construct_data below, which restores `parent`.
    template <class Archive>
    void serialize(Archive& ar, const unsigned int) {
        ar& boost::serialization::base_object<WakeupCallback>(*this);
    }
    friend class boost::serialization::access;
};
/// Invoked by Callback when a timed arbitration wait expires.
/// Only Timeout wakeups are expected here; other reasons resume the thread
/// through ResumeAllThreads/ResumeHighestPriorityThread instead.
void AddressArbiter::WakeUp(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
                            std::shared_ptr<WaitObject> object) {
    ASSERT(reason == ThreadWakeupReason::Timeout);
    // Remove the newly-awakened thread from the Arbiter's waiting list.
    waiting_threads.erase(std::remove(waiting_threads.begin(), waiting_threads.end(), thread),
                          waiting_threads.end());
} // NOTE: stray ';' after this brace removed — it was an empty declaration.
/// Implements svcArbitrateAddress for this arbiter.
/// @param thread      Calling thread; may be put to sleep by the Wait* types.
/// @param type        Arbitration operation to perform.
/// @param address     Guest virtual address being arbitrated.
/// @param value       Signal count or comparison value, depending on `type`.
/// @param nanoseconds Timeout for the *WithTimeout variants.
/// @return ResultSuccess, ResultTimeout (see note at the end), or
///         ResultInvalidEnumValueFnd for an unknown `type`.
Result AddressArbiter::ArbitrateAddress(std::shared_ptr<Thread> thread, ArbitrationType type,
                                        VAddr address, s32 value, u64 nanoseconds) {
    switch (type) {
    // Signal thread(s) waiting for arbitrate address...
    case ArbitrationType::Signal: {
        u64 num_threads{};

        // Negative value means resume all threads
        if (value < 0) {
            num_threads = ResumeAllThreads(address);
        } else {
            // Resume first N threads
            for (s32 i = 0; i < value; i++) {
                num_threads += ResumeHighestPriorityThread(address);
            }
        }

        // Prevents lag from low priority threads that spam svcArbitrateAddress and wake no threads
        // The tick count is taken directly from official HOS kernel. The priority value is one less
        // than official kernel as the affected FMV threads dont meet the priority threshold of 50.
        // TODO: Revisit this when scheduler is rewritten and adjust if there isn't a problem there.
        if (num_threads == 0 && thread->current_priority >= 49) {
            kernel.current_cpu->GetTimer().AddTicks(1614u);
        }
        break;
    }
    // Wait current thread (acquire the arbiter)...
    case ArbitrationType::WaitIfLessThan:
        if ((s32)kernel.memory.Read32(address) < value) {
            WaitThread(std::move(thread), address);
        }
        break;
    case ArbitrationType::WaitIfLessThanWithTimeout:
        if ((s32)kernel.memory.Read32(address) < value) {
            // Install the timeout callback before sleeping so the timer wakeup
            // can remove the thread from this arbiter's wait list.
            thread->wakeup_callback = timeout_callback;
            thread->WakeAfterDelay(nanoseconds);
            WaitThread(std::move(thread), address);
        }
        break;
    case ArbitrationType::DecrementAndWaitIfLessThan: {
        s32 memory_value = kernel.memory.Read32(address);
        if (memory_value < value) {
            // Only change the memory value if the thread should wait
            kernel.memory.Write32(address, (s32)memory_value - 1);
            WaitThread(std::move(thread), address);
        }
        break;
    }
    case ArbitrationType::DecrementAndWaitIfLessThanWithTimeout: {
        s32 memory_value = kernel.memory.Read32(address);
        if (memory_value < value) {
            // Only change the memory value if the thread should wait
            kernel.memory.Write32(address, (s32)memory_value - 1);
            thread->wakeup_callback = timeout_callback;
            thread->WakeAfterDelay(nanoseconds);
            WaitThread(std::move(thread), address);
        }
        break;
    }

    default:
        LOG_ERROR(Kernel, "unknown type={}", type);
        return ResultInvalidEnumValueFnd;
    }

    // The calls that use a timeout seem to always return a Timeout error even if they did not put
    // the thread to sleep
    if (type == ArbitrationType::WaitIfLessThanWithTimeout ||
        type == ArbitrationType::DecrementAndWaitIfLessThanWithTimeout) {
        return ResultTimeout;
    }

    return ResultSuccess;
}
// Boost.Serialization support: persists the arbiter's identity and wait state.
template <class Archive>
void AddressArbiter::serialize(Archive& ar, const unsigned int) {
    ar& boost::serialization::base_object<Object>(*this);
    ar& name;
    ar& waiting_threads;
    ar& timeout_callback;
    ar& resource_limit;
}
SERIALIZE_IMPL(AddressArbiter)
} // namespace Kernel
namespace boost::serialization {

// Callback has no default constructor (it needs its parent arbiter), so
// (de)serialization goes through save/load_construct_data: save the owning
// arbiter, then placement-new the callback against it on load.
template <class Archive>
void save_construct_data(Archive& ar, const Kernel::AddressArbiter::Callback* t,
                         const unsigned int) {
    ar << Kernel::SharedFrom(&t->parent);
}

template <class Archive>
void load_construct_data(Archive& ar, Kernel::AddressArbiter::Callback* t, const unsigned int) {
    std::shared_ptr<Kernel::AddressArbiter> parent;
    ar >> parent;
    ::new (t) Kernel::AddressArbiter::Callback(*parent);
}

} // namespace boost::serialization

View File

@ -1,88 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <vector>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"
// Address arbiters are an underlying kernel synchronization object that can be created/used via
// supervisor calls (SVCs). They function as sort of a global lock. Typically, games/other CTR
// applications use them as an underlying mechanism to implement thread-safe barriers, events, and
// semaphores.
namespace Kernel {
class Thread;
class ResourceLimit;
/// Operations accepted by svcArbitrateAddress (see AddressArbiter::ArbitrateAddress).
enum class ArbitrationType : u32 {
    Signal,                                ///< Wake thread(s) waiting on the address
    WaitIfLessThan,                        ///< Sleep if *address < value
    DecrementAndWaitIfLessThan,            ///< Decrement *address and sleep if it was < value
    WaitIfLessThanWithTimeout,             ///< WaitIfLessThan with a wakeup timeout
    DecrementAndWaitIfLessThanWithTimeout, ///< DecrementAndWaitIfLessThan with a timeout
};
/// Kernel object implementing svcArbitrateAddress-style synchronization on a
/// guest memory address. Threads sleep on an address and are woken by signals.
class AddressArbiter final : public Object, public WakeupCallback {
public:
    explicit AddressArbiter(KernelSystem& kernel);
    ~AddressArbiter() override;

    std::string GetTypeName() const override {
        return "Arbiter";
    }
    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::AddressArbiter;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    /// Resource limit this arbiter counts against; released in the destructor.
    std::shared_ptr<ResourceLimit> resource_limit;

    std::string name; ///< Name of address arbiter object (optional)

    /// Performs one arbitration operation on `address` for `thread`.
    Result ArbitrateAddress(std::shared_ptr<Thread> thread, ArbitrationType type, VAddr address,
                            s32 value, u64 nanoseconds);

    class Callback;

private:
    KernelSystem& kernel;

    /// Puts the thread to wait on the specified arbitration address under this address arbiter.
    void WaitThread(std::shared_ptr<Thread> thread, VAddr wait_address);

    /// Resume all threads found to be waiting on the address under this address arbiter.
    /// Returns the number of threads resumed.
    u64 ResumeAllThreads(VAddr address);

    /// Resume the highest-priority thread found to be waiting on the address under this
    /// address arbiter. Returns true if a thread was resumed.
    bool ResumeHighestPriorityThread(VAddr address);

    /// Threads waiting for the address arbiter to be signaled.
    std::vector<std::shared_ptr<Thread>> waiting_threads;

    /// Shared wakeup callback used by the timed wait variants.
    std::shared_ptr<Callback> timeout_callback;

    /// WakeupCallback entry point: removes a timed-out thread from the wait list.
    void WakeUp(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
                std::shared_ptr<WaitObject> object) override;

    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const unsigned int);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::AddressArbiter)
BOOST_CLASS_EXPORT_KEY(Kernel::AddressArbiter::Callback)
CONSTRUCT_KERNEL_OBJECT(Kernel::AddressArbiter)

View File

@ -1,63 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/global.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/server_session.h"
SERIALIZE_EXPORT_IMPL(Kernel::ClientPort)
namespace Kernel {
// ClientPort is fully initialized by KernelSystem after construction (friend access).
ClientPort::ClientPort(KernelSystem& kernel) : Object(kernel), kernel(kernel) {}
ClientPort::~ClientPort() = default;
/// Opens a new session to this port.
/// @param[out] out_client_session Receives the client endpoint on success.
/// @return ResultSuccess, or ResultMaxConnectionsReached if the port is full.
Result ClientPort::Connect(std::shared_ptr<ClientSession>* out_client_session) {
    // Note: Threads do not wait for the server endpoint to call
    // AcceptSession before returning from this call.
    R_UNLESS(active_sessions < max_sessions, ResultMaxConnectionsReached);
    active_sessions++;

    // Create a new session pair, let the created sessions inherit the parent port's HLE handler.
    auto [server, client] = kernel.CreateSessionPair(server_port->GetName(), SharedFrom(this));
    if (server_port->hle_handler) {
        server_port->hle_handler->ClientConnected(server);
    } else {
        // No HLE handler: queue the session until the emulated server accepts it.
        server_port->pending_sessions.push_back(server);
    }

    // Wake the threads waiting on the ServerPort
    server_port->WakeupAllWaitingThreads();

    *out_client_session = client;
    return ResultSuccess;
}
/// Records that one previously-open session to this port has been closed.
void ClientPort::ConnectionClosed() {
    // The count must never underflow; a close without a matching connect is a bug.
    ASSERT(active_sessions > 0);
    active_sessions--;
}
// Boost.Serialization support for savestates.
template <class Archive>
void ClientPort::serialize(Archive& ar, const unsigned int) {
    ar& boost::serialization::base_object<Object>(*this);
    ar& server_port;
    ar& max_sessions;
    ar& active_sessions;
    ar& name;
}
SERIALIZE_IMPL(ClientPort)
} // namespace Kernel

View File

@ -1,73 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/result.h"
namespace Kernel {
class ClientSession;
/// Client endpoint of a kernel port. Emulated applications connect through it
/// to create sessions with the matching ServerPort.
class ClientPort final : public Object {
public:
    explicit ClientPort(KernelSystem& kernel);
    ~ClientPort() override;

    friend class ServerPort;
    std::string GetTypeName() const override {
        return "ClientPort";
    }
    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::ClientPort;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    std::shared_ptr<ServerPort> GetServerPort() const {
        return server_port;
    }

    /**
     * Creates a new Session pair, adds the created ServerSession to the associated ServerPort's
     * list of pending sessions, and signals the ServerPort, causing any threads
     * waiting on it to awake.
     * @returns ClientSession The client endpoint of the created Session pair, or error code.
     */
    Result Connect(std::shared_ptr<ClientSession>* out_client_session);

    /**
     * Signifies that a previously active connection has been closed,
     * decreasing the total number of active connections to this port.
     */
    void ConnectionClosed();

private:
    KernelSystem& kernel;
    std::shared_ptr<ServerPort> server_port; ///< ServerPort associated with this client port.
    u32 max_sessions = 0;    ///< Maximum number of simultaneous sessions the port can have
    u32 active_sessions = 0; ///< Number of currently open sessions to this port
    std::string name;        ///< Name of client port (optional)

    // KernelSystem initializes the private members above when creating port pairs.
    friend class KernelSystem;

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const unsigned int);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::ClientPort)
CONSTRUCT_KERNEL_OBJECT(Kernel::ClientPort)

View File

@ -1,67 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::ClientSession)
namespace Kernel {
ClientSession::ClientSession(KernelSystem& kernel) : Object(kernel) {}

ClientSession::~ClientSession() {
    // This destructor will be called automatically when the last ClientSession handle is closed by
    // the emulated application.

    // Local references to ServerSession and SessionRequestHandler are necessary to guarantee they
    // will be kept alive until after ClientDisconnected() returns.
    std::shared_ptr<ServerSession> server = SharedFrom(parent->server);
    if (server) {
        std::shared_ptr<SessionRequestHandler> hle_handler = server->hle_handler;
        if (hle_handler)
            hle_handler->ClientDisconnected(server);

        // Clean up the list of client threads with pending requests, they are unneeded now that the
        // client endpoint is closed.
        server->pending_requesting_threads.clear();
        server->currently_handling = nullptr;
    }

    parent->client = nullptr;

    if (server) {
        // Notify any threads waiting on the ServerSession that the endpoint has been closed. Note
        // that this call has to happen after `Session::client` has been set to nullptr to let the
        // ServerSession know that the client endpoint has been closed.
        server->WakeupAllWaitingThreads();
    }
}
/// Forwards a sync request from `thread` to the server endpoint of this session.
/// @return ResultSessionClosed if the server endpoint no longer exists,
///         otherwise the result of the server's request handling.
Result ClientSession::SendSyncRequest(std::shared_ptr<Thread> thread) {
    // Keep ServerSession alive until we're done working with it.
    std::shared_ptr<ServerSession> server = SharedFrom(parent->server);
    R_UNLESS(server, ResultSessionClosed);

    // Signal the server session that new data is available
    return server->HandleSyncRequest(std::move(thread));
}
// Boost.Serialization support for savestates.
template <class Archive>
void ClientSession::serialize(Archive& ar, const unsigned int) {
    ar& boost::serialization::base_object<Object>(*this);
    ar& name;
    ar& parent;
}
SERIALIZE_IMPL(ClientSession)
} // namespace Kernel

View File

@ -1,60 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/result.h"
namespace Kernel {
class Session;
class Thread;
/// Client endpoint of an IPC session pair; the application holds this end and
/// sends sync requests through it to the server endpoint.
class ClientSession final : public Object {
public:
    explicit ClientSession(KernelSystem& kernel);
    ~ClientSession() override;

    friend class KernelSystem;

    std::string GetTypeName() const override {
        return "ClientSession";
    }

    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::ClientSession;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    /**
     * Sends a SyncRequest from the current emulated thread.
     * @param thread Thread that initiated the request.
     * @return Result of the operation.
     */
    Result SendSyncRequest(std::shared_ptr<Thread> thread);

    std::string name; ///< Name of client session (optional)

    /// The parent session, which links to the server endpoint.
    std::shared_ptr<Session> parent;

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const unsigned int);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::ClientSession)
CONSTRUCT_KERNEL_OBJECT(Kernel::ClientSession)

View File

@ -16,6 +16,7 @@ enum {
OutOfEvents = 15,
OutOfTimers = 16,
OutOfHandles = 19,
ProcessNotFound = 24,
SessionClosedByRemote = 26,
PortNameTooLong = 30,
WrongLockingThread = 31,
@ -109,5 +110,8 @@ constexpr Result ResultTimeout(ErrorDescription::Timeout, ErrorModule::OS,
constexpr Result ResultNoPendingSessions(ErrCodes::NoPendingSessions, ErrorModule::OS,
ErrorSummary::WouldBlock,
ErrorLevel::Permanent); // 0xD8401823
constexpr Result ResultProcessNotFound(ErrCodes::ProcessNotFound, ErrorModule::OS,
ErrorSummary::WrongArgument,
ErrorLevel::Permanent); // 0xD9001818
} // namespace Kernel

View File

@ -1,73 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::Event)
namespace Kernel {
// Fields are filled in by KernelSystem::CreateEvent (friend access).
Event::Event(KernelSystem& kernel) : WaitObject(kernel) {}
Event::~Event() {
    // Return the event slot this object consumed from its resource limit, if any.
    if (resource_limit) {
        resource_limit->Release(ResourceLimitType::Event, 1);
    }
}
/// Factory: creates an event in the non-signaled state with the given reset behavior.
std::shared_ptr<Event> KernelSystem::CreateEvent(ResetType reset_type, std::string name) {
    auto event = std::make_shared<Event>(*this);

    event->signaled = false;
    event->reset_type = reset_type;
    event->name = std::move(name);

    return event;
}
// A thread must wait only while the event is not signaled (thread identity is irrelevant).
bool Event::ShouldWait(const Thread* thread) const {
    return !signaled;
}
// Called when a waiting thread successfully acquires the event.
void Event::Acquire(Thread* thread) {
    ASSERT_MSG(!ShouldWait(thread), "object unavailable!");

    // A OneShot event auto-clears as soon as one waiter consumes the signal.
    if (reset_type == ResetType::OneShot) {
        signaled = false;
    }
}
// Sets the event and wakes everything currently waiting on it.
void Event::Signal() {
    signaled = true;
    WakeupAllWaitingThreads();
}

// Manually resets the event to the non-signaled state.
void Event::Clear() {
    signaled = false;
}
void Event::WakeupAllWaitingThreads() {
    WaitObject::WakeupAllWaitingThreads();

    // A Pulse event stays signaled only for the duration of the wakeup.
    if (reset_type == ResetType::Pulse) {
        signaled = false;
    }
}
// Boost.Serialization support for savestates.
template <class Archive>
void Event::serialize(Archive& ar, const unsigned int) {
    ar& boost::serialization::base_object<WaitObject>(*this);
    ar& reset_type;
    ar& signaled;
    ar& name;
    ar& resource_limit;
}
SERIALIZE_IMPL(Event)
} // namespace Kernel

View File

@ -1,64 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <boost/serialization/export.hpp>
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/wait_object.h"
namespace Kernel {
/// Kernel event object: a waitable boolean flag whose clearing behavior
/// (OneShot / Sticky / Pulse) is controlled by its ResetType.
class Event final : public WaitObject {
public:
    explicit Event(KernelSystem& kernel);
    ~Event() override;

    std::string GetTypeName() const override {
        return "Event";
    }
    std::string GetName() const override {
        return name;
    }
    void SetName(const std::string& name_) {
        name = name_;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::Event;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    ResetType GetResetType() const {
        return reset_type;
    }

    bool ShouldWait(const Thread* thread) const override;
    void Acquire(Thread* thread) override;

    void WakeupAllWaitingThreads() override;

    /// Sets the event, waking all waiting threads.
    void Signal();
    /// Resets the event to the non-signaled state.
    void Clear();

    /// Resource limit this event counts against; released in the destructor.
    std::shared_ptr<ResourceLimit> resource_limit;

private:
    ResetType reset_type; ///< Current ResetType
    bool signaled;        ///< Whether the event has already been signaled
    std::string name;     ///< Name of event (optional)

    friend class KernelSystem;

    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const unsigned int);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::Event)
CONSTRUCT_KERNEL_OBJECT(Kernel::Event)

View File

@ -1,111 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <utility>
#include <boost/serialization/array.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::HandleTable)
namespace Kernel {
namespace {

/// Extracts the slot index (handle bits 31:15) used to address the tables.
constexpr u16 GetSlot(Handle handle) {
    return static_cast<u16>(handle >> 15);
}

/// Extracts the generation counter (handle bits 14:0).
constexpr u16 GetGeneration(Handle handle) {
    return static_cast<u16>(handle & 0x7FFF);
}

} // Anonymous namespace
HandleTable::HandleTable(KernelSystem& kernel) : kernel(kernel) {
    // CTR-OS doesn't use generation 0, so the counter starts at 1.
    next_generation = 1;
    Clear();
}

HandleTable::~HandleTable() = default;
/// Allocates a new handle for `obj`.
/// @param[out] out_handle Receives the new handle (generation | slot << 15).
/// @return ResultSuccess, or ResultOutOfHandles if the table is full.
Result HandleTable::Create(Handle* out_handle, std::shared_ptr<Object> obj) {
    DEBUG_ASSERT(obj != nullptr);

    // Pop the head of the free-slot list; for empty slots, `generations`
    // stores the index of the next free slot.
    u16 slot = next_free_slot;
    R_UNLESS(slot < generations.size(), ResultOutOfHandles);
    next_free_slot = generations[slot];

    u16 generation = next_generation++;

    // Overflow count so it fits in the 15 bits dedicated to the generation in the handle.
    // CTR-OS doesn't use generation 0, so skip straight to 1.
    if (next_generation >= (1 << 15)) {
        next_generation = 1;
    }

    generations[slot] = generation;
    objects[slot] = std::move(obj);

    *out_handle = generation | (slot << 15);
    return ResultSuccess;
}
/// Creates a second handle referring to the same object as `handle`.
/// @return ResultInvalidHandle if `handle` does not resolve, else Create()'s result.
Result HandleTable::Duplicate(Handle* out_handle, Handle handle) {
    // Resolve first: an invalid handle cannot be duplicated.
    auto object = GetGeneric(handle);
    R_UNLESS(object != nullptr, ResultInvalidHandle);
    return Create(out_handle, std::move(object));
}
/// Closes `handle`, releasing the table's reference and recycling its slot.
/// @return ResultSuccess, or ResultInvalidHandle for a stale/unknown handle.
Result HandleTable::Close(Handle handle) {
    R_UNLESS(IsValid(handle), ResultInvalidHandle);

    const u16 slot = GetSlot(handle);

    // Push the slot back onto the free list (stored through `generations`).
    objects[slot] = nullptr;
    generations[slot] = next_free_slot;
    next_free_slot = slot;
    return ResultSuccess;
}
/// True when `handle` refers to a live object whose generation still matches.
bool HandleTable::IsValid(Handle handle) const {
    const u16 slot = GetSlot(handle);
    if (slot >= MAX_COUNT) {
        return false;
    }
    // The generation check rejects stale handles whose slot has been reused.
    return objects[slot] != nullptr && generations[slot] == GetGeneration(handle);
}
/// Looks up a handle, resolving the CurrentThread/CurrentProcess pseudo-handles
/// before consulting the table. Returns nullptr for invalid handles.
std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
    if (handle == CurrentThread) {
        return SharedFrom(kernel.GetCurrentThreadManager().GetCurrentThread());
    } else if (handle == CurrentProcess) {
        return kernel.GetCurrentProcess();
    }

    if (!IsValid(handle)) {
        return nullptr;
    }
    return objects[GetSlot(handle)];
}
void HandleTable::Clear() {
for (u16 i = 0; i < MAX_COUNT; ++i) {
generations[i] = i + 1;
objects[i] = nullptr;
}
next_free_slot = 0;
}
// Boost.Serialization support for savestates.
template <class Archive>
void HandleTable::serialize(Archive& ar, const unsigned int) {
    ar& objects;
    ar& generations;
    ar& next_generation;
    ar& next_free_slot;
}
SERIALIZE_IMPL(HandleTable)
} // namespace Kernel

View File

@ -1,129 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <cstddef>
#include <memory>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/result.h"
namespace Kernel {
enum KernelHandle : Handle {
CurrentThread = 0xFFFF8000,
CurrentProcess = 0xFFFF8001,
};
/**
* This class allows the creation of Handles, which are references to objects that can be tested
* for validity and looked up. Here they are used to pass references to kernel objects to/from the
* emulated process. it has been designed so that it follows the same handle format and has
* approximately the same restrictions as the handle manager in the CTR-OS.
*
* Handles contain two sub-fields: a slot index (bits 31:15) and a generation value (bits 14:0).
* The slot index is used to index into the arrays in this class to access the data corresponding
* to the Handle.
*
* To prevent accidental use of a freed Handle whose slot has already been reused, a global counter
* is kept and incremented every time a Handle is created. This is the Handle's "generation". The
* value of the counter is stored into the Handle as well as in the handle table (in the
* "generations" array). When looking up a handle, the Handle's generation must match with the
* value stored on the class, otherwise the Handle is considered invalid.
*
* To find free slots when allocating a Handle without needing to scan the entire object array, the
* generations field of unallocated slots is re-purposed as a linked list of indices to free slots.
* When a Handle is created, an index is popped off the list and used for the new Handle. When it
* is destroyed, it is again pushed onto the list to be re-used by the next allocation. It is
* likely that this allocation strategy differs from the one used in CTR-OS, but this hasn't been
* verified and isn't likely to cause any problems.
*/
class HandleTable final : NonCopyable {
public:
    explicit HandleTable(KernelSystem& kernel);
    ~HandleTable();

    /**
     * Allocates a handle for the given object.
     * @return The created Handle or one of the following errors:
     *           - `ResultOutOfHandles`: the maximum number of handles has been exceeded.
     */
    Result Create(Handle* out_handle, std::shared_ptr<Object> obj);

    /**
     * Returns a new handle that points to the same object as the passed in handle.
     * @return The duplicated Handle or one of the following errors:
     *           - `ResultInvalidHandle`: an invalid handle was passed in.
     *           - Any errors returned by `Create()`.
     */
    Result Duplicate(Handle* out_handle, Handle handle);

    /**
     * Closes a handle, removing it from the table and decreasing the object's ref-count.
     * @return `ResultSuccess` or one of the following errors:
     *           - `ResultInvalidHandle`: an invalid handle was passed in.
     */
    Result Close(Handle handle);

    /// Checks if a handle is valid and points to an existing object.
    bool IsValid(Handle handle) const;

    /**
     * Looks up a handle.
     * @return Pointer to the looked-up object, or `nullptr` if the handle is not valid.
     */
    std::shared_ptr<Object> GetGeneric(Handle handle) const;

    /**
     * Looks up a handle while verifying its type.
     * @return Pointer to the looked-up object, or `nullptr` if the handle is not valid or its
     *         type differs from the requested one.
     */
    template <class T>
    std::shared_ptr<T> Get(Handle handle) const {
        return DynamicObjectCast<T>(GetGeneric(handle));
    }

    /// Closes all handles held in this table.
    void Clear();

private:
    /**
     * This is the maximum limit of handles allowed per process in CTR-OS. It can be further
     * reduced by ExHeader values, but this is not emulated here.
     */
    static const std::size_t MAX_COUNT = 4096;

    /// Stores the Object referenced by the handle or null if the slot is empty.
    std::array<std::shared_ptr<Object>, MAX_COUNT> objects;

    /**
     * The value of `next_generation` when the handle was created, used to check for validity. For
     * empty slots, contains the index of the next free slot in the list.
     */
    std::array<u16, MAX_COUNT> generations;

    /**
     * Global counter of the number of created handles. Stored in `generations` when a handle is
     * created, and wraps around to 1 when it hits 0x8000.
     */
    u16 next_generation;

    /// Head of the free slots linked list.
    u16 next_free_slot;

    KernelSystem& kernel;

    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const unsigned int);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::HandleTable)
CONSTRUCT_KERNEL_OBJECT(Kernel::HandleTable)

View File

@ -12,12 +12,12 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "core/core.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/ipc_debugger/recorder.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
SERIALIZE_EXPORT_IMPL(Kernel::SessionRequestHandler)
SERIALIZE_EXPORT_IMPL(Kernel::SessionRequestHandler::SessionDataBase)
@ -33,15 +33,13 @@ public:
ThreadCallback(std::shared_ptr<HLERequestContext> context_,
std::shared_ptr<HLERequestContext::WakeupCallback> callback_)
: callback(std::move(callback_)), context(std::move(context_)) {}
void WakeUp(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
std::shared_ptr<WaitObject> object) {
ASSERT(thread->status == ThreadStatus::WaitHleEvent);
void WakeUp(ThreadWakeupReason reason, KThread* thread, KSynchronizationObject* object) {
ASSERT(thread->m_status == ThreadStatus::WaitHleEvent);
if (callback) {
callback->WakeUp(thread, *context, reason);
}
auto process = thread->owner_process.lock();
ASSERT(process);
Process* process = thread->GetOwner();
// We must copy the entire command buffer *plus* the entire static buffers area, since
// the translation might need to read from it in order to retrieve the StaticBuffer
@ -70,16 +68,16 @@ private:
friend class boost::serialization::access;
};
SessionRequestHandler::SessionInfo::SessionInfo(std::shared_ptr<ServerSession> session,
SessionRequestHandler::SessionInfo::SessionInfo(KServerSession* session_,
std::unique_ptr<SessionDataBase> data)
: session(std::move(session)), data(std::move(data)) {}
: session(session_), data(std::move(data)) {}
void SessionRequestHandler::ClientConnected(std::shared_ptr<ServerSession> server_session) {
void SessionRequestHandler::ClientConnected(KServerSession* server_session) {
server_session->SetHleHandler(shared_from_this());
connected_sessions.emplace_back(std::move(server_session), MakeSessionData());
connected_sessions.emplace_back(server_session, MakeSessionData());
}
void SessionRequestHandler::ClientDisconnected(std::shared_ptr<ServerSession> server_session) {
void SessionRequestHandler::ClientDisconnected(KServerSession* server_session) {
server_session->SetHleHandler(nullptr);
connected_sessions.erase(
std::remove_if(connected_sessions.begin(), connected_sessions.end(),
@ -104,40 +102,46 @@ void SessionRequestHandler::SessionInfo::serialize(Archive& ar, const unsigned i
}
SERIALIZE_IMPL(SessionRequestHandler::SessionInfo)
std::shared_ptr<Event> HLERequestContext::SleepClientThread(
const std::string& reason, std::chrono::nanoseconds timeout,
std::shared_ptr<WakeupCallback> callback) {
KEvent* HLERequestContext::SleepClientThread(const std::string& reason,
std::chrono::nanoseconds timeout,
std::shared_ptr<WakeupCallback> callback) {
// Put the client thread to sleep until the wait event is signaled or the timeout expires.
thread->wakeup_callback = std::make_shared<ThreadCallback>(shared_from_this(), callback);
thread->m_wakeup_callback = std::make_shared<ThreadCallback>(shared_from_this(), callback);
auto event = kernel.CreateEvent(Kernel::ResetType::OneShot, "HLE Pause Event: " + reason);
thread->status = ThreadStatus::WaitHleEvent;
thread->wait_objects = {event};
// Create pause event.
auto* event = KEvent::Create(kernel);
event->Initialize(nullptr, ResetType::OneShot);
event->SetName("HLE Pause Event: " + reason);
KEvent::Register(kernel, event);
// Add the event to the list of objects the thread is waiting for.
thread->m_status = ThreadStatus::WaitHleEvent;
thread->m_wait_objects = {event};
event->AddWaitingThread(thread);
if (timeout.count() > 0)
if (timeout.count() > 0) {
thread->WakeAfterDelay(timeout.count());
}
return event;
}
HLERequestContext::HLERequestContext() : kernel(Core::Global<KernelSystem>()) {}
HLERequestContext::HLERequestContext(KernelSystem& kernel, std::shared_ptr<ServerSession> session,
std::shared_ptr<Thread> thread)
: kernel(kernel), session(std::move(session)), thread(thread) {
HLERequestContext::HLERequestContext(KernelSystem& kernel, KServerSession* session, KThread* thread)
: kernel(kernel), session(session), thread(thread) {
cmd_buf[0] = 0;
}
HLERequestContext::~HLERequestContext() = default;
std::shared_ptr<Object> HLERequestContext::GetIncomingHandle(u32 id_from_cmdbuf) const {
KAutoObject* HLERequestContext::GetIncomingHandle(u32 id_from_cmdbuf) const {
ASSERT(id_from_cmdbuf < request_handles.size());
return request_handles[id_from_cmdbuf];
}
u32 HLERequestContext::AddOutgoingHandle(std::shared_ptr<Object> object) {
request_handles.push_back(std::move(object));
u32 HLERequestContext::AddOutgoingHandle(KAutoObject* object) {
request_handles.push_back(object);
return static_cast<u32>(request_handles.size() - 1);
}
@ -154,8 +158,7 @@ void HLERequestContext::AddStaticBuffer(u8 buffer_id, std::vector<u8> data) {
}
Result HLERequestContext::PopulateFromIncomingCommandBuffer(const u32_le* src_cmdbuf,
std::shared_ptr<Process> src_process_) {
auto& src_process = *src_process_;
Process* src_process) {
IPC::Header header{src_cmdbuf[0]};
std::size_t untranslated_size = 1u + header.normal_params_size;
@ -179,25 +182,32 @@ Result HLERequestContext::PopulateFromIncomingCommandBuffer(const u32_le* src_cm
switch (IPC::GetDescriptorType(descriptor)) {
case IPC::DescriptorType::CopyHandle:
case IPC::DescriptorType::MoveHandle: {
u32 num_handles = IPC::HandleNumberFromDesc(descriptor);
const u32 num_handles = IPC::HandleNumberFromDesc(descriptor);
auto& src_handle_table = src_process->handle_table;
ASSERT(i + num_handles <= command_size); // TODO(yuriks): Return error
for (u32 j = 0; j < num_handles; ++j) {
Handle handle = src_cmdbuf[i];
std::shared_ptr<Object> object = nullptr;
if (handle != 0) {
object = src_process.handle_table.GetGeneric(handle);
ASSERT(object != nullptr); // TODO(yuriks): Return error
if (descriptor == IPC::DescriptorType::MoveHandle) {
src_process.handle_table.Close(handle);
}
const Handle handle = src_cmdbuf[i];
if (!handle) {
cmd_buf[i++] = AddOutgoingHandle(nullptr);
continue;
}
cmd_buf[i++] = AddOutgoingHandle(std::move(object));
// Get object from the handle table.
KScopedAutoObject object =
src_handle_table.GetObjectForIpcWithoutPseudoHandle(handle);
ASSERT(object.IsNotNull());
// If we are moving, remove the old handle.
if (descriptor == IPC::DescriptorType::MoveHandle) {
src_handle_table.Remove(handle);
}
cmd_buf[i++] = AddOutgoingHandle(object.GetPointerUnsafe());
}
break;
}
case IPC::DescriptorType::CallingPid: {
cmd_buf[i++] = src_process.process_id;
cmd_buf[i++] = src_process->process_id;
break;
}
case IPC::DescriptorType::StaticBuffer: {
@ -206,7 +216,7 @@ Result HLERequestContext::PopulateFromIncomingCommandBuffer(const u32_le* src_cm
// Copy the input buffer into our own vector and store it.
std::vector<u8> data(buffer_info.size);
kernel.memory.ReadBlock(src_process, source_address, data.data(), data.size());
kernel.memory.ReadBlock(*src_process, source_address, data.data(), data.size());
AddStaticBuffer(buffer_info.buffer_id, std::move(data));
cmd_buf[i++] = source_address;
@ -214,7 +224,7 @@ Result HLERequestContext::PopulateFromIncomingCommandBuffer(const u32_le* src_cm
}
case IPC::DescriptorType::MappedBuffer: {
u32 next_id = static_cast<u32>(request_mapped_buffers.size());
request_mapped_buffers.emplace_back(kernel.memory, src_process_, descriptor,
request_mapped_buffers.emplace_back(kernel.memory, src_process, descriptor,
src_cmdbuf[i], next_id);
cmd_buf[i++] = next_id;
break;
@ -259,14 +269,13 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(u32_le* dst_cmdbuf,
case IPC::DescriptorType::CopyHandle:
case IPC::DescriptorType::MoveHandle: {
// HLE services don't use handles, so we treat both CopyHandle and MoveHandle equally
u32 num_handles = IPC::HandleNumberFromDesc(descriptor);
const u32 num_handles = IPC::HandleNumberFromDesc(descriptor);
ASSERT(i + num_handles <= command_size);
for (u32 j = 0; j < num_handles; ++j) {
std::shared_ptr<Object> object = GetIncomingHandle(cmd_buf[i]);
KAutoObject* object = GetIncomingHandle(cmd_buf[i]);
Handle handle = 0;
if (object != nullptr) {
// TODO(yuriks): Figure out the proper error handling for if this fails
R_ASSERT(dst_process.handle_table.Create(std::addressof(handle), object));
dst_process.handle_table.Add(std::addressof(handle), object);
}
dst_cmdbuf[i++] = handle;
}
@ -327,7 +336,7 @@ void HLERequestContext::serialize(Archive& ar, const unsigned int) {
ar& cmd_buf;
ar& session;
ar& thread;
ar& request_handles;
// ar& request_handles;
ar& static_buffers;
ar& request_mapped_buffers;
}
@ -335,8 +344,8 @@ SERIALIZE_IMPL(HLERequestContext)
MappedBuffer::MappedBuffer() : memory(&Core::Global<Core::System>().Memory()) {}
MappedBuffer::MappedBuffer(Memory::MemorySystem& memory, std::shared_ptr<Process> process,
u32 descriptor, VAddr address, u32 id)
MappedBuffer::MappedBuffer(Memory::MemorySystem& memory, Process* process, u32 descriptor,
VAddr address, u32 id)
: memory(&memory), id(id), address(address), process(std::move(process)) {
IPC::MappedBufferDescInfo desc{descriptor};
size = desc.size;

View File

@ -17,8 +17,8 @@
#include "common/serialization/boost_small_vector.hpp"
#include "common/swap.h"
#include "core/hle/ipc.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_thread.h"
namespace Service {
class ServiceFrameworkBase;
@ -32,8 +32,8 @@ namespace Kernel {
class HandleTable;
class Process;
class Thread;
class Event;
class KThread;
class KEvent;
class HLERequestContext;
class KernelSystem;
@ -58,14 +58,14 @@ public:
* associated ServerSession alive for the duration of the connection.
* @param server_session Owning pointer to the ServerSession associated with the connection.
*/
virtual void ClientConnected(std::shared_ptr<ServerSession> server_session);
virtual void ClientConnected(KServerSession* server_session);
/**
* Signals that a client has just disconnected from this HLE handler and releases the
* associated ServerSession.
* @param server_session ServerSession associated with the connection.
*/
virtual void ClientDisconnected(std::shared_ptr<ServerSession> server_session);
virtual void ClientDisconnected(KServerSession* server_session);
/// Empty placeholder structure for services with no per-session data. The session data classes
/// in each service must inherit from this.
@ -79,9 +79,9 @@ public:
};
struct SessionInfo {
SessionInfo(std::shared_ptr<ServerSession> session, std::unique_ptr<SessionDataBase> data);
SessionInfo(KServerSession* session, std::unique_ptr<SessionDataBase> data);
std::shared_ptr<ServerSession> session;
KServerSession* session;
std::unique_ptr<SessionDataBase> data;
private:
@ -97,7 +97,7 @@ protected:
/// Returns the session data associated with the server session.
template <typename T>
T* GetSessionData(std::shared_ptr<ServerSession> session) {
T* GetSessionData(KServerSession* session) {
static_assert(std::is_base_of<SessionDataBase, T>(),
"T is not a subclass of SessionDataBase");
auto itr = std::find_if(connected_sessions.begin(), connected_sessions.end(),
@ -120,8 +120,8 @@ private:
class MappedBuffer {
public:
MappedBuffer(Memory::MemorySystem& memory, std::shared_ptr<Process> process, u32 descriptor,
VAddr address, u32 id);
MappedBuffer(Memory::MemorySystem& memory, Process* process, u32 descriptor, VAddr address,
u32 id);
// interface for service
void Read(void* dest_buffer, std::size_t offset, std::size_t size);
@ -144,7 +144,7 @@ private:
Memory::MemorySystem* memory;
u32 id;
VAddr address;
std::shared_ptr<Process> process;
Process* process;
u32 size;
IPC::MappedBufferPermissions perms;
@ -192,8 +192,7 @@ private:
*/
class HLERequestContext : public std::enable_shared_from_this<HLERequestContext> {
public:
HLERequestContext(KernelSystem& kernel, std::shared_ptr<ServerSession> session,
std::shared_ptr<Thread> thread);
explicit HLERequestContext(KernelSystem& kernel, KServerSession* session, KThread* thread);
~HLERequestContext();
/// Returns a pointer to the IPC command buffer for this request.
@ -210,21 +209,21 @@ public:
* Returns the session through which this request was made. This can be used as a map key to
* access per-client data on services.
*/
std::shared_ptr<ServerSession> Session() const {
KServerSession* Session() const {
return session;
}
/**
* Returns the client thread that made the service request.
*/
std::shared_ptr<Thread> ClientThread() const {
KThread* ClientThread() const {
return thread;
}
class WakeupCallback {
public:
virtual ~WakeupCallback() = default;
virtual void WakeUp(std::shared_ptr<Thread> thread, HLERequestContext& context,
virtual void WakeUp(KThread* thread, HLERequestContext& context,
ThreadWakeupReason reason) = 0;
private:
@ -244,9 +243,8 @@ public:
* was called.
* @returns Event that when signaled will resume the thread and call the callback function.
*/
std::shared_ptr<Event> SleepClientThread(const std::string& reason,
std::chrono::nanoseconds timeout,
std::shared_ptr<WakeupCallback> callback);
KEvent* SleepClientThread(const std::string& reason, std::chrono::nanoseconds timeout,
std::shared_ptr<WakeupCallback> callback);
private:
template <typename ResultFunctor>
@ -257,7 +255,7 @@ private:
future = std::move(fut);
}
void WakeUp(std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx,
void WakeUp(Kernel::KThread* thread, Kernel::HLERequestContext& ctx,
Kernel::ThreadWakeupReason reason) {
functor(ctx);
}
@ -322,13 +320,13 @@ public:
* Resolves a object id from the request command buffer into a pointer to an object. See the
* "HLE handle protocol" section in the class documentation for more details.
*/
std::shared_ptr<Object> GetIncomingHandle(u32 id_from_cmdbuf) const;
KAutoObject* GetIncomingHandle(u32 id_from_cmdbuf) const;
/**
* Adds an outgoing object to the response, returning the id which should be used to reference
* it. See the "HLE handle protocol" section in the class documentation for more details.
*/
u32 AddOutgoingHandle(std::shared_ptr<Object> object);
u32 AddOutgoingHandle(KAutoObject* object);
/**
* Discards all Objects from the context, invalidating all ids. This may be called after reading
@ -356,8 +354,8 @@ public:
MappedBuffer& GetMappedBuffer(u32 id_from_cmdbuf);
/// Populates this context with data from the requesting process/thread.
Result PopulateFromIncomingCommandBuffer(const u32_le* src_cmdbuf,
std::shared_ptr<Process> src_process);
Result PopulateFromIncomingCommandBuffer(const u32_le* src_cmdbuf, Process* src_process);
/// Writes data from this context back to the requesting process/thread.
Result WriteToOutgoingCommandBuffer(u32_le* dst_cmdbuf, Process& dst_process) const;
@ -370,10 +368,10 @@ public:
private:
KernelSystem& kernel;
std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
std::shared_ptr<ServerSession> session;
std::shared_ptr<Thread> thread;
KServerSession* session;
KThread* thread;
// TODO(yuriks): Check common usage of this and optimize size accordingly
boost::container::small_vector<std::shared_ptr<Object>, 8> request_handles;
boost::container::small_vector<KAutoObject*, 8> request_handles;
// The static buffers will be created when the IPC request is translated.
std::array<std::vector<u8>, IPC::MAX_STATIC_BUFFERS> static_buffers;
// The mapped buffers will be created when the IPC request is translated

View File

@ -9,13 +9,13 @@
#include "common/memory_ref.h"
#include "core/core.h"
#include "core/hle/ipc.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/ipc.h"
#include "core/hle/kernel/ipc_debugger/recorder.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
#include "core/memory.h"
SERIALIZE_EXPORT_IMPL(Kernel::MappedBufferContext)
@ -23,12 +23,11 @@ SERIALIZE_EXPORT_IMPL(Kernel::MappedBufferContext)
namespace Kernel {
Result TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySystem& memory,
std::shared_ptr<Thread> src_thread,
std::shared_ptr<Thread> dst_thread, VAddr src_address,
KThread* src_thread, KThread* dst_thread, VAddr src_address,
VAddr dst_address,
std::vector<MappedBufferContext>& mapped_buffer_context, bool reply) {
auto src_process = src_thread->owner_process.lock();
auto dst_process = dst_thread->owner_process.lock();
auto src_process = src_thread->GetOwner();
auto dst_process = dst_thread->GetOwner();
ASSERT(src_process && dst_process);
IPC::Header header;
@ -69,30 +68,34 @@ Result TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySystem
for (u32 j = 0; j < num_handles; ++j) {
Handle handle = cmd_buf[i];
std::shared_ptr<Object> object = nullptr;
// Perform pseudo-handle detection here because by the time this function is called,
// the current thread and process are no longer the ones which created this IPC
// request, but the ones that are handling it.
if (handle == CurrentThread) {
object = src_thread;
} else if (handle == CurrentProcess) {
object = src_process;
} else if (handle != 0) {
object = src_process->handle_table.GetGeneric(handle);
if (descriptor == IPC::DescriptorType::MoveHandle) {
src_process->handle_table.Close(handle);
KScopedAutoObject object = [&]() -> KScopedAutoObject<KAutoObject> {
if (handle == CurrentThread) {
return src_thread;
} else if (handle == CurrentProcess) {
return src_process;
} else if (handle != 0) {
auto obj = src_process->handle_table.GetObject(handle);
if (descriptor == IPC::DescriptorType::MoveHandle) {
src_process->handle_table.Remove(handle);
}
return obj;
}
}
return nullptr;
}();
if (object == nullptr) {
if (object.IsNull()) {
// Note: The real kernel sets invalid translated handles to 0 in the target
// command buffer.
cmd_buf[i++] = 0;
continue;
}
R_ASSERT(dst_process->handle_table.Create(std::addressof(cmd_buf[i++]),
std::move(object)));
Handle dst_handle = 0;
dst_process->handle_table.Add(&dst_handle, object.GetPointerUnsafe());
cmd_buf[i++] = dst_handle;
}
break;
}

View File

@ -4,12 +4,11 @@
#pragma once
#include <memory>
#include <vector>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "common/memory_ref.h"
#include "core/hle/ipc.h"
#include "core/hle/kernel/thread.h"
namespace Memory {
class MemorySystem;
@ -18,6 +17,7 @@ class MemorySystem;
namespace Kernel {
class KernelSystem;
class KThread;
struct MappedBufferContext {
IPC::MappedBufferPermissions permissions;
@ -35,8 +35,7 @@ private:
/// Performs IPC command buffer translation from one process to another.
Result TranslateCommandBuffer(KernelSystem& system, Memory::MemorySystem& memory,
std::shared_ptr<Thread> src_thread,
std::shared_ptr<Thread> dst_thread, VAddr src_address,
KThread* src_thread, KThread* dst_thread, VAddr src_address,
VAddr dst_address,
std::vector<MappedBufferContext>& mapped_buffer_context, bool reply);
} // namespace Kernel

View File

@ -4,73 +4,80 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "common/scope_exit.h"
#include "core/hle/kernel/ipc_debugger/recorder.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/service/service.h"
namespace IPCDebugger {
namespace {
ObjectInfo GetObjectInfo(const Kernel::Object* object) {
ObjectInfo GetObjectInfo(const Kernel::KAutoObject* object) {
if (object == nullptr) {
return {};
}
return {object->GetTypeName(), object->GetName(), static_cast<int>(object->GetObjectId())};
return {object->GetTypeName(), /*object->GetName()*/ "KAutoObject",
/*static_cast<int>(object->GetObjectId())*/ 1};
}
ObjectInfo GetObjectInfo(const Kernel::Thread* thread) {
ObjectInfo GetObjectInfo(const Kernel::KThread* thread) {
if (thread == nullptr) {
return {};
}
return {thread->GetTypeName(), thread->GetName(), static_cast<int>(thread->GetThreadId())};
return {thread->GetTypeName(), /*thread->GetName()*/ "KThread",
/*static_cast<int>(object->GetObjectId())*/ 1};
}
ObjectInfo GetObjectInfo(const Kernel::Process* process) {
if (process == nullptr) {
return {};
}
return {process->GetTypeName(), process->GetName(), static_cast<int>(process->process_id)};
return {process->GetTypeName(), /*process->GetName()*/ "KProcess",
static_cast<int>(process->process_id)};
}
} // namespace
} // Anonymous namespace
Recorder::Recorder() = default;
Recorder::~Recorder() = default;
bool Recorder::IsEnabled() const {
return enabled.load(std::memory_order_relaxed);
}
void Recorder::RegisterRequest(const std::shared_ptr<Kernel::ClientSession>& client_session,
const std::shared_ptr<Kernel::Thread>& client_thread) {
void Recorder::RegisterRequest(const Kernel::KClientSession* client_session,
const Kernel::KThread* client_thread) {
const u32 thread_id = client_thread->GetThreadId();
const RequestRecord record = {
.id = ++record_count,
.status = RequestStatus::Sent,
.client_process = GetObjectInfo(client_thread->GetOwner()),
.client_thread = GetObjectInfo(client_thread),
.client_session = GetObjectInfo(client_session),
.client_port = GetObjectInfo(client_session->GetParent()->GetParent()),
.server_process = {},
.server_thread = {},
.server_session = GetObjectInfo(&client_session->GetParent()->GetServerSession()),
};
if (auto owner_process = client_thread->owner_process.lock()) {
RequestRecord record = {/* id */ ++record_count,
/* status */ RequestStatus::Sent,
/* client_process */ GetObjectInfo(owner_process.get()),
/* client_thread */ GetObjectInfo(client_thread.get()),
/* client_session */ GetObjectInfo(client_session.get()),
/* client_port */ GetObjectInfo(client_session->parent->port.get()),
/* server_process */ {},
/* server_thread */ {},
/* server_session */ GetObjectInfo(client_session->parent->server)};
record_map.insert_or_assign(thread_id, std::make_unique<RequestRecord>(record));
client_session_map.insert_or_assign(thread_id, client_session);
InvokeCallbacks(record);
}
record_map.insert_or_assign(thread_id, std::make_unique<RequestRecord>(record));
client_session_map.insert_or_assign(thread_id, client_session);
InvokeCallbacks(record);
}
void Recorder::SetRequestInfo(const std::shared_ptr<Kernel::Thread>& client_thread,
void Recorder::SetRequestInfo(const Kernel::KThread* client_thread,
std::vector<u32> untranslated_cmdbuf,
std::vector<u32> translated_cmdbuf,
const std::shared_ptr<Kernel::Thread>& server_thread) {
const Kernel::KThread* server_thread) {
const u32 thread_id = client_thread->GetThreadId();
if (!record_map.count(thread_id)) {
// This is possible when the recorder is enabled after application started
@ -84,30 +91,34 @@ void Recorder::SetRequestInfo(const std::shared_ptr<Kernel::Thread>& client_thre
record.translated_request_cmdbuf = std::move(translated_cmdbuf);
if (server_thread) {
if (auto owner_process = server_thread->owner_process.lock()) {
record.server_process = GetObjectInfo(owner_process.get());
}
record.server_thread = GetObjectInfo(server_thread.get());
record.server_process = GetObjectInfo(server_thread->GetOwner());
record.server_thread = GetObjectInfo(server_thread);
} else {
record.is_hle = true;
}
// Function name
ASSERT_MSG(client_session_map.count(thread_id), "Client session is missing");
const auto& client_session = client_session_map[thread_id];
if (client_session->parent->port &&
client_session->parent->port->GetServerPort()->hle_handler) {
const auto client_session = client_session_map[thread_id];
record.function_name = std::dynamic_pointer_cast<Service::ServiceFrameworkBase>(
client_session->parent->port->GetServerPort()->hle_handler)
SCOPE_EXIT({
client_session_map.erase(thread_id);
InvokeCallbacks(record);
});
auto port = client_session->GetParent()->GetParent();
if (!port) {
return;
}
auto hle_handler = port->GetParent()->GetServerPort().GetHleHandler();
if (hle_handler) {
record.function_name = std::dynamic_pointer_cast<Service::ServiceFrameworkBase>(hle_handler)
->GetFunctionName({record.untranslated_request_cmdbuf[0]});
}
client_session_map.erase(thread_id);
InvokeCallbacks(record);
}
void Recorder::SetReplyInfo(const std::shared_ptr<Kernel::Thread>& client_thread,
void Recorder::SetReplyInfo(const Kernel::KThread* client_thread,
std::vector<u32> untranslated_cmdbuf,
std::vector<u32> translated_cmdbuf) {
const u32 thread_id = client_thread->GetThreadId();
@ -129,7 +140,7 @@ void Recorder::SetReplyInfo(const std::shared_ptr<Kernel::Thread>& client_thread
record_map.erase(thread_id);
}
void Recorder::SetHLEUnimplemented(const std::shared_ptr<Kernel::Thread>& client_thread) {
void Recorder::SetHLEUnimplemented(const Kernel::KThread* client_thread) {
const u32 thread_id = client_thread->GetThreadId();
if (!record_map.count(thread_id)) {
// This is possible when the recorder is enabled after application started

View File

@ -15,8 +15,9 @@
#include "common/common_types.h"
namespace Kernel {
class ClientSession;
class Thread;
class KClientSession;
class KThread;
enum class ClassTokenType : u32;
} // namespace Kernel
namespace IPCDebugger {
@ -27,7 +28,7 @@ namespace IPCDebugger {
struct ObjectInfo {
std::string type;
std::string name;
int id = -1;
int id;
};
/**
@ -80,28 +81,28 @@ public:
/**
* Registers a request into the recorder. The request is then assoicated with the client thread.
*/
void RegisterRequest(const std::shared_ptr<Kernel::ClientSession>& client_session,
const std::shared_ptr<Kernel::Thread>& client_thread);
void RegisterRequest(const Kernel::KClientSession* client_session,
const Kernel::KThread* client_thread);
/**
* Sets the request information of the request record associated with the client thread.
* When the server thread is empty, the request will be considered HLE.
*/
void SetRequestInfo(const std::shared_ptr<Kernel::Thread>& client_thread,
std::vector<u32> untranslated_cmdbuf, std::vector<u32> translated_cmdbuf,
const std::shared_ptr<Kernel::Thread>& server_thread = {});
void SetRequestInfo(const Kernel::KThread* client_thread, std::vector<u32> untranslated_cmdbuf,
std::vector<u32> translated_cmdbuf,
const Kernel::KThread* server_thread = nullptr);
/**
* Sets the reply information of the request record assoicated with the client thread.
* The request is then unlinked from the client thread.
*/
void SetReplyInfo(const std::shared_ptr<Kernel::Thread>& client_thread,
std::vector<u32> untranslated_cmdbuf, std::vector<u32> translated_cmdbuf);
void SetReplyInfo(const Kernel::KThread* client_thread, std::vector<u32> untranslated_cmdbuf,
std::vector<u32> translated_cmdbuf);
/**
* Set the status of a record to HLEUnimplemented.
*/
void SetHLEUnimplemented(const std::shared_ptr<Kernel::Thread>& client_thread);
void SetHLEUnimplemented(const Kernel::KThread* client_thread);
/**
* Set the status of the debugger (enabled/disabled).
@ -118,7 +119,7 @@ private:
int record_count{};
// Temporary client session map for function name handling
std::unordered_map<u32, std::shared_ptr<Kernel::ClientSession>> client_session_map;
std::unordered_map<u32, const Kernel::KClientSession*> client_session_map;
std::atomic_bool enabled{false};

View File

@ -0,0 +1,226 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_auto_object_container.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/memory.h"
namespace Kernel {
/// Wakeup callback invoked when a timed arbitration wait expires.
/// Forwards the notification to the owning KAddressArbiter so that it can
/// remove the timed-out thread from its wait list.
class KAddressArbiter::Callback : public WakeupCallback {
public:
    explicit Callback(KAddressArbiter* parent_) : parent(parent_) {}

    KAddressArbiter* parent;

    void WakeUp(ThreadWakeupReason reason, KThread* thread,
                KSynchronizationObject* object) override {
        parent->WakeUp(reason, thread, object);
    }

private:
    friend class boost::serialization::access;

    template <class Archive>
    void serialize(Archive& ar, const unsigned int) {
        // Only the base state is serialized; `parent` is restored via
        // save_construct_data/load_construct_data below.
        ar& boost::serialization::base_object<WakeupCallback>(*this);
    }
};
KAddressArbiter::KAddressArbiter(KernelSystem& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel},
m_timeout_callback(std::make_shared<Callback>(this)) {}
KAddressArbiter::~KAddressArbiter() = default;
// Binds this arbiter to its owning process and takes a reference to it.
// PostDestroy() explicitly tolerates a null owner, so guard the Open() the
// same way instead of dereferencing unconditionally.
void KAddressArbiter::Initialize(Process* owner) {
    m_owner = owner;
    if (m_owner) {
        m_owner->Open();
    }
}
// Runs after the object is destroyed; `arg` is the owner pointer stashed by
// GetPostDestroyArgument().
void KAddressArbiter::PostDestroy(uintptr_t arg) {
    auto* owner = reinterpret_cast<Process*>(arg);
    if (owner == nullptr) {
        return;
    }
    // Return the arbiter slot to the owner's resource limit and drop the
    // reference taken in Initialize().
    owner->ReleaseResource(ResourceLimitType::AddressArbiter, 1);
    owner->Close();
}
// Parks `thread` on this arbiter: records which address it waits on, flips it
// into the WaitArb state, and enqueues it for a later wakeup.
void KAddressArbiter::WaitThread(KThread* thread, VAddr wait_address) {
    thread->m_wait_address = wait_address;
    thread->m_status = ThreadStatus::WaitArb;
    m_waiting_threads.push_back(thread);
}
// Wakes every thread currently waiting on `address` and returns how many were
// resumed.
u64 KAddressArbiter::ResumeAllThreads(VAddr address) {
    // Partition the wait list so that every thread waiting on `address` ends
    // up in the tail portion [first_match, end).
    const auto first_match = std::stable_partition(
        m_waiting_threads.begin(), m_waiting_threads.end(), [address](KThread* waiter) {
            ASSERT_MSG(waiter->GetStatus() == ThreadStatus::WaitArb,
                       "Inconsistent AddressArbiter state");
            return waiter->m_wait_address != address;
        });

    // Wake each matching thread, then drop them all from the wait list.
    const u64 num_resumed = static_cast<u64>(std::distance(first_match, m_waiting_threads.end()));
    for (auto it = first_match; it != m_waiting_threads.end(); ++it) {
        (*it)->ResumeFromWait();
    }
    m_waiting_threads.erase(first_match, m_waiting_threads.end());
    return num_resumed;
}
// Wakes the single highest-priority thread waiting on `address`.
// Returns true if a thread was resumed, false if none was waiting.
bool KAddressArbiter::ResumeHighestPriorityThread(VAddr address) {
    // Move all threads waiting on `address` to the tail of the list so they
    // can be scanned for a wakeup candidate.
    const auto first_match = std::stable_partition(
        m_waiting_threads.begin(), m_waiting_threads.end(), [address](KThread* waiter) {
            ASSERT_MSG(waiter->GetStatus() == ThreadStatus::WaitArb,
                       "Inconsistent AddressArbiter state");
            return waiter->m_wait_address != address;
        });

    // Pick the waiter with the best (numerically lowest) priority. Like the
    // real kernel, ties go to the earliest thread in the list, an order both
    // stable_partition and min_element preserve.
    const auto best =
        std::min_element(first_match, m_waiting_threads.end(), [](KThread* lhs, KThread* rhs) {
            return lhs->GetCurrentPriority() < rhs->GetCurrentPriority();
        });
    if (best == m_waiting_threads.end()) {
        return false;
    }

    (*best)->ResumeFromWait();
    m_waiting_threads.erase(best);
    return true;
}
// Timeout handler installed via m_timeout_callback: called when a timed
// arbitration wait expires, so the thread must be dropped from our wait list.
void KAddressArbiter::WakeUp(ThreadWakeupReason reason, KThread* thread,
                             KSynchronizationObject* object) {
    // Only the timeout callback routes here; any other wakeup reason means
    // the arbiter's bookkeeping is out of sync.
    ASSERT(reason == ThreadWakeupReason::Timeout);
    // Remove the newly-awakened thread from the Arbiter's waiting list.
    m_waiting_threads.erase(std::remove(m_waiting_threads.begin(), m_waiting_threads.end(), thread),
                            m_waiting_threads.end());
} // (stray ';' after the function body removed)
// Handles svcArbitrateAddress for `thread`: either signals threads waiting on
// `address` or puts `thread` to sleep on it, depending on `type`. `value` is
// the signal count (negative = all) or the comparison operand for the wait
// variants; `nanoseconds` is the timeout used by the *WithTimeout variants.
Result KAddressArbiter::ArbitrateAddress(KThread* thread, ArbitrationType type, VAddr address,
                                         s32 value, u64 nanoseconds) {
    switch (type) {
    // Signal thread(s) waiting for arbitrate address...
    case ArbitrationType::Signal: {
        u64 num_threads{};

        // Negative value means resume all threads
        if (value < 0) {
            num_threads = ResumeAllThreads(address);
        } else {
            // Resume first N threads
            for (s32 i = 0; i < value; i++) {
                num_threads += ResumeHighestPriorityThread(address);
            }
        }

        // Prevents lag from low priority threads that spam svcArbitrateAddress and wake no threads
        // The tick count is taken directly from official HOS kernel. The priority value is one less
        // than official kernel as the affected FMV threads dont meet the priority threshold of 50.
        // TODO: Revisit this when scheduler is rewritten and adjust if there isn't a problem there.
        auto* core = m_kernel.current_cpu;
        if (num_threads == 0 && core->GetID() == 0 && thread->GetCurrentPriority() >= 49) {
            core->GetTimer().AddTicks(1614u);
        }
        break;
    }
    // Wait current thread (acquire the arbiter)...
    case ArbitrationType::WaitIfLessThan:
        if ((s32)m_kernel.memory.Read32(address) < value) {
            WaitThread(thread, address);
        }
        break;
    case ArbitrationType::WaitIfLessThanWithTimeout:
        if ((s32)m_kernel.memory.Read32(address) < value) {
            // Arm the timeout first so the thread is woken via the arbiter's
            // timeout callback if nobody signals the address in time.
            thread->SetWakeupCallback(m_timeout_callback);
            thread->WakeAfterDelay(nanoseconds);
            WaitThread(thread, address);
        }
        break;
    case ArbitrationType::DecrementAndWaitIfLessThan: {
        s32 memory_value = m_kernel.memory.Read32(address);
        if (memory_value < value) {
            // Only change the memory value if the thread should wait
            m_kernel.memory.Write32(address, (s32)memory_value - 1);
            WaitThread(thread, address);
        }
        break;
    }
    case ArbitrationType::DecrementAndWaitIfLessThanWithTimeout: {
        s32 memory_value = m_kernel.memory.Read32(address);
        if (memory_value < value) {
            // Only change the memory value if the thread should wait
            m_kernel.memory.Write32(address, (s32)memory_value - 1);
            thread->SetWakeupCallback(m_timeout_callback);
            thread->WakeAfterDelay(nanoseconds);
            WaitThread(thread, address);
        }
        break;
    }
    default:
        LOG_ERROR(Kernel, "unknown type={}", type);
        return ResultInvalidEnumValueFnd;
    }

    // The calls that use a timeout seem to always return a Timeout error even if they did not put
    // the thread to sleep
    if (type == ArbitrationType::WaitIfLessThanWithTimeout ||
        type == ArbitrationType::DecrementAndWaitIfLessThanWithTimeout) {
        return ResultTimeout;
    }

    return ResultSuccess;
}
// Boost.Serialization support. The field order below is the savestate format
// for this type, so it must not be reordered.
template <class Archive>
void KAddressArbiter::serialize(Archive& ar, const unsigned int file_version) {
    ar& boost::serialization::base_object<KAutoObject>(*this);
    ar& m_name;
    ar& m_waiting_threads;
    // ar& m_timeout_callback;
    // NOTE(review): the timeout callback is not serialized — the constructor
    // recreates it (make_shared<Callback>(this)); confirm load path uses it.
}
SERIALIZE_IMPL(KAddressArbiter)
} // namespace Kernel
namespace boost::serialization {

// Persist the constructor argument (the owning arbiter) so a Callback can be
// rebuilt on load despite having no default constructor.
template <class Archive>
void save_construct_data(Archive& ar, const Kernel::KAddressArbiter::Callback* t,
                         const unsigned int) {
    ar << t->parent;
}

template <class Archive>
void load_construct_data(Archive& ar, Kernel::KAddressArbiter::Callback* t, const unsigned int) {
    Kernel::KAddressArbiter* parent;
    ar >> parent;
    // Placement-new into the storage Boost allocated for the object.
    ::new (t) Kernel::KAddressArbiter::Callback(parent);
}

} // namespace boost::serialization

SERIALIZE_EXPORT_IMPL(Kernel::KAddressArbiter)
SERIALIZE_EXPORT_IMPL(Kernel::KAddressArbiter::Callback)

View File

@ -0,0 +1,75 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <vector>
#include <boost/serialization/export.hpp>
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class KThread;
// Arbitration operations accepted by KAddressArbiter::ArbitrateAddress. The
// *WithTimeout variants additionally arm a wakeup timer on the waiting
// thread. Enumerator order matches the svc ABI and must not change.
enum class ArbitrationType : u32 {
    Signal,
    WaitIfLessThan,
    DecrementAndWaitIfLessThan,
    WaitIfLessThanWithTimeout,
    DecrementAndWaitIfLessThanWithTimeout,
};
/**
 * Kernel address arbiter: lets threads of the owning process sleep on and
 * signal arbitrary memory addresses (backing svcArbitrateAddress).
 */
class KAddressArbiter final : public KAutoObjectWithSlabHeapAndContainer<KAddressArbiter>,
                              public WakeupCallback {
    KERNEL_AUTOOBJECT_TRAITS(KAddressArbiter, KAutoObject);

public:
    explicit KAddressArbiter(KernelSystem& kernel);
    ~KAddressArbiter() override;

    /// Binds this arbiter to its owning process.
    void Initialize(Process* owner);

    /// Passes the owner to PostDestroy() as an opaque pointer.
    uintptr_t GetPostDestroyArgument() const override {
        return reinterpret_cast<uintptr_t>(m_owner);
    }

    /// Releases the owner's resource-limit slot and reference after destruction.
    static void PostDestroy(uintptr_t arg);

    Process* GetOwner() const override {
        return m_owner;
    }

    /// Performs an arbitration operation of `type` on `address` for `thread`.
    Result ArbitrateAddress(KThread* thread, ArbitrationType type, VAddr address, s32 value,
                            u64 nanoseconds);

private:
    /// Puts `thread` to sleep waiting on `wait_address`.
    void WaitThread(KThread* thread, VAddr wait_address);

    /// Wakes every thread waiting on `address`; returns how many were resumed.
    u64 ResumeAllThreads(VAddr address);

    /// Wakes the highest-priority thread waiting on `address`, if any.
    bool ResumeHighestPriorityThread(VAddr address);

    /// Timeout handler: removes a timed-out thread from the wait list.
    void WakeUp(ThreadWakeupReason reason, KThread* thread,
                KSynchronizationObject* object) override;

    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version);

public:
    // NOTE(review): members are public, presumably so the serialization code
    // can reach them — confirm before tightening access.
    Process* m_owner{};
    std::string m_name{};
    std::vector<KThread*> m_waiting_threads;
    class Callback;
    std::shared_ptr<Callback> m_timeout_callback;
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KAddressArbiter)
BOOST_CLASS_EXPORT_KEY(Kernel::KAddressArbiter::Callback)
CONSTRUCT_KERNEL_OBJECT(Kernel::KAddressArbiter)

View File

@ -0,0 +1,32 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/archives.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/kernel.h"
namespace Kernel {
// Gives a freshly allocated object its initial strong reference and returns
// the same pointer. All kernel objects are created through this helper.
KAutoObject* KAutoObject::Create(KAutoObject* obj) {
    obj->m_ref_count = 1;
    return obj;
}

// Adds this object to the kernel-wide object registry.
void KAutoObject::RegisterWithKernel() {
    m_kernel.RegisterKernelObject(this);
}

// Static on purpose: this runs after Destroy() (see Close()), when `this`
// must no longer be dereferenced, so the kernel reference is passed in.
void KAutoObject::UnregisterWithKernel(KernelSystem& kernel, KAutoObject* self) {
    kernel.UnregisterKernelObject(self);
}

// Only the name is serialized; the reference count is deliberately skipped
// (handles re-open references on load).
template <class Archive>
void KAutoObject::serialize(Archive& ar, const unsigned int) {
    ar& m_name;
    // ar& m_ref_count;
}
SERIALIZE_IMPL(KAutoObject)

} // namespace Kernel

View File

@ -0,0 +1,305 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <atomic>
#include <boost/serialization/access.hpp>
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/global.h"
namespace Kernel {
class KernelSystem;
class Process;
using Handle = u32;
constexpr u32 DefaultStackSize = 0x4000;
// Class tokens give every kernel object type a bit pattern used for runtime
// type identification: TypeObj::IsDerivedFrom accepts rhs when rhs's token
// bits are a subset of this type's token bits.
// NOTE(review): values appear chosen to mirror the 3DS kernel's object class
// tokens — confirm before relying on exact numeric values.
enum class ClassTokenType : u32 {
    KAutoObject = 0,
    KSynchronizationObject = 1,
    KSemaphore = 27,
    KEvent = 31,
    KTimer = 53,
    KMutex = 57,
    Debug = 77,
    KServerPort = 85,
    DmaObject = 89,
    KClientPort = 101,
    CodeSet = 104,
    KSession = 112,
    KThread = 141,
    KServerSession = 149,
    KAddressArbiter = 152,
    KClientSession = 165,
    KPort = 168,
    KSharedMemory = 176,
    Process = 197,
    KResourceLimit = 200,
};
DECLARE_ENUM_FLAG_OPERATORS(ClassTokenType)

// Injects static + virtual type information into a KAutoObject subclass.
// The macro deliberately ends with an unfinished private operator!=
// declaration: the semicolon written at the invocation site completes it.
#define KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, ATTRIBUTE)                                \
private:                                                                                           \
    static constexpr inline const char* const TypeName = #CLASS;                                   \
    static constexpr inline auto ClassToken = ClassTokenType::CLASS;                               \
                                                                                                   \
public:                                                                                            \
    CITRA_NON_COPYABLE(CLASS);                                                                     \
    CITRA_NON_MOVEABLE(CLASS);                                                                     \
                                                                                                   \
    using BaseClass = BASE_CLASS;                                                                  \
    static constexpr TypeObj GetStaticTypeObj() { return TypeObj(TypeName, ClassToken); }          \
    static constexpr const char* GetStaticTypeName() { return TypeName; }                          \
    virtual TypeObj GetTypeObj() ATTRIBUTE { return GetStaticTypeObj(); }                          \
    virtual const char* GetTypeName() ATTRIBUTE { return GetStaticTypeName(); }                    \
                                                                                                   \
private:                                                                                           \
    constexpr bool operator!=(const TypeObj& rhs)

// Variant used by derived classes; the base KAutoObject uses the _IMPL form
// directly with plain `const` since it introduces the virtuals.
#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS)                                                \
    KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, const override)
// Base class for all reference-counted kernel objects. Provides manual
// ref-counting (Open/Close), token-based runtime type information
// (DynamicCast/IsDerivedFrom), and registration with the kernel object list.
class KAutoObject {
protected:
    // Lightweight RTTI record: a type name plus a class token whose bit
    // pattern encodes the inheritance chain (see IsDerivedFrom).
    class TypeObj {
    public:
        constexpr explicit TypeObj(const char* n, ClassTokenType tok)
            : m_name(n), m_class_token(tok) {}

        constexpr const char* GetName() const {
            return m_name;
        }
        constexpr ClassTokenType GetClassToken() const {
            return m_class_token;
        }

        constexpr bool operator==(const TypeObj& rhs) const {
            return this->GetClassToken() == rhs.GetClassToken();
        }
        constexpr bool operator!=(const TypeObj& rhs) const {
            return this->GetClassToken() != rhs.GetClassToken();
        }

        // True when rhs's token bits are a subset of ours, i.e. this type is
        // rhs's type or derives from it.
        constexpr bool IsDerivedFrom(const TypeObj& rhs) const {
            return (this->GetClassToken() | rhs.GetClassToken()) == this->GetClassToken();
        }

    private:
        const char* m_name;
        ClassTokenType m_class_token;
    };

private:
    KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const);

public:
    explicit KAutoObject(KernelSystem& kernel) : m_kernel(kernel) {
        RegisterWithKernel();
    }
    virtual ~KAutoObject() = default;

    static KAutoObject* Create(KAutoObject* ptr);

    // Destroy is responsible for destroying the auto object's resources when ref_count hits zero.
    virtual void Destroy() {
        UNIMPLEMENTED();
    }

    // Finalize is responsible for cleaning up resource, but does not destroy the object.
    virtual void Finalize() {}

    // Owning process, if any; overridden by process-bound object types.
    virtual Process* GetOwner() const {
        return nullptr;
    }

    u32 GetReferenceCount() const {
        return m_ref_count.load();
    }

    bool IsDerivedFrom(const TypeObj& rhs) const {
        return this->GetTypeObj().IsDerivedFrom(rhs);
    }
    bool IsDerivedFrom(const KAutoObject& rhs) const {
        return this->IsDerivedFrom(rhs.GetTypeObj());
    }

    // Token-checked downcast; returns nullptr when the dynamic type does not
    // derive from Derived. Derived must be a pointer type, e.g. KThread*.
    template <typename Derived>
    Derived DynamicCast() {
        static_assert(std::is_pointer_v<Derived>);
        using DerivedType = std::remove_pointer_t<Derived>;

        if (this->IsDerivedFrom(DerivedType::GetStaticTypeObj())) {
            return static_cast<Derived>(this);
        } else {
            return nullptr;
        }
    }

    template <typename Derived>
    const Derived DynamicCast() const {
        static_assert(std::is_pointer_v<Derived>);
        using DerivedType = std::remove_pointer_t<Derived>;

        if (this->IsDerivedFrom(DerivedType::GetStaticTypeObj())) {
            return static_cast<Derived>(this);
        } else {
            return nullptr;
        }
    }

    // Adds a strong reference. Returns false when the count is already zero,
    // i.e. the object is mid-destruction and must not be revived.
    bool Open() {
        // Atomically increment the reference count, only if it's positive.
        u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire);
        do {
            if (cur_ref_count == 0) {
                return false;
            }
            ASSERT(cur_ref_count < cur_ref_count + 1); // fails only on u32 wraparound
        } while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count + 1,
                                                    std::memory_order_relaxed));

        return true;
    }

    // Drops a strong reference; destroys and unregisters the object when the
    // count reaches zero.
    void Close() {
        // Atomically decrement the reference count, not allowing it to become negative.
        u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire);
        do {
            ASSERT(cur_ref_count > 0);
        } while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1,
                                                    std::memory_order_acq_rel));

        // If ref count hits zero, destroy the object.
        if (cur_ref_count - 1 == 0) {
            // Keep the kernel reference on the stack: `this` is dead after Destroy().
            KernelSystem& kernel = m_kernel;
            this->Destroy();
            KAutoObject::UnregisterWithKernel(kernel, this);
        }
    }

private:
    void RegisterWithKernel();
    static void UnregisterWithKernel(KernelSystem& kernel, KAutoObject* self);

    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const unsigned int);

protected:
    KernelSystem& m_kernel;
    std::string m_name{};

private:
    std::atomic<u32> m_ref_count{};
};
// RAII holder for a KAutoObject strong reference: Opens the object on
// construction (when non-null) and Closes it on destruction. Move-only.
template <typename T>
class KScopedAutoObject {
public:
    CITRA_NON_COPYABLE(KScopedAutoObject);

    constexpr KScopedAutoObject() = default;

    constexpr KScopedAutoObject(T* o) : m_obj(o) {
        if (m_obj != nullptr) {
            m_obj->Open();
        }
    }

    ~KScopedAutoObject() {
        if (m_obj != nullptr) {
            m_obj->Close();
        }
        m_obj = nullptr;
    }

    // Converting move: upcasts are done statically; downcasts go through
    // DynamicCast, and a failed downcast Closes (drops) the reference.
    template <typename U>
        requires(std::derived_from<T, U> || std::derived_from<U, T>)
    constexpr KScopedAutoObject(KScopedAutoObject<U>&& rhs) {
        if constexpr (std::derived_from<U, T>) {
            // Upcast.
            m_obj = rhs.m_obj;
            rhs.m_obj = nullptr;
        } else {
            // Downcast.
            T* derived = nullptr;
            if (rhs.m_obj != nullptr) {
                derived = rhs.m_obj->template DynamicCast<T*>();
                if (derived == nullptr) {
                    rhs.m_obj->Close();
                }
            }

            m_obj = derived;
            rhs.m_obj = nullptr;
        }
    }

    // Move-assign via swap: the moved-from rhs closes our old object when it
    // is destroyed.
    constexpr KScopedAutoObject<T>& operator=(KScopedAutoObject<T>&& rhs) {
        rhs.Swap(*this);
        return *this;
    }

    constexpr T* operator->() {
        return m_obj;
    }
    constexpr T& operator*() {
        return *m_obj;
    }

    // Replaces the held object; the temporary Closes the previous one.
    constexpr void Reset(T* o) {
        KScopedAutoObject(o).Swap(*this);
    }

    // Raw access without touching the reference count.
    constexpr T* GetPointerUnsafe() {
        return m_obj;
    }
    constexpr T* GetPointerUnsafe() const {
        return m_obj;
    }

    // Gives up ownership without Closing; the caller becomes responsible for
    // the reference.
    constexpr T* ReleasePointerUnsafe() {
        T* ret = m_obj;
        m_obj = nullptr;
        return ret;
    }

    constexpr bool IsNull() const {
        return m_obj == nullptr;
    }
    constexpr bool IsNotNull() const {
        return m_obj != nullptr;
    }

private:
    template <typename U>
    friend class KScopedAutoObject;

private:
    T* m_obj{};

private:
    constexpr void Swap(KScopedAutoObject& rhs) noexcept {
        std::swap(m_obj, rhs.m_obj);
    }
};
} // namespace Kernel
// Provides boost::serialization in-place construction for kernel objects,
// which have no default constructor (they all require a KernelSystem&);
// the kernel instance is fetched from the process-wide global.
#define CONSTRUCT_KERNEL_OBJECT(T)                                                                 \
    namespace boost::serialization {                                                               \
    template <class Archive>                                                                       \
    void load_construct_data(Archive& ar, T* t, const unsigned int file_version) {                 \
        ::new (t) T(Core::Global<Kernel::KernelSystem>());                                         \
    }                                                                                              \
    }

View File

@ -0,0 +1,31 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include "core/hle/kernel/k_auto_object_container.h"
namespace Kernel {
// Appends a newly created object to the global tracking list.
void KAutoObjectWithListContainer::Register(KAutoObject* obj) {
    // KScopedLightMutex lk{m_mutex};
    m_object_list.push_back(*obj);
}
// Removes `obj` from the tracking list by identity (pointer equality).
// No-op when the object is not present. Uses std::find_if instead of the
// previous hand-rolled loop; <algorithm> is already included by this file.
void KAutoObjectWithListContainer::Unregister(KAutoObject* obj) {
    // KScopedLightMutex lk{m_mutex};
    const auto it = std::find_if(m_object_list.begin(), m_object_list.end(),
                                 [obj](auto& elem) { return std::addressof(elem) == obj; });
    if (it != m_object_list.end()) {
        m_object_list.erase(it);
    }
}
// Counts tracked objects whose GetOwner() matches `owner`.
size_t KAutoObjectWithListContainer::GetOwnedCount(Process* owner) {
    // KScopedLightMutex lk{m_mutex};
    return std::count_if(m_object_list.begin(), m_object_list.end(),
                         [&](const auto& obj) { return obj.GetOwner() == owner; });
}

} // namespace Kernel

View File

@ -0,0 +1,37 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/common_funcs.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_linked_list.h"
namespace Kernel {
class KernelSystem;
class Process;
// Kernel-wide list of live auto objects, used to track them and to answer
// per-process ownership queries (GetOwnedCount).
class KAutoObjectWithListContainer {
public:
    CITRA_NON_COPYABLE(KAutoObjectWithListContainer);
    CITRA_NON_MOVEABLE(KAutoObjectWithListContainer);

    using ListType = KLinkedList<KAutoObject>;

    KAutoObjectWithListContainer(KernelSystem& kernel) : m_object_list(kernel) {}

    void Initialize() {}
    void Finalize() {}

    // Adds/removes an object from the tracking list (list does not own it).
    void Register(KAutoObject* obj);
    void Unregister(KAutoObject* obj);

    // Number of tracked objects whose GetOwner() == owner.
    size_t GetOwnedCount(Process* owner);

private:
    // KLightMutex m_mutex;
    ListType m_object_list;
};
} // namespace Kernel

View File

@ -0,0 +1,79 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <utility>
#include <boost/serialization/export.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/k_session.h"
SERIALIZE_EXPORT_IMPL(Kernel::KClientPort)
namespace Kernel {
// Construction registers the object with the kernel (KAutoObject ctor).
KClientPort::KClientPort(KernelSystem& kernel) : KAutoObject(kernel) {}
KClientPort::~KClientPort() = default;
// Binds this client endpoint to its parent port pair, records the maximum
// number of simultaneous sessions, and names this end "<name>_Client".
// `name` is a by-value sink parameter, so move it instead of copying
// (performance-unnecessary-value-param; requires <utility>).
void KClientPort::Initialize(KPort* parent, s32 max_sessions, std::string name) {
    // Set member variables.
    m_parent = parent;
    m_max_sessions = max_sessions;
    m_name = std::move(name) + "_Client";
}
// Creates a new session through this port, enforcing the session limit.
// The server half is handed to the port's HLE handler when one is attached,
// otherwise it is queued on the ServerPort for the guest to accept.
// Returns the client half through `out`.
Result KClientPort::CreateSession(KClientSession** out) {
    R_UNLESS(m_active_sessions < m_max_sessions, ResultMaxConnectionsReached);
    m_active_sessions++;

    // Allocate a new session.
    KSession* session = KSession::Create(m_kernel);

    // Initialize the session.
    session->Initialize(this);

    // Register the session.
    KSession::Register(m_kernel, session);

    // Let the created sessions inherit the parent port's HLE handler.
    auto* server = &m_parent->GetServerPort();
    auto hle_handler = server->GetHleHandler();
    if (hle_handler) {
        hle_handler->ClientConnected(&session->GetServerSession());
    } else {
        server->EnqueueSession(&session->GetServerSession());
    }

    // Wake the threads waiting on the ServerPort
    m_parent->GetServerPort().WakeupAllWaitingThreads();

    // We succeeded, so set the output.
    *out = std::addressof(session->GetClientSession());
    return ResultSuccess;
}
// Called when a session created through this port goes away, freeing one
// slot toward the max-session limit.
void KClientPort::ConnectionClosed() {
    ASSERT(m_active_sessions > 0);
    --m_active_sessions;
}

// The parent port pointer is not serialized here (commented out);
// NOTE(review): presumably it is restored by the owning KPort on load — verify.
template <class Archive>
void KClientPort::serialize(Archive& ar, const u32 file_version) {
    ar& boost::serialization::base_object<KAutoObject>(*this);
    // ar& m_parent;
    ar& m_max_sessions;
    ar& m_active_sessions;
    ar& m_name;
}
SERIALIZE_IMPL(KClientPort)

} // namespace Kernel

View File

@ -0,0 +1,52 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/result.h"
namespace Kernel {
class KClientSession;
// Client endpoint of a kernel port pair; guests connect through it to create
// sessions with the matching server port.
class KClientPort final : public KAutoObject {
    KERNEL_AUTOOBJECT_TRAITS(KClientPort, KAutoObject);

public:
    explicit KClientPort(KernelSystem& kernel);
    ~KClientPort() override;

    // Binds the port pair and session limit; names this end "<name>_Client".
    void Initialize(KPort* parent, s32 max_sessions, std::string name);

    const KPort* GetParent() const {
        return m_parent;
    }
    KPort* GetParent() {
        return m_parent;
    }

    // Creates a new session if the active-session limit allows it.
    Result CreateSession(KClientSession** out);

    // Bookkeeping callback when a session created through this port closes.
    void ConnectionClosed();

private:
    KPort* m_parent{};
    u32 m_max_sessions{};
    u32 m_active_sessions{};
    std::string m_name;

    friend class KernelSystem;

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version);
};

} // namespace Kernel

BOOST_CLASS_EXPORT_KEY(Kernel::KClientPort)
CONSTRUCT_KERNEL_OBJECT(Kernel::KClientPort)

View File

@ -0,0 +1,42 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "core/hle/kernel/k_auto_object_container.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::KClientSession)
namespace Kernel {
KClientSession::KClientSession(KernelSystem& kernel) : KAutoObject(kernel) {}
KClientSession::~KClientSession() = default;

// Last client reference dropped: notify the parent session pair and release
// our reference to it.
void KClientSession::Destroy() {
    m_parent->OnClientClosed();
    m_parent->Close();
}

void KClientSession::OnServerClosed() {}

// Forwards a guest sync request to the paired server session.
Result KClientSession::SendSyncRequest(KThread* thread) {
    // Signal the server session that new data is available
    return m_parent->GetServerSession().HandleSyncRequest(thread);
}

// Parent pointer is not serialized here (commented out);
// NOTE(review): presumably restored by the owning KSession on load — verify.
template <class Archive>
void KClientSession::serialize(Archive& ar, const u32 file_version) {
    ar& boost::serialization::base_object<KAutoObject>(*this);
    // ar& m_parent;
}
SERIALIZE_IMPL(KClientSession)

} // namespace Kernel

View File

@ -0,0 +1,50 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <boost/serialization/export.hpp>
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/result.h"
namespace Kernel {
class KSession;
class KThread;
// Client half of a session pair; user code holds handles to this object and
// issues sync requests through it to the paired server session.
class KClientSession final : public KAutoObject {
    KERNEL_AUTOOBJECT_TRAITS(KClientSession, KAutoObject);

public:
    explicit KClientSession(KernelSystem& kernel);
    ~KClientSession() override;

    void Initialize(KSession* parent) {
        // Set member variables.
        m_parent = parent;
    }

    // Notifies the parent pair and drops our reference to it.
    void Destroy() override;

    KSession* GetParent() const {
        return m_parent;
    }

    // Forwards `thread`'s sync request to the paired server session.
    Result SendSyncRequest(KThread* thread);

    void OnServerClosed();

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version);

private:
    KSession* m_parent{};
};

} // namespace Kernel

BOOST_CLASS_EXPORT_KEY(Kernel::KClientSession)
CONSTRUCT_KERNEL_OBJECT(Kernel::KClientSession)

View File

@ -0,0 +1,76 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <vector>
#include <boost/serialization/vector.hpp>
namespace Kernel {
// Loaded program image for a process: the raw memory buffer plus the three
// segments (code, rodata, data) described as offsets into that buffer.
// Fix: `entrypoint` and `program_id` were previously left uninitialized, so
// reading them from a default-constructed CodeSet was undefined behavior;
// they are now value-initialized.
class CodeSet {
public:
    CodeSet() = default;
    ~CodeSet() = default;

    // One loadable segment: offset into `memory`, target virtual address, size.
    struct Segment {
        std::size_t offset = 0;
        VAddr addr = 0;
        u32 size = 0;

    private:
        friend class boost::serialization::access;
        template <class Archive>
        void serialize(Archive& ar, const u32 file_version) {
            ar& offset;
            ar& addr;
            ar& size;
        }
    };

    Segment& CodeSegment() {
        return segments[0];
    }
    const Segment& CodeSegment() const {
        return segments[0];
    }

    Segment& RODataSegment() {
        return segments[1];
    }
    const Segment& RODataSegment() const {
        return segments[1];
    }

    Segment& DataSegment() {
        return segments[2];
    }
    const Segment& DataSegment() const {
        return segments[2];
    }

    // Raw program image; segment offsets index into this buffer.
    std::vector<u8> memory;
    std::array<Segment, 3> segments;
    VAddr entrypoint{};
    u64 program_id{};
    std::string name;

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version) {
        ar& memory;
        ar& segments;
        ar& entrypoint;
        ar& program_id;
        ar& name;
    }
};
} // namespace Kernel

View File

@ -0,0 +1,78 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
SERIALIZE_EXPORT_IMPL(Kernel::KEvent)
namespace Kernel {
KEvent::KEvent(KernelSystem& kernel) : KAutoObjectWithSlabHeapAndContainer(kernel) {}
KEvent::~KEvent() = default;

// Binds the event to its creating process (taking a reference to it) and
// records the reset behavior. `owner` may be null for kernel-internal events.
void KEvent::Initialize(Process* owner, ResetType reset_type) {
    // Open a reference to the owner process.
    if (owner) {
        owner->Open();
        m_owner = owner;
    }

    // Set member variables.
    m_reset_type = reset_type;
}

// Called after destruction with GetPostDestroyArgument(): releases the
// owner's event resource quota and the reference taken in Initialize.
void KEvent::PostDestroy(uintptr_t arg) {
    Process* owner = reinterpret_cast<Process*>(arg);
    if (owner != nullptr) {
        owner->ReleaseResource(ResourceLimitType::Event, 1);
        owner->Close();
    }
}

// A thread must wait while the event is not signaled.
bool KEvent::ShouldWait(const KThread* thread) const {
    return !m_signaled;
}

// OneShot events auto-clear when a waiter acquires them.
void KEvent::Acquire(KThread* thread) {
    ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
    if (m_reset_type == ResetType::OneShot) {
        m_signaled = false;
    }
}

void KEvent::Signal() {
    m_signaled = true;
    this->WakeupAllWaitingThreads();
}

void KEvent::Clear() {
    m_signaled = false;
}

// Pulse events clear again immediately after waking their waiters.
void KEvent::WakeupAllWaitingThreads() {
    KSynchronizationObject::WakeupAllWaitingThreads();
    if (m_reset_type == ResetType::Pulse) {
        m_signaled = false;
    }
}

template <class Archive>
void KEvent::serialize(Archive& ar, const u32 file_version) {
    ar& boost::serialization::base_object<KSynchronizationObject>(*this);
    ar& m_owner;
    ar& m_reset_type;
    ar& m_signaled;
}
SERIALIZE_IMPL(KEvent)

} // namespace Kernel

View File

@ -0,0 +1,74 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <boost/serialization/export.hpp>
#include "core/global.h"
#include "core/hle/kernel/k_auto_object_container.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
// How an event returns to the non-signaled state:
//   OneShot - clears when a single waiter acquires it
//   Sticky  - stays signaled until explicitly cleared
//   Pulse   - clears right after waking all current waiters
enum class ResetType : u32 {
    OneShot,
    Sticky,
    Pulse,
};

// Waitable kernel event with OneShot/Sticky/Pulse reset semantics.
class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KSynchronizationObject> {
    KERNEL_AUTOOBJECT_TRAITS(KEvent, KSynchronizationObject);

public:
    explicit KEvent(KernelSystem& kernel);
    ~KEvent() override;

    std::string GetName() const {
        return m_name;
    }
    void SetName(const std::string& name) {
        m_name = name;
    }

    // Binds the event to `owner` (may be null) and sets its reset behavior.
    void Initialize(Process* owner, ResetType reset_type);

    uintptr_t GetPostDestroyArgument() const override {
        return reinterpret_cast<uintptr_t>(m_owner);
    }

    // Releases the owner's quota/reference after the object is destroyed.
    static void PostDestroy(uintptr_t arg);

    Process* GetOwner() const override {
        return m_owner;
    }

    ResetType GetResetType() const {
        return m_reset_type;
    }

    bool ShouldWait(const KThread* thread) const override;
    void Acquire(KThread* thread) override;
    void WakeupAllWaitingThreads() override;

    void Signal();
    void Clear();

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version);

private:
    Process* m_owner{};
    ResetType m_reset_type{};
    bool m_signaled{};
    std::string m_name;
};

} // namespace Kernel

BOOST_CLASS_EXPORT_KEY(Kernel::KEvent)
CONSTRUCT_KERNEL_OBJECT(Kernel::KEvent)

View File

@ -0,0 +1,106 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/array.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include "common/archives.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
namespace Kernel {
// Closes every object still referenced by the table (process teardown).
Result KHandleTable::Finalize() {
    // Close and free all entries.
    for (size_t i = 0; i < m_table_size; i++) {
        if (KAutoObject* obj = m_objects[i]; obj != nullptr) {
            obj->Close();
        }
    }

    return ResultSuccess;
}

// Removes `handle` from the table and drops the table's reference to its
// object. Returns false for pseudo-handles and stale/invalid handles.
bool KHandleTable::Remove(Handle handle) {
    // Don't allow removal of a pseudo-handle.
    if (handle == KernelHandle::CurrentProcess || handle == KernelHandle::CurrentThread)
        [[unlikely]] {
        return false;
    }

    // Handles must not have reserved bits set.
    const auto handle_pack = HandlePack(handle);
    if (handle_pack.reserved != 0) [[unlikely]] {
        return false;
    }

    // Find the object and free the entry.
    KAutoObject* obj = nullptr;
    {
        // KScopedLightMutex lk{m_mutex};
        if (this->IsValidHandle(handle)) [[likely]] {
            const auto index = handle_pack.index;
            obj = m_objects[index];
            this->FreeEntry(index);
        } else {
            return false;
        }
    }

    // Close the object (outside the would-be lock scope).
    obj->Close();
    return true;
}

// Inserts `obj` into the table, opening a new reference to it, and returns
// the freshly encoded handle through `out_handle`.
Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
    // KScopedLightMutex lk{m_mutex};

    // Never exceed our capacity.
    R_UNLESS(m_count < m_table_size, ResultOutOfHandles);

    // Allocate entry, set output handle.
    const auto linear_id = this->AllocateLinearId();
    const auto index = this->AllocateEntry();
    m_entry_infos[index].linear_id = linear_id;
    m_objects[index] = obj;
    obj->Open();
    *out_handle = EncodeHandle(static_cast<u16>(index), linear_id);

    return ResultSuccess;
}

// Resolves a handle for IPC, translating the CurrentProcess/CurrentThread
// pseudo-handles relative to the requesting thread.
KScopedAutoObject<KAutoObject> KHandleTable::GetObjectForIpc(Handle handle,
                                                             KThread* cur_thread) const {
    // Handle pseudo-handles.
    ASSERT(cur_thread != nullptr);
    if (handle == KernelHandle::CurrentProcess) {
        auto* cur_process = cur_thread->GetOwner();
        ASSERT(cur_process != nullptr);
        return cur_process;
    }
    if (handle == KernelHandle::CurrentThread) {
        return cur_thread;
    }

    return this->GetObjectForIpcWithoutPseudoHandle(handle);
}

// Object pointers and per-entry infos are not serialized here;
// NOTE(review): presumably handles are re-established on load — verify.
template <class Archive>
void KHandleTable::serialize(Archive& ar, const u32 file_version) {
    // ar& m_entry_infos;
    // ar& m_objects;
    ar& m_free_head_index;
    ar& m_table_size;
    ar& m_next_id;
    ar& m_max_count;
    ar& m_next_linear_id;
    ar& m_count;
}
SERIALIZE_IMPL(KHandleTable)

} // namespace Kernel

View File

@ -0,0 +1,286 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <vector>
#include "common/common_types.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/result.h"
namespace Kernel {
enum KernelHandle : Handle {
CurrentThread = 0xFFFF8000,
CurrentProcess = 0xFFFF8001,
};
// Per-process table mapping guest handles to kernel objects. A handle packs a
// 15-bit table index with a 15-bit "linear id" generation counter so stale
// handles are detected after an entry has been reused.
class KHandleTable {
    CITRA_NON_COPYABLE(KHandleTable);
    CITRA_NON_MOVEABLE(KHandleTable);

public:
    static constexpr size_t MaxTableSize = 1024;

public:
    explicit KHandleTable(KernelSystem& kernel) : m_kernel(kernel) {}

    // Sets up an empty table of `size` entries (MaxTableSize when size <= 0)
    // and threads every entry onto the free list (head ends at the last index).
    Result Initialize(s32 size) {
        // KScopedLightMutex lk{m_mutex};

        // Initialize all fields.
        m_max_count = 0;
        m_table_size = static_cast<s16>((size <= 0) ? MaxTableSize : size);
        m_next_linear_id = MinLinearId;
        m_count = 0;
        m_free_head_index = -1;

        // Create the arrays
        m_objects.resize(m_table_size);
        m_entry_infos.resize(m_table_size);

        // Free all entries.
        for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) {
            m_objects[i] = nullptr;
            m_entry_infos[i].next_free_index = static_cast<s16>(i - 1);
            m_free_head_index = i;
        }

        return ResultSuccess;
    }

    size_t GetTableSize() const {
        return m_table_size;
    }
    size_t GetCount() const {
        return m_count;
    }
    size_t GetMaxCount() const {
        return m_max_count;
    }

    Result Finalize();
    bool Remove(Handle handle);
    Result Add(Handle* out_handle, KAutoObject* obj);

    // Looks up `handle` and returns a scoped reference, down-casting to T
    // (null on lookup or cast failure). Does not translate pseudo-handles.
    template <typename T = KAutoObject>
    KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
        // KScopedLightMutex lk{m_mutex};

        if constexpr (std::is_same_v<T, KAutoObject>) {
            return this->GetObjectImpl(handle);
        } else {
            if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) [[likely]] {
                return obj->DynamicCast<T*>();
            } else {
                return nullptr;
            }
        }
    }

    // As above, but also resolves the CurrentProcess/CurrentThread
    // pseudo-handles when T can refer to those types.
    template <typename T = KAutoObject>
    KScopedAutoObject<T> GetObject(Handle handle) const {
        // Handle pseudo-handles.
        if constexpr (std::derived_from<Process, T>) {
            if (handle == KernelHandle::CurrentProcess) {
                auto* const cur_process = m_kernel.GetCurrentProcess();
                ASSERT(cur_process != nullptr);
                return cur_process;
            }
        } else if constexpr (std::derived_from<KThread, T>) {
            if (handle == KernelHandle::CurrentThread) {
                auto* const cur_thread = m_kernel.GetCurrentThreadManager().GetCurrentThread();
                ASSERT(cur_thread != nullptr);
                return cur_thread;
            }
        }

        return this->template GetObjectWithoutPseudoHandle<T>(handle);
    }

    KScopedAutoObject<KAutoObject> GetObjectForIpcWithoutPseudoHandle(Handle handle) const {
        return this->GetObjectImpl(handle);
    }
    KScopedAutoObject<KAutoObject> GetObjectForIpc(Handle handle, KThread* cur_thread) const;

    // Resolves and Opens `num_handles` handles as T; all-or-nothing — on any
    // failure every already-opened reference is Closed and false is returned.
    template <typename T>
    bool GetMultipleObjects(T** out, const Handle* handles, size_t num_handles) const {
        // Try to convert and open all the handles.
        size_t num_opened;
        {
            // KScopedLightMutex lk{m_mutex};
            for (num_opened = 0; num_opened < num_handles; num_opened++) {
                // Get the current handle.
                const auto cur_handle = handles[num_opened];

                // Get the object for the current handle.
                KAutoObject* cur_object = this->GetObjectImpl(cur_handle);
                if (cur_object == nullptr) [[unlikely]] {
                    break;
                }

                // Cast the current object to the desired type.
                T* cur_t = cur_object->DynamicCast<T*>();
                if (cur_t == nullptr) [[unlikely]] {
                    break;
                }

                // Open a reference to the current object.
                cur_t->Open();
                out[num_opened] = cur_t;
            }
        }

        // If we converted every object, succeed.
        if (num_opened == num_handles) [[likely]] {
            return true;
        }

        // If we didn't convert every object, close the ones we opened.
        for (size_t i = 0; i < num_opened; i++) {
            out[i]->Close();
        }
        return false;
    }

private:
    // Pops an index off the free list; caller must have checked capacity.
    s32 AllocateEntry() {
        ASSERT(m_count < m_table_size);

        const auto index = m_free_head_index;

        m_free_head_index = m_entry_infos[index].GetNextFreeIndex();
        m_max_count = std::max(m_max_count, ++m_count);

        return index;
    }

    // Pushes an index back onto the free list and clears its object slot.
    void FreeEntry(s32 index) {
        ASSERT(m_count > 0);

        m_objects[index] = nullptr;
        m_entry_infos[index].next_free_index = static_cast<s16>(m_free_head_index);

        m_free_head_index = index;
        --m_count;
    }

    // Generation counter cycling through [MinLinearId, MaxLinearId]; never 0,
    // so an all-zero handle can never validate.
    u16 AllocateLinearId() {
        const u16 id = m_next_linear_id++;
        if (m_next_linear_id > MaxLinearId) {
            m_next_linear_id = MinLinearId;
        }
        return id;
    }

    // Validates index range, non-zero linear id, and generation match.
    bool IsValidHandle(Handle handle) const {
        // Unpack the handle.
        const auto handle_pack = HandlePack(handle);
        const auto raw_value = handle_pack.raw;
        const auto index = handle_pack.index;
        const auto linear_id = handle_pack.linear_id;
        const auto reserved = handle_pack.reserved;
        ASSERT(reserved == 0);

        // Validate our indexing information.
        if (raw_value == 0) [[unlikely]] {
            return false;
        }
        if (linear_id == 0) [[unlikely]] {
            return false;
        }
        if (index >= m_table_size) [[unlikely]] {
            return false;
        }

        // Check that there's an object, and our serial id is correct.
        if (m_objects[index] == nullptr) [[unlikely]] {
            return false;
        }
        if (m_entry_infos[index].GetLinearId() != linear_id) [[unlikely]] {
            return false;
        }

        return true;
    }

    KAutoObject* GetObjectImpl(Handle handle) const {
        // Handles must not have reserved bits set.
        const auto handle_pack = HandlePack(handle);
        if (handle_pack.reserved != 0) [[unlikely]] {
            return nullptr;
        }

        if (this->IsValidHandle(handle)) [[likely]] {
            return m_objects[handle_pack.index];
        } else {
            return nullptr;
        }
    }

    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version);

private:
    // Bit layout of a handle: 15-bit index, 15-bit linear id, 2 reserved bits.
    union HandlePack {
        constexpr HandlePack() = default;
        constexpr HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {}

        u32 raw{};
        BitField<0, 15, u32> index;
        BitField<15, 15, u32> linear_id;
        BitField<30, 2, u32> reserved;
    };

    static constexpr Handle EncodeHandle(u16 index, u16 linear_id) {
        HandlePack handle{};
        handle.index.Assign(index);
        handle.linear_id.Assign(linear_id);
        handle.reserved.Assign(0);
        return handle.raw;
    }

private:
    static constexpr u16 MinLinearId = 1;
    static constexpr u16 MaxLinearId = 0x7FFF;

    // Per-entry metadata: generation id while in use, free-list link while free.
    union EntryInfo {
        u16 linear_id;
        s16 next_free_index;

        constexpr u16 GetLinearId() const {
            return linear_id;
        }
        constexpr s32 GetNextFreeIndex() const {
            return next_free_index;
        }

        friend class boost::serialization::access;
        template <class Archive>
        void serialize(Archive& ar, const u32 file_version) {
            ar& linear_id;
            ar& next_free_index;
        }
    };

private:
    KernelSystem& m_kernel;
    std::vector<EntryInfo> m_entry_infos{};
    std::vector<KAutoObject*> m_objects{};
    s32 m_free_head_index{};
    u16 m_table_size{};
    u16 m_next_id{};
    u16 m_max_count{};
    u16 m_next_linear_id{};
    u16 m_count{};
    // KLightMutex mutex;
};

} // namespace Kernel

View File

@ -0,0 +1,237 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/intrusive_list.h"
#include "core/hle/kernel/k_slab_heap.h"
namespace Kernel {
class KernelSystem;
// Slab-allocated intrusive list node holding a type-erased pointer to the
// actual element; used as the backing node type of KLinkedList below.
class KLinkedListNode : public Common::IntrusiveListBaseNode<KLinkedListNode>,
                        public KSlabAllocated<KLinkedListNode> {
public:
    explicit KLinkedListNode(KernelSystem&) {}
    KLinkedListNode() = default;

    void Initialize(void* it) {
        m_item = it;
    }

    void* GetItem() const {
        return m_item;
    }

private:
    void* m_item = nullptr;
};
// Non-intrusive linked list of T built on an intrusive list of slab-allocated
// KLinkedListNode entries, each storing a type-erased pointer to its element.
// The list owns the nodes (allocated/freed on insert/erase) but never the
// elements themselves.
template <typename T>
class KLinkedList : private Common::IntrusiveListBaseTraits<KLinkedListNode>::ListType {
private:
    using BaseList = Common::IntrusiveListBaseTraits<KLinkedListNode>::ListType;

public:
    template <bool Const>
    class Iterator;

    using value_type = T;
    using size_type = size_t;
    using difference_type = ptrdiff_t;
    using pointer = value_type*;
    using const_pointer = const value_type*;
    using reference = value_type&;
    using const_reference = const value_type&;
    using iterator = Iterator<false>;
    using const_iterator = Iterator<true>;
    using reverse_iterator = std::reverse_iterator<iterator>;
    using const_reverse_iterator = std::reverse_iterator<const_iterator>;

    // Bidirectional iterator that unwraps each node's stored element pointer.
    template <bool Const>
    class Iterator {
    private:
        using BaseIterator = BaseList::iterator;
        friend class KLinkedList;

    public:
        using iterator_category = std::bidirectional_iterator_tag;
        using value_type = typename KLinkedList::value_type;
        using difference_type = typename KLinkedList::difference_type;
        using pointer = std::conditional_t<Const, KLinkedList::const_pointer, KLinkedList::pointer>;
        using reference =
            std::conditional_t<Const, KLinkedList::const_reference, KLinkedList::reference>;

    public:
        explicit Iterator(BaseIterator it) : m_base_it(it) {}

        pointer GetItem() const {
            return static_cast<pointer>(m_base_it->GetItem());
        }

        bool operator==(const Iterator& rhs) const {
            return m_base_it == rhs.m_base_it;
        }
        bool operator!=(const Iterator& rhs) const {
            return !(*this == rhs);
        }

        pointer operator->() const {
            return this->GetItem();
        }
        reference operator*() const {
            return *this->GetItem();
        }

        Iterator& operator++() {
            ++m_base_it;
            return *this;
        }
        Iterator& operator--() {
            --m_base_it;
            return *this;
        }
        Iterator operator++(int) {
            const Iterator it{*this};
            ++(*this);
            return it;
        }
        Iterator operator--(int) {
            const Iterator it{*this};
            --(*this);
            return it;
        }

        // Implicit conversion from mutable to const iterator.
        operator Iterator<true>() const {
            return Iterator<true>(m_base_it);
        }

    private:
        BaseIterator m_base_it;
    };

public:
    constexpr KLinkedList(KernelSystem& kernel_) : BaseList(), kernel{kernel_} {}

    ~KLinkedList() {
        // Erase all elements.
        for (auto it = begin(); it != end(); it = erase(it)) {
        }

        // Ensure we succeeded.
        ASSERT(this->empty());
    }

    // Iterator accessors.
    iterator begin() {
        return iterator(BaseList::begin());
    }
    const_iterator begin() const {
        return const_iterator(BaseList::begin());
    }
    iterator end() {
        return iterator(BaseList::end());
    }
    const_iterator end() const {
        return const_iterator(BaseList::end());
    }
    const_iterator cbegin() const {
        return this->begin();
    }
    const_iterator cend() const {
        return this->end();
    }
    reverse_iterator rbegin() {
        return reverse_iterator(this->end());
    }
    const_reverse_iterator rbegin() const {
        return const_reverse_iterator(this->end());
    }
    reverse_iterator rend() {
        return reverse_iterator(this->begin());
    }
    const_reverse_iterator rend() const {
        return const_reverse_iterator(this->begin());
    }
    const_reverse_iterator crbegin() const {
        return this->rbegin();
    }
    const_reverse_iterator crend() const {
        return this->rend();
    }

    // Content management.
    using BaseList::empty;
    using BaseList::size;

    reference back() {
        return *(--this->end());
    }
    const_reference back() const {
        return *(--this->end());
    }
    reference front() {
        return *this->begin();
    }
    const_reference front() const {
        return *this->begin();
    }

    // Allocates a node from the slab heap and links `ref` before `pos`.
    iterator insert(const_iterator pos, reference ref) {
        KLinkedListNode* new_node = KLinkedListNode::Allocate(kernel);
        ASSERT(new_node != nullptr);
        new_node->Initialize(std::addressof(ref));
        return iterator(BaseList::insert(pos.m_base_it, *new_node));
    }

    void push_back(reference ref) {
        this->insert(this->end(), ref);
    }
    void push_front(reference ref) {
        this->insert(this->begin(), ref);
    }
    void pop_back() {
        this->erase(--this->end());
    }
    void pop_front() {
        this->erase(this->begin());
    }

    // Unlinks the node at `pos` and returns it to the slab heap; the element
    // itself is not destroyed.
    iterator erase(const iterator pos) {
        KLinkedListNode* freed_node = std::addressof(*pos.m_base_it);
        iterator ret = iterator(BaseList::erase(pos.m_base_it));
        KLinkedListNode::Free(kernel, freed_node);
        return ret;
    }

private:
    KernelSystem& kernel;
};

} // namespace Kernel

View File

@ -0,0 +1,150 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_mutex.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
SERIALIZE_EXPORT_IMPL(Kernel::KMutex)
namespace Kernel {
// Force-releases every mutex still held by a (typically exiting) thread:
// each held mutex is reset to the unlocked state and all of its waiters
// are woken so one of them can acquire it.
void ReleaseThreadMutexes(KThread* thread) {
    auto& held = thread->m_held_mutexes;
    for (auto* mutex : held) {
        mutex->m_lock_count = 0;
        mutex->m_holding_thread = nullptr;
        mutex->WakeupAllWaitingThreads();
    }
    held.clear();
}
// The constructor only wires up the base object; actual mutex setup
// (owner reference, priority, optional initial lock) happens in Initialize().
KMutex::KMutex(KernelSystem& kernel) : KAutoObjectWithSlabHeapAndContainer(kernel) {}

KMutex::~KMutex() = default;
// Sets up the mutex. owner may be null; when provided, a reference to the
// owning process is opened. When initial_locked is set, the current thread
// acquires the mutex immediately.
void KMutex::Initialize(Process* owner, bool initial_locked) {
    // Hold a reference to the creating process, if any.
    if (owner != nullptr) {
        owner->Open();
        m_owner = owner;
    }

    // Until a waiter elevates it, the mutex sits at the lowest priority.
    m_priority = ThreadPrioLowest;

    // When requested, the creating thread takes the mutex right away.
    if (initial_locked) {
        this->Acquire(m_kernel.GetCurrentThreadManager().GetCurrentThread());
    }
}
// Final cleanup after the last reference dies. arg carries the owning process
// pointer stashed by GetPostDestroyArgument(); the mutex resource reservation
// and the process reference opened in Initialize() are returned here.
void KMutex::PostDestroy(uintptr_t arg) {
    if (auto* owner = reinterpret_cast<Process*>(arg); owner != nullptr) {
        owner->ReleaseResource(ResourceLimitType::Mutex, 1);
        owner->Close();
    }
}
// A thread must wait only when the mutex is locked by somebody else;
// the holder may re-acquire recursively without waiting.
bool KMutex::ShouldWait(const KThread* thread) const {
    const bool locked = m_lock_count > 0;
    const bool held_by_caller = thread == m_holding_thread;
    return locked && !held_by_caller;
}
// Grants the mutex to thread. The caller must have checked ShouldWait() first.
void KMutex::Acquire(KThread* thread) {
    ASSERT_MSG(!ShouldWait(thread), "object unavailable!");

    // Recursive acquisition by the current holder merely deepens the lock.
    if (m_lock_count++ != 0) {
        return;
    }

    // First acquisition: the mutex inherits the holder's priority, the thread
    // records the mutex as held, and the scheduler is asked to re-evaluate.
    m_priority = thread->m_current_priority;
    thread->m_held_mutexes.insert(this);
    m_holding_thread = thread;
    thread->UpdatePriority();
    m_kernel.PrepareReschedule();
}
// Attempts to release one level of the (possibly recursive) lock held by
// thread. Fails when thread is not the holder or the lock count is invalid.
Result KMutex::Release(KThread* thread) {
    // We can only release the mutex if it's held by the calling thread.
    if (thread != m_holding_thread) {
        if (m_holding_thread) {
            LOG_ERROR(
                Kernel,
                "Tried to release a mutex (owned by thread id {}) from a different thread id {}",
                m_holding_thread->m_thread_id, thread->m_thread_id);
        }
        return Result(ErrCodes::WrongLockingThread, ErrorModule::Kernel,
                      ErrorSummary::InvalidArgument, ErrorLevel::Permanent);
    }

    // Note: It should not be possible for the situation where the mutex has a holding thread with a
    // zero lock count to occur. The real kernel still checks for this, so we do too.
    if (m_lock_count <= 0) {
        return Result(ErrorDescription::InvalidResultValue, ErrorModule::Kernel,
                      ErrorSummary::InvalidState, ErrorLevel::Permanent);
    }

    // Undo one level of recursive locking.
    m_lock_count--;

    // Yield to the next thread only if we've fully released the mutex
    if (m_lock_count == 0) {
        // Order matters: drop the priority-inheritance link and recompute the
        // ex-holder's priority before clearing the holder and waking waiters.
        m_holding_thread->m_held_mutexes.erase(this);
        m_holding_thread->UpdatePriority();
        m_holding_thread = nullptr;
        WakeupAllWaitingThreads();
        m_kernel.PrepareReschedule();
    }
    return ResultSuccess;
}
// Registers thread as a waiter. Beyond the base-class bookkeeping, the thread
// tracks the mutex as pending and the inherited priority is recomputed so the
// new waiter's priority can propagate to the holder.
void KMutex::AddWaitingThread(KThread* thread) {
    KSynchronizationObject::AddWaitingThread(thread);
    thread->m_pending_mutexes.insert(this);
    this->UpdatePriority();
}
// Unregisters a waiter (mirror of AddWaitingThread) and recomputes the
// inherited priority without it.
void KMutex::RemoveWaitingThread(KThread* thread) {
    KSynchronizationObject::RemoveWaitingThread(thread);
    thread->m_pending_mutexes.erase(this);
    this->UpdatePriority();
}
void KMutex::UpdatePriority() {
if (!m_holding_thread) {
return;
}
u32 best_priority = ThreadPrioLowest;
for (const KThread* waiter : GetWaitingThreads()) {
if (waiter->m_current_priority < best_priority) {
best_priority = waiter->m_current_priority;
}
}
if (best_priority != m_priority) {
m_priority = best_priority;
m_holding_thread->UpdatePriority();
}
}
// Boost.Serialization hook: saves/restores the base synchronization state plus
// the mutex-specific fields.
// NOTE(review): m_owner is not serialized here — confirm it is reconstituted
// elsewhere on load.
template <class Archive>
void KMutex::serialize(Archive& ar, const u32 file_version) {
    ar& boost::serialization::base_object<KSynchronizationObject>(*this);
    ar& m_lock_count;
    ar& m_priority;
    ar& m_holding_thread;
}
SERIALIZE_IMPL(KMutex)
} // namespace Kernel

View File

@ -0,0 +1,81 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <boost/serialization/export.hpp>
#include "core/global.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class KThread;
// Kernel mutex synchronization primitive. Supports recursive locking by the
// holding thread and priority inheritance: the mutex adopts the best priority
// among its waiters and propagates it to the holder.
class KMutex final : public KAutoObjectWithSlabHeapAndContainer<KMutex, KSynchronizationObject> {
    KERNEL_AUTOOBJECT_TRAITS(KMutex, KSynchronizationObject);

public:
    explicit KMutex(KernelSystem& kernel);
    ~KMutex() override;

    // Sets up the mutex; owner may be null (a reference is opened when set).
    // When initial_locked is true the current thread acquires it immediately.
    void Initialize(Process* owner, bool initial_locked);

    // The owner process pointer is forwarded to PostDestroy for final cleanup.
    uintptr_t GetPostDestroyArgument() const override {
        return reinterpret_cast<uintptr_t>(m_owner);
    }

    // Releases the mutex resource reservation and the owner reference.
    static void PostDestroy(uintptr_t arg);

    Process* GetOwner() const override {
        return m_owner;
    }

    // Current (possibly waiter-inherited) priority of the mutex.
    u32 GetPriority() const {
        return m_priority;
    }

    // A thread waits unless the mutex is free or the thread already holds it.
    bool ShouldWait(const KThread* thread) const override;
    void Acquire(KThread* thread) override;

    void AddWaitingThread(KThread* thread) override;
    void RemoveWaitingThread(KThread* thread) override;

    /**
     * Elevate the mutex priority to the best priority
     * among the priorities of all its waiting threads.
     */
    void UpdatePriority();

    /**
     * Attempts to release the mutex from the specified thread.
     * @param thread Thread that wants to release the mutex.
     * @returns The result code of the operation.
     */
    Result Release(KThread* thread);

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version);

public:
    // NOTE(review): members are public because ReleaseThreadMutexes()
    // manipulates them directly.
    Process* m_owner{};          // Owning process; holds an open reference when set.
    int m_lock_count{};          // Recursive acquisition depth; 0 means unlocked.
    u32 m_priority{};            // Effective priority (inherited from best waiter).
    KThread* m_holding_thread{}; // Thread currently holding the mutex, if any.
};
/**
* Releases all the mutexes held by the specified thread
* @param thread Thread that is holding the mutexes
*/
void ReleaseThreadMutexes(KThread* thread);
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KMutex)
CONSTRUCT_KERNEL_OBJECT(Kernel::KMutex)

View File

@ -0,0 +1,103 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/k_object_name.h"
namespace Kernel {
// Global named-object registry state. The kernel reference is currently unused.
KObjectNameGlobalData::KObjectNameGlobalData(KernelSystem& kernel) {}

KObjectNameGlobalData::~KObjectNameGlobalData() = default;
// Binds this registry entry to obj, copying name into the fixed buffer
// (truncated to fit, always null-terminated) and opening a reference to obj
// for the lifetime of the entry.
void KObjectName::Initialize(KAutoObject* obj, const char* name) {
    m_object = obj;

    // strncpy bounds the copy and null-pads the remainder of the buffer;
    // the explicit terminator covers the max-length-name case.
    constexpr size_t copy_len = sizeof(m_name) - 1;
    std::strncpy(m_name.data(), name, copy_len);
    m_name[copy_len] = '\x00';

    // Keep the named object alive while the entry exists.
    m_object->Open();
}
// Whole-buffer comparison against the stored, null-padded name.
bool KObjectName::MatchesName(const char* name) const {
    const int diff = std::strncmp(m_name.data(), name, sizeof(m_name));
    return diff == 0;
}
// Registers obj in the global list under name. Fails only when the name slab
// is exhausted; a duplicate name is treated as a programming error.
Result KObjectName::NewFromName(KernelSystem& kernel, KAutoObject* obj, const char* name) {
    // Create a new object name.
    KObjectName* new_name = KObjectName::Allocate(kernel);
    // NOTE(review): 0xD86007F3 is the raw kernel error returned on slab
    // exhaustion — confirm against the error-code table.
    R_UNLESS(new_name != nullptr, Result{0xD86007F3});

    // Initialize the new name; this also opens a reference to obj.
    new_name->Initialize(obj, name);

    // Check if there's an existing name.
    {
        // Get the global data.
        KObjectNameGlobalData& gd{kernel.ObjectNameGlobalData()};

        // Ensure we have exclusive access to the global list.
        // KScopedLightMutex lk{gd.GetObjectListLock()};

        // If the object doesn't exist, put it into the list.
        KScopedAutoObject existing_object = FindImpl(kernel, name);
        if (existing_object.IsNull()) {
            gd.GetObjectList().push_back(*new_name);
            return ResultSuccess;
        }
    }

    // The object already exists, the kernel does not check for this.
    UNREACHABLE();
}
// Removes the registry entry matching both compare_name and obj, dropping the
// reference the entry held and returning the entry to the slab.
Result KObjectName::Delete(KernelSystem& kernel, KAutoObject* obj, const char* compare_name) {
    // Get the global data.
    KObjectNameGlobalData& gd{kernel.ObjectNameGlobalData()};

    // Ensure we have exclusive access to the global list.
    // KScopedLightMutex lk{gd.GetObjectListLock()};

    // Find a matching entry in the list, and delete it. Requiring both the name
    // and the object pointer to match avoids deleting another object's entry.
    // Returning from inside the loop keeps iteration valid despite the erase().
    for (auto& name : gd.GetObjectList()) {
        if (name.MatchesName(compare_name) && obj == name.GetObject()) {
            // We found a match, clean up its resources.
            obj->Close();
            gd.GetObjectList().erase(gd.GetObjectList().iterator_to(name));
            KObjectName::Free(kernel, std::addressof(name));
            return ResultSuccess;
        }
    }

    // We didn't find the object in the list.
    return ResultNotFound;
}
// Public lookup entry point; currently a thin wrapper over FindImpl.
// List locking is disabled for now (see the commented-out KScopedLightMutex
// usage in the sibling functions).
KScopedAutoObject<KAutoObject> KObjectName::Find(KernelSystem& kernel, const char* name) {
    return FindImpl(kernel, name);
}
// Linear scan of the global list for an entry whose name matches compare_name;
// returns the associated object, or a null scope when absent.
KScopedAutoObject<KAutoObject> KObjectName::FindImpl(KernelSystem& kernel,
                                                     const char* compare_name) {
    // Get the global data.
    KObjectNameGlobalData& gd{kernel.ObjectNameGlobalData()};

    // Try to find a matching object in the global list.
    for (const auto& name : gd.GetObjectList()) {
        if (name.MatchesName(compare_name)) {
            // NOTE(review): relies on KScopedAutoObject's converting
            // constructor — confirm it opens a reference to the object.
            return name.GetObject();
        }
    }

    // There's no matching entry in the list.
    return nullptr;
}
} // namespace Kernel

View File

@ -0,0 +1,82 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include "common/intrusive_list.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_slab_heap.h"
namespace Kernel {
class KObjectNameGlobalData;
// Registry entry binding a fixed-size name to a kernel object. Entries are
// slab-allocated and linked into the global list owned by KObjectNameGlobalData;
// each live entry holds an open reference to its object.
class KObjectName : public KSlabAllocated<KObjectName>,
                    public Common::IntrusiveListBaseNode<KObjectName> {
public:
    explicit KObjectName(KernelSystem&) {}
    virtual ~KObjectName() = default;

    /// Name buffer size; copies are truncated to fit with a null terminator.
    static constexpr size_t NameLengthMax = 12;
    using List = Common::IntrusiveListBaseTraits<KObjectName>::ListType;

    /// Registers obj under name; fails when the name slab is exhausted.
    static Result NewFromName(KernelSystem& kernel, KAutoObject* obj, const char* name);
    /// Removes the entry matching both name and obj, closing its reference.
    static Result Delete(KernelSystem& kernel, KAutoObject* obj, const char* name);

    /// Looks up an object by name; null scope when not found.
    static KScopedAutoObject<KAutoObject> Find(KernelSystem& kernel, const char* name);

    // Typed deletion: finds the named object, downcasts it to Derived, and
    // deletes its registry entry once the server side is confirmed closed.
    template <typename Derived>
    static Result Delete(KernelSystem& kernel, const char* name) {
        // Find the object.
        KScopedAutoObject obj = Find(kernel, name);
        R_UNLESS(obj.IsNotNull(), ResultNotFound);

        // Cast the object to the desired type.
        Derived* derived = obj->DynamicCast<Derived*>();
        R_UNLESS(derived != nullptr, ResultNotFound);

        // Check that the object is closed.
        R_UNLESS(derived->IsServerClosed(), ResultInvalidAddressState);

        return Delete(kernel, obj.GetPointerUnsafe(), name);
    }

    // Typed lookup wrapper.
    // NOTE(review): returns the untyped Find result; the conversion to
    // KScopedAutoObject<Derived> is expected to perform the downcast — confirm.
    template <typename Derived>
        requires(std::derived_from<Derived, KAutoObject>)
    static KScopedAutoObject<Derived> Find(KernelSystem& kernel, const char* name) {
        return Find(kernel, name);
    }

private:
    static KScopedAutoObject<KAutoObject> FindImpl(KernelSystem& kernel, const char* name);

    // Binds this entry to obj, copies name (truncated, null-terminated), and
    // opens a reference to obj.
    void Initialize(KAutoObject* obj, const char* name);

    // Whole-buffer comparison against the stored, null-padded name.
    bool MatchesName(const char* name) const;

    KAutoObject* GetObject() const {
        return m_object;
    }

private:
    std::array<char, NameLengthMax> m_name{}; // Null-padded name storage.
    KAutoObject* m_object{};                  // Named object; entry holds an open reference.
};
// Holder for the kernel-wide list of named objects. The mutex that should
// guard the list is not yet implemented (see the commented-out member).
class KObjectNameGlobalData {
public:
    explicit KObjectNameGlobalData(KernelSystem& kernel);
    ~KObjectNameGlobalData();

    /// Direct access to the global name list; callers currently perform no
    /// locking (see the commented-out KScopedLightMutex usages in KObjectName).
    KObjectName::List& GetObjectList() {
        return m_object_list;
    }

private:
    // KMutex m_mutex;
    KObjectName::List m_object_list;
};
} // namespace Kernel

View File

@ -0,0 +1,25 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/k_port.h"
namespace Kernel {
// Constructs the port with its embedded server/client endpoints; the actual
// wiring of the pair happens in Initialize().
KPort::KPort(KernelSystem& kernel)
    : KAutoObjectWithSlabHeapAndContainer{kernel}, m_server{kernel}, m_client{kernel} {}

KPort::~KPort() = default;
// Wires up the port: opens a reference on the port itself, then creates and
// initializes the embedded server/client endpoints with a back-pointer to it.
void KPort::Initialize(s32 max_sessions, std::string name) {
    // Open a new reference count to the initialized port.
    this->Open();

    // Create and initialize our server/client pair.
    // Create() must precede Initialize() on each endpoint.
    KAutoObject::Create(std::addressof(m_server));
    KAutoObject::Create(std::addressof(m_client));
    m_server.Initialize(this, name);
    // Only the client side tracks the session limit.
    m_client.Initialize(this, max_sessions, name);
}
} // namespace Kernel

View File

@ -0,0 +1,52 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/common_types.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class KServerSession;
// Kernel IPC port: pairs a server endpoint with a client endpoint, both
// embedded by value and initialized together via Initialize().
class KPort final : public KAutoObjectWithSlabHeapAndContainer<KPort> {
    KERNEL_AUTOOBJECT_TRAITS(KPort, KAutoObject);

public:
    explicit KPort(KernelSystem& kernel);
    ~KPort() override;

    // No per-object cleanup is needed after the last reference dies.
    static void PostDestroy(uintptr_t arg) {}

    // Opens a reference and wires up the embedded server/client pair.
    void Initialize(s32 max_sessions, std::string name);

    void OnClientClosed();
    void OnServerClosed();

    bool IsServerClosed() const;

    Result EnqueueSession(KServerSession* session);

    KClientPort& GetClientPort() {
        return m_client;
    }
    KServerPort& GetServerPort() {
        return m_server;
    }
    const KClientPort& GetClientPort() const {
        return m_client;
    }
    const KServerPort& GetServerPort() const {
        return m_server;
    }

private:
    KServerPort m_server;
    KClientPort m_client;
};
} // namespace Kernel

View File

@ -12,18 +12,18 @@
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/logging/log.h"
#include "common/serialization/boost_vector.hpp"
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_auto_object_container.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/hle/service/plgldr/plgldr.h"
#include "core/loader/loader.h"
#include "core/memory.h"
SERIALIZE_EXPORT_IMPL(Kernel::AddressMapping)
@ -44,14 +44,13 @@ SERIALIZE_IMPL(AddressMapping)
template <class Archive>
void Process::serialize(Archive& ar, const unsigned int) {
ar& boost::serialization::base_object<Object>(*this);
ar& boost::serialization::base_object<KAutoObject>(*this);
ar& handle_table;
ar& codeset; // TODO: Replace with apploader reference
ar& resource_limit;
ar& svc_access_mask;
ar& handle_table_size;
ar&(boost::container::vector<AddressMapping, boost::container::dtl::static_storage_allocator<
AddressMapping, 8, 0, true>>&)address_mappings;
// ar& address_mappings;
ar& flags.raw;
ar& no_thread_restrictions;
ar& kernel_version;
@ -68,52 +67,7 @@ void Process::serialize(Archive& ar, const unsigned int) {
}
SERIALIZE_IMPL(Process)
std::shared_ptr<CodeSet> KernelSystem::CreateCodeSet(std::string name, u64 program_id) {
auto codeset{std::make_shared<CodeSet>(*this)};
codeset->name = std::move(name);
codeset->program_id = program_id;
return codeset;
}
CodeSet::CodeSet(KernelSystem& kernel) : Object(kernel) {}
CodeSet::~CodeSet() {}
template <class Archive>
void CodeSet::serialize(Archive& ar, const unsigned int) {
ar& boost::serialization::base_object<Object>(*this);
ar& memory;
ar& segments;
ar& entrypoint;
ar& name;
ar& program_id;
}
SERIALIZE_IMPL(CodeSet)
template <class Archive>
void CodeSet::Segment::serialize(Archive& ar, const unsigned int) {
ar& offset;
ar& addr;
ar& size;
}
SERIALIZE_IMPL(CodeSet::Segment)
std::shared_ptr<Process> KernelSystem::CreateProcess(std::shared_ptr<CodeSet> code_set) {
auto process{std::make_shared<Process>(*this)};
process->codeset = std::move(code_set);
process->flags.raw = 0;
process->flags.memory_region.Assign(MemoryRegion::APPLICATION);
process->status = ProcessStatus::Created;
process->process_id = ++next_process_id;
process->creation_time_ticks = timing.GetTicks();
process_list.push_back(process);
return process;
}
void KernelSystem::TerminateProcess(std::shared_ptr<Process> process) {
void KernelSystem::TerminateProcess(Process* process) {
LOG_INFO(Kernel_SVC, "Process {} exiting", process->process_id);
ASSERT_MSG(process->status == ProcessStatus::Running, "Process has already exited");
@ -198,6 +152,8 @@ void Process::ParseKernelCaps(const u32* kernel_caps, std::size_t len) {
LOG_ERROR(Loader, "Unhandled kernel caps descriptor: 0x{:08X}", descriptor);
}
}
handle_table.Initialize(handle_table_size);
}
void Process::Set3dsxKernelCaps() {
@ -219,25 +175,20 @@ void Process::Set3dsxKernelCaps() {
void Process::Run(s32 main_thread_priority, u32 stack_size) {
memory_region = kernel.GetMemoryRegion(flags.memory_region);
// Ensure we can reserve a thread. Real kernel returns 0xC860180C if this fails.
if (!resource_limit->Reserve(ResourceLimitType::Thread, 1)) {
return;
}
VAddr out_addr{};
auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions,
MemoryState memory_state) {
HeapAllocate(std::addressof(out_addr), segment.addr, segment.size, permissions,
memory_state, true);
kernel.memory.WriteBlock(*this, segment.addr, codeset->memory.data() + segment.offset,
kernel.memory.WriteBlock(*this, segment.addr, codeset.memory.data() + segment.offset,
segment.size);
};
// Map CodeSet segments
MapSegment(codeset->CodeSegment(), VMAPermission::ReadExecute, MemoryState::Code);
MapSegment(codeset->RODataSegment(), VMAPermission::Read, MemoryState::Code);
MapSegment(codeset->DataSegment(), VMAPermission::ReadWrite, MemoryState::Private);
MapSegment(codeset.CodeSegment(), VMAPermission::ReadExecute, MemoryState::Code);
MapSegment(codeset.RODataSegment(), VMAPermission::Read, MemoryState::Code);
MapSegment(codeset.DataSegment(), VMAPermission::ReadWrite, MemoryState::Private);
// Allocate and map stack
HeapAllocate(std::addressof(out_addr), Memory::HEAP_VADDR_END - stack_size, stack_size,
@ -255,9 +206,23 @@ void Process::Run(s32 main_thread_priority, u32 stack_size) {
}
status = ProcessStatus::Running;
vm_manager.LogLayout(Common::Log::Level::Debug);
Kernel::SetupMainThread(kernel, codeset->entrypoint, main_thread_priority, SharedFrom(this));
// Place a tentative reservation of a thread for this process.
KScopedResourceReservation thread_reservation(this, ResourceLimitType::Thread);
ASSERT(thread_reservation.Succeeded());
// Create a new thread for the process.
KThread* main_thread = KThread::Create(m_kernel);
ASSERT(main_thread != nullptr);
// Initialize the thread.
main_thread->Initialize("", codeset.entrypoint, main_thread_priority, 0, ideal_processor,
Memory::HEAP_VADDR_END, this);
// Register the thread, and commit our reservation.
KThread::Register(m_kernel, main_thread);
thread_reservation.Commit();
}
void Process::Exit() {
@ -425,7 +390,7 @@ Result Process::LinearFree(VAddr target, u32 size) {
return ResultSuccess;
}
ResultVal<VAddr> Process::AllocateThreadLocalStorage() {
Result Process::AllocateThreadLocalStorage(VAddr* out_tls_addr) {
std::size_t tls_page;
std::size_t tls_slot;
bool needs_allocation = true;
@ -492,7 +457,8 @@ ResultVal<VAddr> Process::AllocateThreadLocalStorage() {
static_cast<VAddr>(tls_slot) * Memory::TLS_ENTRY_SIZE;
kernel.memory.ZeroBlock(*this, tls_address, Memory::TLS_ENTRY_SIZE);
return tls_address;
*out_tls_addr = tls_address;
return ResultSuccess;
}
Result Process::Map(VAddr target, VAddr source, u32 size, VMAPermission perms, bool privileged) {
@ -590,6 +556,11 @@ Result Process::Unmap(VAddr target, VAddr source, u32 size, VMAPermission perms,
return ResultSuccess;
}
void Process::ReleaseResource(ResourceLimitType type, s32 amount) {
ASSERT(resource_limit);
resource_limit->Release(type, amount);
}
void Process::FreeAllMemory() {
if (memory_region == nullptr || resource_limit == nullptr) {
return;
@ -627,30 +598,35 @@ void Process::FreeAllMemory() {
}
Kernel::Process::Process(KernelSystem& kernel)
: Object(kernel), handle_table(kernel), vm_manager(kernel.memory, *this), kernel(kernel) {
: KAutoObjectWithSlabHeapAndContainer(kernel), handle_table(kernel),
vm_manager(kernel.memory, *this), kernel(kernel) {
kernel.memory.RegisterPageTable(vm_manager.page_table);
}
Kernel::Process::~Process() {
LOG_INFO(Kernel, "Cleaning up process {}", process_id);
// Release all objects this process owns first so that their potential destructor can do clean
// up with this process before further destruction.
// TODO(wwylele): explicitly destroy or invalidate objects this process owns (threads, shared
// memory etc.) even if they are still referenced by other processes.
handle_table.Clear();
Kernel::Process::~Process() = default;
void Process::Initialize() {
flags.memory_region.Assign(MemoryRegion::APPLICATION);
status = ProcessStatus::Created;
process_id = m_kernel.NewProcessId();
creation_time_ticks = m_kernel.timing.GetTicks();
m_kernel.process_list.push_back(this);
handle_table.Initialize(handle_table_size);
}
void Process::Finalize() {
handle_table.Finalize();
FreeAllMemory();
kernel.memory.UnregisterPageTable(vm_manager.page_table);
}
std::shared_ptr<Process> KernelSystem::GetProcessById(u32 process_id) const {
auto itr = std::find_if(
process_list.begin(), process_list.end(),
[&](const std::shared_ptr<Process>& process) { return process->process_id == process_id; });
Process* KernelSystem::GetProcessById(u32 process_id) const {
auto it = std::ranges::find_if(
process_list, [&](const auto process) { return process->process_id == process_id; });
if (itr == process_list.end())
if (it == process_list.end()) {
return nullptr;
return *itr;
}
return *it;
}
} // namespace Kernel

View File

@ -4,18 +4,17 @@
#pragma once
#include <array>
#include <bitset>
#include <cstddef>
#include <memory>
#include <string>
#include <vector>
#include <boost/container/static_vector.hpp>
#include <boost/serialization/export.hpp>
#include "common/bit_field.h"
#include "common/common_types.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/k_code_set.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/vm_manager.h"
namespace Kernel {
@ -51,110 +50,37 @@ union ProcessFlags {
BitField<12, 1, u16> loaded_high; ///< Application loaded high (not at 0x00100000).
};
enum class ProcessStatus { Created, Running, Exited };
class ResourceLimit;
struct MemoryRegionInfo;
class CodeSet final : public Object {
public:
explicit CodeSet(KernelSystem& kernel);
~CodeSet() override;
struct Segment {
std::size_t offset = 0;
VAddr addr = 0;
u32 size = 0;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int);
};
std::string GetTypeName() const override {
return "CodeSet";
}
std::string GetName() const override {
return name;
}
static constexpr HandleType HANDLE_TYPE = HandleType::CodeSet;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
Segment& CodeSegment() {
return segments[0];
}
const Segment& CodeSegment() const {
return segments[0];
}
Segment& RODataSegment() {
return segments[1];
}
const Segment& RODataSegment() const {
return segments[1];
}
Segment& DataSegment() {
return segments[2];
}
const Segment& DataSegment() const {
return segments[2];
}
std::vector<u8> memory;
std::array<Segment, 3> segments;
VAddr entrypoint;
/// Name of the process
std::string name;
/// Title ID corresponding to the process
u64 program_id;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int);
enum class ProcessStatus {
Created,
Running,
Exited,
};
class Process final : public Object {
class KResourceLimit;
enum class ResourceLimitType : u32;
struct MemoryRegionInfo;
class Process final : public KAutoObjectWithSlabHeapAndContainer<Process> {
KERNEL_AUTOOBJECT_TRAITS(Process, KAutoObject);
public:
explicit Process(Kernel::KernelSystem& kernel);
~Process() override;
std::string GetTypeName() const override {
return "Process";
}
std::string GetName() const override {
return codeset->name;
}
KHandleTable handle_table;
static constexpr HandleType HANDLE_TYPE = HandleType::Process;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
HandleTable handle_table;
std::shared_ptr<CodeSet> codeset;
CodeSet codeset{};
/// Resource limit descriptor for this process
std::shared_ptr<ResourceLimit> resource_limit;
KResourceLimit* resource_limit{};
/// The process may only call SVCs which have the corresponding bit set.
std::bitset<0x80> svc_access_mask;
/// Maximum size of the handle table for the process.
unsigned int handle_table_size = 0x200;
u32 handle_table_size = 0x200;
/// Special memory ranges mapped into this processes address space. This is used to give
/// processes access to specific I/O regions and device memory.
boost::container::static_vector<AddressMapping, 8> address_mappings;
ProcessFlags flags;
ProcessFlags flags{};
bool no_thread_restrictions = false;
/// Kernel compatibility version for this process
u16 kernel_version = 0;
@ -169,6 +95,12 @@ public:
// Creation time in ticks of the process.
u64 creation_time_ticks;
void Initialize();
static void PostDestroy(uintptr_t arg) {}
void Finalize() override;
/**
* Parses a list of kernel capability descriptors (as found in the ExHeader) and applies them
* to this process.
@ -190,9 +122,6 @@ public:
*/
void Exit();
///////////////////////////////////////////////////////////////////////////////////////////////
// Memory Management
VMManager vm_manager;
u32 memory_used = 0;
@ -220,12 +149,14 @@ public:
Result LinearAllocate(VAddr* out_addr, VAddr target, u32 size, VMAPermission perms);
Result LinearFree(VAddr target, u32 size);
ResultVal<VAddr> AllocateThreadLocalStorage();
Result AllocateThreadLocalStorage(VAddr* out_tls);
Result Map(VAddr target, VAddr source, u32 size, VMAPermission perms, bool privileged = false);
Result Unmap(VAddr target, VAddr source, u32 size, VMAPermission perms,
bool privileged = false);
void ReleaseResource(ResourceLimitType type, s32 amount);
private:
void FreeAllMemory();
@ -238,9 +169,5 @@ private:
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::AddressMapping)
BOOST_CLASS_EXPORT_KEY(Kernel::CodeSet)
BOOST_CLASS_EXPORT_KEY(Kernel::CodeSet::Segment)
BOOST_CLASS_EXPORT_KEY(Kernel::Process)
CONSTRUCT_KERNEL_OBJECT(Kernel::CodeSet)
CONSTRUCT_KERNEL_OBJECT(Kernel::Process)

View File

@ -1,4 +1,4 @@
// Copyright 2015 Citra Emulator Project
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
@ -9,40 +9,36 @@
#include "common/archives.h"
#include "common/assert.h"
#include "common/settings.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/k_auto_object_container.h"
#include "core/hle/kernel/k_resource_limit.h"
SERIALIZE_EXPORT_IMPL(Kernel::ResourceLimit)
SERIALIZE_EXPORT_IMPL(Kernel::KResourceLimit)
SERIALIZE_EXPORT_IMPL(Kernel::ResourceLimitList)
namespace Kernel {
ResourceLimit::ResourceLimit(KernelSystem& kernel) : Object(kernel) {}
KResourceLimit::KResourceLimit(KernelSystem& kernel)
: KAutoObjectWithSlabHeapAndContainer(kernel) {}
ResourceLimit::~ResourceLimit() = default;
KResourceLimit::~KResourceLimit() = default;
std::shared_ptr<ResourceLimit> ResourceLimit::Create(KernelSystem& kernel, std::string name) {
auto resource_limit = std::make_shared<ResourceLimit>(kernel);
resource_limit->m_name = std::move(name);
return resource_limit;
}
s32 ResourceLimit::GetCurrentValue(ResourceLimitType type) const {
const auto index = static_cast<std::size_t>(type);
s32 KResourceLimit::GetCurrentValue(ResourceLimitType type) const {
const auto index = static_cast<size_t>(type);
return m_current_values[index];
}
s32 ResourceLimit::GetLimitValue(ResourceLimitType type) const {
const auto index = static_cast<std::size_t>(type);
s32 KResourceLimit::GetLimitValue(ResourceLimitType type) const {
const auto index = static_cast<size_t>(type);
return m_limit_values[index];
}
void ResourceLimit::SetLimitValue(ResourceLimitType type, s32 value) {
const auto index = static_cast<std::size_t>(type);
void KResourceLimit::SetLimitValue(ResourceLimitType type, s32 value) {
const auto index = static_cast<size_t>(type);
m_limit_values[index] = value;
}
bool ResourceLimit::Reserve(ResourceLimitType type, s32 amount) {
const auto index = static_cast<std::size_t>(type);
bool KResourceLimit::Reserve(ResourceLimitType type, s32 amount) {
const auto index = static_cast<size_t>(type);
const s32 limit = m_limit_values[index];
const s32 new_value = m_current_values[index] + amount;
if (new_value > limit) {
@ -54,8 +50,8 @@ bool ResourceLimit::Reserve(ResourceLimitType type, s32 amount) {
return true;
}
bool ResourceLimit::Release(ResourceLimitType type, s32 amount) {
const auto index = static_cast<std::size_t>(type);
bool KResourceLimit::Release(ResourceLimitType type, s32 amount) {
const auto index = static_cast<size_t>(type);
const s32 value = m_current_values[index];
if (amount > value) {
LOG_ERROR(Kernel, "Amount {} exceeds current value {} for resource type {}", amount, value,
@ -67,13 +63,13 @@ bool ResourceLimit::Release(ResourceLimitType type, s32 amount) {
}
template <class Archive>
void ResourceLimit::serialize(Archive& ar, const unsigned int) {
ar& boost::serialization::base_object<Object>(*this);
void KResourceLimit::serialize(Archive& ar, const unsigned int) {
ar& boost::serialization::base_object<KAutoObject>(*this);
ar& m_name;
ar& m_limit_values;
ar& m_current_values;
}
SERIALIZE_IMPL(ResourceLimit)
SERIALIZE_IMPL(KResourceLimit)
ResourceLimitList::ResourceLimitList(KernelSystem& kernel) {
// PM makes APPMEMALLOC always match app RESLIMIT_COMMIT.
@ -81,8 +77,15 @@ ResourceLimitList::ResourceLimitList(KernelSystem& kernel) {
const bool is_new_3ds = Settings::values.is_new_3ds.GetValue();
const auto& appmemalloc = kernel.GetMemoryRegion(MemoryRegion::APPLICATION);
const auto CreateLimit = [&](std::string name) {
KResourceLimit* limit = KResourceLimit::Create(kernel);
limit->Initialize(name);
KResourceLimit::Register(kernel, limit);
return limit;
};
// Create the Application resource limit
auto resource_limit = ResourceLimit::Create(kernel, "Applications");
auto resource_limit = CreateLimit("Applications");
resource_limit->SetLimitValue(ResourceLimitType::Priority, 0x18);
resource_limit->SetLimitValue(ResourceLimitType::Commit, appmemalloc->size);
resource_limit->SetLimitValue(ResourceLimitType::Thread, 0x20);
@ -96,7 +99,7 @@ ResourceLimitList::ResourceLimitList(KernelSystem& kernel) {
resource_limits[static_cast<u8>(ResourceLimitCategory::Application)] = resource_limit;
// Create the SysApplet resource limit
resource_limit = ResourceLimit::Create(kernel, "System Applets");
resource_limit = CreateLimit("System Applets");
resource_limit->SetLimitValue(ResourceLimitType::Priority, 0x4);
resource_limit->SetLimitValue(ResourceLimitType::Commit, is_new_3ds ? 0x5E06000 : 0x2606000);
resource_limit->SetLimitValue(ResourceLimitType::Thread, is_new_3ds ? 0x1D : 0xE);
@ -110,7 +113,7 @@ ResourceLimitList::ResourceLimitList(KernelSystem& kernel) {
resource_limits[static_cast<u8>(ResourceLimitCategory::SysApplet)] = resource_limit;
// Create the LibApplet resource limit
resource_limit = ResourceLimit::Create(kernel, "Library Applets");
resource_limit = CreateLimit("Library Applets");
resource_limit->SetLimitValue(ResourceLimitType::Priority, 0x4);
resource_limit->SetLimitValue(ResourceLimitType::Commit, 0x602000);
resource_limit->SetLimitValue(ResourceLimitType::Thread, 0xE);
@ -124,7 +127,7 @@ ResourceLimitList::ResourceLimitList(KernelSystem& kernel) {
resource_limits[static_cast<u8>(ResourceLimitCategory::LibApplet)] = resource_limit;
// Create the Other resource limit
resource_limit = ResourceLimit::Create(kernel, "Others");
resource_limit = CreateLimit("Others");
resource_limit->SetLimitValue(ResourceLimitType::Priority, 0x4);
resource_limit->SetLimitValue(ResourceLimitType::Commit, is_new_3ds ? 0x2182000 : 0x1682000);
resource_limit->SetLimitValue(ResourceLimitType::Thread, is_new_3ds ? 0xE1 : 0xCA);
@ -140,7 +143,7 @@ ResourceLimitList::ResourceLimitList(KernelSystem& kernel) {
ResourceLimitList::~ResourceLimitList() = default;
std::shared_ptr<ResourceLimit> ResourceLimitList::GetForCategory(ResourceLimitCategory category) {
KResourceLimit* ResourceLimitList::GetForCategory(ResourceLimitCategory category) {
switch (category) {
case ResourceLimitCategory::Application:
case ResourceLimitCategory::SysApplet:

View File

@ -1,4 +1,4 @@
// Copyright 2015 Citra Emulator Project
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
@ -8,7 +8,8 @@
#include <memory>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/global.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
@ -33,28 +34,14 @@ enum class ResourceLimitType : u32 {
Max = 10,
};
class ResourceLimit final : public Object {
class KResourceLimit final : public KAutoObjectWithSlabHeapAndContainer<KResourceLimit> {
KERNEL_AUTOOBJECT_TRAITS(KResourceLimit, KAutoObject);
public:
explicit ResourceLimit(KernelSystem& kernel);
~ResourceLimit() override;
explicit KResourceLimit(KernelSystem& kernel);
~KResourceLimit() override;
/**
* Creates a resource limit object.
*/
static std::shared_ptr<ResourceLimit> Create(KernelSystem& kernel,
std::string name = "Unknown");
std::string GetTypeName() const override {
return "ResourceLimit";
}
std::string GetName() const override {
return m_name;
}
static constexpr HandleType HANDLE_TYPE = HandleType::ResourceLimit;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
void Initialize(std::string name) {}
s32 GetCurrentValue(ResourceLimitType type) const;
s32 GetLimitValue(ResourceLimitType type) const;
@ -64,16 +51,18 @@ public:
bool Reserve(ResourceLimitType type, s32 amount);
bool Release(ResourceLimitType type, s32 amount);
static void PostDestroy(uintptr_t arg) {}
private:
using ResourceArray = std::array<s32, static_cast<std::size_t>(ResourceLimitType::Max)>;
ResourceArray m_limit_values{};
ResourceArray m_current_values{};
std::string m_name;
std::string m_name{};
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int);
void serialize(Archive& ar, const unsigned int file_version);
};
class ResourceLimitList {
@ -86,10 +75,10 @@ public:
* @param category The resource limit category
* @returns The resource limit associated with the category
*/
std::shared_ptr<ResourceLimit> GetForCategory(ResourceLimitCategory category);
KResourceLimit* GetForCategory(ResourceLimitCategory category);
private:
std::array<std::shared_ptr<ResourceLimit>, 4> resource_limits;
std::array<KResourceLimit*, 4> resource_limits;
friend class boost::serialization::access;
template <class Archive>
@ -98,7 +87,7 @@ private:
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::ResourceLimit)
BOOST_CLASS_EXPORT_KEY(Kernel::KResourceLimit)
BOOST_CLASS_EXPORT_KEY(Kernel::ResourceLimitList)
CONSTRUCT_KERNEL_OBJECT(Kernel::ResourceLimit)
CONSTRUCT_KERNEL_OBJECT(Kernel::KResourceLimit)
CONSTRUCT_KERNEL_OBJECT(Kernel::ResourceLimitList)

View File

@ -0,0 +1,50 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/common_types.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
namespace Kernel {
// RAII helper that reserves `amount` units of a resource against a resource
// limit on construction and releases the reservation on destruction unless
// Commit() was called in between.
class KScopedResourceReservation {
public:
    explicit KScopedResourceReservation(KResourceLimit* l, ResourceLimitType type, s32 amount = 1)
        : m_limit(l), m_amount(amount), m_type(type) {
        // With no limit attached there is nothing to reserve; treat as success.
        m_succeeded = (m_limit == nullptr) || m_limit->Reserve(m_type, m_amount);
    }

    // Convenience overload that reserves against a process' resource limit.
    explicit KScopedResourceReservation(const Process* p, ResourceLimitType type, s32 amount = 1)
        : KScopedResourceReservation(p->resource_limit, type, amount) {}

    ~KScopedResourceReservation() noexcept {
        if (m_limit != nullptr && m_succeeded) {
            // The reservation was never committed; give the units back.
            m_limit->Release(m_type, m_amount);
        }
    }

    /// Commit the resource reservation, destruction of this object does not release the resource
    void Commit() {
        m_limit = nullptr;
    }

    /// Whether the initial reservation succeeded.
    bool Succeeded() const {
        return m_succeeded;
    }

private:
    KResourceLimit* m_limit{};
    s32 m_amount{};
    ResourceLimitType m_type{};
    bool m_succeeded{};
};
} // namespace Kernel

View File

@ -0,0 +1,78 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/export.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_auto_object_container.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_semaphore.h"
#include "core/hle/kernel/kernel.h"
SERIALIZE_EXPORT_IMPL(Kernel::KSemaphore)
namespace Kernel {
KSemaphore::KSemaphore(KernelSystem& kernel) : KAutoObjectWithSlabHeapAndContainer(kernel) {}
KSemaphore::~KSemaphore() = default;
// Sets the initial/maximum counts and optionally records an owner process,
// which is kept alive via an open reference until PostDestroy() runs.
void KSemaphore::Initialize(Process* owner, s32 initial_count, s32 max_count, std::string name) {
// Open a reference to the owner process.
if (owner) {
owner->Open();
m_owner = owner;
}
// Set member variables
m_available_count = initial_count;
m_max_count = max_count;
m_name = name;
}
// Runs after the last reference is dropped: returns the Semaphore resource to
// the owner's limit and closes the reference taken in Initialize().
void KSemaphore::PostDestroy(uintptr_t arg) {
Process* owner = reinterpret_cast<Process*>(arg);
if (owner != nullptr) {
owner->ReleaseResource(ResourceLimitType::Semaphore, 1);
owner->Close();
}
}
// A thread must wait while no count is available.
bool KSemaphore::ShouldWait(const KThread* thread) const {
return m_available_count <= 0;
}
// Consumes one unit of the available count; a no-op if none are available.
void KSemaphore::Acquire(KThread* thread) {
if (m_available_count <= 0) {
return;
}
--m_available_count;
}
// Gives `release_count` units back to the semaphore, failing if that would
// exceed the maximum count. Writes the count prior to the release to out_count.
Result KSemaphore::Release(s32* out_count, s32 release_count) {
R_UNLESS(release_count + m_available_count <= m_max_count, ResultOutOfRangeKernel);
// Update available count.
const s32 previous_count = m_available_count;
m_available_count += release_count;
// Wakeup waiting threads and return.
this->WakeupAllWaitingThreads();
*out_count = previous_count;
return ResultSuccess;
}
// Boost.Serialization support.
// NOTE(review): m_name and m_owner are not archived here — confirm they are
// either unneeded after load or restored elsewhere.
template <class Archive>
void KSemaphore::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KSynchronizationObject>(*this);
ar& m_max_count;
ar& m_available_count;
}
SERIALIZE_IMPL(KSemaphore)
} // namespace Kernel

View File

@ -0,0 +1,67 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/global.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class ResourceLimit;
// Kernel counting semaphore, optionally owned by a process whose Semaphore
// resource-limit slot it consumes until destruction.
class KSemaphore final
: public KAutoObjectWithSlabHeapAndContainer<KSemaphore, KSynchronizationObject> {
KERNEL_AUTOOBJECT_TRAITS(KSemaphore, KSynchronizationObject);
public:
explicit KSemaphore(KernelSystem& kernel);
~KSemaphore() override;
// Sets initial/max counts and takes a reference on the owner process.
void Initialize(Process* owner, s32 initial_count, s32 max_count, std::string name);
// The owner pointer is forwarded to PostDestroy() for resource release.
uintptr_t GetPostDestroyArgument() const override {
return reinterpret_cast<uintptr_t>(m_owner);
}
static void PostDestroy(uintptr_t arg);
Process* GetOwner() const override {
return m_owner;
}
bool ShouldWait(const KThread* thread) const override;
void Acquire(KThread* thread) override;
s32 GetAvailableCount() const {
return m_available_count;
}
s32 GetMaxCount() const {
return m_max_count;
}
// Releases release_count units; writes the previous count to out_count.
Result Release(s32* out_count, s32 release_count);
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
private:
Process* m_owner{};
s32 m_max_count{};
s32 m_available_count{};
std::string m_name;
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KSemaphore)
CONSTRUCT_KERNEL_OBJECT(Kernel::KSemaphore)

View File

@ -0,0 +1,68 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::KServerPort)
namespace Kernel {
KServerPort::KServerPort(KernelSystem& kernel) : KSynchronizationObject(kernel) {}
KServerPort::~KServerPort() = default;
// Binds this server endpoint to its parent port and derives its debug name.
void KServerPort::Initialize(KPort* parent, std::string name) {
m_parent = parent;
m_name = name + "_Server";
}
void KServerPort::Destroy() {
// Close our reference to our parent.
m_parent->Close();
}
// Pops and returns a pending session, or nullptr if none are queued.
// Sessions are taken LIFO from the back of the vector.
KServerSession* KServerPort::AcceptSession() {
if (m_pending_sessions.empty()) {
return nullptr;
}
KServerSession* session = m_pending_sessions.back();
m_pending_sessions.pop_back();
return session;
}
// Queues a newly created server session until it is claimed via AcceptSession().
void KServerPort::EnqueueSession(KServerSession* session) {
    // Add the session to our queue.
    m_pending_sessions.push_back(session);
}

// A waiting thread blocks until at least one pending session is available.
bool KServerPort::ShouldWait(const KThread* thread) const {
    // If there are no pending sessions, we wait until a new one is added.
    // (empty() instead of size() == 0 — clearer and never O(n).)
    return m_pending_sessions.empty();
}
// Ports carry no per-acquire state; just sanity-check that one is available.
void KServerPort::Acquire(KThread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
}
// Boost.Serialization support.
// NOTE(review): m_parent and m_hle_handler are not archived (handler line is
// commented out) — confirm both are re-established after load.
template <class Archive>
void KServerPort::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KSynchronizationObject>(*this);
ar& m_name;
ar& m_pending_sessions;
// ar& m_hle_handler;
}
SERIALIZE_IMPL(KServerPort)
} // namespace Kernel

View File

@ -0,0 +1,61 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <boost/serialization/export.hpp>
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_synchronization_object.h"
namespace Kernel {
class KClientPort;
class KServerSession;
class KPort;
class SessionRequestHandler;
// Server-side endpoint of a kernel port. Queues incoming sessions until they
// are accepted, and may route requests to an HLE handler instead of an LLE
// server process.
class KServerPort final : public KSynchronizationObject {
KERNEL_AUTOOBJECT_TRAITS(KServerPort, KSynchronizationObject);
public:
explicit KServerPort(KernelSystem& kernel);
~KServerPort() override;
// Binds this endpoint to its parent port; the name gains a "_Server" suffix.
void Initialize(KPort* parent, std::string name);
void Destroy() override;
// Queues a session awaiting acceptance.
void EnqueueSession(KServerSession* session);
// Pops a pending session, or returns nullptr if none are queued.
KServerSession* AcceptSession();
bool ShouldWait(const KThread* thread) const override;
void Acquire(KThread* thread) override;
void SetHleHandler(std::shared_ptr<SessionRequestHandler> hle_handler_) {
m_hle_handler = std::move(hle_handler_);
}
std::shared_ptr<SessionRequestHandler> GetHleHandler() {
return m_hle_handler;
}
private:
KPort* m_parent{};
std::string m_name;
std::vector<KServerSession*> m_pending_sessions;
std::shared_ptr<SessionRequestHandler> m_hle_handler;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int file_version);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KServerPort)
CONSTRUCT_KERNEL_OBJECT(Kernel::KServerPort)

View File

@ -0,0 +1,144 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::KServerSession)
namespace Kernel {
KServerSession::KServerSession(KernelSystem& kernel) : KSynchronizationObject(kernel) {}
KServerSession::~KServerSession() = default;
// Notifies the parent KSession that the server endpoint is gone and drops the
// reference held on it.
void KServerSession::Destroy() {
m_parent->OnServerClosed();
m_parent->Close();
}
// Whether a server thread in svcReplyAndReceive must block on this session.
bool KServerSession::ShouldWait(const KThread* thread) const {
// Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
const auto state = m_parent->GetState();
if (state != KSessionState::Normal) {
return false;
}
// Wait if we have no pending requests, or if we're currently handling a request.
return pending_requesting_threads.empty() || currently_handling != nullptr;
}
// Begins handling the next pending request when a waiting server thread wakes.
void KServerSession::Acquire(KThread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
// If the client endpoint was closed, don't do anything. This KServerSession is now useless and
// will linger until its last handle is closed by the running application.
const auto state = m_parent->GetState();
if (state != KSessionState::Normal) {
return;
}
// We are now handling a request, pop it from the stack.
ASSERT(!pending_requesting_threads.empty());
currently_handling = pending_requesting_threads.back();
pending_requesting_threads.pop_back();
}
// Tears down pending-request state when the paired client endpoint closes.
void KServerSession::OnClientClosed() {
// Notify HLE handler that client session has been disconnected.
if (hle_handler) {
hle_handler->ClientDisconnected(this);
}
// Clean up the list of client threads with pending requests, they are unneeded now that the
// client endpoint is closed.
pending_requesting_threads.clear();
currently_handling = nullptr;
// Notify any threads waiting on the KServerSession that the endpoint has been closed. Note
// that this call has to happen after `Session::client` has been set to nullptr to let the
// ServerSession know that the client endpoint has been closed.
this->WakeupAllWaitingThreads();
}
// Dispatches an incoming IPC sync request from `thread` (the client thread).
// HLE-handled sessions are serviced inline with a simulated IPC delay; LLE
// sessions park the thread until the server replies via svcReplyAndReceive.
Result KServerSession::HandleSyncRequest(KThread* thread) {
// The KServerSession received a sync request, this means that there's new data available
// from its ClientSession, so wake up any threads that may be waiting on a svcReplyAndReceive or
// similar.
// If this KServerSession has an associated HLE handler, forward the request to it.
if (hle_handler != nullptr) {
// Copy the client thread's command buffer out of emulated memory.
std::array<u32_le, IPC::COMMAND_BUFFER_LENGTH + 2 * IPC::MAX_STATIC_BUFFERS> cmd_buf;
auto current_process = thread->GetOwner();
ASSERT(current_process);
m_kernel.memory.ReadBlock(*current_process, thread->GetCommandBufferAddress(),
cmd_buf.data(), cmd_buf.size() * sizeof(u32));
auto context = std::make_shared<Kernel::HLERequestContext>(m_kernel, this, thread);
context->PopulateFromIncomingCommandBuffer(cmd_buf.data(), current_process);
hle_handler->HandleSyncRequest(*context);
ASSERT(thread->m_status == Kernel::ThreadStatus::Running ||
thread->m_status == Kernel::ThreadStatus::WaitHleEvent);
// Only write the response immediately if the thread is still running. If the HLE handler
// put the thread to sleep then the writing of the command buffer will be deferred to the
// wakeup callback.
if (thread->m_status == Kernel::ThreadStatus::Running) {
context->WriteToOutgoingCommandBuffer(cmd_buf.data(), *current_process);
m_kernel.memory.WriteBlock(*current_process, thread->GetCommandBufferAddress(),
cmd_buf.data(), cmd_buf.size() * sizeof(u32));
}
}
if (thread->m_status == ThreadStatus::Running) {
// Put the thread to sleep until the server replies, it will be awoken in
// svcReplyAndReceive for LLE servers.
thread->m_status = ThreadStatus::WaitIPC;
if (hle_handler != nullptr) {
// For HLE services, we put the request threads to sleep for a short duration to
// simulate IPC overhead, but only if the HLE handler didn't put the thread to sleep for
// other reasons like an async callback. The IPC overhead is needed to prevent
// starvation when a thread only does sync requests to HLE services while a
// lower-priority thread is waiting to run.
// This delay was approximated in a homebrew application by measuring the average time
// it takes for svcSendSyncRequest to return when performing the SetLcdForceBlack IPC
// request to the GSP:GPU service in a n3DS with firmware 11.6. The measured values have
// a high variance and vary between models.
static constexpr u64 IPCDelayNanoseconds = 39000;
thread->WakeAfterDelay(IPCDelayNanoseconds);
} else {
// Add the thread to the list of threads that have issued a sync request with this
// server.
pending_requesting_threads.push_back(std::move(thread));
}
}
// If this KServerSession does not have an HLE implementation,
// just wake up the threads waiting on it.
this->WakeupAllWaitingThreads();
return ResultSuccess;
}
// Boost.Serialization support.
// NOTE(review): m_parent is deliberately skipped (commented out) — confirm
// the parent link is reconnected when the session pair is rebuilt on load.
template <class Archive>
void KServerSession::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KSynchronizationObject>(*this);
ar& m_name;
// ar& m_parent;
ar& hle_handler;
ar& pending_requesting_threads;
ar& currently_handling;
ar& mapped_buffer_context;
}
SERIALIZE_IMPL(KServerSession)
} // namespace Kernel

View File

@ -0,0 +1,80 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/ipc.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/result.h"
namespace Kernel {
class ClientSession;
class ClientPort;
class KSession;
class SessionRequestHandler;
class KThread;
// Server-side endpoint of a KSession. Receives sync requests from the paired
// client session and either forwards them to an HLE handler or parks the
// requesting thread until an LLE server replies.
class KServerSession final : public KSynchronizationObject {
    KERNEL_AUTOOBJECT_TRAITS(KServerSession, KSynchronizationObject);

public:
    ~KServerSession() override;
    explicit KServerSession(KernelSystem& kernel);

    void Destroy() override;

    void Initialize(KSession* parent) {
        m_parent = parent;
    }

    KSession* GetParent() const {
        return m_parent;
    }

    /// Thread whose request is currently being handled, if any.
    KThread* GetCurrent() {
        return currently_handling;
    }

    std::vector<MappedBufferContext>& GetMappedBufferContext() {
        return mapped_buffer_context;
    }

    void SetHleHandler(std::shared_ptr<SessionRequestHandler>&& hle_handler_) {
        hle_handler = std::move(hle_handler_);
    }

    std::shared_ptr<SessionRequestHandler>& GetHleHandler() {
        return hle_handler;
    }

    /// Called when the paired client endpoint is closed.
    void OnClientClosed();

    /// Dispatches an incoming sync request from the given client thread.
    Result HandleSyncRequest(KThread* thread);

    bool ShouldWait(const KThread* thread) const override;
    void Acquire(KThread* thread) override;

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version);

public:
    std::string m_name;
    KSession* m_parent{};
    std::shared_ptr<SessionRequestHandler> hle_handler;
    std::vector<KThread*> pending_requesting_threads;
    // Value-initialized: previously left indeterminate, unlike every other
    // pointer member, which risked UB in GetCurrent()/serialize before the
    // first request was accepted.
    KThread* currently_handling{};
    std::vector<MappedBufferContext> mapped_buffer_context;
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KServerSession)
CONSTRUCT_KERNEL_OBJECT(Kernel::KServerSession)

View File

@ -0,0 +1,63 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
namespace Kernel {
KSession::KSession(KernelSystem& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel}, m_server{kernel}, m_client{kernel} {}
KSession::~KSession() = default;
// Creates the client/server sub-sessions and links this session to the port
// it was connected through (client_port may be null).
void KSession::Initialize(KClientPort* client_port) {
// Increment reference count.
// Because reference count is one on creation, this will result
// in a reference count of two. Thus, when both server and client are closed
// this object will be destroyed.
this->Open();
// Create our sub sessions.
KAutoObject::Create(std::addressof(m_server));
KAutoObject::Create(std::addressof(m_client));
// Initialize our sub sessions.
m_state = KSessionState::Normal;
m_server.Initialize(this);
m_client.Initialize(this);
// Set our port.
m_port = client_port;
if (m_port != nullptr) {
m_port->Open();
}
// Mark initialized.
m_initialized = true;
}
// Informs the owning port that the connection is gone and drops our reference.
void KSession::Finalize() {
if (m_port != nullptr) {
m_port->ConnectionClosed();
m_port->Close();
}
}
// Transitions Normal -> ServerClosed and notifies the client endpoint.
void KSession::OnServerClosed() {
if (m_state == KSessionState::Normal) {
m_state = KSessionState::ServerClosed;
m_client.OnServerClosed();
}
}
// Transitions Normal -> ClientClosed and notifies the server endpoint.
void KSession::OnClientClosed() {
if (m_state == KSessionState::Normal) {
m_state = KSessionState::ClientClosed;
m_server.OnClientClosed();
}
}
} // namespace Kernel

View File

@ -0,0 +1,76 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
class KClientPort;
// Lifetime state of a session pair: starts Normal after Initialize() and
// moves to ClientClosed/ServerClosed when one endpoint is destroyed.
enum class KSessionState : u8 {
Invalid = 0,
Normal = 1,
ClientClosed = 2,
ServerClosed = 3,
};
// Owns a paired client/server session endpoint created when a client connects
// to a port. Destroyed once both endpoints have been closed.
class KSession final : public KAutoObjectWithSlabHeapAndContainer<KSession> {
KERNEL_AUTOOBJECT_TRAITS(KSession, KAutoObject);
public:
explicit KSession(KernelSystem& kernel);
~KSession() override;
// Creates and links the client/server sub-sessions; `port` may be null.
void Initialize(KClientPort* port);
void Finalize() override;
bool IsInitialized() const override {
return m_initialized;
}
static void PostDestroy(uintptr_t arg) {}
void OnServerClosed();
void OnClientClosed();
KSessionState GetState() const {
return m_state;
}
KClientSession& GetClientSession() {
return m_client;
}
KServerSession& GetServerSession() {
return m_server;
}
const KClientSession& GetClientSession() const {
return m_client;
}
const KServerSession& GetServerSession() const {
return m_server;
}
KClientPort* GetParent() {
return m_port;
}
private:
KServerSession m_server;
KClientSession m_client;
KClientPort* m_port{};
KSessionState m_state{};
bool m_initialized{};
};
} // namespace Kernel

View File

@ -0,0 +1,238 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include "common/archives.h"
#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/memory.h"
#include "core/memory.h"
SERIALIZE_EXPORT_IMPL(Kernel::KSharedMemory)
namespace Kernel {
KSharedMemory::KSharedMemory(KernelSystem& kernel) : KAutoObjectWithSlabHeapAndContainer(kernel) {}
KSharedMemory::~KSharedMemory() = default;
// Creates the shared-memory block. With address == 0 the backing storage is
// allocated from the linear heap of `region`; otherwise the memory already
// mapped at `address` in the owner process is locked and reused.
Result KSharedMemory::Initialize(Process* owner, u32 size, MemoryPermission permissions,
MemoryPermission other_permissions, VAddr address,
MemoryRegion region) {
// Open a reference to our owner process.
if (owner) {
owner->Open();
m_owner = owner;
}
// Set member variables.
m_base_address = address;
m_size = size;
m_permissions = permissions;
m_other_permissions = other_permissions;
// Allocate the shared memory block.
if (address == 0) {
// We need to allocate a block from the Linear Heap ourselves.
// We'll manually allocate some memory from the linear heap in the specified region.
auto memory_region = m_kernel.GetMemoryRegion(region);
auto offset = memory_region->LinearAllocate(size);
ASSERT_MSG(offset, "Not enough space in region to allocate shared memory!");
// Store the backing blocks of allocated memory.
auto& memory = m_kernel.memory;
std::fill(memory.GetFCRAMPointer(*offset), memory.GetFCRAMPointer(*offset + size), 0);
m_backing_blocks = {{memory.GetFCRAMRef(*offset), size}};
m_holding_memory += MemoryRegionInfo::Interval(*offset, *offset + size);
m_linear_heap_phys_offset = *offset;
// Increase the amount of used linear heap memory for the owner process.
if (m_owner) {
m_owner->memory_used += size;
}
} else {
// The memory is already available and mapped in the owner process.
ASSERT(m_owner);
auto& vm_manager = m_owner->vm_manager;
R_TRY(vm_manager.ChangeMemoryState(address, size, MemoryState::Private,
VMAPermission::ReadWrite, MemoryState::Locked,
KSharedMemory::ConvertPermissions(permissions)));
// Should succeed after verifying memory state above.
auto backing_blocks = vm_manager.GetBackingBlocksForRange(address, size);
ASSERT(backing_blocks.Succeeded());
m_backing_blocks = std::move(backing_blocks).Unwrap();
}
return ResultSuccess;
}
// Creates an applet shared-memory block backed by (possibly discontinuous)
// SYSTEM-region heap allocations, mapped at HEAP_VADDR + offset.
void KSharedMemory::InitializeForApplet(u32 offset, u32 size, MemoryPermission permissions,
MemoryPermission other_permissions) {
// Allocate memory in heap
auto memory_region = m_kernel.GetMemoryRegion(MemoryRegion::SYSTEM);
auto backing_blocks = memory_region->HeapAllocate(size);
ASSERT_MSG(!backing_blocks.empty(), "Not enough space in region to allocate shared memory!");
// Set member variables
m_holding_memory = backing_blocks;
m_base_address = Memory::HEAP_VADDR + offset;
m_size = size;
m_permissions = permissions;
m_other_permissions = other_permissions;
// Initialize backing blocks
auto& memory = m_kernel.memory;
for (const auto& interval : backing_blocks) {
const VAddr addr = interval.lower();
const VAddr end = interval.upper();
m_backing_blocks.emplace_back(memory.GetFCRAMRef(addr), end - addr);
std::fill(memory.GetFCRAMPointer(addr), memory.GetFCRAMPointer(end), 0);
}
}
// Returns held memory to the region allocator and undoes the owner-side
// bookkeeping performed in Initialize()/InitializeForApplet().
// NOTE(review): intervals are always freed back into the SYSTEM region, but
// Initialize() can allocate m_holding_memory from a caller-chosen region —
// confirm this asymmetry is intentional.
void KSharedMemory::Finalize() {
auto memory_region = m_kernel.GetMemoryRegion(MemoryRegion::SYSTEM);
for (const auto& interval : m_holding_memory) {
memory_region->Free(interval.lower(), interval.upper() - interval.lower());
}
if (m_owner) {
if (m_base_address != 0) {
m_owner->vm_manager.ChangeMemoryState(m_base_address, m_size, MemoryState::Locked,
VMAPermission::None, MemoryState::Private,
VMAPermission::ReadWrite);
} else {
m_owner->memory_used -= m_size;
}
}
}
// Runs after the last reference is dropped: returns the SharedMemory resource
// to the owner's limit and closes the reference taken in Initialize().
void KSharedMemory::PostDestroy(uintptr_t arg) {
Process* owner = reinterpret_cast<Process*>(arg);
if (owner != nullptr) {
owner->ReleaseResource(ResourceLimitType::SharedMemory, 1);
owner->Close();
}
}
// Maps this block into `target_process` at `address` (0 = automatic placement
// in the linear heap for creator-allocated blocks), validating the requested
// permissions against what the creator allows.
Result KSharedMemory::Map(Process& target_process, VAddr address, MemoryPermission permissions,
MemoryPermission other_permissions) {
// The relevant permission set depends on whether the mapper is the owner.
const MemoryPermission own_other_permissions =
&target_process == m_owner ? m_permissions : m_other_permissions;
// Automatically allocated memory blocks can only be mapped with other_permissions = DontCare
R_UNLESS(m_base_address != 0 || other_permissions == MemoryPermission::DontCare,
ResultInvalidCombination);
// Heap-backed memory blocks can not be mapped with other_permissions = DontCare
R_UNLESS(m_base_address == 0 || other_permissions != MemoryPermission::DontCare,
ResultInvalidCombination);
// Error out if the requested permissions don't match what the creator process allows.
if (static_cast<u32>(permissions) & ~static_cast<u32>(own_other_permissions)) {
LOG_ERROR(Kernel, "cannot map address={:#08X}, permissions don't match", address);
return ResultInvalidCombination;
}
// Error out if the provided permissions are not compatible with what the creator process needs.
if (other_permissions != MemoryPermission::DontCare &&
static_cast<u32>(m_permissions) & ~static_cast<u32>(other_permissions)) {
LOG_ERROR(Kernel, "cannot map address={:#08X}, permissions don't match", address);
return ResultWrongPermission;
}
// TODO(Subv): Check for the Shared Device Mem flag in the creator process.
/*if (was_created_with_shared_device_mem && address != 0) {
return Result(ErrorDescription::InvalidCombination, ErrorModule::OS,
ErrorSummary::InvalidArgument, ErrorLevel::Usage);
}*/
// TODO(Subv): The same process that created a SharedMemory object
// can not map it in its own address space unless it was created with addr=0, result 0xD900182C.
if (address != 0) {
if (address < Memory::HEAP_VADDR || address + m_size >= Memory::SHARED_MEMORY_VADDR_END) {
LOG_ERROR(Kernel, "cannot map address={:#08X}, invalid address", address);
return ResultInvalidAddress;
}
}
VAddr target_address = address;
if (m_base_address == 0 && target_address == 0) {
// Calculate the address at which to map the memory block.
// Note: even on new firmware versions, the target address is still in the old linear heap
// region. This exception is made to keep the shared font compatibility. See
// APT:GetSharedFont for detail.
target_address = m_linear_heap_phys_offset + Memory::LINEAR_HEAP_VADDR;
}
{
// Reject mapping over anything that is not a large-enough free VMA.
auto vma = target_process.vm_manager.FindVMA(target_address);
if (vma->second.type != VMAType::Free ||
vma->second.base + vma->second.size < target_address + m_size) {
LOG_ERROR(Kernel, "cannot map address={:#08X}, mapping to already allocated memory",
address);
return ResultInvalidAddressState;
}
}
// Map the memory block into the target process
VAddr interval_target = target_address;
for (const auto& interval : m_backing_blocks) {
auto vma = target_process.vm_manager.MapBackingMemory(interval_target, interval.first,
interval.second, MemoryState::Shared);
ASSERT(vma.Succeeded());
target_process.vm_manager.Reprotect(vma.Unwrap(), ConvertPermissions(permissions));
interval_target += interval.second;
}
return ResultSuccess;
}
// Unmaps this block's full size from the target process at `address`.
Result KSharedMemory::Unmap(Process& target_process, VAddr address) {
// TODO(Subv): Verify what happens if the application tries to unmap an address that is not
// mapped to a SharedMemory.
return target_process.vm_manager.UnmapRange(address, m_size);
}
// Converts MemoryPermission flags into the equivalent VMAPermission bits,
// masking off everything outside Read/Write/Execute.
VMAPermission KSharedMemory::ConvertPermissions(MemoryPermission permission) {
    u32 masked_permissions =
        static_cast<u32>(permission) & static_cast<u32>(MemoryPermission::ReadWriteExecute);
    return static_cast<VMAPermission>(masked_permissions);
} // (stray ';' after the function body removed)
// Returns a host pointer into the first backing block at `offset`.
// NOTE(review): assumes offset lies within the first block; discontinuous
// backing only logs a warning — confirm callers guarantee contiguity.
u8* KSharedMemory::GetPointer(u32 offset) {
if (m_backing_blocks.size() != 1) {
LOG_WARNING(Kernel, "Unsafe GetPointer on discontinuous SharedMemory");
}
return m_backing_blocks[0].first + offset;
}
// Const overload; same caveats as above.
const u8* KSharedMemory::GetPointer(u32 offset) const {
if (m_backing_blocks.size() != 1) {
LOG_WARNING(Kernel, "Unsafe GetPointer on discontinuous SharedMemory");
}
return m_backing_blocks[0].first + offset;
}
// Boost.Serialization support. Backing-block host pointers are not archived
// (commented out); NOTE(review): confirm m_backing_blocks is rebuilt from
// m_linear_heap_phys_offset / m_holding_memory after load.
template <class Archive>
void KSharedMemory::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KAutoObject>(*this);
ar& m_linear_heap_phys_offset;
// ar& m_backing_blocks;
ar& m_size;
ar& m_permissions;
ar& m_other_permissions;
ar& m_owner;
ar& m_base_address;
ar& m_holding_memory;
}
SERIALIZE_IMPL(KSharedMemory)
} // namespace Kernel

View File

@ -1,50 +1,54 @@
// Copyright 2014 Citra Emulator Project
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include <utility>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "common/memory_ref.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/global.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class SharedMemory final : public Object {
enum class VMAPermission : u8;
class KSharedMemory final : public KAutoObjectWithSlabHeapAndContainer<KSharedMemory> {
KERNEL_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject);
public:
explicit SharedMemory(KernelSystem& kernel);
~SharedMemory() override;
explicit KSharedMemory(KernelSystem& kernel);
~KSharedMemory() override;
std::string GetTypeName() const override {
return "SharedMemory";
}
std::string GetName() const override {
return name;
}
void SetName(std::string name_) {
name = std::move(name_);
Result Initialize(Process* owner, u32 size, MemoryPermission permissions,
MemoryPermission other_permissions, VAddr address, MemoryRegion region);
void InitializeForApplet(u32 offset, u32 size, MemoryPermission permissions,
MemoryPermission other_permissions);
void Finalize() override;
uintptr_t GetPostDestroyArgument() const override {
return reinterpret_cast<uintptr_t>(m_owner);
}
static constexpr HandleType HANDLE_TYPE = HandleType::SharedMemory;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
static void PostDestroy(uintptr_t arg);
Process* GetOwner() const override {
return m_owner;
}
/// Gets the size of the underlying memory block in bytes.
u64 GetSize() const {
return size;
return m_size;
}
/// Gets the linear heap physical offset
u64 GetLinearHeapPhysicalOffset() const {
return linear_heap_phys_offset;
return m_linear_heap_phys_offset;
}
void SetName(std::string&& name_) {}
/**
* Converts the specified MemoryPermission into the equivalent VMAPermission.
* @param permission The MemoryPermission to convert.
@ -84,37 +88,22 @@ public:
const u8* GetPointer(u32 offset = 0) const;
private:
/// Offset in FCRAM of the shared memory block in the linear heap if no address was specified
/// during creation.
PAddr linear_heap_phys_offset = 0;
/// Backing memory for this shared memory block.
std::vector<std::pair<MemoryRef, u32>> backing_blocks;
/// Size of the memory block. Page-aligned.
u32 size = 0;
/// Region of memory this block exists in.
std::shared_ptr<MemoryRegionInfo> memory_region = nullptr;
/// Permission restrictions applied to the process which created the block.
MemoryPermission permissions{};
/// Permission restrictions applied to other processes mapping the block.
MemoryPermission other_permissions{};
/// Process that created this shared memory block.
std::weak_ptr<Process> owner_process;
/// Address of shared memory block in the owner process if specified.
VAddr base_address = 0;
/// Name of shared memory object.
std::string name;
MemoryRegionInfo::IntervalSet holding_memory;
friend class KernelSystem;
KernelSystem& kernel;
template <class Archive>
void serialize(Archive& ar, const unsigned int);
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
private:
Process* m_owner{};
PAddr m_linear_heap_phys_offset{};
VAddr m_base_address{};
u32 m_size{};
MemoryPermission m_permissions{};
MemoryPermission m_other_permissions{};
std::vector<std::pair<MemoryRef, u32>> m_backing_blocks;
MemoryRegionInfo::IntervalSet m_holding_memory;
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::SharedMemory)
CONSTRUCT_KERNEL_OBJECT(Kernel::SharedMemory)
BOOST_CLASS_EXPORT_KEY(Kernel::KSharedMemory)
CONSTRUCT_KERNEL_OBJECT(Kernel::KSharedMemory)

View File

@ -0,0 +1,234 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#pragma clang optimize off
#include <atomic>
#include "common/assert.h"
#include "common/atomic_ops.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/kernel.h"
namespace Kernel {
namespace impl {
// Intrusive free-list allocator backing KSlabHeapBase. Freed slots are linked through
// their own storage (Node), so the free list needs no side allocations.
class KSlabHeapImpl {
    CITRA_NON_COPYABLE(KSlabHeapImpl);
    CITRA_NON_MOVEABLE(KSlabHeapImpl);

public:
    // Free-list node placed inside a freed object's memory.
    struct Node {
        Node* next{};
    };

public:
    constexpr KSlabHeapImpl() = default;

    void Initialize() {
        // Guard against double initialization; the free list must start empty.
        ASSERT(m_head == nullptr);
    }

    Node* GetHead() const {
        return m_head;
    }

    // Pops the head of the free list; returns nullptr when the heap is exhausted.
    // NOTE(review): this load/store pair is not an atomic read-modify-write, so despite
    // m_head being std::atomic this is not safe for concurrent Allocate/Free —
    // presumably callers serialize access; confirm.
    void* Allocate() {
        Node* ret = m_head;

        if (ret != nullptr) [[likely]] {
            m_head = ret->next;
        }

        return ret;
    }

    // Pushes a slot back onto the free list (same non-atomic caveat as Allocate).
    void Free(void* obj) {
        Node* node = static_cast<Node*>(obj);
        node->next = m_head;
        m_head = node;
    }

private:
    std::atomic<Node*> m_head{};
};
} // namespace impl
// Untyped slab heap: carves a caller-provided memory region into fixed-size slots and
// hands them out via the intrusive free list in KSlabHeapImpl.
class KSlabHeapBase : protected impl::KSlabHeapImpl {
    CITRA_NON_COPYABLE(KSlabHeapBase);
    CITRA_NON_MOVEABLE(KSlabHeapBase);

private:
    size_t m_obj_size{};  // Size of one slot in bytes.
    uintptr_t m_peak{};   // Highest address ever handed out (see NOTE below).
    uintptr_t m_start{};  // First byte of the managed region.
    uintptr_t m_end{};    // One past the last usable slot.

private:
    // CAS loop that raises m_peak to cover a newly allocated object.
    // NOTE(review): nothing in this class calls UpdatePeakImpl (Allocate does not), so
    // m_peak never advances past m_start and GetPeakIndex() always reports 0 — confirm
    // whether peak tracking is intentionally disabled in this build.
    void UpdatePeakImpl(uintptr_t obj) {
        const uintptr_t alloc_peak = obj + this->GetObjectSize();
        uintptr_t cur_peak = m_peak;
        do {
            if (alloc_peak <= cur_peak) {
                break;
            }
        } while (
            !Common::AtomicCompareAndSwap(std::addressof(m_peak), alloc_peak, cur_peak, cur_peak));
    }

public:
    constexpr KSlabHeapBase() = default;

    // True if the address lies inside the managed region (used to validate frees).
    bool Contains(uintptr_t address) const {
        return m_start <= address && address < m_end;
    }

    void Initialize(size_t obj_size, void* memory, size_t memory_size) {
        // Ensure we don't initialize a slab using null memory.
        ASSERT(memory != nullptr);

        // Set our object size.
        m_obj_size = obj_size;

        // Initialize the base allocator.
        KSlabHeapImpl::Initialize();

        // Set our tracking variables.
        const size_t num_obj = (memory_size / obj_size);
        m_start = reinterpret_cast<uintptr_t>(memory);
        m_end = m_start + num_obj * obj_size;
        m_peak = m_start;

        // Free the objects.
        // Walking backwards means the free list ends up ordered lowest-address-first.
        u8* cur = reinterpret_cast<u8*>(m_end);

        for (size_t i = 0; i < num_obj; i++) {
            cur -= obj_size;
            KSlabHeapImpl::Free(cur);
        }
    }

    // Total number of slots the region was carved into.
    size_t GetSlabHeapSize() const {
        return (m_end - m_start) / this->GetObjectSize();
    }

    size_t GetObjectSize() const {
        return m_obj_size;
    }

    // Returns raw storage for one slot, or nullptr when exhausted.
    void* Allocate() {
        void* obj = KSlabHeapImpl::Allocate();
        return obj;
    }

    void Free(void* obj) {
        // Don't allow freeing an object that wasn't allocated from this heap.
        const bool contained = this->Contains(reinterpret_cast<uintptr_t>(obj));
        ASSERT(contained);
        KSlabHeapImpl::Free(obj);
    }

    // Slot index of an object within the region.
    size_t GetObjectIndex(const void* obj) const {
        return (reinterpret_cast<uintptr_t>(obj) - m_start) / this->GetObjectSize();
    }

    size_t GetPeakIndex() const {
        return this->GetObjectIndex(reinterpret_cast<const void*>(m_peak));
    }

    uintptr_t GetSlabHeapAddress() const {
        return m_start;
    }

    size_t GetNumRemaining() const {
        // Only calculate the number of remaining objects under debug configuration.
        // (Always 0 here — this build does not track remaining slots.)
        return 0;
    }
};
template <typename T>
class KSlabHeap final : public KSlabHeapBase {
private:
using BaseHeap = KSlabHeapBase;
public:
constexpr KSlabHeap() = default;
void Initialize(void* memory, size_t memory_size) {
BaseHeap::Initialize(sizeof(T), memory, memory_size);
}
T* Allocate() {
T* obj = static_cast<T*>(BaseHeap::Allocate());
if (obj != nullptr) [[likely]] {
std::construct_at(obj);
}
return obj;
}
T* Allocate(KernelSystem& kernel) {
T* obj = static_cast<T*>(BaseHeap::Allocate());
if (obj != nullptr) [[likely]] {
std::construct_at(obj, kernel);
}
return obj;
}
void Free(T* obj) {
BaseHeap::Free(obj);
}
size_t GetObjectIndex(const T* obj) const {
return BaseHeap::GetObjectIndex(obj);
}
};
/// CRTP mixin that routes a type's allocation through the kernel's per-type slab heap.
template <class Derived>
class KSlabAllocated {
public:
    constexpr KSlabAllocated() = default;

    /// Returns this object's slot index inside the kernel's slab heap for Derived.
    size_t GetSlabIndex(KernelSystem& kernel) const {
        // Fix: KSlabHeap exposes GetObjectIndex(), not GetIndex() — the original call
        // would fail to compile once instantiated.
        return kernel.SlabHeap<Derived>().GetObjectIndex(static_cast<const Derived*>(this));
    }

public:
    /// Hands the backing memory region to the kernel's slab heap for Derived.
    static void InitializeSlabHeap(KernelSystem& kernel, void* memory, size_t memory_size) {
        kernel.SlabHeap<Derived>().Initialize(memory, memory_size);
    }

    /// Allocates and constructs a Derived from the kernel's slab heap.
    static Derived* Allocate(KernelSystem& kernel) {
        return kernel.SlabHeap<Derived>().Allocate(kernel);
    }

    /// Returns a Derived's slot to the kernel's slab heap.
    static void Free(KernelSystem& kernel, Derived* obj) {
        kernel.SlabHeap<Derived>().Free(obj);
    }

    static size_t GetObjectSize(KernelSystem& kernel) {
        return kernel.SlabHeap<Derived>().GetObjectSize();
    }

    static size_t GetSlabHeapSize(KernelSystem& kernel) {
        return kernel.SlabHeap<Derived>().GetSlabHeapSize();
    }

    static size_t GetPeakIndex(KernelSystem& kernel) {
        return kernel.SlabHeap<Derived>().GetPeakIndex();
    }

    static uintptr_t GetSlabHeapAddress(KernelSystem& kernel) {
        return kernel.SlabHeap<Derived>().GetSlabHeapAddress();
    }

    static size_t GetNumRemaining(KernelSystem& kernel) {
        return kernel.SlabHeap<Derived>().GetNumRemaining();
    }
};
} // namespace Kernel

View File

@ -0,0 +1,117 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <utility>
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
namespace Kernel {
// A synchronization object only needs the kernel reference from its KAutoObject base;
// the wait list and notifier members are default-initialized.
KSynchronizationObject::KSynchronizationObject(KernelSystem& kernel) : KAutoObject(kernel) {}

KSynchronizationObject::~KSynchronizationObject() = default;
void KSynchronizationObject::AddWaitingThread(KThread* thread) {
    // Keep the wait list duplicate-free: a thread is recorded at most once even if it
    // waits on this object through several handles.
    const bool already_listed =
        std::ranges::find(waiting_threads, thread) != waiting_threads.end();
    if (!already_listed) {
        waiting_threads.push_back(thread);
    }
}
void KSynchronizationObject::RemoveWaitingThread(KThread* thread) {
    // If a thread passed multiple handles to the same object, the kernel may ask us to
    // remove it more than once, so a missing entry is not an error.
    const auto entry = std::ranges::find(waiting_threads, thread);
    if (entry == waiting_threads.end()) {
        return;
    }
    waiting_threads.erase(entry);
}
// Scans the wait list for the best-priority thread that could actually run if this
// object were acquired. Returns nullptr when no waiter is ready.
KThread* KSynchronizationObject::GetHighestPriorityReadyThread() const {
    KThread* candidate = nullptr;
    // Lower value = higher priority, so start one past the worst possible priority.
    u32 candidate_priority = ThreadPrioLowest + 1;

    for (auto* thread : waiting_threads) {
        // The list of waiting threads must not contain threads that are not waiting to be awakened.
        ASSERT_MSG(thread->GetStatus() == ThreadStatus::WaitSynchAny ||
                       thread->GetStatus() == ThreadStatus::WaitSynchAll ||
                       thread->GetStatus() == ThreadStatus::WaitHleEvent,
                   "Inconsistent thread statuses in waiting_threads");

        // Skip threads that cannot beat the current candidate, or for which this object
        // is still unavailable.
        if (thread->GetCurrentPriority() >= candidate_priority || ShouldWait(thread)) {
            continue;
        }

        // A thread is ready to run if it's either in ThreadStatus::WaitSynchAny or
        // in ThreadStatus::WaitSynchAll and the rest of the objects it is waiting on are ready.
        bool ready_to_run = true;
        if (thread->GetStatus() == ThreadStatus::WaitSynchAll) {
            ready_to_run =
                std::ranges::none_of(thread->m_wait_objects, [thread](const auto* object) {
                    return object->ShouldWait(thread);
                });
        }

        if (ready_to_run) {
            candidate = thread;
            candidate_priority = thread->GetCurrentPriority();
        }
    }

    return candidate;
}
// Wakes every waiter that can run, in priority order, acquiring the object(s) on the
// thread's behalf before resuming it. Loops until no ready waiter remains.
void KSynchronizationObject::WakeupAllWaitingThreads() {
    while (auto thread = GetHighestPriorityReadyThread()) {
        if (!thread->IsSleepingOnWaitAll()) {
            // Wait-any: acquiring this one object is enough.
            Acquire(thread);
        } else {
            // Wait-all: every object the thread waits on must be acquired together.
            for (auto& object : thread->m_wait_objects) {
                object->Acquire(thread);
            }
        }

        // Invoke the wakeup callback before clearing the wait objects
        if (thread->m_wakeup_callback) {
            thread->m_wakeup_callback->WakeUp(ThreadWakeupReason::Signal, thread, this);
        }

        // Detach the thread from every object's wait list, then resume it.
        for (auto& object : thread->m_wait_objects) {
            object->RemoveWaitingThread(thread);
        }
        thread->m_wait_objects.clear();

        thread->ResumeFromWait();
    }

    // Fire the HLE notifier (if any) once the wakeup sweep is complete.
    if (hle_notifier) {
        hle_notifier();
    }
}
// Read-only view of the wait list, for debug UIs.
const std::vector<KThread*>& KSynchronizationObject::GetWaitingThreads() const {
    return waiting_threads;
}

// Registers a callback invoked at the end of WakeupAllWaitingThreads. Per the note in
// serialize(), only the DSP HLE code uses this.
void KSynchronizationObject::SetHLENotifier(std::function<void()> callback) {
    hle_notifier = std::move(callback);
}
// Boost.Serialization hook; stores the base-object state and the raw wait list.
template <class Archive>
void KSynchronizationObject::serialize(Archive& ar, const unsigned int file_version) {
    ar& boost::serialization::base_object<KAutoObject>(*this);
    ar& waiting_threads;
    // NB: hle_notifier *not* serialized since it's a callback!
    // Fortunately it's only used in one place (DSP) so we can reconstruct it there
}
SERIALIZE_IMPL(KSynchronizationObject)
} // namespace Kernel

View File

@ -1,45 +1,48 @@
// Copyright 2014 Citra Emulator Project
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <functional>
#include <memory>
#include <span>
#include <vector>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include <boost/serialization/access.hpp>
#include <boost/serialization/export.hpp>
#include "core/hle/kernel/k_auto_object.h"
namespace Kernel {
class Thread;
class KThread;
class KSynchronizationObject : public KAutoObject {
KERNEL_AUTOOBJECT_TRAITS(KSynchronizationObject, KAutoObject);
/// Class that represents a Kernel object that a thread can be waiting on
class WaitObject : public Object {
public:
using Object::Object;
explicit KSynchronizationObject(KernelSystem& kernel);
~KSynchronizationObject();
/**
* Check if the specified thread should wait until the object is available
* @param thread The thread about which we're deciding.
* @return True if the current thread should wait due to this object being unavailable
*/
virtual bool ShouldWait(const Thread* thread) const = 0;
virtual bool ShouldWait(const KThread* thread) const = 0;
/// Acquire/lock the object for the specified thread if it is available
virtual void Acquire(Thread* thread) = 0;
virtual void Acquire(KThread* thread) = 0;
/**
* Add a thread to wait on this object
* @param thread Pointer to thread to add
*/
virtual void AddWaitingThread(std::shared_ptr<Thread> thread);
virtual void AddWaitingThread(KThread* thread);
/**
* Removes a thread from waiting on this object (e.g. if it was resumed already)
* @param thread Pointer to thread to remove
*/
virtual void RemoveWaitingThread(Thread* thread);
virtual void RemoveWaitingThread(KThread* thread);
/**
* Wake up all threads waiting on this object that can be awoken, in priority order,
@ -48,17 +51,17 @@ public:
virtual void WakeupAllWaitingThreads();
/// Obtains the highest priority thread that is ready to run from this object's waiting list.
std::shared_ptr<Thread> GetHighestPriorityReadyThread() const;
KThread* GetHighestPriorityReadyThread() const;
/// Get a const reference to the waiting threads list for debug use
const std::vector<std::shared_ptr<Thread>>& GetWaitingThreads() const;
const std::vector<KThread*>& GetWaitingThreads() const;
/// Sets a callback which is called when the object becomes available
void SetHLENotifier(std::function<void()> callback);
private:
/// Threads waiting for this object to become available
std::vector<std::shared_ptr<Thread>> waiting_threads;
std::vector<KThread*> waiting_threads;
/// Function to call when this object becomes available
std::function<void()> hle_notifier;
@ -69,15 +72,6 @@ private:
void serialize(Archive& ar, const unsigned int);
};
// Specialization of DynamicObjectCast for WaitObjects
template <>
inline std::shared_ptr<WaitObject> DynamicObjectCast<WaitObject>(std::shared_ptr<Object> object) {
if (object != nullptr && object->IsWaitable()) {
return std::static_pointer_cast<WaitObject>(object);
}
return nullptr;
}
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::WaitObject)
BOOST_CLASS_EXPORT_KEY(Kernel::KSynchronizationObject)

View File

@ -0,0 +1,452 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <boost/serialization/string.hpp>
#include <boost/serialization/unordered_map.hpp>
#include <boost/serialization/vector.hpp>
#include <boost/serialization/weak_ptr.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/serialization/boost_flat_set.h"
#include "core/arm/arm_interface.h"
#include "core/arm/skyeye_common/armstate.h"
#include "core/core_timing.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_mutex.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/result.h"
#include "core/memory.h"
SERIALIZE_EXPORT_IMPL(Kernel::KThread)
SERIALIZE_EXPORT_IMPL(Kernel::WakeupCallback)
namespace Kernel {
// Saves/restores one core's scheduler state. Field order defines the savestate layout.
template <class Archive>
void ThreadManager::serialize(Archive& ar, const unsigned int) {
    ar& current_thread;
    ar& ready_queue;
    ar& wakeup_callback_table;
    ar& thread_list;
}
SERIALIZE_IMPL(ThreadManager)

// WakeupCallback carries no data; it is serialized only so derived callbacks can be
// restored through a base pointer.
template <class Archive>
void WakeupCallback::serialize(Archive& ar, const unsigned int) {}
SERIALIZE_IMPL(WakeupCallback)
// Registers this core's thread-wakeup timing event; the event's userdata is the id of
// the thread to wake.
ThreadManager::ThreadManager(Kernel::KernelSystem& kernel, u32 core_id) : kernel(kernel) {
    thread_wakeup_event_type = kernel.timing.RegisterEvent(
        "ThreadWakeupCallback_" + std::to_string(core_id),
        [this](u64 thread_id, s64 cycle_late) { ThreadWakeupCallback(thread_id, cycle_late); });
}

// Stop every remaining thread so wait lists, mutexes and TLS slots are released before
// the manager is destroyed.
ThreadManager::~ThreadManager() {
    for (auto& t : thread_list) {
        t->Stop();
    }
}
// Saves the CPU state of the outgoing thread (if any) and loads the incoming thread's
// state. Passing nullptr idles the core without switching processes.
void ThreadManager::SwitchContext(KThread* new_thread) {
    auto& timing = kernel.timing;
    KThread* previous_thread = GetCurrentThread();
    Process* previous_process = nullptr;

    // Save context for previous thread
    if (previous_thread) {
        previous_process = previous_thread->GetOwner();
        previous_thread->m_last_running_ticks = cpu->GetTimer().GetTicks();
        cpu->SaveContext(previous_thread->m_context);

        if (previous_thread->m_status == ThreadStatus::Running) {
            // This is only the case when a reschedule is triggered without the current thread
            // yielding execution (i.e. an event triggered, system core time-sliced, etc)
            ready_queue.push_front(previous_thread->m_current_priority, previous_thread);
            previous_thread->m_status = ThreadStatus::Ready;
        }
    }

    // Load context of new thread
    if (new_thread) {
        ASSERT_MSG(new_thread->m_status == ThreadStatus::Ready,
                   "Thread must be ready to become running.");

        // Cancel any outstanding wakeup events for this thread
        timing.UnscheduleEvent(thread_wakeup_event_type, new_thread->m_thread_id);

        current_thread = new_thread;

        ready_queue.remove(new_thread->m_current_priority, new_thread);
        new_thread->m_status = ThreadStatus::Running;

        // Only switch the active process/page table when it actually changed.
        ASSERT(current_thread->GetOwner());
        if (previous_process != current_thread->GetOwner()) {
            kernel.SetCurrentProcessForCPU(current_thread->GetOwner(), cpu->GetID());
        }

        cpu->LoadContext(new_thread->m_context);
        // Expose the new thread's TLS base through the CP15 user read-only register.
        cpu->SetCP15Register(CP15_THREAD_URO, new_thread->GetTLSAddress());
    } else {
        current_thread = nullptr;
        // Note: We do not reset the current process and current page table when idling because
        // technically we haven't changed processes, our threads are just paused.
    }
}
// Picks the next thread to run. If the current thread is still running, only a strictly
// better-priority thread may preempt it. Threads flagged unschedulable are temporarily
// parked and re-queued before returning.
KThread* ThreadManager::PopNextReadyThread() {
    KThread* next = nullptr;
    KThread* thread = GetCurrentThread();

    if (thread && thread->m_status == ThreadStatus::Running) {
        do {
            // We have to do better than the current thread.
            // This call returns null when that's not possible.
            next = ready_queue.pop_first_better(thread->m_current_priority);
            if (!next) {
                // Otherwise just keep going with the current thread
                next = thread;
                break;
            } else if (!next->m_can_schedule)
                unscheduled_ready_queue.push_back(next);
        } while (!next->m_can_schedule);
    } else {
        do {
            next = ready_queue.pop_first();
            if (next && !next->m_can_schedule)
                unscheduled_ready_queue.push_back(next);
        } while (next && !next->m_can_schedule);
    }

    // Put any threads we skipped over back into the ready queue.
    while (!unscheduled_ready_queue.empty()) {
        auto t = std::move(unscheduled_ready_queue.back());
        ready_queue.push_back(t->m_current_priority, t);
        unscheduled_ready_queue.pop_back();
    }

    return next;
}
// Marks the running thread as sleeping; the caller is expected to trigger a reschedule.
void ThreadManager::WaitCurrentThread_Sleep() {
    KThread* thread = GetCurrentThread();
    thread->m_status = ThreadStatus::WaitSleep;
}

// Stops the current thread, removes it from this manager's list, and requests a
// reschedule so another thread gets picked.
void ThreadManager::ExitCurrentThread() {
    current_thread->Stop();
    std::erase(thread_list, current_thread);
    kernel.PrepareReschedule();
}
// Stops every thread owned by the given process. The current thread (if it belongs to
// the process) is killed last so the manager state stays consistent during the sweep.
void ThreadManager::TerminateProcessThreads(Process* process) {
    auto iter = thread_list.begin();
    while (iter != thread_list.end()) {
        auto& thread = *iter;
        // Skip threads of other processes; defer the current thread to the end.
        if (thread == current_thread || thread->GetOwner() != process) {
            iter++;
            continue;
        }

        if (thread->m_status != ThreadStatus::WaitSynchAny &&
            thread->m_status != ThreadStatus::WaitSynchAll) {
            // TODO: How does the real kernel handle non-waiting threads?
            LOG_WARNING(Kernel, "Terminating non-waiting thread {}", thread->m_thread_id);
        }

        thread->Stop();
        iter = thread_list.erase(iter);
    }

    // Kill the current thread last, if applicable.
    if (current_thread != nullptr && current_thread->GetOwner() == process) {
        ExitCurrentThread();
    }
}
// Timing-event handler that wakes the thread registered under thread_id, after first
// detaching it from everything it was waiting on.
void ThreadManager::ThreadWakeupCallback(u64 thread_id, s64 cycles_late) {
    // Fix: look the id up with find() instead of at() — at() throws std::out_of_range for
    // an unknown id, so the invalid-thread guard below could never be reached.
    const auto it = wakeup_callback_table.find(thread_id);
    KThread* thread = (it != wakeup_callback_table.end()) ? it->second : nullptr;
    if (thread == nullptr) {
        LOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", thread_id);
        return;
    }

    if (thread->m_status == ThreadStatus::WaitSynchAny ||
        thread->m_status == ThreadStatus::WaitSynchAll ||
        thread->m_status == ThreadStatus::WaitArb ||
        thread->m_status == ThreadStatus::WaitHleEvent) {

        // Invoke the wakeup callback before clearing the wait objects
        if (thread->m_wakeup_callback) {
            thread->m_wakeup_callback->WakeUp(ThreadWakeupReason::Timeout, thread, nullptr);
        }

        // Remove the thread from each of its waiting objects' waitlists
        for (KSynchronizationObject* object : thread->m_wait_objects) {
            object->RemoveWaitingThread(thread);
        }
        thread->m_wait_objects.clear();
    }

    thread->ResumeFromWait();
}
bool ThreadManager::HaveReadyThreads() {
    // The queue yields a null first element when every priority level is empty.
    const auto* first_ready = ready_queue.get_first();
    return first_ready != nullptr;
}
// Selects the next ready thread and switches to it; a no-op when both the current and
// next thread are absent (idle -> idle).
void ThreadManager::Reschedule() {
    KThread* cur = GetCurrentThread();
    KThread* next = PopNextReadyThread();

    if (cur && next) {
        LOG_TRACE(Kernel, "context switch {} -> {}", cur->GetObjectId(), next->GetObjectId());
    } else if (cur) {
        LOG_TRACE(Kernel, "context switch {} -> idle", cur->GetObjectId());
    } else if (next) {
        LOG_TRACE(Kernel, "context switch idle -> {}", next->GetObjectId());
    } else {
        LOG_TRACE(Kernel, "context switch idle -> idle, do nothing");
        return;
    }

    SwitchContext(next);
}
// Construction only wires up the slab/auto-object base; real setup happens in Initialize().
KThread::KThread(KernelSystem& kernel) : KAutoObjectWithSlabHeapAndContainer(kernel) {}

KThread::~KThread() = default;
// Tears a thread down: cancels pending wakeups, detaches it from the scheduler and all
// wait lists, releases held mutexes, and frees its TLS slot.
void KThread::Stop() {
    // Cancel any outstanding wakeup events for this thread
    auto& timing = m_kernel.timing;
    timing.UnscheduleEvent(m_manager->thread_wakeup_event_type, m_thread_id);
    m_manager->wakeup_callback_table.erase(m_thread_id);

    // Clean up thread from ready queue
    // This is only needed when the thread is termintated forcefully (SVC TerminateProcess)
    if (m_status == ThreadStatus::Ready) {
        m_manager->ready_queue.remove(m_current_priority, this);
    }

    // Wake all threads waiting on this thread.
    m_status = ThreadStatus::Dead;
    this->WakeupAllWaitingThreads();

    // Clean up any dangling references in objects that this thread was waiting for
    for (KSynchronizationObject* object : m_wait_objects) {
        object->RemoveWaitingThread(this);
    }
    m_wait_objects.clear();

    // Release all the mutexes that this thread holds
    ReleaseThreadMutexes(this);

    // Mark the TLS slot in the thread's page as free.
    const u32 tls_page = (m_tls_address - Memory::TLS_AREA_VADDR) / Memory::CITRA_PAGE_SIZE;
    const u32 tls_slot = ((m_tls_address - Memory::TLS_AREA_VADDR) % Memory::CITRA_PAGE_SIZE) /
                         Memory::TLS_ENTRY_SIZE;
    m_owner->tls_slots[tls_page].reset(tls_slot);
}
void KThread::WakeAfterDelay(s64 nanoseconds, bool thread_safe_mode) {
    // A timeout of -1 means "wait forever": no wakeup event is scheduled at all.
    if (nanoseconds == -1) {
        return;
    }

    // In thread-safe mode the event is pinned to this thread's core; otherwise the
    // sentinel core index lets it fire anywhere.
    const size_t event_core =
        thread_safe_mode ? m_core_id : std::numeric_limits<std::size_t>::max();
    m_kernel.timing.ScheduleEvent(nsToCycles(nanoseconds), m_manager->thread_wakeup_event_type,
                                  m_thread_id, event_core, thread_safe_mode);
}
// Transitions a waiting/dormant thread back to Ready and requests a reschedule.
// Redundant wakeups (already Ready) are tolerated; Running/Dead are programmer errors.
void KThread::ResumeFromWait() {
    ASSERT_MSG(m_wait_objects.empty(), "Thread is waking up while waiting for objects");

    switch (m_status) {
    case ThreadStatus::WaitSynchAll:
    case ThreadStatus::WaitSynchAny:
    case ThreadStatus::WaitHleEvent:
    case ThreadStatus::WaitArb:
    case ThreadStatus::WaitSleep:
    case ThreadStatus::WaitIPC:
    case ThreadStatus::Dormant:
        break;

    case ThreadStatus::Ready:
        // The thread's wakeup callback must have already been cleared when the thread was first
        // awoken.
        ASSERT(m_wakeup_callback == nullptr);
        // If the thread is waiting on multiple wait objects, it might be awoken more than once
        // before actually resuming. We can ignore subsequent wakeups if the thread status has
        // already been set to ThreadStatus::Ready.
        return;
    case ThreadStatus::Running:
        DEBUG_ASSERT_MSG(false, "Thread with object id {} has already resumed.", GetObjectId());
        return;
    case ThreadStatus::Dead:
        // This should never happen, as threads must complete before being stopped.
        DEBUG_ASSERT_MSG(false, "Thread with object id {} cannot be resumed because it's DEAD.",
                         GetObjectId());
        return;
    }

    // Mark as ready and reschedule.
    m_wakeup_callback = nullptr;
    m_manager->ready_queue.push_back(m_current_priority, this);
    m_status = ThreadStatus::Ready;
    m_kernel.PrepareReschedule();
}
/**
* Resets a thread context, making it ready to be scheduled and run by the CPU
* @param context Thread context to reset
* @param stack_top Address of the top of the stack
* @param entry_point Address of entry point for execution
* @param arg User argument for thread
*/
static void ResetThreadContext(Core::ARM_Interface::ThreadContext& context, u32 stack_top,
                               u32 entry_point, u32 arg) {
    // The user argument is passed to the entry point in r0.
    context.cpu_registers[0] = arg;
    context.SetProgramCounter(entry_point);
    context.SetStackPointer(stack_top);
    // Bit 0 of the entry point selects THUMB execution; it is shifted into the CPSR T bit.
    context.cpsr = USER32MODE | ((entry_point & 1) << 5); // Usermode and THUMB mode
    // Default FPSCR: default-NaN, flush-to-zero, round-toward-zero, inexact flag set.
    context.fpscr = FPSCR_DEFAULT_NAN | FPSCR_FLUSH_TO_ZERO | FPSCR_ROUND_TOZERO | FPSCR_IXC;
}
// Fully initializes a slab-allocated thread: validates arguments, takes a reference on
// the owner process, registers with the per-core ThreadManager, allocates TLS, resets
// the CPU context and enqueues the thread as Ready.
Result KThread::Initialize(std::string name, VAddr entry_point, u32 priority, u32 arg,
                           s32 processor_id, VAddr stack_top, Process* owner_process) {
    R_UNLESS(priority <= ThreadPrioLowest, ResultOutOfRange);
    R_UNLESS(processor_id <= ThreadProcessorIdMax, ResultOutOfRangeKernel);

    // Open a reference to our owner process
    m_owner = owner_process;
    m_owner->Open();

    // Set last running ticks.
    auto& timing = m_kernel.timing;
    m_last_running_ticks = timing.GetTimer(processor_id)->GetTicks();

    // Set member variables.
    m_thread_id = m_kernel.NewThreadId();
    m_status = ThreadStatus::Ready;
    m_entry_point = entry_point;
    m_stack_top = stack_top;
    m_nominal_priority = m_current_priority = priority;
    m_processor_id = processor_id;
    m_wait_objects.clear();
    m_wait_address = 0;
    m_name = std::move(name);

    // Register thread in the thread manager.
    auto& thread_manager = m_kernel.GetThreadManager(processor_id);
    m_manager = std::addressof(thread_manager);
    m_manager->thread_list.push_back(this);
    m_manager->ready_queue.prepare(priority);
    m_manager->wakeup_callback_table[m_thread_id] = this;

    // Allocate the thread local region.
    R_TRY(m_owner->AllocateThreadLocalStorage(std::addressof(m_tls_address)));

    // Reset the thread context.
    ResetThreadContext(m_context, stack_top, entry_point, arg);

    // Mark thread as ready and return
    m_manager->ready_queue.push_back(m_current_priority, this);
    return ResultSuccess;
}
void KThread::SetPriority(u32 priority) {
    ASSERT_MSG(priority <= ThreadPrioLowest && priority >= ThreadPrioHighest,
               "Invalid priority value.");

    // A Ready thread lives inside the priority queue and must be moved to its new level;
    // any other state only needs the destination level to exist.
    const bool is_queued = (m_status == ThreadStatus::Ready);
    if (is_queued) {
        m_manager->ready_queue.move(this, m_current_priority, priority);
    } else {
        m_manager->ready_queue.prepare(priority);
    }

    // Both the nominal and the effective priority are replaced.
    m_nominal_priority = m_current_priority = priority;
}
void KThread::UpdatePriority() {
    // The effective priority is the best (numerically lowest) of the nominal priority and
    // every held mutex's priority — i.e. priority inheritance through owned mutexes.
    u32 best_priority = m_nominal_priority;
    for (KMutex* mutex : m_held_mutexes) {
        best_priority = std::min(best_priority, mutex->GetPriority());
    }
    this->BoostPriority(best_priority);
}
void KThread::BoostPriority(u32 priority) {
    // Same queue maintenance as SetPriority, but only the effective (current) priority is
    // changed; the nominal priority is left untouched.
    const bool is_queued = (m_status == ThreadStatus::Ready);
    if (!is_queued) {
        m_manager->ready_queue.prepare(priority);
    } else {
        m_manager->ready_queue.move(this, m_current_priority, priority);
    }
    m_current_priority = priority;
}
void KThread::PostDestroy(uintptr_t arg) {
    // The owner process pointer is passed through the opaque destroy argument.
    auto* owner = reinterpret_cast<Process*>(arg);
    if (owner == nullptr) {
        return;
    }
    // Give the thread slot back to the owner's resource limit, then drop our reference.
    owner->ReleaseResource(ResourceLimitType::Thread, 1);
    owner->Close();
}
// Stores the wait result code in the saved r0, the first syscall output register.
void KThread::SetWaitSynchronizationResult(Result result) {
    m_context.cpu_registers[0] = result.raw;
}

// Stores the signaled-object index in the saved r1, the second syscall output register.
void KThread::SetWaitSynchronizationOutput(s32 output) {
    m_context.cpu_registers[1] = output;
}
// Returns the forward index of `object` within the wait list. The search runs backwards
// so that, if the same object was passed several times, the index of the last occurrence
// is reported; the reverse distance is then converted back into a forward index.
s32 KThread::GetWaitObjectIndex(const KSynchronizationObject* object) const {
    ASSERT_MSG(!m_wait_objects.empty(), "Thread is not waiting for anything");
    const auto match = std::find(m_wait_objects.rbegin(), m_wait_objects.rend(), object);
    return static_cast<s32>(std::distance(match, m_wait_objects.rend()) - 1);
}
VAddr KThread::GetCommandBufferAddress() const {
    // The IPC command buffer sits at a fixed 0x80-byte offset inside the thread's TLS.
    constexpr VAddr ipc_buffer_offset = 0x80;
    return this->GetTLSAddress() + ipc_buffer_offset;
}
// Boost.Serialization hook; field order defines the savestate layout and must match
// between save and load. The CPU context and scheduler-visible state are all included.
template <class Archive>
void KThread::serialize(Archive& ar, const unsigned int file_version) {
    // Base state first: the wait list lives in KSynchronizationObject.
    ar& boost::serialization::base_object<KSynchronizationObject>(*this);
    ar& m_context;
    ar& m_thread_id;
    ar& m_status;
    ar& m_entry_point;
    ar& m_stack_top;
    ar& m_nominal_priority;
    ar& m_current_priority;
    ar& m_last_running_ticks;
    ar& m_processor_id;
    ar& m_tls_address;
    ar& m_held_mutexes;
    ar& m_pending_mutexes;
    ar& m_owner;
    ar& m_wait_objects;
    ar& m_wait_address;
    ar& m_name;
    ar& m_wakeup_callback;
}
SERIALIZE_IMPL(KThread)
} // namespace Kernel

View File

@ -1,11 +1,10 @@
// Copyright 2014 Citra Emulator Project / PPSSPP Project
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <span>
#include <string>
#include <unordered_map>
#include <vector>
@ -14,14 +13,13 @@
#include "common/common_types.h"
#include "common/thread_queue_list.h"
#include "core/arm/arm_interface.h"
#include "core/core_timing.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/wait_object.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class Mutex;
class KMutex;
class Process;
enum ThreadPriority : u32 {
@ -54,18 +52,18 @@ enum class ThreadStatus {
Dead ///< Run to completion, or forcefully terminated
};
enum class ThreadWakeupReason {
enum class ThreadWakeupReason : u32 {
Signal, // The thread was woken up by WakeupAllWaitingThreads due to an object signal.
Timeout // The thread was woken up due to a wait timeout.
};
class Thread;
class KThread;
class WakeupCallback {
public:
virtual ~WakeupCallback() = default;
virtual void WakeUp(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
std::shared_ptr<WaitObject> object) = 0;
virtual void WakeUp(ThreadWakeupReason reason, KThread* thread,
KSynchronizationObject* object) = 0;
private:
template <class Archive>
@ -78,21 +76,15 @@ public:
explicit ThreadManager(Kernel::KernelSystem& kernel, u32 core_id);
~ThreadManager();
/**
* Gets the current thread
*/
Thread* GetCurrentThread() const;
KThread* GetCurrentThread() const {
return current_thread;
}
/**
* Reschedules to the next available thread (call after current thread is suspended)
*/
void Reschedule();
/**
* Prints the thread queue for debugging purposes
*/
void DebugThreadQueue();
/**
* Returns whether there are any threads that are ready to run.
*/
@ -111,12 +103,14 @@ public:
/**
* Terminates all threads belonging to a specific process.
*/
void TerminateProcessThreads(std::shared_ptr<Process> process);
void TerminateProcessThreads(Process* process);
/**
* Get a const reference to the thread list for debug use
*/
std::span<const std::shared_ptr<Thread>> GetThreadList();
std::vector<KThread*>& GetThreadList() {
return thread_list;
}
void SetCPU(Core::ARM_Interface& cpu_) {
cpu = &cpu_;
@ -127,13 +121,13 @@ private:
* Switches the CPU's active thread context to that of the specified thread
* @param new_thread The thread to switch to
*/
void SwitchContext(Thread* new_thread);
void SwitchContext(KThread* new_thread);
/**
* Pops and returns the next thread from the thread queue
* @return A pointer to the next ready thread
*/
Thread* PopNextReadyThread();
KThread* PopNextReadyThread();
/**
* Callback that will wake up the thread it was scheduled for
@ -142,21 +136,17 @@ private:
*/
void ThreadWakeupCallback(u64 thread_id, s64 cycles_late);
private:
Kernel::KernelSystem& kernel;
Core::ARM_Interface* cpu;
Core::ARM_Interface* cpu{};
KThread* current_thread{};
Common::ThreadQueueList<KThread*, ThreadPrioLowest + 1> ready_queue;
std::deque<KThread*> unscheduled_ready_queue;
std::unordered_map<u64, KThread*> wakeup_callback_table;
Core::TimingEventType* thread_wakeup_event_type{};
std::vector<KThread*> thread_list;
std::shared_ptr<Thread> current_thread;
Common::ThreadQueueList<Thread*, ThreadPrioLowest + 1> ready_queue;
std::deque<Thread*> unscheduled_ready_queue;
std::unordered_map<u64, Thread*> wakeup_callback_table;
/// Event type for the thread wake up event
Core::TimingEventType* ThreadWakeupEventType = nullptr;
// Lists all threadsthat aren't deleted.
std::vector<std::shared_ptr<Thread>> thread_list;
friend class Thread;
friend class KThread;
friend class KernelSystem;
friend class boost::serialization::access;
@ -164,38 +154,68 @@ private:
void serialize(Archive& ar, const unsigned int);
};
class Thread final : public WaitObject {
class KThread final : public KAutoObjectWithSlabHeapAndContainer<KThread, KSynchronizationObject> {
KERNEL_AUTOOBJECT_TRAITS(KThread, KSynchronizationObject);
using ThreadContext = Core::ARM_Interface::ThreadContext;
public:
explicit Thread(KernelSystem&, u32 core_id);
~Thread() override;
explicit KThread(KernelSystem&);
~KThread() override;
std::string GetName() const override {
return name;
}
std::string GetTypeName() const override {
return "Thread";
Result Initialize(std::string name, VAddr entry_point, u32 priority, u32 arg, s32 processor_id,
VAddr stack_top, Process* owner);
static void PostDestroy(uintptr_t arg);
bool ShouldWait(const KThread* thread) const override {
return m_status != ThreadStatus::Dead;
}
static constexpr HandleType HANDLE_TYPE = HandleType::Thread;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
void Acquire(KThread* thread) override {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
}
bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
u32 GetThreadId() const {
return m_thread_id;
}
u32 GetCurrentPriority() const {
return m_current_priority;
}
VAddr GetWaitAddress() const {
return m_wait_address;
}
VAddr GetTLSAddress() const {
return m_tls_address;
}
ThreadContext& GetContext() {
return m_context;
}
const ThreadContext& GetContext() const {
return m_context;
}
ThreadStatus GetStatus() const {
return m_status;
}
Process* GetOwner() const override {
return m_owner;
}
void SetWakeupCallback(std::shared_ptr<WakeupCallback>&& callback) {
m_wakeup_callback = callback;
}
/**
* Gets the thread's current priority
* @return The current thread's priority
*/
u32 GetPriority() const {
return current_priority;
return m_current_priority;
}
/**
* Sets the thread's current priority
* @param priority The new priority
*/
void SetPriority(u32 priority);
/**
@ -210,14 +230,6 @@ public:
*/
void BoostPriority(u32 priority);
/**
* Gets the thread's thread ID
* @return The thread's ID
*/
u32 GetThreadId() const {
return thread_id;
}
/**
* Resumes a thread from waiting
*/
@ -252,25 +264,13 @@ public:
* object in the list.
* @param object Object to query the index of.
*/
s32 GetWaitObjectIndex(const WaitObject* object) const;
s32 GetWaitObjectIndex(const KSynchronizationObject* object) const;
/**
* Stops a thread, invalidating it from further use
*/
void Stop();
/**
* Returns the Thread Local Storage address of the current thread
* @returns VAddr of the thread's TLS
*/
VAddr GetTLSAddress() const {
return tls_address;
}
/**
* Returns the address of the current thread's command buffer, located in the TLS.
* @returns VAddr of the thread's command buffer.
*/
VAddr GetCommandBufferAddress() const;
/**
@ -279,86 +279,37 @@ public:
* with wait_all = true.
*/
bool IsSleepingOnWaitAll() const {
return status == ThreadStatus::WaitSynchAll;
return m_status == ThreadStatus::WaitSynchAll;
}
Core::ARM_Interface::ThreadContext context{};
u32 thread_id;
bool can_schedule{true};
ThreadStatus status;
VAddr entry_point;
VAddr stack_top;
u32 nominal_priority; ///< Nominal thread priority, as set by the emulated application
u32 current_priority; ///< Current thread priority, can be temporarily changed
u64 last_running_ticks; ///< CPU tick when thread was last running
s32 processor_id;
VAddr tls_address; ///< Virtual address of the Thread Local Storage of the thread
/// Mutexes currently held by this thread, which will be released when it exits.
boost::container::flat_set<std::shared_ptr<Mutex>> held_mutexes{};
/// Mutexes that this thread is currently waiting for.
boost::container::flat_set<std::shared_ptr<Mutex>> pending_mutexes{};
std::weak_ptr<Process> owner_process{}; ///< Process that owns this thread
/// Objects that the thread is waiting on, in the same order as they were
/// passed to WaitSynchronization1/N.
std::vector<std::shared_ptr<WaitObject>> wait_objects{};
VAddr wait_address; ///< If waiting on an AddressArbiter, this is the arbitration address
std::string name{};
/// Callback that will be invoked when the thread is resumed from a waiting state. If the thread
/// was waiting via WaitSynchronizationN then the object will be the last object that became
/// available. In case of a timeout, the object will be nullptr.
std::shared_ptr<WakeupCallback> wakeup_callback{};
const u32 core_id;
private:
ThreadManager& thread_manager;
public:
ThreadManager* m_manager{};
ThreadContext m_context{};
u32 m_thread_id;
u32 m_core_id;
bool m_can_schedule{true};
ThreadStatus m_status;
VAddr m_entry_point;
VAddr m_stack_top;
u32 m_nominal_priority;
u32 m_current_priority;
u64 m_last_running_ticks;
s32 m_processor_id;
VAddr m_tls_address;
boost::container::flat_set<KMutex*> m_held_mutexes;
boost::container::flat_set<KMutex*> m_pending_mutexes;
Process* m_owner{};
std::vector<KSynchronizationObject*> m_wait_objects;
VAddr m_wait_address;
std::string m_name{};
std::shared_ptr<WakeupCallback> m_wakeup_callback{};
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int);
void serialize(Archive& ar, const u32 file_version);
};
/**
* Sets up the primary application thread
* @param kernel The kernel instance on which the thread is created
* @param entry_point The address at which the thread should start execution
* @param priority The priority to give the main thread
* @param owner_process The parent process for the main thread
* @return A shared pointer to the main thread
*/
std::shared_ptr<Thread> SetupMainThread(KernelSystem& kernel, u32 entry_point, u32 priority,
std::shared_ptr<Process> owner_process);
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::Thread)
BOOST_CLASS_EXPORT_KEY(Kernel::WakeupCallback)
namespace boost::serialization {
template <class Archive>
void save_construct_data(Archive& ar, const Kernel::Thread* t, const unsigned int) {
ar << t->core_id;
}
template <class Archive>
void load_construct_data(Archive& ar, Kernel::Thread* t, const unsigned int) {
u32 core_id;
ar >> core_id;
::new (t) Kernel::Thread(Core::Global<Kernel::KernelSystem>(), core_id);
}
} // namespace boost::serialization
BOOST_CLASS_EXPORT_KEY(Kernel::KThread)
CONSTRUCT_KERNEL_OBJECT(Kernel::KThread)

View File

@ -0,0 +1,122 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/archives.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_timer.h"
SERIALIZE_EXPORT_IMPL(Kernel::KTimer)
namespace Kernel {
// Constructs a timer bound to the kernel-wide TimerManager. The timer is not
// registered for callbacks until Initialize() is called.
KTimer::KTimer(KernelSystem& kernel)
    : KAutoObjectWithSlabHeapAndContainer(kernel), m_timer_manager(kernel.GetTimerManager()) {}

KTimer::~KTimer() = default;
// Binds this timer to its owner process and registers it with the manager.
void KTimer::Initialize(Process* owner, ResetType reset_type) {
    // Keep the owner process alive for as long as this timer exists.
    owner->Open();

    m_owner = owner;
    m_reset_type = reset_type;

    // Reserve a callback slot and register ourselves under it so scheduled
    // core-timing events can be routed back to this timer.
    m_callback_id = m_timer_manager.GetNextCallbackId();
    m_timer_manager.Register(m_callback_id, this);
}
void KTimer::Finalize() {
this->Cancel();
m_timer_manager.Unregister(m_callback_id);
}
// Final cleanup after the object is destroyed. The owner process pointer was
// stashed via GetPostDestroyArgument(); give back the timer resource count
// and release our reference to the process.
void KTimer::PostDestroy(uintptr_t arg) {
    auto* const owner = reinterpret_cast<Process*>(arg);
    owner->ReleaseResource(ResourceLimitType::Timer, 1);
    owner->Close();
}
// A thread waiting on the timer blocks until the timer fires (is signaled).
bool KTimer::ShouldWait(const KThread* thread) const {
    return !m_signaled;
}
// Acquires the signaled timer on behalf of a woken thread.
void KTimer::Acquire(KThread* thread) {
    ASSERT_MSG(!ShouldWait(thread), "object unavailable!");

    // One-shot timers consume their signaled state when acquired.
    if (m_reset_type == ResetType::OneShot) {
        m_signaled = false;
    }
}
// Arms the timer to fire after `initial` ns and then every `interval` ns.
// An initial delay of zero signals the timer immediately.
void KTimer::Set(s64 initial, s64 interval) {
    // Drop any previously scheduled wakeup before rearming.
    Cancel();

    m_initial_delay = initial;
    m_interval_delay = interval;

    if (initial == 0) {
        // Fire right away rather than scheduling a zero-length event.
        Signal(0);
        return;
    }

    // Schedule the first wakeup relative to now.
    m_kernel.timing.ScheduleEvent(nsToCycles(initial), m_timer_manager.GetEventType(),
                                  m_callback_id);
}
// Removes any pending wakeup event registered under our callback id.
void KTimer::Cancel() {
    m_kernel.timing.UnscheduleEvent(m_timer_manager.GetEventType(), m_callback_id);
}
// Resets the signaled state; does not affect any scheduled wakeup event.
void KTimer::Clear() {
    m_signaled = false;
}
// Wakes all waiters via the base class, then clears the signaled state for
// Pulse timers, which auto-reset once their waiters have been released.
void KTimer::WakeupAllWaitingThreads() {
    KSynchronizationObject::WakeupAllWaitingThreads();
    if (m_reset_type == ResetType::Pulse) {
        m_signaled = false;
    }
}
// Fires the timer: marks it signaled, releases all waiting threads, and, for
// periodic timers, schedules the next wakeup compensating for how late the
// callback ran.
void KTimer::Signal(s64 cycles_late) {
    LOG_TRACE(Kernel, "Timer {} fired", GetObjectId());

    m_signaled = true;
    WakeupAllWaitingThreads();

    if (m_interval_delay != 0) {
        // Rearm relative to when this firing should have happened.
        const s64 cycles_into_future = nsToCycles(m_interval_delay) - cycles_late;
        m_kernel.timing.ScheduleEvent(cycles_into_future, m_timer_manager.GetEventType(),
                                      m_callback_id);
    }
}
// Core-timing callback shared by all kernel timers; dispatches to the timer
// registered under `callback_id`.
void TimerManager::TimerCallback(u64 callback_id, s64 cycles_late) {
    // Use find() rather than at(): at() throws std::out_of_range for an
    // unknown id before the assert below could ever fire (and at() can never
    // return null, so asserting on its result was dead code).
    const auto it = m_timer_callback_table.find(callback_id);
    ASSERT_MSG(it != m_timer_callback_table.end() && it->second != nullptr,
               "Callback fired for invalid timer {:016x}", callback_id);
    it->second->Signal(cycles_late);
}
// Registers a single core-timing event type shared by every kernel timer.
// The userdata supplied when an event is scheduled is the callback id of the
// timer that should fire.
TimerManager::TimerManager(Core::Timing& timing) : m_timing(timing) {
    m_timer_callback_event_type = timing.RegisterEvent(
        "TimerCallback",
        [this](u64 callback_id, s64 cycles_late) { TimerCallback(callback_id, cycles_late); });
}
TimerManager::~TimerManager() = default;
} // namespace Kernel

View File

@ -0,0 +1,133 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <boost/serialization/string.hpp>
#include <boost/serialization/unordered_map.hpp>
#include "common/common_types.h"
#include "core/core_timing.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Core {
class Timing;
}
namespace Kernel {
class KTimer;
/// Owns the shared core-timing event and the callback-id -> timer table used
/// to route timer wakeups back to their KTimer instances.
class TimerManager {
public:
    explicit TimerManager(Core::Timing& timing);
    ~TimerManager();

    /// Returns a fresh, unique callback id for a newly created timer.
    /// Must pre-increment: the original `+m_next_timer_callback_id` (unary
    /// plus) returned the same id on every call, so all timers would register
    /// under one id and clobber each other in the callback table.
    u64 GetNextCallbackId() {
        return ++m_next_timer_callback_id;
    }

    /// Core-timing event type shared by all kernel timers.
    Core::TimingEventType* GetEventType() {
        return m_timer_callback_event_type;
    }

    /// Associates a callback id with its timer so TimerCallback can find it.
    void Register(u64 callback_id, KTimer* timer) {
        m_timer_callback_table[callback_id] = timer;
    }

    /// Removes a timer's entry; called when the timer is finalized.
    void Unregister(u64 callback_id) {
        m_timer_callback_table.erase(callback_id);
    }

private:
    /// Invoked by the core timing subsystem when a scheduled timer event
    /// fires; dispatches to the registered timer.
    void TimerCallback(u64 callback_id, s64 cycles_late);

private:
    [[maybe_unused]] Core::Timing& m_timing;
    Core::TimingEventType* m_timer_callback_event_type{};
    u64 m_next_timer_callback_id{};
    std::unordered_map<u64, KTimer*> m_timer_callback_table;

    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version) {
        ar& m_next_timer_callback_id;
        ar& m_timer_callback_table;
    }
};
class ResourceLimit;
enum class ResetType : u32;
/// Kernel timer synchronization object (backing for the timer SVCs). Signals
/// waiting threads after a configurable initial delay and optional repeat
/// interval, driven by the core timing subsystem via TimerManager.
class KTimer final : public KAutoObjectWithSlabHeapAndContainer<KTimer, KSynchronizationObject> {
    KERNEL_AUTOOBJECT_TRAITS(KTimer, KSynchronizationObject);

public:
    explicit KTimer(KernelSystem& kernel);
    ~KTimer() override;

    /// Opens a reference to the owner process and registers a callback slot
    /// with the TimerManager.
    void Initialize(Process* owner, ResetType reset_type);

    /// Cancels any scheduled wakeup and unregisters from the TimerManager.
    void Finalize() override;

    /// The owner process pointer is forwarded to PostDestroy for cleanup.
    uintptr_t GetPostDestroyArgument() const override {
        return reinterpret_cast<uintptr_t>(m_owner);
    }

    /// Releases the owner's timer resource count and closes the reference
    /// taken in Initialize.
    static void PostDestroy(uintptr_t arg);

    Process* GetOwner() const override {
        return m_owner;
    }

    ResetType GetResetType() const {
        return m_reset_type;
    }

    u64 GetInitialDelay() const {
        return m_initial_delay;
    }

    u64 GetIntervalDelay() const {
        return m_interval_delay;
    }

    /// Arms the timer: fires after `initial` ns, then every `interval` ns.
    /// An initial delay of 0 signals immediately.
    void Set(s64 initial, s64 interval);

    /// Fires the timer, waking waiters and rescheduling periodic timers.
    void Signal(s64 cycles_late);

    /// Unschedules any pending wakeup event.
    void Cancel();

    /// Resets the signaled state without affecting scheduling.
    void Clear();

    void WakeupAllWaitingThreads() override;

    bool ShouldWait(const KThread* thread) const override;
    void Acquire(KThread* thread) override;

private:
    TimerManager& m_timer_manager; ///< Manager owning the callback table/event
    Process* m_owner{};            ///< Process that created this timer
    ResetType m_reset_type{};
    u64 m_initial_delay{};  ///< Initial delay in ns, as passed to Set
    u64 m_interval_delay{}; ///< Repeat interval in ns; 0 means non-periodic
    bool m_signaled{};      ///< True once the timer has fired and not been reset
    u64 m_callback_id{};    ///< Slot id used with TimerManager/core timing

    friend class KernelSystem;
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version) {
        ar& boost::serialization::base_object<KSynchronizationObject>(*this);
        ar& m_owner;
        ar& m_reset_type;
        ar& m_initial_delay;
        ar& m_interval_delay;
        ar& m_signaled;
        ar& m_callback_id;
    }
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KTimer)
CONSTRUCT_KERNEL_OBJECT(Kernel::KTimer)

View File

@ -6,18 +6,27 @@
#include <boost/serialization/unordered_map.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "common/serialization/atomic.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/config_mem.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/ipc_debugger/recorder.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_auto_object_container.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_linked_list.h"
#include "core/hle/kernel/k_mutex.h"
#include "core/hle/kernel/k_object_name.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_semaphore.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_slab_heap.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_timer.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/shared_page.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/timer.h"
SERIALIZE_EXPORT_IMPL(Kernel::New3dsHwCapabilities)
@ -31,6 +40,9 @@ KernelSystem::KernelSystem(Memory::MemorySystem& memory, Core::Timing& timing,
: memory(memory), timing(timing),
prepare_reschedule_callback(std::move(prepare_reschedule_callback)), memory_mode(memory_mode),
n3ds_hw_caps(n3ds_hw_caps) {
global_object_list_container = std::make_unique<KAutoObjectWithListContainer>(*this);
object_name_global_data = std::make_unique<KObjectNameGlobalData>(*this);
slab_heap_container = std::make_unique<SlabHeapContainer>();
std::generate(memory_regions.begin(), memory_regions.end(),
[] { return std::make_shared<MemoryRegionInfo>(); });
MemoryInit(memory_mode, n3ds_hw_caps.memory_mode, override_init_time);
@ -63,16 +75,16 @@ u32 KernelSystem::GenerateObjectID() {
return next_object_id++;
}
std::shared_ptr<Process> KernelSystem::GetCurrentProcess() const {
Process* KernelSystem::GetCurrentProcess() const {
return current_process;
}
void KernelSystem::SetCurrentProcess(std::shared_ptr<Process> process) {
void KernelSystem::SetCurrentProcess(Process* process) {
current_process = process;
SetCurrentMemoryPageTable(process->vm_manager.page_table);
}
void KernelSystem::SetCurrentProcessForCPU(std::shared_ptr<Process> process, u32 core_id) {
void KernelSystem::SetCurrentProcessForCPU(Process* process, u32 core_id) {
if (current_cpu->GetID() == core_id) {
current_process = process;
SetCurrentMemoryPageTable(process->vm_manager.page_table);
@ -151,14 +163,14 @@ const IPCDebugger::Recorder& KernelSystem::GetIPCRecorder() const {
return *ipc_recorder;
}
void KernelSystem::AddNamedPort(std::string name, std::shared_ptr<ClientPort> port) {
named_ports.emplace(std::move(name), std::move(port));
}
u32 KernelSystem::NewThreadId() {
return next_thread_id++;
}
u32 KernelSystem::NewProcessId() {
return ++next_process_id;
}
void KernelSystem::ResetThreadIDs() {
next_thread_id = 0;
}
@ -166,11 +178,10 @@ void KernelSystem::ResetThreadIDs() {
template <class Archive>
void KernelSystem::serialize(Archive& ar, const unsigned int) {
ar& memory_regions;
ar& named_ports;
// current_cpu set externally
// NB: subsystem references and prepare_reschedule_callback are constant
ar&* resource_limits.get();
ar& next_object_id;
// ar& next_object_id;
ar&* timer_manager.get();
ar& next_process_id;
ar& process_list;
@ -197,6 +208,136 @@ void KernelSystem::serialize(Archive& ar, const unsigned int) {
}
}
}
// Tracks a live kernel object; used purely so leaks can be detected after
// emulation shutdown.
void KernelSystem::RegisterKernelObject(KAutoObject* object) {
    registered_objects.insert(object);
}
// Removes an object previously added via RegisterKernelObject when it is
// destroyed during the current emulation session.
void KernelSystem::UnregisterKernelObject(KAutoObject* object) {
    registered_objects.erase(object);
}
// Constexpr counts.
// Total size of the single backing buffer all slab heaps are carved from,
// and the per-type object capacity of each heap. Commented-out entries are
// object types that are not (yet) slab-allocated.
constexpr size_t SlabHeapTotalSize = 0x450000;
constexpr size_t SlabCountKProcess = 47;
constexpr size_t SlabCountKThread = 300;
constexpr size_t SlabCountKEvent = 315;
constexpr size_t SlabCountKMutex = 85;
constexpr size_t SlabCountKSemaphore = 83;
constexpr size_t SlabCountKTimer = 60;
constexpr size_t SlabCountKPort = 153;
constexpr size_t SlabCountKSharedMemory = 63;
constexpr size_t SlabCountKSession = 345;
constexpr size_t SlabCountKAddressArbiter = 51;
constexpr size_t SlabCountKObjectName = 7;
constexpr size_t SlabCountKResourceLimit = 5;
// constexpr size_t SlabCountKDebug = 3;
constexpr size_t SlabCountKLinkedListNode = 4273;
// constexpr size_t SlabCountKBlockInfo = 601;
// constexpr size_t SlabCountKMemoryBlock = 1723;
/// Encapsulates all kernel-object slab heaps, each carved sequentially out of
/// a single heap-allocated backing buffer.
struct KernelSystem::SlabHeapContainer {
    SlabHeapContainer() {
        // TODO: Allocate slab heap on FCRAM
        storage.resize(SlabHeapTotalSize);

        // Hand out consecutive regions of the backing buffer to each heap.
        u8* cursor = storage.data();
        const auto carve = [&cursor](auto& heap, size_t bytes) {
            heap.Initialize(cursor, bytes);
            cursor += bytes;
        };
        carve(event, SlabCountKEvent * sizeof(KEvent));
        carve(mutex, SlabCountKMutex * sizeof(KMutex));
        carve(semaphore, SlabCountKSemaphore * sizeof(KSemaphore));
        carve(timer, SlabCountKTimer * sizeof(KTimer));
        carve(process, SlabCountKProcess * sizeof(Process));
        carve(thread, SlabCountKThread * sizeof(KThread));
        carve(port, SlabCountKPort * sizeof(KPort));
        carve(shared_memory, SlabCountKSharedMemory * sizeof(KSharedMemory));
        carve(session, SlabCountKSession * sizeof(KSession));
        carve(resource_limit, SlabCountKResourceLimit * sizeof(KResourceLimit));
        carve(address_arbiter, SlabCountKAddressArbiter * sizeof(KAddressArbiter));
        carve(linked_list_node, SlabCountKLinkedListNode * sizeof(KLinkedListNode));
        carve(object_name, SlabCountKObjectName * sizeof(KObjectName));
    }

    std::vector<u8> storage;
    KSlabHeap<KEvent> event;
    KSlabHeap<KMutex> mutex;
    KSlabHeap<KSemaphore> semaphore;
    KSlabHeap<KTimer> timer;
    KSlabHeap<Process> process;
    KSlabHeap<KThread> thread;
    KSlabHeap<KPort> port;
    KSlabHeap<KSharedMemory> shared_memory;
    KSlabHeap<KSession> session;
    KSlabHeap<KResourceLimit> resource_limit;
    KSlabHeap<KAddressArbiter> address_arbiter;
    KSlabHeap<KLinkedListNode> linked_list_node;
    KSlabHeap<KObjectName> object_name;
};
/// Compile-time dispatch from object type to its slab heap, listed in the
/// same order the heaps are carved out of the backing storage.
template <typename T>
KSlabHeap<T>& KernelSystem::SlabHeap() {
    if constexpr (std::is_same_v<T, KEvent>) {
        return slab_heap_container->event;
    } else if constexpr (std::is_same_v<T, KMutex>) {
        return slab_heap_container->mutex;
    } else if constexpr (std::is_same_v<T, KSemaphore>) {
        return slab_heap_container->semaphore;
    } else if constexpr (std::is_same_v<T, KTimer>) {
        return slab_heap_container->timer;
    } else if constexpr (std::is_same_v<T, Process>) {
        return slab_heap_container->process;
    } else if constexpr (std::is_same_v<T, KThread>) {
        return slab_heap_container->thread;
    } else if constexpr (std::is_same_v<T, KPort>) {
        return slab_heap_container->port;
    } else if constexpr (std::is_same_v<T, KSharedMemory>) {
        return slab_heap_container->shared_memory;
    } else if constexpr (std::is_same_v<T, KSession>) {
        return slab_heap_container->session;
    } else if constexpr (std::is_same_v<T, KResourceLimit>) {
        return slab_heap_container->resource_limit;
    } else if constexpr (std::is_same_v<T, KAddressArbiter>) {
        return slab_heap_container->address_arbiter;
    } else if constexpr (std::is_same_v<T, KLinkedListNode>) {
        return slab_heap_container->linked_list_node;
    } else if constexpr (std::is_same_v<T, KObjectName>) {
        return slab_heap_container->object_name;
    }
    UNREACHABLE();
}
// Accessor for the global list container that tracks kernel auto objects.
KAutoObjectWithListContainer& KernelSystem::ObjectListContainer() {
    return *global_object_list_container;
}
// Accessor for the global data used by KObjectName lookups.
KObjectNameGlobalData& KernelSystem::ObjectNameGlobalData() {
    return *object_name_global_data;
}
// Explicit instantiations of KernelSystem::SlabHeap for every object type
// that has a heap in SlabHeapContainer, so the template definition can stay
// in this translation unit.
template KSlabHeap<KEvent>& KernelSystem::SlabHeap();
template KSlabHeap<KPort>& KernelSystem::SlabHeap();
template KSlabHeap<Process>& KernelSystem::SlabHeap();
template KSlabHeap<KResourceLimit>& KernelSystem::SlabHeap();
template KSlabHeap<KSession>& KernelSystem::SlabHeap();
template KSlabHeap<KSharedMemory>& KernelSystem::SlabHeap();
template KSlabHeap<KThread>& KernelSystem::SlabHeap();
template KSlabHeap<KObjectName>& KernelSystem::SlabHeap();
template KSlabHeap<KAddressArbiter>& KernelSystem::SlabHeap();
template KSlabHeap<KSemaphore>& KernelSystem::SlabHeap();
template KSlabHeap<KMutex>& KernelSystem::SlabHeap();
template KSlabHeap<KLinkedListNode>& KernelSystem::SlabHeap();
template KSlabHeap<KTimer>& KernelSystem::SlabHeap();
SERIALIZE_IMPL(KernelSystem)
template <class Archive>

View File

@ -9,14 +9,11 @@
#include <functional>
#include <memory>
#include <mutex>
#include <span>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include <boost/serialization/export.hpp>
#include "common/bit_field.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/result.h"
#include "core/memory.h"
namespace ConfigMem {
@ -42,30 +39,18 @@ class Recorder;
namespace Kernel {
class AddressArbiter;
class Event;
class Mutex;
class CodeSet;
class Process;
class Thread;
class Semaphore;
class Timer;
class ClientPort;
class ServerPort;
class ClientSession;
class ServerSession;
class KThread;
class ResourceLimitList;
class SharedMemory;
class ThreadManager;
class TimerManager;
class VMManager;
struct AddressMapping;
enum class ResetType {
OneShot,
Sticky,
Pulse,
};
class KAutoObject;
class KObjectName;
class KObjectNameGlobalData;
/// Permissions for mapped shared memory blocks
enum class MemoryPermission : u32 {
@ -79,6 +64,7 @@ enum class MemoryPermission : u32 {
ReadWriteExecute = (Read | Write | Execute),
DontCare = (1u << 28)
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission)
enum class MemoryRegion : u16 {
APPLICATION = 1,
@ -129,6 +115,10 @@ private:
friend class boost::serialization::access;
};
template <typename T>
class KSlabHeap;
class KAutoObjectWithListContainer;
class KernelSystem {
public:
explicit KernelSystem(Memory::MemorySystem& memory, Core::Timing& timing,
@ -137,142 +127,44 @@ public:
u64 override_init_time = 0);
~KernelSystem();
using PortPair = std::pair<std::shared_ptr<ServerPort>, std::shared_ptr<ClientPort>>;
using SessionPair = std::pair<std::shared_ptr<ServerSession>, std::shared_ptr<ClientSession>>;
/**
* Creates an address arbiter.
*
* @param name Optional name used for debugging.
* @returns The created AddressArbiter.
*/
std::shared_ptr<AddressArbiter> CreateAddressArbiter(std::string name = "Unknown");
/**
* Creates an event
* @param reset_type ResetType describing how to create event
* @param name Optional name of event
*/
std::shared_ptr<Event> CreateEvent(ResetType reset_type, std::string name = "Unknown");
/**
* Creates a mutex.
* @param initial_locked Specifies if the mutex should be locked initially
* @param name Optional name of mutex
* @return Pointer to new Mutex object
*/
std::shared_ptr<Mutex> CreateMutex(bool initial_locked, std::string name = "Unknown");
std::shared_ptr<CodeSet> CreateCodeSet(std::string name, u64 program_id);
std::shared_ptr<Process> CreateProcess(std::shared_ptr<CodeSet> code_set);
/**
* Terminates a process, killing its threads and removing it from the process list.
* @param process Process to terminate.
*/
void TerminateProcess(std::shared_ptr<Process> process);
/**
* Creates and returns a new thread. The new thread is immediately scheduled
* @param name The friendly name desired for the thread
* @param entry_point The address at which the thread should start execution
* @param priority The thread's priority
* @param arg User data to pass to the thread
* @param processor_id The ID(s) of the processors on which the thread is desired to be run
* @param stack_top The address of the thread's stack top
* @param owner_process The parent process for the thread
* @param make_ready If the thread should be put in the ready queue
* @return A shared pointer to the newly created thread
*/
ResultVal<std::shared_ptr<Thread>> CreateThread(std::string name, VAddr entry_point,
u32 priority, u32 arg, s32 processor_id,
VAddr stack_top,
std::shared_ptr<Process> owner_process,
bool make_ready = true);
/**
* Creates a semaphore.
* @param initial_count Number of slots reserved for other threads
* @param max_count Maximum number of slots the semaphore can have
* @param name Optional name of semaphore
* @return The created semaphore
*/
ResultVal<std::shared_ptr<Semaphore>> CreateSemaphore(s32 initial_count, s32 max_count,
std::string name = "Unknown");
/**
* Creates a timer
* @param reset_type ResetType describing how to create the timer
* @param name Optional name of timer
* @return The created Timer
*/
std::shared_ptr<Timer> CreateTimer(ResetType reset_type, std::string name = "Unknown");
/**
* Creates a pair of ServerPort and an associated ClientPort.
*
* @param max_sessions Maximum number of sessions to the port
* @param name Optional name of the ports
* @return The created port tuple
*/
PortPair CreatePortPair(u32 max_sessions, std::string name = "UnknownPort");
/**
* Creates a pair of ServerSession and an associated ClientSession.
* @param name Optional name of the ports.
* @param client_port Optional The ClientPort that spawned this session.
* @return The created session tuple
*/
SessionPair CreateSessionPair(const std::string& name = "Unknown",
std::shared_ptr<ClientPort> client_port = nullptr);
void TerminateProcess(Process* process);
ResourceLimitList& ResourceLimit();
const ResourceLimitList& ResourceLimit() const;
/**
* Creates a shared memory object.
* @param owner_process Process that created this shared memory object.
* @param size Size of the memory block. Must be page-aligned.
* @param permissions Permission restrictions applied to the process which created the block.
* @param other_permissions Permission restrictions applied to other processes mapping the
* block.
* @param address The address from which to map the Shared Memory.
* @param region If the address is 0, the shared memory will be allocated in this region of the
* linear heap.
* @param name Optional object name, used for debugging purposes.
*/
ResultVal<std::shared_ptr<SharedMemory>> CreateSharedMemory(
std::shared_ptr<Process> owner_process, u32 size, MemoryPermission permissions,
MemoryPermission other_permissions, VAddr address = 0,
MemoryRegion region = MemoryRegion::BASE, std::string name = "Unknown");
/**
* Creates a shared memory object from a block of memory managed by an HLE applet.
* @param offset The offset into the heap block that the SharedMemory will map.
* @param size Size of the memory block. Must be page-aligned.
* @param permissions Permission restrictions applied to the process which created the block.
* @param other_permissions Permission restrictions applied to other processes mapping the
* block.
* @param name Optional object name, used for debugging purposes.
*/
std::shared_ptr<SharedMemory> CreateSharedMemoryForApplet(u32 offset, u32 size,
MemoryPermission permissions,
MemoryPermission other_permissions,
std::string name = "Unknown Applet");
u32 GenerateObjectID();
/// Retrieves a process from the current list of processes.
std::shared_ptr<Process> GetProcessById(u32 process_id) const;
/// Gets the slab heap for the specified kernel object type.
template <typename T>
KSlabHeap<T>& SlabHeap();
std::span<const std::shared_ptr<Process>> GetProcessList() const {
KAutoObjectWithListContainer& ObjectListContainer();
/// Gets global data for KObjectName.
KObjectNameGlobalData& ObjectNameGlobalData();
/// Registers all kernel objects with the global emulation state, this is purely for tracking
/// leaks after emulation has been shutdown.
void RegisterKernelObject(KAutoObject* object);
/// Unregisters a kernel object previously registered with RegisterKernelObject when it was
/// destroyed during the current emulation session.
void UnregisterKernelObject(KAutoObject* object);
/// Retrieves a process from the current list of processes.
Process* GetProcessById(u32 process_id) const;
const std::vector<Process*>& GetProcessList() const {
return process_list;
}
std::shared_ptr<Process> GetCurrentProcess() const;
void SetCurrentProcess(std::shared_ptr<Process> process);
void SetCurrentProcessForCPU(std::shared_ptr<Process> process, u32 core_id);
Process* GetCurrentProcess() const;
void SetCurrentProcess(Process* process);
void SetCurrentProcessForCPU(Process* process, u32 core_id);
void SetCurrentMemoryPageTable(std::shared_ptr<Memory::PageTable> page_table);
@ -305,14 +197,12 @@ public:
std::array<std::shared_ptr<MemoryRegionInfo>, 3> memory_regions{};
/// Adds a port to the named port table
void AddNamedPort(std::string name, std::shared_ptr<ClientPort> port);
void PrepareReschedule() {
prepare_reschedule_callback();
}
u32 NewThreadId();
u32 NewProcessId();
void ResetThreadIDs();
@ -328,15 +218,15 @@ public:
return hle_lock;
}
/// Map of named ports managed by the kernel, which can be retrieved using the ConnectToPort
std::unordered_map<std::string, std::shared_ptr<ClientPort>> named_ports;
Core::ARM_Interface* current_cpu = nullptr;
Memory::MemorySystem& memory;
Core::Timing& timing;
// Lists all processes that exist in the current session.
std::vector<Process*> process_list;
/// Sleep main thread of the first ever launched non-sysmodule process.
void SetAppMainThreadExtendedSleep(bool requires_sleep) {
main_thread_extended_sleep = requires_sleep;
@ -367,11 +257,8 @@ private:
// reserved for low-level services
u32 next_process_id = 10;
// Lists all processes that exist in the current session.
std::vector<std::shared_ptr<Process>> process_list;
std::shared_ptr<Process> current_process;
std::vector<std::shared_ptr<Process>> stored_processes;
Process* current_process{};
std::vector<Process*> stored_processes;
std::vector<std::unique_ptr<ThreadManager>> thread_managers;
@ -385,6 +272,16 @@ private:
MemoryMode memory_mode;
New3dsHwCapabilities n3ds_hw_caps;
/// Helper to encapsulate all slab heaps in a single heap allocated container
struct SlabHeapContainer;
std::unique_ptr<SlabHeapContainer> slab_heap_container;
std::unique_ptr<KObjectNameGlobalData> object_name_global_data;
std::unique_ptr<KAutoObjectWithListContainer> global_object_list_container;
std::unordered_set<KAutoObject*> registered_objects;
/*
* Synchronizes access to the internal HLE kernel structures, it is acquired when a guest
* application thread performs a syscall. It should be acquired by any host threads that read or

View File

@ -14,8 +14,8 @@
#include "common/settings.h"
#include "core/core.h"
#include "core/hle/kernel/config_mem.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/shared_page.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/hle/result.h"

View File

@ -1,144 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::Mutex)
namespace Kernel {
// Forcibly releases every mutex held by the given thread: each mutex is reset
// to the unlocked state, its waiters are woken, and the thread's held-mutex
// set is emptied.
void ReleaseThreadMutexes(Thread* thread) {
    auto& held = thread->held_mutexes;
    for (const auto& held_mutex : held) {
        held_mutex->holding_thread = nullptr;
        held_mutex->lock_count = 0;
        held_mutex->WakeupAllWaitingThreads();
    }
    held.clear();
}
Mutex::Mutex(KernelSystem& kernel) : WaitObject(kernel), kernel(kernel) {}

Mutex::~Mutex() {
    // Return the mutex count charged to the resource limit, if one was
    // attached to this mutex.
    if (resource_limit) {
        resource_limit->Release(ResourceLimitType::Mutex, 1);
    }
}
// Creates a new mutex object. When `initial_locked` is set, the mutex is
// immediately acquired by the thread that is currently running.
std::shared_ptr<Mutex> KernelSystem::CreateMutex(bool initial_locked, std::string name) {
    auto new_mutex = std::make_shared<Mutex>(*this);
    new_mutex->holding_thread = nullptr;
    new_mutex->name = std::move(name);
    new_mutex->lock_count = 0;

    // Lock the mutex on behalf of the running thread when requested.
    if (initial_locked) {
        new_mutex->Acquire(GetCurrentThreadManager().GetCurrentThread());
    }
    return new_mutex;
}
// A thread must wait when the mutex is currently locked and is owned by a
// different thread (recursive re-acquisition by the owner never waits).
bool Mutex::ShouldWait(const Thread* thread) const {
    const bool locked = lock_count > 0;
    return locked && holding_thread.get() != thread;
}
// Acquires the mutex for `thread`. Recursive: repeated acquires by the owner
// only bump the lock count.
void Mutex::Acquire(Thread* thread) {
    ASSERT_MSG(!ShouldWait(thread), "object unavailable!");

    // Actually "acquire" the mutex only if we don't already have it
    if (lock_count == 0) {
        // Seed the mutex priority from the new owner; UpdatePriority() later
        // elevates it from the waiters (priority inheritance).
        priority = thread->current_priority;
        thread->held_mutexes.insert(SharedFrom(this));
        holding_thread = SharedFrom(thread);
        // Owning a mutex may change the holder's effective priority.
        thread->UpdatePriority();
        kernel.PrepareReschedule();
    }
    lock_count++;
}
// Releases one recursive acquisition of the mutex on behalf of `thread`.
// Fails when `thread` is not the current holder, or when the lock count is
// already zero.
Result Mutex::Release(Thread* thread) {
    // We can only release the mutex if it's held by the calling thread.
    if (thread != holding_thread.get()) {
        if (holding_thread) {
            LOG_ERROR(
                Kernel,
                "Tried to release a mutex (owned by thread id {}) from a different thread id {}",
                holding_thread->thread_id, thread->thread_id);
        }
        return Result(ErrCodes::WrongLockingThread, ErrorModule::Kernel,
                      ErrorSummary::InvalidArgument, ErrorLevel::Permanent);
    }

    // Note: It should not be possible for the situation where the mutex has a holding thread with a
    // zero lock count to occur. The real kernel still checks for this, so we do too.
    if (lock_count <= 0)
        return Result(ErrorDescription::InvalidResultValue, ErrorModule::Kernel,
                      ErrorSummary::InvalidState, ErrorLevel::Permanent);

    lock_count--;

    // Yield to the next thread only if we've fully released the mutex
    if (lock_count == 0) {
        // Drop ownership, undo any priority inheritance on the former holder,
        // then wake the waiters so one of them can acquire the mutex.
        holding_thread->held_mutexes.erase(SharedFrom(this));
        holding_thread->UpdatePriority();
        holding_thread = nullptr;
        WakeupAllWaitingThreads();
        kernel.PrepareReschedule();
    }

    return ResultSuccess;
}
// In addition to the base bookkeeping, track this mutex in the thread's
// pending set and re-run priority inheritance, since the new waiter may have
// a better priority than the current mutex priority.
void Mutex::AddWaitingThread(std::shared_ptr<Thread> thread) {
    WaitObject::AddWaitingThread(thread);
    thread->pending_mutexes.insert(SharedFrom(this));
    UpdatePriority();
}

// Mirror of AddWaitingThread: drop the pending entry and recompute the
// inherited priority now that this waiter is gone.
void Mutex::RemoveWaitingThread(Thread* thread) {
    WaitObject::RemoveWaitingThread(thread);
    thread->pending_mutexes.erase(SharedFrom(this));
    UpdatePriority();
}
void Mutex::UpdatePriority() {
if (!holding_thread)
return;
u32 best_priority = ThreadPrioLowest;
for (auto& waiter : GetWaitingThreads()) {
if (waiter->current_priority < best_priority)
best_priority = waiter->current_priority;
}
if (best_priority != priority) {
priority = best_priority;
holding_thread->UpdatePriority();
}
}
// Serializes the mutex for savestates: base WaitObject state first, then the
// members in declaration order (save and load must traverse identically).
template <class Archive>
void Mutex::serialize(Archive& ar, const unsigned int) {
    ar& boost::serialization::base_object<WaitObject>(*this);
    ar& lock_count;
    ar& priority;
    ar& name;
    ar& holding_thread;
    ar& resource_limit;
}
SERIALIZE_IMPL(Mutex)
} // namespace Kernel

View File

@ -1,79 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/wait_object.h"
#include "core/hle/result.h"
namespace Kernel {
class Thread;
/// Kernel mutex synchronization object with recursive locking and priority
/// inheritance support.
class Mutex final : public WaitObject {
public:
    explicit Mutex(KernelSystem& kernel);
    ~Mutex() override;

    std::string GetTypeName() const override {
        return "Mutex";
    }
    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::Mutex;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    /// Resource limit this mutex was charged against; released in the destructor.
    std::shared_ptr<ResourceLimit> resource_limit;

    int lock_count; ///< Number of times the mutex has been acquired
    u32 priority;   ///< The priority of the mutex, used for priority inheritance.
    std::string name; ///< Name of mutex (optional)
    std::shared_ptr<Thread> holding_thread; ///< Thread that has acquired the mutex

    /**
     * Elevate the mutex priority to the best priority
     * among the priorities of all its waiting threads.
     */
    void UpdatePriority();

    bool ShouldWait(const Thread* thread) const override;
    void Acquire(Thread* thread) override;

    void AddWaitingThread(std::shared_ptr<Thread> thread) override;
    void RemoveWaitingThread(Thread* thread) override;

    /**
     * Attempts to release the mutex from the specified thread.
     * @param thread Thread that wants to release the mutex.
     * @returns The result code of the operation.
     */
    Result Release(Thread* thread);

private:
    KernelSystem& kernel;

    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const unsigned int);
};
/**
* Releases all the mutexes held by the specified thread
* @param thread Thread that is holding the mutexes
*/
void ReleaseThreadMutexes(Thread* thread);
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::Mutex)
CONSTRUCT_KERNEL_OBJECT(Kernel::Mutex)

View File

@ -1,50 +0,0 @@
// Copyright 2018 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/assert.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "common/archives.h"
SERIALIZE_EXPORT_IMPL(Kernel::Object)
namespace Kernel {
// Every kernel object is tagged with a kernel-generated id; the id exists
// purely for debugging (see GetObjectId()).
Object::Object(KernelSystem& kernel) : object_id{kernel.GenerateObjectID()} {}

Object::~Object() = default;
bool Object::IsWaitable() const {
switch (GetHandleType()) {
case HandleType::Event:
case HandleType::Mutex:
case HandleType::Thread:
case HandleType::Semaphore:
case HandleType::Timer:
case HandleType::ServerPort:
case HandleType::ServerSession:
return true;
case HandleType::Unknown:
case HandleType::SharedMemory:
case HandleType::Process:
case HandleType::AddressArbiter:
case HandleType::ResourceLimit:
case HandleType::CodeSet:
case HandleType::ClientPort:
case HandleType::ClientSession:
return false;
}
UNREACHABLE();
}
// Serializes only the debug object id; all meaningful state lives in the
// derived classes.
template <class Archive>
void Object::serialize(Archive& ar, const unsigned int) {
    ar& object_id;
}
SERIALIZE_IMPL(Object)
} // namespace Kernel

View File

@ -1,106 +0,0 @@
// Copyright 2018 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <atomic>
#include <memory>
#include <string>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "common/serialization/atomic.h"
#include "core/global.h"
#include "core/hle/kernel/kernel.h"
namespace Kernel {
class KernelSystem;
using Handle = u32;
/// Identifies the concrete kernel object type behind a Handle.
enum class HandleType : u32 {
    Unknown,
    Event,
    Mutex,
    SharedMemory,
    Thread,
    Process,
    AddressArbiter,
    Semaphore,
    Timer,
    ResourceLimit,
    CodeSet,
    ClientPort,
    ServerPort,
    ClientSession,
    ServerSession,
};
enum {
    // Default stack size, in bytes. NOTE(review): presumably used for threads
    // created without an explicit stack size — confirm at the call sites.
    DEFAULT_STACK_SIZE = 0x4000,
};
/// Base class of all kernel objects. Enables shared ownership via
/// enable_shared_from_this (see SharedFrom below) and carries a debug id.
class Object : NonCopyable, public std::enable_shared_from_this<Object> {
public:
    explicit Object(KernelSystem& kernel);
    virtual ~Object();

    /// Returns a unique identifier for the object. For debugging purposes only.
    u32 GetObjectId() const {
        return object_id.load(std::memory_order_relaxed);
    }

    /// Human-readable name of the concrete type, e.g. "Mutex".
    virtual std::string GetTypeName() const {
        return "[BAD KERNEL OBJECT TYPE]";
    }

    /// Optional per-instance name, for debugging.
    virtual std::string GetName() const {
        return "[UNKNOWN KERNEL OBJECT]";
    }

    /// Concrete handle type; implemented by every derived class.
    virtual HandleType GetHandleType() const = 0;

    /**
     * Check if a thread can wait on the object
     * @return True if a thread can wait on the object, otherwise false
     */
    bool IsWaitable() const;

private:
    // Debug-only identifier; relaxed ordering is sufficient for a debug id.
    std::atomic<u32> object_id;

    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const unsigned int);
};
/// Recovers an owning shared_ptr from a raw pointer to an object that is
/// already managed through enable_shared_from_this; yields nullptr when
/// given nullptr.
template <typename T>
std::shared_ptr<T> SharedFrom(T* raw) {
    return raw ? std::static_pointer_cast<T>(raw->shared_from_this()) : nullptr;
}
/**
 * Attempts to downcast the given Object pointer to a pointer to T.
 * @return Derived pointer to the object, or `nullptr` if `object` isn't of type T.
 */
template <typename T>
inline std::shared_ptr<T> DynamicObjectCast(std::shared_ptr<Object> object) {
    const bool matches = object != nullptr && object->GetHandleType() == T::HANDLE_TYPE;
    return matches ? std::static_pointer_cast<T>(object) : nullptr;
}
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::Object)
// Gives boost::serialization a way to construct T during deserialization:
// kernel objects have no default constructor, so the macro placement-news
// them with the globally registered KernelSystem instance.
#define CONSTRUCT_KERNEL_OBJECT(T) \
namespace boost::serialization { \
template <class Archive> \
void load_construct_data(Archive& ar, T* t, const unsigned int file_version) { \
    ::new (t) T(Core::Global<Kernel::KernelSystem>()); \
} \
}

View File

@ -1,73 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/semaphore.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::Semaphore)
namespace Kernel {
Semaphore::Semaphore(KernelSystem& kernel) : WaitObject(kernel) {}

Semaphore::~Semaphore() {
    // Return the semaphore count charged to the resource limit, if one was
    // attached to this semaphore.
    if (resource_limit) {
        resource_limit->Release(ResourceLimitType::Semaphore, 1);
    }
}
// Creates a semaphore with `initial_count` slots immediately available out of
// a maximum of `max_count`; fails when initial_count exceeds max_count.
ResultVal<std::shared_ptr<Semaphore>> KernelSystem::CreateSemaphore(s32 initial_count,
                                                                    s32 max_count,
                                                                    std::string name) {
    R_UNLESS(initial_count <= max_count, ResultInvalidCombinationKernel);

    // When the semaphore is created, some slots are reserved for other threads,
    // and the rest is reserved for the caller thread
    auto semaphore = std::make_shared<Semaphore>(*this);
    semaphore->name = std::move(name);
    semaphore->max_count = max_count;
    semaphore->available_count = initial_count;
    return semaphore;
}
// Threads must wait while no free slots remain; which thread is asking does
// not matter for semaphores.
bool Semaphore::ShouldWait(const Thread* thread) const {
    const bool has_free_slot = available_count > 0;
    return !has_free_slot;
}
// Consumes one free slot on behalf of the acquiring thread; does nothing
// when no slot is available.
void Semaphore::Acquire(Thread* thread) {
    if (available_count > 0) {
        --available_count;
    }
}
// Frees `release_count` slots, waking any waiting threads. The slot count
// prior to the release is reported through `out_count`.
Result Semaphore::Release(s32* out_count, s32 release_count) {
    // Releasing must not push the total above the semaphore's maximum.
    R_UNLESS(max_count >= release_count + available_count, ResultOutOfRangeKernel);

    *out_count = available_count;
    available_count += release_count;
    // Newly freed slots may unblock threads waiting on this semaphore.
    WakeupAllWaitingThreads();
    return ResultSuccess;
}
// Serializes the semaphore for savestates: base WaitObject state first, then
// the members in declaration order (save and load must traverse identically).
template <class Archive>
void Semaphore::serialize(Archive& ar, const unsigned int) {
    ar& boost::serialization::base_object<WaitObject>(*this);
    ar& max_count;
    ar& available_count;
    ar& name;
    ar& resource_limit;
}
SERIALIZE_IMPL(Semaphore)
} // namespace Kernel

View File

@ -1,59 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/wait_object.h"
#include "core/hle/result.h"
namespace Kernel {
class ResourceLimit;
/// Kernel counting-semaphore synchronization object.
class Semaphore final : public WaitObject {
public:
    explicit Semaphore(KernelSystem& kernel);
    ~Semaphore() override;

    std::string GetTypeName() const override {
        return "Semaphore";
    }
    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::Semaphore;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    /// Resource limit this semaphore was charged against; released in the destructor.
    std::shared_ptr<ResourceLimit> resource_limit;

    s32 max_count;       ///< Maximum number of simultaneous holders the semaphore can have
    s32 available_count; ///< Number of free slots left in the semaphore
    std::string name;    ///< Name of semaphore (optional)

    /// A thread should wait iff no free slots are currently available.
    bool ShouldWait(const Thread* thread) const override;
    /// Consumes one free slot for the acquiring thread.
    void Acquire(Thread* thread) override;

    /**
     * Releases a certain number of slots from a semaphore.
     * @param out_count Receives the number of free slots the semaphore had before this call
     * @param release_count The number of slots to release
     * @return The result code of the operation.
     */
    Result Release(s32* out_count, s32 release_count);

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const unsigned int);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::Semaphore)
CONSTRUCT_KERNEL_OBJECT(Kernel::Semaphore)

Some files were not shown because too many files have changed in this diff Show More