k_page_group: synchronize

parent 9933121256
commit 3392fdac9b
src/core/CMakeLists.txt
@@ -226,6 +226,7 @@ add_library(core STATIC
     hle/kernel/k_page_buffer.h
     hle/kernel/k_page_heap.cpp
     hle/kernel/k_page_heap.h
+    hle/kernel/k_page_group.cpp
     hle/kernel/k_page_group.h
     hle/kernel/k_page_table.cpp
     hle/kernel/k_page_table.h
src/core/hle/kernel/k_code_memory.cpp
@@ -27,13 +27,13 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
     auto& page_table = m_owner->PageTable();
 
     // Construct the page group.
-    m_page_group = {};
+    m_page_group.emplace(kernel, page_table.GetBlockInfoManager());
 
     // Lock the memory.
-    R_TRY(page_table.LockForCodeMemory(&m_page_group, addr, size))
+    R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size))
 
     // Clear the memory.
-    for (const auto& block : m_page_group.Nodes()) {
+    for (const auto& block : *m_page_group) {
         std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
     }
 
@@ -51,12 +51,13 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
 void KCodeMemory::Finalize() {
     // Unlock.
     if (!m_is_mapped && !m_is_owner_mapped) {
-        const size_t size = m_page_group.GetNumPages() * PageSize;
-        m_owner->PageTable().UnlockForCodeMemory(m_address, size, m_page_group);
+        const size_t size = m_page_group->GetNumPages() * PageSize;
+        m_owner->PageTable().UnlockForCodeMemory(m_address, size, *m_page_group);
     }
 
     // Close the page group.
-    m_page_group = {};
+    m_page_group->Close();
+    m_page_group->Finalize();
 
     // Close our reference to our owner.
     m_owner->Close();
@@ -64,7 +65,7 @@ void KCodeMemory::Finalize() {
 
 Result KCodeMemory::Map(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
@@ -74,7 +75,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
 
     // Map the memory.
     R_TRY(kernel.CurrentProcess()->PageTable().MapPages(
-        address, m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
+        address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
 
     // Mark ourselves as mapped.
     m_is_mapped = true;
@@ -84,13 +85,13 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
 
 Result KCodeMemory::Unmap(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
 
     // Unmap the memory.
-    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, m_page_group,
+    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, *m_page_group,
                                                           KMemoryState::CodeOut));
 
     // Mark ourselves as unmapped.
@@ -101,7 +102,7 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
 
 Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
@@ -125,7 +126,7 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
 
     // Map the memory.
     R_TRY(
-        m_owner->PageTable().MapPages(address, m_page_group, KMemoryState::GeneratedCode, k_perm));
+        m_owner->PageTable().MapPages(address, *m_page_group, KMemoryState::GeneratedCode, k_perm));
 
     // Mark ourselves as mapped.
     m_is_owner_mapped = true;
@@ -135,13 +136,13 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
 
 Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
 
     // Unmap the memory.
-    R_TRY(m_owner->PageTable().UnmapPages(address, m_page_group, KMemoryState::GeneratedCode));
+    R_TRY(m_owner->PageTable().UnmapPages(address, *m_page_group, KMemoryState::GeneratedCode));
 
     // Mark ourselves as unmapped.
     m_is_owner_mapped = false;
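Note: the member changes from a plain KPageGroup to std::optional<KPageGroup> because the reworked class needs a KernelCore reference and a KBlockInfoManager at construction, which KCodeMemory only has at Initialize() time. A minimal sketch of this deferred-construction pattern, using hypothetical names (not yuzu code):

// Sketch: the member starts disengaged and is constructed in place once its
// constructor arguments exist, then destroyed explicitly on finalize.
#include <optional>

struct Resource {
    explicit Resource(int id) : m_id{id} {}
    int m_id;
};

struct Holder {
    std::optional<Resource> m_res{}; // no default-constructor requirement on Resource

    void Initialize(int id) {
        m_res.emplace(id); // analogous to m_page_group.emplace(kernel, ...)
    }
    void Finalize() {
        m_res.reset(); // destroys the held Resource during teardown
    }
};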
src/core/hle/kernel/k_code_memory.h
@@ -3,6 +3,8 @@
 
 #pragma once
 
+#include <optional>
+
 #include "common/common_types.h"
 #include "core/device_memory.h"
 #include "core/hle/kernel/k_auto_object.h"
@@ -49,11 +51,11 @@ public:
         return m_address;
     }
     size_t GetSize() const {
-        return m_is_initialized ? m_page_group.GetNumPages() * PageSize : 0;
+        return m_is_initialized ? m_page_group->GetNumPages() * PageSize : 0;
     }
 
 private:
-    KPageGroup m_page_group{};
+    std::optional<KPageGroup> m_page_group{};
     KProcess* m_owner{};
     VAddr m_address{};
     KLightLock m_lock;
src/core/hle/kernel/k_memory_manager.cpp
@@ -223,7 +223,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
 
     // Ensure that we don't leave anything un-freed.
     ON_RESULT_FAILURE {
-        for (const auto& it : out->Nodes()) {
+        for (const auto& it : *out) {
             auto& manager = this->GetManager(it.GetAddress());
             const size_t node_num_pages = std::min<u64>(
                 it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
@@ -285,7 +285,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
         m_has_optimized_process[static_cast<size_t>(pool)], true));
 
     // Open the first reference to the pages.
-    for (const auto& block : out->Nodes()) {
+    for (const auto& block : *out) {
         PAddr cur_address = block.GetAddress();
         size_t remaining_pages = block.GetNumPages();
         while (remaining_pages > 0) {
@@ -335,7 +335,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
     // Perform optimized memory tracking, if we should.
     if (optimized) {
         // Iterate over the allocated blocks.
-        for (const auto& block : out->Nodes()) {
+        for (const auto& block : *out) {
             // Get the block extents.
             const PAddr block_address = block.GetAddress();
             const size_t block_pages = block.GetNumPages();
@@ -391,7 +391,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
         }
     } else {
         // Set all the allocated memory.
-        for (const auto& block : out->Nodes()) {
+        for (const auto& block : *out) {
             std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
                         block.GetSize());
         }
src/core/hle/kernel/k_page_group.cpp (new file)
@@ -0,0 +1,121 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
+#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_page_group.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+void KPageGroup::Finalize() {
+    KBlockInfo* cur = m_first_block;
+    while (cur != nullptr) {
+        KBlockInfo* next = cur->GetNext();
+        m_manager->Free(cur);
+        cur = next;
+    }
+
+    m_first_block = nullptr;
+    m_last_block = nullptr;
+}
+
+void KPageGroup::CloseAndReset() {
+    auto& mm = m_kernel.MemoryManager();
+
+    KBlockInfo* cur = m_first_block;
+    while (cur != nullptr) {
+        KBlockInfo* next = cur->GetNext();
+        mm.Close(cur->GetAddress(), cur->GetNumPages());
+        m_manager->Free(cur);
+        cur = next;
+    }
+
+    m_first_block = nullptr;
+    m_last_block = nullptr;
+}
+
+size_t KPageGroup::GetNumPages() const {
+    size_t num_pages = 0;
+
+    for (const auto& it : *this) {
+        num_pages += it.GetNumPages();
+    }
+
+    return num_pages;
+}
+
+Result KPageGroup::AddBlock(KPhysicalAddress addr, size_t num_pages) {
+    // Succeed immediately if we're adding no pages.
+    R_SUCCEED_IF(num_pages == 0);
+
+    // Check for overflow.
+    ASSERT(addr < addr + num_pages * PageSize);
+
+    // Try to just append to the last block.
+    if (m_last_block != nullptr) {
+        R_SUCCEED_IF(m_last_block->TryConcatenate(addr, num_pages));
+    }
+
+    // Allocate a new block.
+    KBlockInfo* new_block = m_manager->Allocate();
+    R_UNLESS(new_block != nullptr, ResultOutOfResource);
+
+    // Initialize the block.
+    new_block->Initialize(addr, num_pages);
+
+    // Add the block to our list.
+    if (m_last_block != nullptr) {
+        m_last_block->SetNext(new_block);
+    } else {
+        m_first_block = new_block;
+    }
+    m_last_block = new_block;
+
+    R_SUCCEED();
+}
+
+void KPageGroup::Open() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.Open(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+void KPageGroup::OpenFirst() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.OpenFirst(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+void KPageGroup::Close() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.Close(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+bool KPageGroup::IsEquivalentTo(const KPageGroup& rhs) const {
+    auto lit = this->begin();
+    auto rit = rhs.begin();
+    auto lend = this->end();
+    auto rend = rhs.end();
+
+    while (lit != lend && rit != rend) {
+        if (*lit != *rit) {
+            return false;
+        }
+
+        ++lit;
+        ++rit;
+    }
+
+    return lit == lend && rit == rend;
+}
+
+} // namespace Kernel
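Note: AddBlock above first attempts TryConcatenate on the tail block, so physically contiguous allocations collapse into a single KBlockInfo node. A standalone model of the same coalescing rule, simplified to a std::vector (hypothetical, not the kernel code):

#include <cstdint>
#include <cstdio>
#include <vector>

constexpr std::uint64_t kPageSize = 0x1000;

struct Block {
    std::uint64_t addr;
    std::uint64_t num_pages;
    std::uint64_t End() const {
        return addr + num_pages * kPageSize;
    }
};

// Same rule as KPageGroup::AddBlock/TryConcatenate: a range starting exactly at
// the end of the last block extends that block instead of appending a new one.
void AddBlock(std::vector<Block>& blocks, std::uint64_t addr, std::uint64_t num_pages) {
    if (num_pages == 0) {
        return;
    }
    if (!blocks.empty() && blocks.back().End() == addr) {
        blocks.back().num_pages += num_pages;
        return;
    }
    blocks.push_back({addr, num_pages});
}

int main() {
    std::vector<Block> blocks;
    AddBlock(blocks, 0x10000, 4);
    AddBlock(blocks, 0x14000, 2); // contiguous with the first block: coalesced
    AddBlock(blocks, 0x20000, 1); // gap: becomes a second block
    std::printf("%zu blocks, first covers %llu pages\n", blocks.size(),
                static_cast<unsigned long long>(blocks[0].num_pages));
    // prints: 2 blocks, first covers 6 pages
}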
src/core/hle/kernel/k_page_group.h
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #pragma once
@@ -13,24 +13,22 @@
 
 namespace Kernel {
 
+class KBlockInfoManager;
 class KPageGroup;
 
 class KBlockInfo {
-private:
-    friend class KPageGroup;
-
 public:
-    constexpr KBlockInfo() = default;
+    constexpr explicit KBlockInfo() : m_next(nullptr) {}
 
-    constexpr void Initialize(PAddr addr, size_t np) {
+    constexpr void Initialize(KPhysicalAddress addr, size_t np) {
         ASSERT(Common::IsAligned(addr, PageSize));
         ASSERT(static_cast<u32>(np) == np);
 
-        m_page_index = static_cast<u32>(addr) / PageSize;
+        m_page_index = static_cast<u32>(addr / PageSize);
         m_num_pages = static_cast<u32>(np);
     }
 
-    constexpr PAddr GetAddress() const {
+    constexpr KPhysicalAddress GetAddress() const {
         return m_page_index * PageSize;
     }
     constexpr size_t GetNumPages() const {
@@ -39,10 +37,10 @@ public:
     constexpr size_t GetSize() const {
         return this->GetNumPages() * PageSize;
     }
-    constexpr PAddr GetEndAddress() const {
+    constexpr KPhysicalAddress GetEndAddress() const {
         return (m_page_index + m_num_pages) * PageSize;
     }
-    constexpr PAddr GetLastAddress() const {
+    constexpr KPhysicalAddress GetLastAddress() const {
         return this->GetEndAddress() - 1;
     }
 
@@ -62,8 +60,8 @@ public:
         return !(*this == rhs);
     }
 
-    constexpr bool IsStrictlyBefore(PAddr addr) const {
-        const PAddr end = this->GetEndAddress();
+    constexpr bool IsStrictlyBefore(KPhysicalAddress addr) const {
+        const KPhysicalAddress end = this->GetEndAddress();
 
         if (m_page_index != 0 && end == 0) {
             return false;
@@ -72,11 +70,11 @@ public:
         return end < addr;
     }
 
-    constexpr bool operator<(PAddr addr) const {
+    constexpr bool operator<(KPhysicalAddress addr) const {
         return this->IsStrictlyBefore(addr);
     }
 
-    constexpr bool TryConcatenate(PAddr addr, size_t np) {
+    constexpr bool TryConcatenate(KPhysicalAddress addr, size_t np) {
         if (addr != 0 && addr == this->GetEndAddress()) {
             m_num_pages += static_cast<u32>(np);
             return true;
@@ -90,96 +88,118 @@ private:
     }
 
 private:
+    friend class KPageGroup;
+
     KBlockInfo* m_next{};
     u32 m_page_index{};
     u32 m_num_pages{};
 };
 static_assert(sizeof(KBlockInfo) <= 0x10);
 
-class KPageGroup final {
+class KPageGroup {
 public:
-    class Node final {
+    class Iterator {
     public:
-        constexpr Node(u64 addr_, std::size_t num_pages_) : addr{addr_}, num_pages{num_pages_} {}
+        using iterator_category = std::forward_iterator_tag;
+        using value_type = const KBlockInfo;
+        using difference_type = std::ptrdiff_t;
+        using pointer = value_type*;
+        using reference = value_type&;
 
-        constexpr u64 GetAddress() const {
-            return addr;
-        }
+        constexpr explicit Iterator(pointer n) : m_node(n) {}
 
-        constexpr std::size_t GetNumPages() const {
-            return num_pages;
-        }
+        constexpr bool operator==(const Iterator& rhs) const {
+            return m_node == rhs.m_node;
+        }
+        constexpr bool operator!=(const Iterator& rhs) const {
+            return !(*this == rhs);
+        }
 
-        constexpr std::size_t GetSize() const {
-            return GetNumPages() * PageSize;
-        }
+        constexpr pointer operator->() const {
+            return m_node;
+        }
+        constexpr reference operator*() const {
+            return *m_node;
+        }
+
+        constexpr Iterator& operator++() {
+            m_node = m_node->GetNext();
+            return *this;
+        }
+
+        constexpr Iterator operator++(int) {
+            const Iterator it{*this};
+            ++(*this);
+            return it;
+        }
 
     private:
-        u64 addr{};
-        std::size_t num_pages{};
+        pointer m_node{};
     };
 
-public:
-    KPageGroup() = default;
-    KPageGroup(u64 address, u64 num_pages) {
-        ASSERT(AddBlock(address, num_pages).IsSuccess());
+    explicit KPageGroup(KernelCore& kernel, KBlockInfoManager* m)
+        : m_kernel{kernel}, m_manager{m} {}
+    ~KPageGroup() {
+        this->Finalize();
     }
 
-    constexpr std::list<Node>& Nodes() {
-        return nodes;
-    }
+    void CloseAndReset();
+    void Finalize();
 
-    constexpr const std::list<Node>& Nodes() const {
-        return nodes;
-    }
+    Iterator begin() const {
+        return Iterator{m_first_block};
+    }
+    Iterator end() const {
+        return Iterator{nullptr};
+    }
+    bool empty() const {
+        return m_first_block == nullptr;
+    }
 
-    std::size_t GetNumPages() const {
-        std::size_t num_pages = 0;
-        for (const Node& node : nodes) {
-            num_pages += node.GetNumPages();
-        }
-        return num_pages;
-    }
+    Result AddBlock(KPhysicalAddress addr, size_t num_pages);
+    void Open() const;
+    void OpenFirst() const;
+    void Close() const;
 
-    bool IsEqual(KPageGroup& other) const {
-        auto this_node = nodes.begin();
-        auto other_node = other.nodes.begin();
-        while (this_node != nodes.end() && other_node != other.nodes.end()) {
-            if (this_node->GetAddress() != other_node->GetAddress() ||
-                this_node->GetNumPages() != other_node->GetNumPages()) {
-                return false;
-            }
-            this_node = std::next(this_node);
-            other_node = std::next(other_node);
-        }
+    size_t GetNumPages() const;
 
-        return this_node == nodes.end() && other_node == other.nodes.end();
-    }
+    bool IsEquivalentTo(const KPageGroup& rhs) const;
 
-    Result AddBlock(u64 address, u64 num_pages) {
-        if (!num_pages) {
-            return ResultSuccess;
-        }
-        if (!nodes.empty()) {
-            const auto node = nodes.back();
-            if (node.GetAddress() + node.GetNumPages() * PageSize == address) {
-                address = node.GetAddress();
-                num_pages += node.GetNumPages();
-                nodes.pop_back();
-            }
-        }
-        nodes.push_back({address, num_pages});
-        return ResultSuccess;
-    }
+    bool operator==(const KPageGroup& rhs) const {
+        return this->IsEquivalentTo(rhs);
+    }
 
-    bool Empty() const {
-        return nodes.empty();
-    }
+    bool operator!=(const KPageGroup& rhs) const {
+        return !(*this == rhs);
+    }
 
-    void Finalize() {}
-
 private:
-    std::list<Node> nodes;
+    KernelCore& m_kernel;
+    KBlockInfo* m_first_block{};
+    KBlockInfo* m_last_block{};
+    KBlockInfoManager* m_manager{};
+};
+
+class KScopedPageGroup {
+public:
+    explicit KScopedPageGroup(const KPageGroup* gp) : m_pg(gp) {
+        if (m_pg) {
+            m_pg->Open();
+        }
+    }
+    explicit KScopedPageGroup(const KPageGroup& gp) : KScopedPageGroup(std::addressof(gp)) {}
+    ~KScopedPageGroup() {
+        if (m_pg) {
+            m_pg->Close();
+        }
+    }
+
+    void CancelClose() {
+        m_pg = nullptr;
+    }
+
+private:
+    const KPageGroup* m_pg{};
 };
 
 } // namespace Kernel
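Note: the header now exposes begin()/end() returning a forward Iterator over the intrusive KBlockInfo list, so call sites iterate the group directly instead of through Nodes(), and KScopedPageGroup pairs Open() with Close() RAII-style. A hypothetical call site might look like the following sketch (the function, manager source, and addresses are assumptions):

// Sketch of using the new iteration and scoped open/close APIs.
void Example(Kernel::KernelCore& kernel, Kernel::KBlockInfoManager* manager) {
    Kernel::KPageGroup pg{kernel, manager};
    if (pg.AddBlock(0x10000, 4).IsError()) {
        return;
    }

    for (const auto& block : pg) { // replaces: for (... : pg.Nodes())
        (void)block.GetAddress();
        (void)block.GetNumPages();
    }

    {
        Kernel::KScopedPageGroup spg{pg}; // Open() here
        // ... use the pages while the reference is held ...
    } // Close() at scope exit
}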
src/core/hle/kernel/k_page_table.cpp
@@ -100,7 +100,7 @@ constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType a
 
 KPageTable::KPageTable(Core::System& system_)
     : m_general_lock{system_.Kernel()},
-      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {}
+      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_}, m_kernel{system_.Kernel()} {}
 
 KPageTable::~KPageTable() = default;
 
@@ -373,7 +373,7 @@ Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState sta
                                              m_memory_block_slab_manager);
 
     // Allocate and open.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
    R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
        &pg, num_pages,
        KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option)));
@@ -432,7 +432,7 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
     const size_t num_pages = size / PageSize;
 
     // Create page groups for the memory being mapped.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
     AddRegionToPages(src_address, num_pages, pg);
 
     // Reprotect the source as kernel-read/not mapped.
@@ -593,7 +593,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     const size_t size = num_pages * PageSize;
 
     // We're making a new group, not adding to an existing one.
-    R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory);
+    R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);
 
     // Begin traversal.
     Common::PageTable::TraversalContext context;
@@ -640,11 +640,10 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     R_SUCCEED();
 }
 
-bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
+bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages) {
     ASSERT(this->IsLockedByCurrentThread());
 
     const size_t size = num_pages * PageSize;
-    const auto& pg = pg_ll.Nodes();
     const auto& memory_layout = m_system.Kernel().MemoryLayout();
 
     // Empty groups are necessarily invalid.
@@ -1572,7 +1571,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
             R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
             // Allocate pages for the new memory.
-            KPageGroup pg;
+            KPageGroup pg{m_kernel, m_block_info_manager};
             R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
                 &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0));
 
@@ -1650,7 +1649,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
             KScopedPageTableUpdater updater(this);
 
             // Prepare to iterate over the memory.
-            auto pg_it = pg.Nodes().begin();
+            auto pg_it = pg.begin();
             PAddr pg_phys_addr = pg_it->GetAddress();
             size_t pg_pages = pg_it->GetNumPages();
 
@@ -1703,7 +1702,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                     // Release any remaining unmapped memory.
                     m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
                     m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages);
-                    for (++pg_it; pg_it != pg.Nodes().end(); ++pg_it) {
+                    for (++pg_it; pg_it != pg.end(); ++pg_it) {
                         m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(),
                                                                     pg_it->GetNumPages());
                         m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(),
@@ -1731,7 +1730,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                 // Check if we're at the end of the physical block.
                 if (pg_pages == 0) {
                     // Ensure there are more pages to map.
-                    ASSERT(pg_it != pg.Nodes().end());
+                    ASSERT(pg_it != pg.end());
 
                     // Advance our physical block.
                     ++pg_it;
@@ -1955,7 +1954,7 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size)
     R_TRY(dst_allocator_result);
 
     // Map the memory.
-    KPageGroup page_linked_list;
+    KPageGroup page_linked_list{m_kernel, m_block_info_manager};
     const size_t num_pages{size / PageSize};
     const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
         KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
@@ -2022,14 +2021,14 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size
                                              num_dst_allocator_blocks);
     R_TRY(dst_allocator_result);
 
-    KPageGroup src_pages;
-    KPageGroup dst_pages;
+    KPageGroup src_pages{m_kernel, m_block_info_manager};
+    KPageGroup dst_pages{m_kernel, m_block_info_manager};
     const size_t num_pages{size / PageSize};
 
     AddRegionToPages(src_address, num_pages, src_pages);
     AddRegionToPages(dst_address, num_pages, dst_pages);
 
-    R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion);
+    R_UNLESS(dst_pages.IsEquivalentTo(src_pages), ResultInvalidMemoryRegion);
 
     {
         auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
@@ -2060,7 +2059,7 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
 
     VAddr cur_addr{addr};
 
-    for (const auto& node : page_linked_list.Nodes()) {
+    for (const auto& node : page_linked_list) {
         if (const auto result{
                 Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
             result.IsError()) {
@@ -2160,7 +2159,7 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
 
     VAddr cur_addr{addr};
 
-    for (const auto& node : page_linked_list.Nodes()) {
+    for (const auto& node : page_linked_list) {
         if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
                                       OperationType::Unmap)};
             result.IsError()) {
@@ -2527,13 +2526,13 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
         R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
         // Allocate pages for the heap extension.
-        KPageGroup pg;
+        KPageGroup pg{m_kernel, m_block_info_manager};
         R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
             &pg, allocation_size / PageSize,
            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
 
        // Clear all the newly allocated pages.
-        for (const auto& it : pg.Nodes()) {
+        for (const auto& it : pg) {
            std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
                        it.GetSize());
        }
@@ -2610,7 +2609,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_
     if (is_map_only) {
         R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
     } else {
-        KPageGroup page_group;
+        KPageGroup page_group{m_kernel, m_block_info_manager};
         R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
             &page_group, needed_num_pages,
             KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
@@ -2795,7 +2794,7 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_
     ASSERT(num_pages > 0);
     ASSERT(num_pages == page_group.GetNumPages());
 
-    for (const auto& node : page_group.Nodes()) {
+    for (const auto& node : page_group) {
         const size_t size{node.GetNumPages() * PageSize};
 
         switch (operation) {
src/core/hle/kernel/k_page_table.h
@@ -107,6 +107,10 @@ public:
         return *m_page_table_impl;
     }
 
+    KBlockInfoManager* GetBlockInfoManager() {
+        return m_block_info_manager;
+    }
+
     bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
 
 protected:
@@ -488,6 +492,7 @@ private:
     std::unique_ptr<Common::PageTable> m_page_table_impl;
 
     Core::System& m_system;
+    KernelCore& m_kernel;
 };
 
 } // namespace Kernel
src/core/hle/kernel/k_shared_memory.cpp
@@ -13,10 +13,7 @@
 namespace Kernel {
 
 KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
-
-KSharedMemory::~KSharedMemory() {
-    kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemoryMax, size);
-}
+KSharedMemory::~KSharedMemory() = default;
 
 Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
                                  Svc::MemoryPermission owner_permission_,
@@ -49,7 +46,8 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     R_UNLESS(physical_address != 0, ResultOutOfMemory);
 
     //! Insert the result into our page group.
-    page_group.emplace(physical_address, num_pages);
+    page_group.emplace(kernel, &kernel.GetSystemSystemResource().GetBlockInfoManager());
+    page_group->AddBlock(physical_address, num_pages);
 
     // Commit our reservation.
     memory_reservation.Commit();
@@ -62,7 +60,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     is_initialized = true;
 
     // Clear all pages in the memory.
-    for (const auto& block : page_group->Nodes()) {
+    for (const auto& block : *page_group) {
         std::memset(device_memory_.GetPointer<void>(block.GetAddress()), 0, block.GetSize());
     }
 
@@ -71,13 +69,8 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
 
 void KSharedMemory::Finalize() {
     // Close and finalize the page group.
-    // page_group->Close();
-    // page_group->Finalize();
+    page_group->Close();
+    page_group->Finalize();
 
-    //! HACK: Manually close.
-    for (const auto& block : page_group->Nodes()) {
-        kernel.MemoryManager().Close(block.GetAddress(), block.GetNumPages());
-    }
-
     // Release the memory reservation.
     resource_limit->Release(LimitableResource::PhysicalMemoryMax, size);
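Note: the previously commented-out teardown works now that Close() and Finalize() are implemented on the new KPageGroup, which is why the manual per-block close loop marked as a HACK is deleted. Simplified, assuming an engaged page_group, the teardown order is:

// Sketch of the teardown order used by KSharedMemory::Finalize() above.
void Teardown(Kernel::KPageGroup& pg) {
    pg.Close();    // drops the reference on every block's pages via the memory manager
    pg.Finalize(); // returns the KBlockInfo nodes to the KBlockInfoManager
}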
src/core/hle/kernel/memory_types.h
@@ -14,4 +14,7 @@ constexpr std::size_t PageSize{1 << PageBits};
 
 using Page = std::array<u8, PageSize>;
 
+using KPhysicalAddress = PAddr;
+using KProcessAddress = VAddr;
+
 } // namespace Kernel
src/core/hle/kernel/svc.cpp
@@ -1485,7 +1485,7 @@ static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle p
              ResultInvalidMemoryRegion);
 
     // Create a new page group.
-    KPageGroup pg;
+    KPageGroup pg{system.Kernel(), dst_pt.GetBlockInfoManager()};
     R_TRY(src_pt.MakeAndOpenPageGroup(
         std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
         KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,